prompt (stringlengths 94–42.6k) | completion (stringlengths 6–120) | api (stringlengths 14–68) |
---|---|---|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from io import BytesIO
import numpy as np
from helpers import MLP, graph_mode
import megengine.functional as F
from megengine import load, save
from megengine.core import TensorDict, tensor
from megengine.jit import trace
from megengine.optimizer import SGD, Adam
from megengine.test import assertTensorClose
def get_input():
batch_size = 2
input_dim = 28
data_shape = (batch_size, input_dim)
label_shape = (batch_size,)
data = tensor()
label = tensor(dtype=np.int32)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
return data, data_shape, label, label_shape
def test_sgd_simple():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, weight_decay=0.1)
for idx in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
if idx % 2:
opt.zero_grad()
else:
mlp.zero_grad()
opt.backward(loss)
grads = TensorDict()
orig_params = TensorDict()
for param in mlp.parameters():
grad = F.grad(loss, param, use_virtual_grad=False)
assertTensorClose(grad.numpy(), param.grad.numpy())
grads[param] = np.copy(grad.numpy())
orig_params[param] = np.copy(param.numpy())
opt.step()
for param in mlp.parameters():
assertTensorClose(
param.numpy(), orig_params[param] * 0.999 - grads[param] * 0.01
)
def test_sgd_momentum():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, momentum=0.9)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
for _ in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
orig_params = TensorDict()
grads = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
grads[param] = np.copy(param.grad.numpy())
opt.step()
for param in mlp.parameters():
slot = slots[param]
orig_param = orig_params[param]
slot *= 0.9
slot -= param.grad.numpy() * 0.01
assertTensorClose(param.numpy(), orig_param + slot)
# TODO: put opt.step() inside trace
def test_sgd_momentum_static():
_, data_shape, _, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, momentum=0.9)
@trace
def f(data, label):
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
for _ in range(3):
f(
np.random.random(data_shape).astype(np.float32),
np.random.randint(0, 10, label_shape).astype(np.int32),
)
orig_params = TensorDict()
grads = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
grads[param] = np.copy(param.grad.numpy())
opt.step()
for param in mlp.parameters():
slot = slots[param]
orig_param = orig_params[param]
slot *= 0.9
slot -= param.grad.numpy() * 0.01
assertTensorClose(param.numpy(), orig_param + slot)
def test_update_lr():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01)
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
opt.step()
for group in opt.param_groups:
group["lr"] += 0.02
for _ in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
for param in mlp.parameters():
grad = F.grad(loss, param, use_virtual_grad=False)
assertTensorClose(grad.numpy(), param.grad.numpy())
orig_params = []
for param in mlp.parameters():
orig_params.append(np.copy(param.numpy()))
opt.step()
for param, orig_param in zip(mlp.parameters(), orig_params):
assertTensorClose(param.numpy(), orig_param - param.grad.numpy() * 0.03)
def test_adam():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
beta0 = 0.8
beta1 = 0.9
eps = 1e-4
opt = Adam(mlp.parameters(), lr=0.01, betas=(beta0, beta1), eps=eps)
m_slots = | TensorDict() | megengine.core.TensorDict |
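For reference, test_sgd_simple above asserts that one SGD step with lr=0.01 and weight_decay=0.1 leaves each parameter at orig_param * 0.999 - grad * 0.01. Below is a minimal NumPy sketch of that expected update; the helper name sgd_step_reference is ours and not part of the test file.

```python
import numpy as np

def sgd_step_reference(param, grad, lr=0.01, weight_decay=0.1):
    """Expected result of one plain SGD step with L2 weight decay,
    matching the assertion in test_sgd_simple:
    param * (1 - lr * weight_decay) - lr * grad."""
    return param * (1.0 - lr * weight_decay) - lr * grad

# With lr=0.01 and weight_decay=0.1 the decay factor is 0.999, as in the test.
p = np.ones((2, 2), dtype=np.float32)
g = np.full((2, 2), 0.5, dtype=np.float32)
print(sgd_step_reference(p, g))  # 0.999 - 0.005 = 0.994 everywhere
```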
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from io import BytesIO
import numpy as np
from helpers import MLP, graph_mode
import megengine.functional as F
from megengine import load, save
from megengine.core import TensorDict, tensor
from megengine.jit import trace
from megengine.optimizer import SGD, Adam
from megengine.test import assertTensorClose
def get_input():
batch_size = 2
input_dim = 28
data_shape = (batch_size, input_dim)
label_shape = (batch_size,)
data = tensor()
label = tensor(dtype=np.int32)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
return data, data_shape, label, label_shape
def test_sgd_simple():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, weight_decay=0.1)
for idx in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
if idx % 2:
opt.zero_grad()
else:
mlp.zero_grad()
opt.backward(loss)
grads = TensorDict()
orig_params = TensorDict()
for param in mlp.parameters():
grad = F.grad(loss, param, use_virtual_grad=False)
assertTensorClose(grad.numpy(), param.grad.numpy())
grads[param] = np.copy(grad.numpy())
orig_params[param] = np.copy(param.numpy())
opt.step()
for param in mlp.parameters():
assertTensorClose(
param.numpy(), orig_params[param] * 0.999 - grads[param] * 0.01
)
def test_sgd_momentum():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, momentum=0.9)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
for _ in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
orig_params = TensorDict()
grads = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
grads[param] = np.copy(param.grad.numpy())
opt.step()
for param in mlp.parameters():
slot = slots[param]
orig_param = orig_params[param]
slot *= 0.9
slot -= param.grad.numpy() * 0.01
assertTensorClose(param.numpy(), orig_param + slot)
# TODO: put opt.step() inside trace
def test_sgd_momentum_static():
_, data_shape, _, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, momentum=0.9)
@trace
def f(data, label):
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
for _ in range(3):
f(
np.random.random(data_shape).astype(np.float32),
np.random.randint(0, 10, label_shape).astype(np.int32),
)
orig_params = TensorDict()
grads = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
grads[param] = np.copy(param.grad.numpy())
opt.step()
for param in mlp.parameters():
slot = slots[param]
orig_param = orig_params[param]
slot *= 0.9
slot -= param.grad.numpy() * 0.01
assertTensorClose(param.numpy(), orig_param + slot)
def test_update_lr():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01)
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
opt.step()
for group in opt.param_groups:
group["lr"] += 0.02
for _ in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
for param in mlp.parameters():
grad = F.grad(loss, param, use_virtual_grad=False)
assertTensorClose(grad.numpy(), param.grad.numpy())
orig_params = []
for param in mlp.parameters():
orig_params.append(np.copy(param.numpy()))
opt.step()
for param, orig_param in zip(mlp.parameters(), orig_params):
assertTensorClose(param.numpy(), orig_param - param.grad.numpy() * 0.03)
def test_adam():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
beta0 = 0.8
beta1 = 0.9
eps = 1e-4
opt = Adam(mlp.parameters(), lr=0.01, betas=(beta0, beta1), eps=eps)
m_slots = TensorDict()
v_slots = | TensorDict() | megengine.core.TensorDict |
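test_sgd_momentum above tracks a velocity slot per parameter and asserts that after each step slot = 0.9 * slot - 0.01 * grad and param = orig_param + slot. Here is a minimal NumPy sketch of that update rule; sgd_momentum_reference is a name we introduce for illustration only.

```python
import numpy as np

def sgd_momentum_reference(param, grad, slot, lr=0.01, momentum=0.9):
    """Expected SGD-with-momentum step as asserted in test_sgd_momentum:
    the velocity slot accumulates -lr * grad and is added to the parameter.
    Returns (new_param, new_slot)."""
    slot = momentum * slot - lr * grad
    return param + slot, slot

p = np.zeros((3,), dtype=np.float32)
g = np.ones((3,), dtype=np.float32)
s = np.zeros((3,), dtype=np.float32)
for _ in range(2):
    p, s = sgd_momentum_reference(p, g, s)
print(p, s)  # after two steps: slot = -0.019, param = -0.029
```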
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from io import BytesIO
import numpy as np
from helpers import MLP, graph_mode
import megengine.functional as F
from megengine import load, save
from megengine.core import TensorDict, tensor
from megengine.jit import trace
from megengine.optimizer import SGD, Adam
from megengine.test import assertTensorClose
def get_input():
batch_size = 2
input_dim = 28
data_shape = (batch_size, input_dim)
label_shape = (batch_size,)
data = tensor()
label = tensor(dtype=np.int32)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
return data, data_shape, label, label_shape
def test_sgd_simple():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, weight_decay=0.1)
for idx in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
if idx % 2:
opt.zero_grad()
else:
mlp.zero_grad()
opt.backward(loss)
grads = TensorDict()
orig_params = TensorDict()
for param in mlp.parameters():
grad = F.grad(loss, param, use_virtual_grad=False)
assertTensorClose(grad.numpy(), param.grad.numpy())
grads[param] = np.copy(grad.numpy())
orig_params[param] = np.copy(param.numpy())
opt.step()
for param in mlp.parameters():
assertTensorClose(
param.numpy(), orig_params[param] * 0.999 - grads[param] * 0.01
)
def test_sgd_momentum():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, momentum=0.9)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
for _ in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
orig_params = TensorDict()
grads = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
grads[param] = np.copy(param.grad.numpy())
opt.step()
for param in mlp.parameters():
slot = slots[param]
orig_param = orig_params[param]
slot *= 0.9
slot -= param.grad.numpy() * 0.01
assertTensorClose(param.numpy(), orig_param + slot)
# TODO: put opt.step() inside trace
def test_sgd_momentum_static():
_, data_shape, _, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, momentum=0.9)
@trace
def f(data, label):
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
for _ in range(3):
f(
np.random.random(data_shape).astype(np.float32),
np.random.randint(0, 10, label_shape).astype(np.int32),
)
orig_params = TensorDict()
grads = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
grads[param] = np.copy(param.grad.numpy())
opt.step()
for param in mlp.parameters():
slot = slots[param]
orig_param = orig_params[param]
slot *= 0.9
slot -= param.grad.numpy() * 0.01
assertTensorClose(param.numpy(), orig_param + slot)
def test_update_lr():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01)
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
opt.step()
for group in opt.param_groups:
group["lr"] += 0.02
for _ in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
for param in mlp.parameters():
grad = F.grad(loss, param, use_virtual_grad=False)
assertTensorClose(grad.numpy(), param.grad.numpy())
orig_params = []
for param in mlp.parameters():
orig_params.append(np.copy(param.numpy()))
opt.step()
for param, orig_param in zip(mlp.parameters(), orig_params):
assertTensorClose(param.numpy(), orig_param - param.grad.numpy() * 0.03)
def test_adam():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
beta0 = 0.8
beta1 = 0.9
eps = 1e-4
opt = Adam(mlp.parameters(), lr=0.01, betas=(beta0, beta1), eps=eps)
m_slots = TensorDict()
v_slots = TensorDict()
for param in mlp.parameters():
m_slots[param] = np.zeros(param.shape).astype(np.float32)
v_slots[param] = np.zeros(param.shape).astype(np.float32)
step_size = 0
def check_value():
for param in mlp.parameters():
grad = param.grad.numpy()
orig_param = orig_params[param]
m = m_slots[param]
v = v_slots[param]
m *= beta0
m += (1 - beta0) * grad
v *= beta1
v += (1 - beta1) * grad * grad
update = (m / (1 - beta0 ** step_size)) / (
np.sqrt(v / (1 - beta1 ** step_size)) + eps
)
assertTensorClose(param.numpy(), orig_param - 0.01 * update)
# eager
for _ in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
grads = opt.backward(loss)
orig_params = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
opt.step()
step_size += 1
check_value()
# static
@trace
def f(data, label):
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.backward(loss)
for _ in range(3):
opt.zero_grad()
orig_params = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
f(
np.random.random(data_shape).astype(np.float32),
np.random.randint(0, 10, label_shape).astype(np.int32),
)
opt.step()
step_size += 1
check_value()
@graph_mode("eager", "static")
def test_optimizer_serialization():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, momentum=0.9)
slots = | TensorDict() | megengine.core.TensorDict |
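The check_value helper in test_adam above spells out the Adam update it expects: bias-corrected first and second moments, with eps added outside the square root. A minimal NumPy sketch of that same formula follows; adam_step_reference is our own name, and the defaults mirror the test's lr=0.01, betas=(0.8, 0.9), eps=1e-4.

```python
import numpy as np

def adam_step_reference(param, grad, m, v, t, lr=0.01,
                        beta0=0.8, beta1=0.9, eps=1e-4):
    """Expected Adam update as written in check_value: exponential moving
    averages of grad and grad**2, bias-corrected by (1 - beta**t), with
    eps added after the sqrt. t is the 1-based step count.
    Returns (new_param, m, v)."""
    m = beta0 * m + (1.0 - beta0) * grad
    v = beta1 * v + (1.0 - beta1) * grad * grad
    update = (m / (1.0 - beta0 ** t)) / (np.sqrt(v / (1.0 - beta1 ** t)) + eps)
    return param - lr * update, m, v

p = np.ones((2,), dtype=np.float32)
g = np.full((2,), 0.1, dtype=np.float32)
m = np.zeros_like(p)
v = np.zeros_like(p)
p, m, v = adam_step_reference(p, g, m, v, t=1)
```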
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from io import BytesIO
import numpy as np
from helpers import MLP, graph_mode
import megengine.functional as F
from megengine import load, save
from megengine.core import TensorDict, tensor
from megengine.jit import trace
from megengine.optimizer import SGD, Adam
from megengine.test import assertTensorClose
def get_input():
batch_size = 2
input_dim = 28
data_shape = (batch_size, input_dim)
label_shape = (batch_size,)
data = tensor()
label = tensor(dtype=np.int32)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
return data, data_shape, label, label_shape
def test_sgd_simple():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, weight_decay=0.1)
for idx in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
if idx % 2:
opt.zero_grad()
else:
mlp.zero_grad()
opt.backward(loss)
grads = | TensorDict() | megengine.core.TensorDict |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from io import BytesIO
import numpy as np
from helpers import MLP, graph_mode
import megengine.functional as F
from megengine import load, save
from megengine.core import TensorDict, tensor
from megengine.jit import trace
from megengine.optimizer import SGD, Adam
from megengine.test import assertTensorClose
def get_input():
batch_size = 2
input_dim = 28
data_shape = (batch_size, input_dim)
label_shape = (batch_size,)
data = tensor()
label = tensor(dtype=np.int32)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
return data, data_shape, label, label_shape
def test_sgd_simple():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, weight_decay=0.1)
for idx in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
if idx % 2:
opt.zero_grad()
else:
mlp.zero_grad()
opt.backward(loss)
grads = TensorDict()
orig_params = | TensorDict() | megengine.core.TensorDict |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from io import BytesIO
import numpy as np
from helpers import MLP, graph_mode
import megengine.functional as F
from megengine import load, save
from megengine.core import TensorDict, tensor
from megengine.jit import trace
from megengine.optimizer import SGD, Adam
from megengine.test import assertTensorClose
def get_input():
batch_size = 2
input_dim = 28
data_shape = (batch_size, input_dim)
label_shape = (batch_size,)
data = tensor()
label = tensor(dtype=np.int32)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
return data, data_shape, label, label_shape
def test_sgd_simple():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, weight_decay=0.1)
for idx in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
if idx % 2:
opt.zero_grad()
else:
mlp.zero_grad()
opt.backward(loss)
grads = TensorDict()
orig_params = TensorDict()
for param in mlp.parameters():
grad = F.grad(loss, param, use_virtual_grad=False)
assertTensorClose(grad.numpy(), param.grad.numpy())
grads[param] = np.copy(grad.numpy())
orig_params[param] = np.copy(param.numpy())
opt.step()
for param in mlp.parameters():
assertTensorClose(
param.numpy(), orig_params[param] * 0.999 - grads[param] * 0.01
)
def test_sgd_momentum():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, momentum=0.9)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
for _ in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
orig_params = | TensorDict() | megengine.core.TensorDict |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from io import BytesIO
import numpy as np
from helpers import MLP, graph_mode
import megengine.functional as F
from megengine import load, save
from megengine.core import TensorDict, tensor
from megengine.jit import trace
from megengine.optimizer import SGD, Adam
from megengine.test import assertTensorClose
def get_input():
batch_size = 2
input_dim = 28
data_shape = (batch_size, input_dim)
label_shape = (batch_size,)
data = tensor()
label = tensor(dtype=np.int32)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
return data, data_shape, label, label_shape
def test_sgd_simple():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, weight_decay=0.1)
for idx in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
if idx % 2:
opt.zero_grad()
else:
mlp.zero_grad()
opt.backward(loss)
grads = TensorDict()
orig_params = TensorDict()
for param in mlp.parameters():
grad = F.grad(loss, param, use_virtual_grad=False)
assertTensorClose(grad.numpy(), param.grad.numpy())
grads[param] = np.copy(grad.numpy())
orig_params[param] = np.copy(param.numpy())
opt.step()
for param in mlp.parameters():
assertTensorClose(
param.numpy(), orig_params[param] * 0.999 - grads[param] * 0.01
)
def test_sgd_momentum():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, momentum=0.9)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
for _ in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
orig_params = TensorDict()
grads = | TensorDict() | megengine.core.TensorDict |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from io import BytesIO
import numpy as np
from helpers import MLP, graph_mode
import megengine.functional as F
from megengine import load, save
from megengine.core import TensorDict, tensor
from megengine.jit import trace
from megengine.optimizer import SGD, Adam
from megengine.test import assertTensorClose
def get_input():
batch_size = 2
input_dim = 28
data_shape = (batch_size, input_dim)
label_shape = (batch_size,)
data = tensor()
label = tensor(dtype=np.int32)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
return data, data_shape, label, label_shape
def test_sgd_simple():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, weight_decay=0.1)
for idx in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
if idx % 2:
opt.zero_grad()
else:
mlp.zero_grad()
opt.backward(loss)
grads = TensorDict()
orig_params = TensorDict()
for param in mlp.parameters():
grad = F.grad(loss, param, use_virtual_grad=False)
assertTensorClose(grad.numpy(), param.grad.numpy())
grads[param] = np.copy(grad.numpy())
orig_params[param] = np.copy(param.numpy())
opt.step()
for param in mlp.parameters():
assertTensorClose(
param.numpy(), orig_params[param] * 0.999 - grads[param] * 0.01
)
def test_sgd_momentum():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, momentum=0.9)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
for _ in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
orig_params = TensorDict()
grads = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
grads[param] = np.copy(param.grad.numpy())
opt.step()
for param in mlp.parameters():
slot = slots[param]
orig_param = orig_params[param]
slot *= 0.9
slot -= param.grad.numpy() * 0.01
assertTensorClose(param.numpy(), orig_param + slot)
# TODO: put opt.step() inside trace
def test_sgd_momentum_static():
_, data_shape, _, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, momentum=0.9)
@trace
def f(data, label):
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
for _ in range(3):
f(
np.random.random(data_shape).astype(np.float32),
np.random.randint(0, 10, label_shape).astype(np.int32),
)
orig_params = | TensorDict() | megengine.core.TensorDict |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from io import BytesIO
import numpy as np
from helpers import MLP, graph_mode
import megengine.functional as F
from megengine import load, save
from megengine.core import TensorDict, tensor
from megengine.jit import trace
from megengine.optimizer import SGD, Adam
from megengine.test import assertTensorClose
def get_input():
batch_size = 2
input_dim = 28
data_shape = (batch_size, input_dim)
label_shape = (batch_size,)
data = tensor()
label = tensor(dtype=np.int32)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
return data, data_shape, label, label_shape
def test_sgd_simple():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, weight_decay=0.1)
for idx in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
if idx % 2:
opt.zero_grad()
else:
mlp.zero_grad()
opt.backward(loss)
grads = TensorDict()
orig_params = TensorDict()
for param in mlp.parameters():
grad = F.grad(loss, param, use_virtual_grad=False)
assertTensorClose(grad.numpy(), param.grad.numpy())
grads[param] = np.copy(grad.numpy())
orig_params[param] = np.copy(param.numpy())
opt.step()
for param in mlp.parameters():
assertTensorClose(
param.numpy(), orig_params[param] * 0.999 - grads[param] * 0.01
)
def test_sgd_momentum():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, momentum=0.9)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
for _ in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
orig_params = TensorDict()
grads = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
grads[param] = np.copy(param.grad.numpy())
opt.step()
for param in mlp.parameters():
slot = slots[param]
orig_param = orig_params[param]
slot *= 0.9
slot -= param.grad.numpy() * 0.01
assertTensorClose(param.numpy(), orig_param + slot)
# TODO: put opt.step() inside trace
def test_sgd_momentum_static():
_, data_shape, _, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, momentum=0.9)
@trace
def f(data, label):
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
for _ in range(3):
f(
np.random.random(data_shape).astype(np.float32),
np.random.randint(0, 10, label_shape).astype(np.int32),
)
orig_params = TensorDict()
grads = | TensorDict() | megengine.core.TensorDict |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from io import BytesIO
import numpy as np
from helpers import MLP, graph_mode
import megengine.functional as F
from megengine import load, save
from megengine.core import TensorDict, tensor
from megengine.jit import trace
from megengine.optimizer import SGD, Adam
from megengine.test import assertTensorClose
def get_input():
batch_size = 2
input_dim = 28
data_shape = (batch_size, input_dim)
label_shape = (batch_size,)
data = tensor()
label = tensor(dtype=np.int32)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
return data, data_shape, label, label_shape
def test_sgd_simple():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, weight_decay=0.1)
for idx in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
if idx % 2:
opt.zero_grad()
else:
mlp.zero_grad()
opt.backward(loss)
grads = TensorDict()
orig_params = TensorDict()
for param in mlp.parameters():
grad = F.grad(loss, param, use_virtual_grad=False)
assertTensorClose(grad.numpy(), param.grad.numpy())
grads[param] = np.copy(grad.numpy())
orig_params[param] = np.copy(param.numpy())
opt.step()
for param in mlp.parameters():
assertTensorClose(
param.numpy(), orig_params[param] * 0.999 - grads[param] * 0.01
)
def test_sgd_momentum():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, momentum=0.9)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
for _ in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
orig_params = TensorDict()
grads = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
grads[param] = np.copy(param.grad.numpy())
opt.step()
for param in mlp.parameters():
slot = slots[param]
orig_param = orig_params[param]
slot *= 0.9
slot -= param.grad.numpy() * 0.01
assertTensorClose(param.numpy(), orig_param + slot)
# TODO: put opt.step() inside trace
def test_sgd_momentum_static():
_, data_shape, _, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, momentum=0.9)
@trace
def f(data, label):
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
for _ in range(3):
f(
np.random.random(data_shape).astype(np.float32),
np.random.randint(0, 10, label_shape).astype(np.int32),
)
orig_params = TensorDict()
grads = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
grads[param] = np.copy(param.grad.numpy())
opt.step()
for param in mlp.parameters():
slot = slots[param]
orig_param = orig_params[param]
slot *= 0.9
slot -= param.grad.numpy() * 0.01
assertTensorClose(param.numpy(), orig_param + slot)
def test_update_lr():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01)
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
opt.step()
for group in opt.param_groups:
group["lr"] += 0.02
for _ in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
for param in mlp.parameters():
grad = F.grad(loss, param, use_virtual_grad=False)
assertTensorClose(grad.numpy(), param.grad.numpy())
orig_params = []
for param in mlp.parameters():
orig_params.append(np.copy(param.numpy()))
opt.step()
for param, orig_param in zip(mlp.parameters(), orig_params):
assertTensorClose(param.numpy(), orig_param - param.grad.numpy() * 0.03)
def test_adam():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
beta0 = 0.8
beta1 = 0.9
eps = 1e-4
opt = Adam(mlp.parameters(), lr=0.01, betas=(beta0, beta1), eps=eps)
m_slots = TensorDict()
v_slots = TensorDict()
for param in mlp.parameters():
m_slots[param] = np.zeros(param.shape).astype(np.float32)
v_slots[param] = np.zeros(param.shape).astype(np.float32)
step_size = 0
def check_value():
for param in mlp.parameters():
grad = param.grad.numpy()
orig_param = orig_params[param]
m = m_slots[param]
v = v_slots[param]
m *= beta0
m += (1 - beta0) * grad
v *= beta1
v += (1 - beta1) * grad * grad
update = (m / (1 - beta0 ** step_size)) / (
np.sqrt(v / (1 - beta1 ** step_size)) + eps
)
assertTensorClose(param.numpy(), orig_param - 0.01 * update)
# eager
for _ in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
grads = opt.backward(loss)
orig_params = | TensorDict() | megengine.core.TensorDict |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from io import BytesIO
import numpy as np
from helpers import MLP, graph_mode
import megengine.functional as F
from megengine import load, save
from megengine.core import TensorDict, tensor
from megengine.jit import trace
from megengine.optimizer import SGD, Adam
from megengine.test import assertTensorClose
def get_input():
batch_size = 2
input_dim = 28
data_shape = (batch_size, input_dim)
label_shape = (batch_size,)
data = tensor()
label = tensor(dtype=np.int32)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
return data, data_shape, label, label_shape
def test_sgd_simple():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, weight_decay=0.1)
for idx in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
if idx % 2:
opt.zero_grad()
else:
mlp.zero_grad()
opt.backward(loss)
grads = TensorDict()
orig_params = TensorDict()
for param in mlp.parameters():
grad = F.grad(loss, param, use_virtual_grad=False)
assertTensorClose(grad.numpy(), param.grad.numpy())
grads[param] = np.copy(grad.numpy())
orig_params[param] = np.copy(param.numpy())
opt.step()
for param in mlp.parameters():
assertTensorClose(
param.numpy(), orig_params[param] * 0.999 - grads[param] * 0.01
)
def test_sgd_momentum():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, momentum=0.9)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
for _ in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
orig_params = TensorDict()
grads = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
grads[param] = np.copy(param.grad.numpy())
opt.step()
for param in mlp.parameters():
slot = slots[param]
orig_param = orig_params[param]
slot *= 0.9
slot -= param.grad.numpy() * 0.01
assertTensorClose(param.numpy(), orig_param + slot)
# TODO: put opt.step() inside trace
def test_sgd_momentum_static():
_, data_shape, _, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, momentum=0.9)
@trace
def f(data, label):
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
for _ in range(3):
f(
np.random.random(data_shape).astype(np.float32),
np.random.randint(0, 10, label_shape).astype(np.int32),
)
orig_params = TensorDict()
grads = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
grads[param] = np.copy(param.grad.numpy())
opt.step()
for param in mlp.parameters():
slot = slots[param]
orig_param = orig_params[param]
slot *= 0.9
slot -= param.grad.numpy() * 0.01
assertTensorClose(param.numpy(), orig_param + slot)
def test_update_lr():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01)
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
opt.step()
for group in opt.param_groups:
group["lr"] += 0.02
for _ in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
for param in mlp.parameters():
grad = F.grad(loss, param, use_virtual_grad=False)
assertTensorClose(grad.numpy(), param.grad.numpy())
orig_params = []
for param in mlp.parameters():
orig_params.append(np.copy(param.numpy()))
opt.step()
for param, orig_param in zip(mlp.parameters(), orig_params):
assertTensorClose(param.numpy(), orig_param - param.grad.numpy() * 0.03)
def test_adam():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
beta0 = 0.8
beta1 = 0.9
eps = 1e-4
opt = Adam(mlp.parameters(), lr=0.01, betas=(beta0, beta1), eps=eps)
m_slots = TensorDict()
v_slots = TensorDict()
for param in mlp.parameters():
m_slots[param] = np.zeros(param.shape).astype(np.float32)
v_slots[param] = np.zeros(param.shape).astype(np.float32)
step_size = 0
def check_value():
for param in mlp.parameters():
grad = param.grad.numpy()
orig_param = orig_params[param]
m = m_slots[param]
v = v_slots[param]
m *= beta0
m += (1 - beta0) * grad
v *= beta1
v += (1 - beta1) * grad * grad
update = (m / (1 - beta0 ** step_size)) / (
np.sqrt(v / (1 - beta1 ** step_size)) + eps
)
assertTensorClose(param.numpy(), orig_param - 0.01 * update)
# eager
for _ in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
grads = opt.backward(loss)
orig_params = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
opt.step()
step_size += 1
check_value()
# static
@trace
def f(data, label):
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.backward(loss)
for _ in range(3):
opt.zero_grad()
orig_params = | TensorDict() | megengine.core.TensorDict |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from io import BytesIO
import numpy as np
from helpers import MLP, graph_mode
import megengine.functional as F
from megengine import load, save
from megengine.core import TensorDict, tensor
from megengine.jit import trace
from megengine.optimizer import SGD, Adam
from megengine.test import assertTensorClose
def get_input():
batch_size = 2
input_dim = 28
data_shape = (batch_size, input_dim)
label_shape = (batch_size,)
data = tensor()
label = tensor(dtype=np.int32)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
return data, data_shape, label, label_shape
def test_sgd_simple():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, weight_decay=0.1)
for idx in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
if idx % 2:
opt.zero_grad()
else:
mlp.zero_grad()
opt.backward(loss)
grads = TensorDict()
orig_params = TensorDict()
for param in mlp.parameters():
grad = F.grad(loss, param, use_virtual_grad=False)
assertTensorClose(grad.numpy(), param.grad.numpy())
grads[param] = np.copy(grad.numpy())
orig_params[param] = np.copy(param.numpy())
opt.step()
for param in mlp.parameters():
assertTensorClose(
param.numpy(), orig_params[param] * 0.999 - grads[param] * 0.01
)
def test_sgd_momentum():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, momentum=0.9)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
for _ in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
orig_params = TensorDict()
grads = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
grads[param] = np.copy(param.grad.numpy())
opt.step()
for param in mlp.parameters():
slot = slots[param]
orig_param = orig_params[param]
slot *= 0.9
slot -= param.grad.numpy() * 0.01
assertTensorClose(param.numpy(), orig_param + slot)
# TODO: put opt.step() inside trace
def test_sgd_momentum_static():
_, data_shape, _, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, momentum=0.9)
@trace
def f(data, label):
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
for _ in range(3):
f(
np.random.random(data_shape).astype(np.float32),
np.random.randint(0, 10, label_shape).astype(np.int32),
)
orig_params = TensorDict()
grads = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
grads[param] = np.copy(param.grad.numpy())
opt.step()
for param in mlp.parameters():
slot = slots[param]
orig_param = orig_params[param]
slot *= 0.9
slot -= param.grad.numpy() * 0.01
assertTensorClose(param.numpy(), orig_param + slot)
def test_update_lr():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01)
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
opt.step()
for group in opt.param_groups:
group["lr"] += 0.02
for _ in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
for param in mlp.parameters():
grad = F.grad(loss, param, use_virtual_grad=False)
assertTensorClose(grad.numpy(), param.grad.numpy())
orig_params = []
for param in mlp.parameters():
orig_params.append(np.copy(param.numpy()))
opt.step()
for param, orig_param in zip(mlp.parameters(), orig_params):
assertTensorClose(param.numpy(), orig_param - param.grad.numpy() * 0.03)
def test_adam():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
beta0 = 0.8
beta1 = 0.9
eps = 1e-4
opt = Adam(mlp.parameters(), lr=0.01, betas=(beta0, beta1), eps=eps)
m_slots = TensorDict()
v_slots = TensorDict()
for param in mlp.parameters():
m_slots[param] = np.zeros(param.shape).astype(np.float32)
v_slots[param] = np.zeros(param.shape).astype(np.float32)
step_size = 0
def check_value():
for param in mlp.parameters():
grad = param.grad.numpy()
orig_param = orig_params[param]
m = m_slots[param]
v = v_slots[param]
m *= beta0
m += (1 - beta0) * grad
v *= beta1
v += (1 - beta1) * grad * grad
update = (m / (1 - beta0 ** step_size)) / (
np.sqrt(v / (1 - beta1 ** step_size)) + eps
)
assertTensorClose(param.numpy(), orig_param - 0.01 * update)
# eager
for _ in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
grads = opt.backward(loss)
orig_params = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
opt.step()
step_size += 1
check_value()
# static
@trace
def f(data, label):
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.backward(loss)
for _ in range(3):
opt.zero_grad()
orig_params = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
f(
np.random.random(data_shape).astype(np.float32),
np.random.randint(0, 10, label_shape).astype(np.int32),
)
opt.step()
step_size += 1
check_value()
@graph_mode("eager", "static")
def test_optimizer_serialization():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, momentum=0.9)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
opt.step()
for param in mlp.parameters():
slot = slots[param]
slot *= 0.9
slot -= param.grad.numpy() * 0.01
with BytesIO() as fout:
save(opt.state_dict(), fout)
fout.seek(0)
state_dict = | load(fout) | megengine.load |
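test_optimizer_serialization above round-trips the optimizer state through an in-memory buffer with megengine.save / megengine.load before restoring it into a second SGD instance created with different hyperparameters. A minimal sketch of that pattern, reusing the test's own MLP helper and SGD settings, is shown below; it is an illustration of the round-trip only, not a replacement for the test.

```python
from io import BytesIO

from helpers import MLP
from megengine import load, save
from megengine.optimizer import SGD

mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, momentum=0.9)

# Serialize the optimizer state to an in-memory buffer and read it back,
# exactly as the test does with BytesIO instead of a file on disk.
with BytesIO() as fout:
    save(opt.state_dict(), fout)
    fout.seek(0)
    restored = load(fout)

# Restore the saved state into a second optimizer that was constructed
# with different lr/momentum, mirroring the test's opt1.
opt1 = SGD(mlp.parameters(), lr=0.02, momentum=0.8)
opt1.load_state_dict(restored)
```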
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from io import BytesIO
import numpy as np
from helpers import MLP, graph_mode
import megengine.functional as F
from megengine import load, save
from megengine.core import TensorDict, tensor
from megengine.jit import trace
from megengine.optimizer import SGD, Adam
from megengine.test import assertTensorClose
def get_input():
batch_size = 2
input_dim = 28
data_shape = (batch_size, input_dim)
label_shape = (batch_size,)
data = tensor()
label = tensor(dtype=np.int32)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
return data, data_shape, label, label_shape
def test_sgd_simple():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, weight_decay=0.1)
for idx in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
if idx % 2:
opt.zero_grad()
else:
mlp.zero_grad()
opt.backward(loss)
grads = TensorDict()
orig_params = TensorDict()
for param in mlp.parameters():
grad = F.grad(loss, param, use_virtual_grad=False)
assertTensorClose(grad.numpy(), param.grad.numpy())
grads[param] = np.copy(grad.numpy())
orig_params[param] = np.copy(param.numpy())
opt.step()
for param in mlp.parameters():
assertTensorClose(
param.numpy(), orig_params[param] * 0.999 - grads[param] * 0.01
)
def test_sgd_momentum():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, momentum=0.9)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
for _ in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
orig_params = TensorDict()
grads = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
grads[param] = np.copy(param.grad.numpy())
opt.step()
for param in mlp.parameters():
slot = slots[param]
orig_param = orig_params[param]
slot *= 0.9
slot -= param.grad.numpy() * 0.01
assertTensorClose(param.numpy(), orig_param + slot)
# TODO: put opt.step() inside trace
def test_sgd_momentum_static():
_, data_shape, _, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, momentum=0.9)
@trace
def f(data, label):
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
for _ in range(3):
f(
np.random.random(data_shape).astype(np.float32),
np.random.randint(0, 10, label_shape).astype(np.int32),
)
orig_params = TensorDict()
grads = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
grads[param] = np.copy(param.grad.numpy())
opt.step()
for param in mlp.parameters():
slot = slots[param]
orig_param = orig_params[param]
slot *= 0.9
slot -= param.grad.numpy() * 0.01
assertTensorClose(param.numpy(), orig_param + slot)
def test_update_lr():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01)
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
opt.step()
for group in opt.param_groups:
group["lr"] += 0.02
for _ in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
for param in mlp.parameters():
grad = F.grad(loss, param, use_virtual_grad=False)
assertTensorClose(grad.numpy(), param.grad.numpy())
orig_params = []
for param in mlp.parameters():
orig_params.append(np.copy(param.numpy()))
opt.step()
for param, orig_param in zip(mlp.parameters(), orig_params):
assertTensorClose(param.numpy(), orig_param - param.grad.numpy() * 0.03)
def test_adam():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
beta0 = 0.8
beta1 = 0.9
eps = 1e-4
opt = Adam(mlp.parameters(), lr=0.01, betas=(beta0, beta1), eps=eps)
m_slots = TensorDict()
v_slots = TensorDict()
for param in mlp.parameters():
m_slots[param] = np.zeros(param.shape).astype(np.float32)
v_slots[param] = np.zeros(param.shape).astype(np.float32)
step_size = 0
def check_value():
for param in mlp.parameters():
grad = param.grad.numpy()
orig_param = orig_params[param]
m = m_slots[param]
v = v_slots[param]
m *= beta0
m += (1 - beta0) * grad
v *= beta1
v += (1 - beta1) * grad * grad
update = (m / (1 - beta0 ** step_size)) / (
np.sqrt(v / (1 - beta1 ** step_size)) + eps
)
assertTensorClose(param.numpy(), orig_param - 0.01 * update)
# eager
for _ in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
grads = opt.backward(loss)
orig_params = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
opt.step()
step_size += 1
check_value()
# static
@trace
def f(data, label):
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.backward(loss)
for _ in range(3):
opt.zero_grad()
orig_params = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
f(
np.random.random(data_shape).astype(np.float32),
np.random.randint(0, 10, label_shape).astype(np.int32),
)
opt.step()
step_size += 1
check_value()
@graph_mode("eager", "static")
def test_optimizer_serialization():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, momentum=0.9)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
opt.step()
for param in mlp.parameters():
slot = slots[param]
slot *= 0.9
slot -= param.grad.numpy() * 0.01
with BytesIO() as fout:
save(opt.state_dict(), fout)
fout.seek(0)
state_dict = load(fout)
opt1 = SGD(mlp.parameters(), lr=0.02, momentum=0.8)
opt1.load_state_dict(state_dict)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt1.zero_grad()
opt1.backward(loss)
orig_params = | TensorDict() | megengine.core.TensorDict |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from io import BytesIO
import numpy as np
from helpers import MLP, graph_mode
import megengine.functional as F
from megengine import load, save
from megengine.core import TensorDict, tensor
from megengine.jit import trace
from megengine.optimizer import SGD, Adam
from megengine.test import assertTensorClose
def get_input():
batch_size = 2
input_dim = 28
data_shape = (batch_size, input_dim)
label_shape = (batch_size,)
data = tensor()
label = tensor(dtype=np.int32)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
return data, data_shape, label, label_shape
def test_sgd_simple():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, weight_decay=0.1)
for idx in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
if idx % 2:
opt.zero_grad()
else:
mlp.zero_grad()
opt.backward(loss)
grads = TensorDict()
orig_params = TensorDict()
for param in mlp.parameters():
grad = | F.grad(loss, param, use_virtual_grad=False) | megengine.functional.grad |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from io import BytesIO
import numpy as np
from helpers import MLP, graph_mode
import megengine.functional as F
from megengine import load, save
from megengine.core import TensorDict, tensor
from megengine.jit import trace
from megengine.optimizer import SGD, Adam
from megengine.test import assertTensorClose
def get_input():
batch_size = 2
input_dim = 28
data_shape = (batch_size, input_dim)
label_shape = (batch_size,)
data = tensor()
label = tensor(dtype=np.int32)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
return data, data_shape, label, label_shape
def test_sgd_simple():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, weight_decay=0.1)
for idx in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
if idx % 2:
opt.zero_grad()
else:
mlp.zero_grad()
opt.backward(loss)
grads = TensorDict()
orig_params = TensorDict()
for param in mlp.parameters():
grad = F.grad(loss, param, use_virtual_grad=False)
assertTensorClose(grad.numpy(), param.grad.numpy())
grads[param] = np.copy(grad.numpy())
orig_params[param] = np.copy(param.numpy())
opt.step()
for param in mlp.parameters():
assertTensorClose(
param.numpy(), orig_params[param] * 0.999 - grads[param] * 0.01
)
def test_sgd_momentum():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, momentum=0.9)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
for _ in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
orig_params = TensorDict()
grads = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
grads[param] = np.copy(param.grad.numpy())
opt.step()
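        # expected update: slot = 0.9 * slot - lr * grad, then param = orig_param + slot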
for param in mlp.parameters():
slot = slots[param]
orig_param = orig_params[param]
slot *= 0.9
slot -= param.grad.numpy() * 0.01
assertTensorClose(param.numpy(), orig_param + slot)
# TODO: put opt.step() inside trace
def test_sgd_momentum_static():
_, data_shape, _, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, momentum=0.9)
@trace
def f(data, label):
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
for _ in range(3):
f(
np.random.random(data_shape).astype(np.float32),
np.random.randint(0, 10, label_shape).astype(np.int32),
)
orig_params = TensorDict()
grads = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
grads[param] = np.copy(param.grad.numpy())
opt.step()
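        # verify the same momentum update as in the eager test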
for param in mlp.parameters():
slot = slots[param]
orig_param = orig_params[param]
slot *= 0.9
slot -= param.grad.numpy() * 0.01
assertTensorClose(param.numpy(), orig_param + slot)
def test_update_lr():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01)
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
opt.step()
for group in opt.param_groups:
group["lr"] += 0.02
for _ in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
for param in mlp.parameters():
grad = | F.grad(loss, param, use_virtual_grad=False) | megengine.functional.grad |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import megengine.functional as F
import megengine.module as M
from .darknet import Darknet
from .network_blocks import BaseConv, UpSample
class YOLOFPN(M.Module):
"""
YOLOFPN module. Darknet 53 is the default backbone of this model.
"""
def __init__(
self, depth=53, in_features=["dark3", "dark4", "dark5"],
):
super().__init__()
self.backbone = Darknet(depth)
self.in_features = in_features
# out 1
self.out1_cbl = self._make_cbl(512, 256, 1)
self.out1 = self._make_embedding([256, 512], 512 + 256)
# out 2
self.out2_cbl = self._make_cbl(256, 128, 1)
self.out2 = self._make_embedding([128, 256], 256 + 128)
# upsample
self.upsample = UpSample(scale_factor=2, mode="bilinear")
def _make_cbl(self, _in, _out, ks):
return BaseConv(_in, _out, ks, stride=1, act="lrelu")
def _make_embedding(self, filters_list, in_filters):
m = M.Sequential(
*[
self._make_cbl(in_filters, filters_list[0], 1),
self._make_cbl(filters_list[0], filters_list[1], 3),
self._make_cbl(filters_list[1], filters_list[0], 1),
self._make_cbl(filters_list[0], filters_list[1], 3),
self._make_cbl(filters_list[1], filters_list[0], 1),
]
)
return m
def forward(self, inputs):
"""
Args:
inputs (Tensor): input image.
Returns:
            Tuple[Tensor]: FPN output features.
"""
# backbone
out_features = self.backbone(inputs)
x2, x1, x0 = [out_features[f] for f in self.in_features]
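        # x2, x1, x0 correspond to dark3, dark4, dark5 respectively (shallow to deep)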
# yolo branch 1
x1_in = self.out1_cbl(x0)
x1_in = self.upsample(x1_in)
x1_in = | F.concat([x1_in, x1], 1) | megengine.functional.concat |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import megengine.functional as F
import megengine.module as M
from .darknet import Darknet
from .network_blocks import BaseConv, UpSample
class YOLOFPN(M.Module):
"""
YOLOFPN module. Darknet 53 is the default backbone of this model.
"""
def __init__(
self, depth=53, in_features=["dark3", "dark4", "dark5"],
):
super().__init__()
self.backbone = Darknet(depth)
self.in_features = in_features
# out 1
self.out1_cbl = self._make_cbl(512, 256, 1)
self.out1 = self._make_embedding([256, 512], 512 + 256)
# out 2
self.out2_cbl = self._make_cbl(256, 128, 1)
self.out2 = self._make_embedding([128, 256], 256 + 128)
# upsample
self.upsample = UpSample(scale_factor=2, mode="bilinear")
def _make_cbl(self, _in, _out, ks):
return BaseConv(_in, _out, ks, stride=1, act="lrelu")
def _make_embedding(self, filters_list, in_filters):
m = M.Sequential(
*[
self._make_cbl(in_filters, filters_list[0], 1),
self._make_cbl(filters_list[0], filters_list[1], 3),
self._make_cbl(filters_list[1], filters_list[0], 1),
self._make_cbl(filters_list[0], filters_list[1], 3),
self._make_cbl(filters_list[1], filters_list[0], 1),
]
)
return m
def forward(self, inputs):
"""
Args:
inputs (Tensor): input image.
Returns:
            Tuple[Tensor]: FPN output features.
"""
# backbone
out_features = self.backbone(inputs)
x2, x1, x0 = [out_features[f] for f in self.in_features]
# yolo branch 1
x1_in = self.out1_cbl(x0)
x1_in = self.upsample(x1_in)
x1_in = F.concat([x1_in, x1], 1)
out_dark4 = self.out1(x1_in)
# yolo branch 2
x2_in = self.out2_cbl(out_dark4)
x2_in = self.upsample(x2_in)
x2_in = | F.concat([x2_in, x2], 1) | megengine.functional.concat |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import os
import time
import numpy as np
# pylint: disable=import-error
import model as snet_model
import quantizable_model as quantizable_snet_model
import megengine
import megengine.device as device
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
import megengine.jit as jit
import megengine.amp as amp
import megengine.quantization as Q
logging = | megengine.logger.get_logger() | megengine.logger.get_logger |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import os
import time
import numpy as np
# pylint: disable=import-error
import model as snet_model
import quantizable_model as quantizable_snet_model
import megengine
import megengine.device as device
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
import megengine.jit as jit
import megengine.amp as amp
import megengine.quantization as Q
logging = megengine.logger.get_logger()
from dataset import get_dataloader
DEFAULT_QAT_CONFIG = {
"ema":Q.ema_fakequant_qconfig,
"ema_lowbi":Q.ema_lowbit_fakequant_qconfig,
"sync_ema":Q.sync_ema_fakequant_qconfig,
"min_max":Q.min_max_fakequant_qconfig,
"tqt":Q.tqt_qconfig
}
def get_qconifg(config_name: str):
return DEFAULT_QAT_CONFIG[config_name]
def main():
parser = argparse.ArgumentParser(description="shufflenet benchmark")
parser.add_argument(
"-a",
"--arch",
default="shufflenet_v2_x2_0",
help="model architecture (default: shufflenet_v2_x1_0)",
)
parser.add_argument(
"-n",
"--ngpus",
default=1,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"-s",
"--steps",
default=200,
type=int,
help="number of train steps (default: 200)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 128)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"--lr",
metavar="LR",
default=0.001,
help="learning rate for single GPU (default: 0.001)",
)
parser.add_argument("--momentum", default=0.9, help="momentum (default: 0.9)")
parser.add_argument(
"--weight-decay", default=4e-5, help="weight decay (default: 4e-5)"
)
parser.add_argument(
"-p",
"--print-freq",
default=1,
type=int,
metavar="N",
help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp", "qat"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16\n"
"qat: quantization aware training"
)
parser.add_argument(
"--qat-config",
default="min_max",
type=str,
choices=["min_max", "ema", "ema_lowbit", "sync_ema", "tqt"],
help="quantization aware training config\n"
"min_max: min_max_fakequant_qconfig\n"
"ema: ema_fakequant_qconfig\n"
"ema_lowbit: ema_lowbit_fakequant_qconfig\n"
"sync_ema: sync_ema_fakequant_qconfig\n"
"tqt: tqt_qconfig"
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", type=int, default=0)
parser.add_argument("--world-size", type=int, default=None)
parser.add_argument("--rank", default=0)
parser.add_argument("--loader", default=False, action="store_true", help="whether use loader")
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
args = parser.parse_args()
if args.world_size is None:
args.world_size = args.ngpus
if args.world_size > 1:
# launch processes
train_func = dist.launcher(worker, master_ip=args.dist_addr, port=args.dist_port,
world_size=args.world_size, n_gpus=args.ngpus, rank_start=args.rank * args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
steps = args.steps
# build model
shufflenet = quantizable_snet_model if args.mode == "qat" else snet_model
model = shufflenet.__dict__[args.arch]()
if args.mode == "qat":
if args.qat_config == "sync_ema":
assert args.ngpus > 1, "sync_ema does not support ngpus={}".format(args.ngpus)
qconfig = get_qconifg(args.qat_config)
model = Q.quantize_qat(module=model, qconfig= qconfig)
model.train()
Q.enable_observer(model)
Q.enable_fake_quant(model)
# Sync parameters
if args.world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if args.world_size > 1 else None,
)
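    # with more than one worker, gradients are summed across ranks by the all-reduce callback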
# Optimizer
params_wd = []
params_nwd = []
params_scale = []
for n, p in model.named_parameters():
if n.find("weight") >= 0 and len(p.shape) > 1:
params_wd.append(p)
elif n.find("scale") >= 0:
params_scale.append(p)
else:
params_nwd.append(p)
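    # multi-dim "weight" tensors get weight decay; the rest use weight_decay=0 (scale params are kept out of this optimizer)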
opt = optim.SGD(
[{"params": params_wd},
{"params": params_nwd, "weight_decay": 0},
],
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * args.world_size, # scale weight decay in "SUM" mode
)
# train and valid func
@ | amp.autocast(enabled=args.mode == "mp") | megengine.amp.autocast |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import os
import time
import numpy as np
# pylint: disable=import-error
import model as snet_model
import quantizable_model as quantizable_snet_model
import megengine
import megengine.device as device
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
import megengine.jit as jit
import megengine.amp as amp
import megengine.quantization as Q
logging = megengine.logger.get_logger()
from dataset import get_dataloader
DEFAULT_QAT_CONFIG = {
"ema":Q.ema_fakequant_qconfig,
"ema_lowbi":Q.ema_lowbit_fakequant_qconfig,
"sync_ema":Q.sync_ema_fakequant_qconfig,
"min_max":Q.min_max_fakequant_qconfig,
"tqt":Q.tqt_qconfig
}
def get_qconifg(config_name: str):
return DEFAULT_QAT_CONFIG[config_name]
def main():
parser = argparse.ArgumentParser(description="shufflenet benchmark")
parser.add_argument(
"-a",
"--arch",
default="shufflenet_v2_x2_0",
help="model architecture (default: shufflenet_v2_x1_0)",
)
parser.add_argument(
"-n",
"--ngpus",
default=1,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"-s",
"--steps",
default=200,
type=int,
help="number of train steps (default: 200)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 128)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"--lr",
metavar="LR",
default=0.001,
help="learning rate for single GPU (default: 0.001)",
)
parser.add_argument("--momentum", default=0.9, help="momentum (default: 0.9)")
parser.add_argument(
"--weight-decay", default=4e-5, help="weight decay (default: 4e-5)"
)
parser.add_argument(
"-p",
"--print-freq",
default=1,
type=int,
metavar="N",
help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp", "qat"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16\n"
"qat: quantization aware training"
)
parser.add_argument(
"--qat-config",
default="min_max",
type=str,
choices=["min_max", "ema", "ema_lowbit", "sync_ema", "tqt"],
help="quantization aware training config\n"
"min_max: min_max_fakequant_qconfig\n"
"ema: ema_fakequant_qconfig\n"
"ema_lowbit: ema_lowbit_fakequant_qconfig\n"
"sync_ema: sync_ema_fakequant_qconfig\n"
"tqt: tqt_qconfig"
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", type=int, default=0)
parser.add_argument("--world-size", type=int, default=None)
parser.add_argument("--rank", default=0)
parser.add_argument("--loader", default=False, action="store_true", help="whether use loader")
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
args = parser.parse_args()
if args.world_size is None:
args.world_size = args.ngpus
if args.world_size > 1:
# launch processes
train_func = dist.launcher(worker, master_ip=args.dist_addr, port=args.dist_port,
world_size=args.world_size, n_gpus=args.ngpus, rank_start=args.rank * args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
steps = args.steps
# build model
shufflenet = quantizable_snet_model if args.mode == "qat" else snet_model
model = shufflenet.__dict__[args.arch]()
if args.mode == "qat":
if args.qat_config == "sync_ema":
assert args.ngpus > 1, "sync_ema does not support ngpus={}".format(args.ngpus)
qconfig = get_qconifg(args.qat_config)
model = Q.quantize_qat(module=model, qconfig= qconfig)
model.train()
| Q.enable_observer(model) | megengine.quantization.enable_observer |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import os
import time
import numpy as np
# pylint: disable=import-error
import model as snet_model
import quantizable_model as quantizable_snet_model
import megengine
import megengine.device as device
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
import megengine.jit as jit
import megengine.amp as amp
import megengine.quantization as Q
logging = megengine.logger.get_logger()
from dataset import get_dataloader
DEFAULT_QAT_CONFIG = {
"ema":Q.ema_fakequant_qconfig,
"ema_lowbi":Q.ema_lowbit_fakequant_qconfig,
"sync_ema":Q.sync_ema_fakequant_qconfig,
"min_max":Q.min_max_fakequant_qconfig,
"tqt":Q.tqt_qconfig
}
def get_qconifg(config_name: str):
return DEFAULT_QAT_CONFIG[config_name]
def main():
parser = argparse.ArgumentParser(description="shufflenet benchmark")
parser.add_argument(
"-a",
"--arch",
default="shufflenet_v2_x2_0",
help="model architecture (default: shufflenet_v2_x1_0)",
)
parser.add_argument(
"-n",
"--ngpus",
default=1,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"-s",
"--steps",
default=200,
type=int,
help="number of train steps (default: 200)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 128)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"--lr",
metavar="LR",
default=0.001,
help="learning rate for single GPU (default: 0.001)",
)
parser.add_argument("--momentum", default=0.9, help="momentum (default: 0.9)")
parser.add_argument(
"--weight-decay", default=4e-5, help="weight decay (default: 4e-5)"
)
parser.add_argument(
"-p",
"--print-freq",
default=1,
type=int,
metavar="N",
help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp", "qat"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16\n"
"qat: quantization aware training"
)
parser.add_argument(
"--qat-config",
default="min_max",
type=str,
choices=["min_max", "ema", "ema_lowbit", "sync_ema", "tqt"],
help="quantization aware training config\n"
"min_max: min_max_fakequant_qconfig\n"
"ema: ema_fakequant_qconfig\n"
"ema_lowbit: ema_lowbit_fakequant_qconfig\n"
"sync_ema: sync_ema_fakequant_qconfig\n"
"tqt: tqt_qconfig"
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", type=int, default=0)
parser.add_argument("--world-size", type=int, default=None)
parser.add_argument("--rank", default=0)
parser.add_argument("--loader", default=False, action="store_true", help="whether use loader")
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
args = parser.parse_args()
if args.world_size is None:
args.world_size = args.ngpus
if args.world_size > 1:
# launch processes
train_func = dist.launcher(worker, master_ip=args.dist_addr, port=args.dist_port,
world_size=args.world_size, n_gpus=args.ngpus, rank_start=args.rank * args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
steps = args.steps
# build model
shufflenet = quantizable_snet_model if args.mode == "qat" else snet_model
model = shufflenet.__dict__[args.arch]()
if args.mode == "qat":
if args.qat_config == "sync_ema":
assert args.ngpus > 1, "sync_ema does not support ngpus={}".format(args.ngpus)
qconfig = get_qconifg(args.qat_config)
model = Q.quantize_qat(module=model, qconfig= qconfig)
model.train()
Q.enable_observer(model)
| Q.enable_fake_quant(model) | megengine.quantization.enable_fake_quant |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import os
import time
import numpy as np
# pylint: disable=import-error
import model as snet_model
import quantizable_model as quantizable_snet_model
import megengine
import megengine.device as device
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
import megengine.jit as jit
import megengine.amp as amp
import megengine.quantization as Q
logging = megengine.logger.get_logger()
from dataset import get_dataloader
DEFAULT_QAT_CONFIG = {
"ema":Q.ema_fakequant_qconfig,
"ema_lowbi":Q.ema_lowbit_fakequant_qconfig,
"sync_ema":Q.sync_ema_fakequant_qconfig,
"min_max":Q.min_max_fakequant_qconfig,
"tqt":Q.tqt_qconfig
}
def get_qconifg(config_name: str):
return DEFAULT_QAT_CONFIG[config_name]
def main():
parser = argparse.ArgumentParser(description="shufflenet benchmark")
parser.add_argument(
"-a",
"--arch",
default="shufflenet_v2_x2_0",
help="model architecture (default: shufflenet_v2_x1_0)",
)
parser.add_argument(
"-n",
"--ngpus",
default=1,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"-s",
"--steps",
default=200,
type=int,
help="number of train steps (default: 200)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 128)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"--lr",
metavar="LR",
default=0.001,
help="learning rate for single GPU (default: 0.001)",
)
parser.add_argument("--momentum", default=0.9, help="momentum (default: 0.9)")
parser.add_argument(
"--weight-decay", default=4e-5, help="weight decay (default: 4e-5)"
)
parser.add_argument(
"-p",
"--print-freq",
default=1,
type=int,
metavar="N",
help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp", "qat"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16\n"
"qat: quantization aware training"
)
parser.add_argument(
"--qat-config",
default="min_max",
type=str,
choices=["min_max", "ema", "ema_lowbit", "sync_ema", "tqt"],
help="quantization aware training config\n"
"min_max: min_max_fakequant_qconfig\n"
"ema: ema_fakequant_qconfig\n"
"ema_lowbit: ema_lowbit_fakequant_qconfig\n"
"sync_ema: sync_ema_fakequant_qconfig\n"
"tqt: tqt_qconfig"
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", type=int, default=0)
parser.add_argument("--world-size", type=int, default=None)
parser.add_argument("--rank", default=0)
parser.add_argument("--loader", default=False, action="store_true", help="whether use loader")
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
args = parser.parse_args()
if args.world_size is None:
args.world_size = args.ngpus
if args.world_size > 1:
# launch processes
train_func = dist.launcher(worker, master_ip=args.dist_addr, port=args.dist_port,
world_size=args.world_size, n_gpus=args.ngpus, rank_start=args.rank * args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
steps = args.steps
# build model
shufflenet = quantizable_snet_model if args.mode == "qat" else snet_model
model = shufflenet.__dict__[args.arch]()
if args.mode == "qat":
if args.qat_config == "sync_ema":
assert args.ngpus > 1, "sync_ema does not support ngpus={}".format(args.ngpus)
qconfig = get_qconifg(args.qat_config)
model = Q.quantize_qat(module=model, qconfig= qconfig)
model.train()
Q.enable_observer(model)
Q.enable_fake_quant(model)
# Sync parameters
if args.world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if args.world_size > 1 else None,
)
# Optimizer
params_wd = []
params_nwd = []
params_scale = []
for n, p in model.named_parameters():
if n.find("weight") >= 0 and len(p.shape) > 1:
params_wd.append(p)
elif n.find("scale") >= 0:
params_scale.append(p)
else:
params_nwd.append(p)
opt = optim.SGD(
[{"params": params_wd},
{"params": params_nwd, "weight_decay": 0},
],
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * args.world_size, # scale weight decay in "SUM" mode
)
# train and valid func
@amp.autocast(enabled=args.mode == "mp")
def train_step(image, label):
with gm:
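            # run the forward pass under the GradManager so gm.backward can compute parameter grads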
logits = model(image)
loss = F.nn.cross_entropy(logits, label, label_smooth=0.1)
gm.backward(loss)
opt.step().clear_grad()
return loss
if args.trace:
if args.symbolic:
train_step = jit.trace(train_step, symbolic=True, sublinear_memory_config=jit.SublinearMemoryConfig(genetic_nr_iter=50), symbolic_shape=False)
else:
train_step = jit.trace(train_step, symbolic=False, symbolic_shape=False)
else:
        assert args.symbolic==False, "invalid arguments: trace=False, symbolic=True"
# start training
objs = AverageMeter("Loss")
clck = AverageMeter("Time")
if args.loader:
dataloader = iter(get_dataloader(args))
image,label = next(dataloader)
else:
image = np.random.randn(args.batch_size, 3, 224, 224).astype("float32")
label = np.random.randint(0, 1000, size=(args.batch_size,)).astype("int32")
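    # without --loader, the same synthetic batch is reused on every step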
# warm up
for step in range(10):
if args.loader:
image,label = next(dataloader)
if not args.preload:
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
else:
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
loss = train_step(image, label)
loss.item()
for step in range(0, steps):
t = time.time()
if args.loader:
image,label = next(dataloader)
if not args.preload:
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
else:
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
loss = train_step(image, label)
objs.update(loss.item())
clck.update(time.time() - t)
if step % args.print_freq == 0 and dist.get_rank() == 0:
print(
"Step {}, {}, {}".format(
step,
objs,
clck,
))
objs.reset()
if | dist.get_rank() | megengine.distributed.get_rank |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import os
import time
import numpy as np
# pylint: disable=import-error
import model as snet_model
import quantizable_model as quantizable_snet_model
import megengine
import megengine.device as device
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
import megengine.jit as jit
import megengine.amp as amp
import megengine.quantization as Q
logging = megengine.logger.get_logger()
from dataset import get_dataloader
DEFAULT_QAT_CONFIG = {
"ema":Q.ema_fakequant_qconfig,
"ema_lowbi":Q.ema_lowbit_fakequant_qconfig,
"sync_ema":Q.sync_ema_fakequant_qconfig,
"min_max":Q.min_max_fakequant_qconfig,
"tqt":Q.tqt_qconfig
}
def get_qconifg(config_name: str):
return DEFAULT_QAT_CONFIG[config_name]
def main():
parser = argparse.ArgumentParser(description="shufflenet benchmark")
parser.add_argument(
"-a",
"--arch",
default="shufflenet_v2_x2_0",
help="model architecture (default: shufflenet_v2_x1_0)",
)
parser.add_argument(
"-n",
"--ngpus",
default=1,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"-s",
"--steps",
default=200,
type=int,
help="number of train steps (default: 200)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 128)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"--lr",
metavar="LR",
default=0.001,
help="learning rate for single GPU (default: 0.001)",
)
parser.add_argument("--momentum", default=0.9, help="momentum (default: 0.9)")
parser.add_argument(
"--weight-decay", default=4e-5, help="weight decay (default: 4e-5)"
)
parser.add_argument(
"-p",
"--print-freq",
default=1,
type=int,
metavar="N",
help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp", "qat"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16\n"
"qat: quantization aware training"
)
parser.add_argument(
"--qat-config",
default="min_max",
type=str,
choices=["min_max", "ema", "ema_lowbit", "sync_ema", "tqt"],
help="quantization aware training config\n"
"min_max: min_max_fakequant_qconfig\n"
"ema: ema_fakequant_qconfig\n"
"ema_lowbit: ema_lowbit_fakequant_qconfig\n"
"sync_ema: sync_ema_fakequant_qconfig\n"
"tqt: tqt_qconfig"
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", type=int, default=0)
parser.add_argument("--world-size", type=int, default=None)
parser.add_argument("--rank", default=0)
parser.add_argument("--loader", default=False, action="store_true", help="whether use loader")
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
args = parser.parse_args()
if args.world_size is None:
args.world_size = args.ngpus
if args.world_size > 1:
# launch processes
train_func = dist.launcher(worker, master_ip=args.dist_addr, port=args.dist_port,
world_size=args.world_size, n_gpus=args.ngpus, rank_start=args.rank * args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
steps = args.steps
# build model
shufflenet = quantizable_snet_model if args.mode == "qat" else snet_model
model = shufflenet.__dict__[args.arch]()
if args.mode == "qat":
if args.qat_config == "sync_ema":
assert args.ngpus > 1, "sync_ema does not support ngpus={}".format(args.ngpus)
qconfig = get_qconifg(args.qat_config)
model = Q.quantize_qat(module=model, qconfig= qconfig)
model.train()
Q.enable_observer(model)
Q.enable_fake_quant(model)
# Sync parameters
if args.world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = | autodiff.GradManager() | megengine.autodiff.GradManager |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import os
import time
import numpy as np
# pylint: disable=import-error
import model as snet_model
import quantizable_model as quantizable_snet_model
import megengine
import megengine.device as device
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
import megengine.jit as jit
import megengine.amp as amp
import megengine.quantization as Q
logging = megengine.logger.get_logger()
from dataset import get_dataloader
DEFAULT_QAT_CONFIG = {
"ema":Q.ema_fakequant_qconfig,
"ema_lowbi":Q.ema_lowbit_fakequant_qconfig,
"sync_ema":Q.sync_ema_fakequant_qconfig,
"min_max":Q.min_max_fakequant_qconfig,
"tqt":Q.tqt_qconfig
}
def get_qconifg(config_name: str):
return DEFAULT_QAT_CONFIG[config_name]
def main():
parser = argparse.ArgumentParser(description="shufflenet benchmark")
parser.add_argument(
"-a",
"--arch",
default="shufflenet_v2_x2_0",
help="model architecture (default: shufflenet_v2_x1_0)",
)
parser.add_argument(
"-n",
"--ngpus",
default=1,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"-s",
"--steps",
default=200,
type=int,
help="number of train steps (default: 200)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 128)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"--lr",
metavar="LR",
default=0.001,
help="learning rate for single GPU (default: 0.001)",
)
parser.add_argument("--momentum", default=0.9, help="momentum (default: 0.9)")
parser.add_argument(
"--weight-decay", default=4e-5, help="weight decay (default: 4e-5)"
)
parser.add_argument(
"-p",
"--print-freq",
default=1,
type=int,
metavar="N",
help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp", "qat"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16\n"
"qat: quantization aware training"
)
parser.add_argument(
"--qat-config",
default="min_max",
type=str,
choices=["min_max", "ema", "ema_lowbit", "sync_ema", "tqt"],
help="quantization aware training config\n"
"min_max: min_max_fakequant_qconfig\n"
"ema: ema_fakequant_qconfig\n"
"ema_lowbit: ema_lowbit_fakequant_qconfig\n"
"sync_ema: sync_ema_fakequant_qconfig\n"
"tqt: tqt_qconfig"
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", type=int, default=0)
parser.add_argument("--world-size", type=int, default=None)
parser.add_argument("--rank", default=0)
parser.add_argument("--loader", default=False, action="store_true", help="whether use loader")
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
args = parser.parse_args()
if args.world_size is None:
args.world_size = args.ngpus
if args.world_size > 1:
# launch processes
train_func = dist.launcher(worker, master_ip=args.dist_addr, port=args.dist_port,
world_size=args.world_size, n_gpus=args.ngpus, rank_start=args.rank * args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
steps = args.steps
# build model
shufflenet = quantizable_snet_model if args.mode == "qat" else snet_model
model = shufflenet.__dict__[args.arch]()
if args.mode == "qat":
if args.qat_config == "sync_ema":
assert args.ngpus > 1, "sync_ema does not support ngpus={}".format(args.ngpus)
qconfig = get_qconifg(args.qat_config)
model = Q.quantize_qat(module=model, qconfig= qconfig)
model.train()
Q.enable_observer(model)
Q.enable_fake_quant(model)
# Sync parameters
if args.world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if args.world_size > 1 else None,
)
# Optimizer
params_wd = []
params_nwd = []
params_scale = []
for n, p in model.named_parameters():
if n.find("weight") >= 0 and len(p.shape) > 1:
params_wd.append(p)
elif n.find("scale") >= 0:
params_scale.append(p)
else:
params_nwd.append(p)
opt = optim.SGD(
[{"params": params_wd},
{"params": params_nwd, "weight_decay": 0},
],
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * args.world_size, # scale weight decay in "SUM" mode
)
# train and valid func
@amp.autocast(enabled=args.mode == "mp")
def train_step(image, label):
with gm:
logits = model(image)
loss = | F.nn.cross_entropy(logits, label, label_smooth=0.1) | megengine.functional.nn.cross_entropy |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import os
import time
import numpy as np
# pylint: disable=import-error
import model as snet_model
import quantizable_model as quantizable_snet_model
import megengine
import megengine.device as device
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
import megengine.jit as jit
import megengine.amp as amp
import megengine.quantization as Q
logging = megengine.logger.get_logger()
from dataset import get_dataloader
DEFAULT_QAT_CONFIG = {
"ema":Q.ema_fakequant_qconfig,
"ema_lowbi":Q.ema_lowbit_fakequant_qconfig,
"sync_ema":Q.sync_ema_fakequant_qconfig,
"min_max":Q.min_max_fakequant_qconfig,
"tqt":Q.tqt_qconfig
}
def get_qconifg(config_name: str):
return DEFAULT_QAT_CONFIG[config_name]
def main():
parser = argparse.ArgumentParser(description="shufflenet benchmark")
parser.add_argument(
"-a",
"--arch",
default="shufflenet_v2_x2_0",
help="model architecture (default: shufflenet_v2_x1_0)",
)
parser.add_argument(
"-n",
"--ngpus",
default=1,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"-s",
"--steps",
default=200,
type=int,
help="number of train steps (default: 200)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 128)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"--lr",
metavar="LR",
default=0.001,
help="learning rate for single GPU (default: 0.001)",
)
parser.add_argument("--momentum", default=0.9, help="momentum (default: 0.9)")
parser.add_argument(
"--weight-decay", default=4e-5, help="weight decay (default: 4e-5)"
)
parser.add_argument(
"-p",
"--print-freq",
default=1,
type=int,
metavar="N",
help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp", "qat"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16\n"
"qat: quantization aware training"
)
parser.add_argument(
"--qat-config",
default="min_max",
type=str,
choices=["min_max", "ema", "ema_lowbit", "sync_ema", "tqt"],
help="quantization aware training config\n"
"min_max: min_max_fakequant_qconfig\n"
"ema: ema_fakequant_qconfig\n"
"ema_lowbit: ema_lowbit_fakequant_qconfig\n"
"sync_ema: sync_ema_fakequant_qconfig\n"
"tqt: tqt_qconfig"
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", type=int, default=0)
parser.add_argument("--world-size", type=int, default=None)
parser.add_argument("--rank", default=0)
parser.add_argument("--loader", default=False, action="store_true", help="whether use loader")
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
args = parser.parse_args()
if args.world_size is None:
args.world_size = args.ngpus
if args.world_size > 1:
# launch processes
train_func = dist.launcher(worker, master_ip=args.dist_addr, port=args.dist_port,
world_size=args.world_size, n_gpus=args.ngpus, rank_start=args.rank * args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
steps = args.steps
# build model
shufflenet = quantizable_snet_model if args.mode == "qat" else snet_model
model = shufflenet.__dict__[args.arch]()
if args.mode == "qat":
if args.qat_config == "sync_ema":
assert args.ngpus > 1, "sync_ema does not support ngpus={}".format(args.ngpus)
qconfig = get_qconifg(args.qat_config)
model = Q.quantize_qat(module=model, qconfig= qconfig)
model.train()
Q.enable_observer(model)
Q.enable_fake_quant(model)
# Sync parameters
if args.world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if args.world_size > 1 else None,
)
# Optimizer
params_wd = []
params_nwd = []
params_scale = []
for n, p in model.named_parameters():
if n.find("weight") >= 0 and len(p.shape) > 1:
params_wd.append(p)
elif n.find("scale") >= 0:
params_scale.append(p)
else:
params_nwd.append(p)
opt = optim.SGD(
[{"params": params_wd},
{"params": params_nwd, "weight_decay": 0},
],
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * args.world_size, # scale weight decay in "SUM" mode
)
# train and valid func
@amp.autocast(enabled=args.mode == "mp")
def train_step(image, label):
with gm:
logits = model(image)
loss = F.nn.cross_entropy(logits, label, label_smooth=0.1)
gm.backward(loss)
opt.step().clear_grad()
return loss
if args.trace:
if args.symbolic:
train_step = jit.trace(train_step, symbolic=True, sublinear_memory_config=jit.SublinearMemoryConfig(genetic_nr_iter=50), symbolic_shape=False)
else:
train_step = | jit.trace(train_step, symbolic=False, symbolic_shape=False) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import os
import time
import numpy as np
# pylint: disable=import-error
import model as snet_model
import quantizable_model as quantizable_snet_model
import megengine
import megengine.device as device
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
import megengine.jit as jit
import megengine.amp as amp
import megengine.quantization as Q
logging = megengine.logger.get_logger()
from dataset import get_dataloader
DEFAULT_QAT_CONFIG = {
"ema":Q.ema_fakequant_qconfig,
"ema_lowbi":Q.ema_lowbit_fakequant_qconfig,
"sync_ema":Q.sync_ema_fakequant_qconfig,
"min_max":Q.min_max_fakequant_qconfig,
"tqt":Q.tqt_qconfig
}
def get_qconifg(config_name: str):
return DEFAULT_QAT_CONFIG[config_name]
def main():
parser = argparse.ArgumentParser(description="shufflenet benchmark")
parser.add_argument(
"-a",
"--arch",
default="shufflenet_v2_x2_0",
help="model architecture (default: shufflenet_v2_x1_0)",
)
parser.add_argument(
"-n",
"--ngpus",
default=1,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"-s",
"--steps",
default=200,
type=int,
help="number of train steps (default: 200)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 128)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"--lr",
metavar="LR",
default=0.001,
help="learning rate for single GPU (default: 0.001)",
)
parser.add_argument("--momentum", default=0.9, help="momentum (default: 0.9)")
parser.add_argument(
"--weight-decay", default=4e-5, help="weight decay (default: 4e-5)"
)
parser.add_argument(
"-p",
"--print-freq",
default=1,
type=int,
metavar="N",
help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp", "qat"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16\n"
"qat: quantization aware training"
)
parser.add_argument(
"--qat-config",
default="min_max",
type=str,
choices=["min_max", "ema", "ema_lowbit", "sync_ema", "tqt"],
help="quantization aware training config\n"
"min_max: min_max_fakequant_qconfig\n"
"ema: ema_fakequant_qconfig\n"
"ema_lowbit: ema_lowbit_fakequant_qconfig\n"
"sync_ema: sync_ema_fakequant_qconfig\n"
"tqt: tqt_qconfig"
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", type=int, default=0)
parser.add_argument("--world-size", type=int, default=None)
parser.add_argument("--rank", default=0)
parser.add_argument("--loader", default=False, action="store_true", help="whether use loader")
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
args = parser.parse_args()
if args.world_size is None:
args.world_size = args.ngpus
if args.world_size > 1:
# launch processes
train_func = dist.launcher(worker, master_ip=args.dist_addr, port=args.dist_port,
world_size=args.world_size, n_gpus=args.ngpus, rank_start=args.rank * args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
steps = args.steps
# build model
shufflenet = quantizable_snet_model if args.mode == "qat" else snet_model
model = shufflenet.__dict__[args.arch]()
if args.mode == "qat":
if args.qat_config == "sync_ema":
assert args.ngpus > 1, "sync_ema does not support ngpus={}".format(args.ngpus)
qconfig = get_qconifg(args.qat_config)
model = Q.quantize_qat(module=model, qconfig= qconfig)
model.train()
Q.enable_observer(model)
Q.enable_fake_quant(model)
# Sync parameters
if args.world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if args.world_size > 1 else None,
)
# Optimizer
params_wd = []
params_nwd = []
params_scale = []
for n, p in model.named_parameters():
if n.find("weight") >= 0 and len(p.shape) > 1:
params_wd.append(p)
elif n.find("scale") >= 0:
params_scale.append(p)
else:
params_nwd.append(p)
opt = optim.SGD(
[{"params": params_wd},
{"params": params_nwd, "weight_decay": 0},
],
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * args.world_size, # scale weight decay in "SUM" mode
)
# train and valid func
@amp.autocast(enabled=args.mode == "mp")
def train_step(image, label):
with gm:
logits = model(image)
loss = F.nn.cross_entropy(logits, label, label_smooth=0.1)
gm.backward(loss)
opt.step().clear_grad()
return loss
if args.trace:
if args.symbolic:
train_step = jit.trace(train_step, symbolic=True, sublinear_memory_config=jit.SublinearMemoryConfig(genetic_nr_iter=50), symbolic_shape=False)
else:
train_step = jit.trace(train_step, symbolic=False, symbolic_shape=False)
else:
        assert args.symbolic==False, "invalid arguments: trace=False, symbolic=True"
# start training
objs = AverageMeter("Loss")
clck = AverageMeter("Time")
if args.loader:
dataloader = iter(get_dataloader(args))
image,label = next(dataloader)
else:
image = np.random.randn(args.batch_size, 3, 224, 224).astype("float32")
label = np.random.randint(0, 1000, size=(args.batch_size,)).astype("int32")
# warm up
for step in range(10):
if args.loader:
image,label = next(dataloader)
if not args.preload:
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
else:
image = | megengine.tensor(image, dtype="float32") | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import os
import time
import numpy as np
# pylint: disable=import-error
import model as snet_model
import quantizable_model as quantizable_snet_model
import megengine
import megengine.device as device
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
import megengine.jit as jit
import megengine.amp as amp
import megengine.quantization as Q
logging = megengine.logger.get_logger()
from dataset import get_dataloader
DEFAULT_QAT_CONFIG = {
"ema":Q.ema_fakequant_qconfig,
"ema_lowbi":Q.ema_lowbit_fakequant_qconfig,
"sync_ema":Q.sync_ema_fakequant_qconfig,
"min_max":Q.min_max_fakequant_qconfig,
"tqt":Q.tqt_qconfig
}
def get_qconifg(config_name: str):
return DEFAULT_QAT_CONFIG[config_name]
def main():
parser = argparse.ArgumentParser(description="shufflenet benchmark")
parser.add_argument(
"-a",
"--arch",
default="shufflenet_v2_x2_0",
help="model architecture (default: shufflenet_v2_x1_0)",
)
parser.add_argument(
"-n",
"--ngpus",
default=1,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"-s",
"--steps",
default=200,
type=int,
help="number of train steps (default: 200)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 128)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"--lr",
metavar="LR",
default=0.001,
help="learning rate for single GPU (default: 0.001)",
)
parser.add_argument("--momentum", default=0.9, help="momentum (default: 0.9)")
parser.add_argument(
"--weight-decay", default=4e-5, help="weight decay (default: 4e-5)"
)
parser.add_argument(
"-p",
"--print-freq",
default=1,
type=int,
metavar="N",
help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp", "qat"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16\n"
"qat: quantization aware training"
)
parser.add_argument(
"--qat-config",
default="min_max",
type=str,
choices=["min_max", "ema", "ema_lowbit", "sync_ema", "tqt"],
help="quantization aware training config\n"
"min_max: min_max_fakequant_qconfig\n"
"ema: ema_fakequant_qconfig\n"
"ema_lowbit: ema_lowbit_fakequant_qconfig\n"
"sync_ema: sync_ema_fakequant_qconfig\n"
"tqt: tqt_qconfig"
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", type=int, default=0)
parser.add_argument("--world-size", type=int, default=None)
parser.add_argument("--rank", default=0)
parser.add_argument("--loader", default=False, action="store_true", help="whether use loader")
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
args = parser.parse_args()
if args.world_size is None:
args.world_size = args.ngpus
if args.world_size > 1:
# launch processes
train_func = dist.launcher(worker, master_ip=args.dist_addr, port=args.dist_port,
world_size=args.world_size, n_gpus=args.ngpus, rank_start=args.rank * args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
steps = args.steps
# build model
shufflenet = quantizable_snet_model if args.mode == "qat" else snet_model
model = shufflenet.__dict__[args.arch]()
if args.mode == "qat":
if args.qat_config == "sync_ema":
assert args.ngpus > 1, "sync_ema does not support ngpus={}".format(args.ngpus)
qconfig = get_qconifg(args.qat_config)
model = Q.quantize_qat(module=model, qconfig= qconfig)
model.train()
Q.enable_observer(model)
Q.enable_fake_quant(model)
# Sync parameters
if args.world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if args.world_size > 1 else None,
)
# Optimizer
params_wd = []
params_nwd = []
params_scale = []
for n, p in model.named_parameters():
if n.find("weight") >= 0 and len(p.shape) > 1:
params_wd.append(p)
elif n.find("scale") >= 0:
params_scale.append(p)
else:
params_nwd.append(p)
opt = optim.SGD(
[{"params": params_wd},
{"params": params_nwd, "weight_decay": 0},
],
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * args.world_size, # scale weight decay in "SUM" mode
)
# train and valid func
@amp.autocast(enabled=args.mode == "mp")
def train_step(image, label):
with gm:
logits = model(image)
loss = F.nn.cross_entropy(logits, label, label_smooth=0.1)
gm.backward(loss)
opt.step().clear_grad()
return loss
if args.trace:
if args.symbolic:
train_step = jit.trace(train_step, symbolic=True, sublinear_memory_config=jit.SublinearMemoryConfig(genetic_nr_iter=50), symbolic_shape=False)
else:
train_step = jit.trace(train_step, symbolic=False, symbolic_shape=False)
else:
        assert args.symbolic==False, "invalid arguments: trace=False, symbolic=True"
# start training
objs = AverageMeter("Loss")
clck = AverageMeter("Time")
if args.loader:
dataloader = iter(get_dataloader(args))
image,label = next(dataloader)
else:
image = np.random.randn(args.batch_size, 3, 224, 224).astype("float32")
label = np.random.randint(0, 1000, size=(args.batch_size,)).astype("int32")
# warm up
for step in range(10):
if args.loader:
image,label = next(dataloader)
if not args.preload:
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
else:
image = megengine.tensor(image, dtype="float32")
label = | megengine.tensor(label, dtype="int32") | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import os
import time
import numpy as np
# pylint: disable=import-error
import model as snet_model
import quantizable_model as quantizable_snet_model
import megengine
import megengine.device as device
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
import megengine.jit as jit
import megengine.amp as amp
import megengine.quantization as Q
logging = megengine.logger.get_logger()
from dataset import get_dataloader
DEFAULT_QAT_CONFIG = {
"ema":Q.ema_fakequant_qconfig,
"ema_lowbi":Q.ema_lowbit_fakequant_qconfig,
"sync_ema":Q.sync_ema_fakequant_qconfig,
"min_max":Q.min_max_fakequant_qconfig,
"tqt":Q.tqt_qconfig
}
def get_qconifg(config_name: str):
return DEFAULT_QAT_CONFIG[config_name]
def main():
parser = argparse.ArgumentParser(description="shufflenet benchmark")
parser.add_argument(
"-a",
"--arch",
default="shufflenet_v2_x2_0",
help="model architecture (default: shufflenet_v2_x1_0)",
)
parser.add_argument(
"-n",
"--ngpus",
default=1,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"-s",
"--steps",
default=200,
type=int,
help="number of train steps (default: 200)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 128)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"--lr",
metavar="LR",
default=0.001,
help="learning rate for single GPU (default: 0.001)",
)
parser.add_argument("--momentum", default=0.9, help="momentum (default: 0.9)")
parser.add_argument(
"--weight-decay", default=4e-5, help="weight decay (default: 4e-5)"
)
parser.add_argument(
"-p",
"--print-freq",
default=1,
type=int,
metavar="N",
help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp", "qat"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16\n"
"qat: quantization aware training"
)
parser.add_argument(
"--qat-config",
default="min_max",
type=str,
choices=["min_max", "ema", "ema_lowbit", "sync_ema", "tqt"],
help="quantization aware training config\n"
"min_max: min_max_fakequant_qconfig\n"
"ema: ema_fakequant_qconfig\n"
"ema_lowbit: ema_lowbit_fakequant_qconfig\n"
"sync_ema: sync_ema_fakequant_qconfig\n"
"tqt: tqt_qconfig"
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", type=int, default=0)
parser.add_argument("--world-size", type=int, default=None)
parser.add_argument("--rank", default=0)
parser.add_argument("--loader", default=False, action="store_true", help="whether use loader")
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
args = parser.parse_args()
if args.world_size is None:
args.world_size = args.ngpus
if args.world_size > 1:
# launch processes
train_func = dist.launcher(worker, master_ip=args.dist_addr, port=args.dist_port,
world_size=args.world_size, n_gpus=args.ngpus, rank_start=args.rank * args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
steps = args.steps
# build model
shufflenet = quantizable_snet_model if args.mode == "qat" else snet_model
model = shufflenet.__dict__[args.arch]()
if args.mode == "qat":
if args.qat_config == "sync_ema":
assert args.ngpus > 1, "sync_ema does not support ngpus={}".format(args.ngpus)
qconfig = get_qconifg(args.qat_config)
model = Q.quantize_qat(module=model, qconfig= qconfig)
model.train()
Q.enable_observer(model)
Q.enable_fake_quant(model)
# Sync parameters
if args.world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if args.world_size > 1 else None,
)
# Optimizer
params_wd = []
params_nwd = []
params_scale = []
for n, p in model.named_parameters():
if n.find("weight") >= 0 and len(p.shape) > 1:
params_wd.append(p)
elif n.find("scale") >= 0:
params_scale.append(p)
else:
params_nwd.append(p)
opt = optim.SGD(
[{"params": params_wd},
{"params": params_nwd, "weight_decay": 0},
],
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * args.world_size, # scale weight decay in "SUM" mode
)
# train and valid func
@amp.autocast(enabled=args.mode == "mp")
def train_step(image, label):
with gm:
logits = model(image)
loss = F.nn.cross_entropy(logits, label, label_smooth=0.1)
gm.backward(loss)
opt.step().clear_grad()
return loss
if args.trace:
if args.symbolic:
train_step = jit.trace(train_step, symbolic=True, sublinear_memory_config=jit.SublinearMemoryConfig(genetic_nr_iter=50), symbolic_shape=False)
else:
train_step = jit.trace(train_step, symbolic=False, symbolic_shape=False)
else:
assert not args.symbolic, "invalid arguments: trace=False, symbolic=True"
# start training
objs = AverageMeter("Loss")
clck = AverageMeter("Time")
if args.loader:
dataloader = iter(get_dataloader(args))
image,label = next(dataloader)
else:
image = np.random.randn(args.batch_size, 3, 224, 224).astype("float32")
label = np.random.randint(0, 1000, size=(args.batch_size,)).astype("int32")
# warm up
for step in range(10):
if args.loader:
image,label = next(dataloader)
if not args.preload:
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
else:
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
loss = train_step(image, label)
loss.item()
for step in range(0, steps):
t = time.time()
if args.loader:
image,label = next(dataloader)
if not args.preload:
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
else:
image = | megengine.tensor(image, dtype="float32") | megengine.tensor |
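# A minimal sketch of the amp.autocast decorator used for the "mp" mode above:
# with enabled=False it is a no-op, so one function covers both fp32 and mixed precision.
# The matmul example is illustrative, not part of the benchmark.
import numpy as np
import megengine
import megengine.amp as amp
import megengine.functional as F
@amp.autocast(enabled=True)  # the benchmark passes enabled=(args.mode == "mp")
def scaled_matmul(x, w):
    return F.matmul(x, w)  # eligible ops may run in fp16 under autocast
x = megengine.tensor(np.random.randn(4, 8).astype("float32"))
w = megengine.tensor(np.random.randn(8, 2).astype("float32"))
y = scaled_matmul(x, w)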
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import os
import time
import numpy as np
# pylint: disable=import-error
import model as snet_model
import quantizable_model as quantizable_snet_model
import megengine
import megengine.device as device
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
import megengine.jit as jit
import megengine.amp as amp
import megengine.quantization as Q
logging = megengine.logger.get_logger()
from dataset import get_dataloader
DEFAULT_QAT_CONFIG = {
"ema":Q.ema_fakequant_qconfig,
"ema_lowbi":Q.ema_lowbit_fakequant_qconfig,
"sync_ema":Q.sync_ema_fakequant_qconfig,
"min_max":Q.min_max_fakequant_qconfig,
"tqt":Q.tqt_qconfig
}
def get_qconifg(config_name: str):
return DEFAULT_QAT_CONFIG[config_name]
def main():
parser = argparse.ArgumentParser(description="shufflenet benchmark")
parser.add_argument(
"-a",
"--arch",
default="shufflenet_v2_x2_0",
help="model architecture (default: shufflenet_v2_x1_0)",
)
parser.add_argument(
"-n",
"--ngpus",
default=1,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"-s",
"--steps",
default=200,
type=int,
help="number of train steps (default: 200)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 128)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"--lr",
metavar="LR",
default=0.001,
help="learning rate for single GPU (default: 0.001)",
)
parser.add_argument("--momentum", default=0.9, help="momentum (default: 0.9)")
parser.add_argument(
"--weight-decay", default=4e-5, help="weight decay (default: 4e-5)"
)
parser.add_argument(
"-p",
"--print-freq",
default=1,
type=int,
metavar="N",
help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp", "qat"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16\n"
"qat: quantization aware training"
)
parser.add_argument(
"--qat-config",
default="min_max",
type=str,
choices=["min_max", "ema", "ema_lowbit", "sync_ema", "tqt"],
help="quantization aware training config\n"
"min_max: min_max_fakequant_qconfig\n"
"ema: ema_fakequant_qconfig\n"
"ema_lowbit: ema_lowbit_fakequant_qconfig\n"
"sync_ema: sync_ema_fakequant_qconfig\n"
"tqt: tqt_qconfig"
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", type=int, default=0)
parser.add_argument("--world-size", type=int, default=None)
parser.add_argument("--rank", default=0)
parser.add_argument("--loader", default=False, action="store_true", help="whether use loader")
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
args = parser.parse_args()
if args.world_size is None:
args.world_size = args.ngpus
if args.world_size > 1:
# launch processes
train_func = dist.launcher(worker, master_ip=args.dist_addr, port=args.dist_port,
world_size=args.world_size, n_gpus=args.ngpus, rank_start=args.rank * args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
steps = args.steps
# build model
shufflenet = quantizable_snet_model if args.mode == "qat" else snet_model
model = shufflenet.__dict__[args.arch]()
if args.mode == "qat":
if args.qat_config == "sync_ema":
assert args.ngpus > 1, "sync_ema does not support ngpus={}".format(args.ngpus)
qconfig = get_qconifg(args.qat_config)
model = Q.quantize_qat(module=model, qconfig= qconfig)
model.train()
Q.enable_observer(model)
Q.enable_fake_quant(model)
# Sync parameters
if args.world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if args.world_size > 1 else None,
)
# Optimizer
params_wd = []
params_nwd = []
params_scale = []
for n, p in model.named_parameters():
if n.find("weight") >= 0 and len(p.shape) > 1:
params_wd.append(p)
elif n.find("scale") >= 0:
params_scale.append(p)
else:
params_nwd.append(p)
opt = optim.SGD(
[{"params": params_wd},
{"params": params_nwd, "weight_decay": 0},
],
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * args.world_size, # scale weight decay in "SUM" mode
)
# train and valid func
@amp.autocast(enabled=args.mode == "mp")
def train_step(image, label):
with gm:
logits = model(image)
loss = F.nn.cross_entropy(logits, label, label_smooth=0.1)
gm.backward(loss)
opt.step().clear_grad()
return loss
if args.trace:
if args.symbolic:
train_step = jit.trace(train_step, symbolic=True, sublinear_memory_config=jit.SublinearMemoryConfig(genetic_nr_iter=50), symbolic_shape=False)
else:
train_step = jit.trace(train_step, symbolic=False, symbolic_shape=False)
else:
assert not args.symbolic, "invalid arguments: trace=False, symbolic=True"
# start training
objs = AverageMeter("Loss")
clck = AverageMeter("Time")
if args.loader:
dataloader = iter(get_dataloader(args))
image,label = next(dataloader)
else:
image = np.random.randn(args.batch_size, 3, 224, 224).astype("float32")
label = np.random.randint(0, 1000, size=(args.batch_size,)).astype("int32")
# warm up
for step in range(10):
if args.loader:
image,label = next(dataloader)
if not args.preload:
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
else:
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
loss = train_step(image, label)
loss.item()
for step in range(0, steps):
t = time.time()
if args.loader:
image,label = next(dataloader)
if not args.preload:
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
else:
image = megengine.tensor(image, dtype="float32")
label = | megengine.tensor(label, dtype="int32") | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import os
import time
import numpy as np
# pylint: disable=import-error
import model as snet_model
import quantizable_model as quantizable_snet_model
import megengine
import megengine.device as device
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
import megengine.jit as jit
import megengine.amp as amp
import megengine.quantization as Q
logging = megengine.logger.get_logger()
from dataset import get_dataloader
DEFAULT_QAT_CONFIG = {
"ema":Q.ema_fakequant_qconfig,
"ema_lowbi":Q.ema_lowbit_fakequant_qconfig,
"sync_ema":Q.sync_ema_fakequant_qconfig,
"min_max":Q.min_max_fakequant_qconfig,
"tqt":Q.tqt_qconfig
}
def get_qconifg(config_name: str):
return DEFAULT_QAT_CONFIG[config_name]
def main():
parser = argparse.ArgumentParser(description="shufflenet benchmark")
parser.add_argument(
"-a",
"--arch",
default="shufflenet_v2_x2_0",
help="model architecture (default: shufflenet_v2_x1_0)",
)
parser.add_argument(
"-n",
"--ngpus",
default=1,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"-s",
"--steps",
default=200,
type=int,
help="number of train steps (default: 200)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 128)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"--lr",
metavar="LR",
default=0.001,
help="learning rate for single GPU (default: 0.001)",
)
parser.add_argument("--momentum", default=0.9, help="momentum (default: 0.9)")
parser.add_argument(
"--weight-decay", default=4e-5, help="weight decay (default: 4e-5)"
)
parser.add_argument(
"-p",
"--print-freq",
default=1,
type=int,
metavar="N",
help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp", "qat"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16\n"
"qat: quantization aware training"
)
parser.add_argument(
"--qat-config",
default="min_max",
type=str,
choices=["min_max", "ema", "ema_lowbit", "sync_ema", "tqt"],
help="quantization aware training config\n"
"min_max: min_max_fakequant_qconfig\n"
"ema: ema_fakequant_qconfig\n"
"ema_lowbit: ema_lowbit_fakequant_qconfig\n"
"sync_ema: sync_ema_fakequant_qconfig\n"
"tqt: tqt_qconfig"
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", type=int, default=0)
parser.add_argument("--world-size", type=int, default=None)
parser.add_argument("--rank", default=0)
parser.add_argument("--loader", default=False, action="store_true", help="whether use loader")
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
args = parser.parse_args()
if args.world_size is None:
args.world_size = args.ngpus
if args.world_size > 1:
# launch processes
train_func = dist.launcher(worker, master_ip=args.dist_addr, port=args.dist_port,
world_size=args.world_size, n_gpus=args.ngpus, rank_start=args.rank * args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
steps = args.steps
# build model
shufflenet = quantizable_snet_model if args.mode == "qat" else snet_model
model = shufflenet.__dict__[args.arch]()
if args.mode == "qat":
if args.qat_config == "sync_ema":
assert args.ngpus > 1, "sync_ema does not support ngpus={}".format(args.ngpus)
qconfig = get_qconifg(args.qat_config)
model = Q.quantize_qat(module=model, qconfig= qconfig)
model.train()
Q.enable_observer(model)
Q.enable_fake_quant(model)
# Sync parameters
if args.world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks= | dist.make_allreduce_cb("SUM") | megengine.distributed.make_allreduce_cb |
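# A minimal sketch of the gradient-manager setup above: with more than one process,
# a "SUM" allreduce callback accumulates gradients across ranks; on a single process
# no callback is attached. `params` stands for any iterable of trainable parameters.
import megengine.autodiff as autodiff
import megengine.distributed as dist
def make_grad_manager(params, world_size):
    cb = dist.make_allreduce_cb("SUM") if world_size > 1 else None
    return autodiff.GradManager().attach(params, callbacks=cb)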
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import os
import time
import numpy as np
# pylint: disable=import-error
import model as snet_model
import quantizable_model as quantizable_snet_model
import megengine
import megengine.device as device
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
import megengine.jit as jit
import megengine.amp as amp
import megengine.quantization as Q
logging = megengine.logger.get_logger()
from dataset import get_dataloader
DEFAULT_QAT_CONFIG = {
"ema":Q.ema_fakequant_qconfig,
"ema_lowbi":Q.ema_lowbit_fakequant_qconfig,
"sync_ema":Q.sync_ema_fakequant_qconfig,
"min_max":Q.min_max_fakequant_qconfig,
"tqt":Q.tqt_qconfig
}
def get_qconifg(config_name: str):
return DEFAULT_QAT_CONFIG[config_name]
def main():
parser = argparse.ArgumentParser(description="shufflenet benchmark")
parser.add_argument(
"-a",
"--arch",
default="shufflenet_v2_x2_0",
help="model architecture (default: shufflenet_v2_x1_0)",
)
parser.add_argument(
"-n",
"--ngpus",
default=1,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"-s",
"--steps",
default=200,
type=int,
help="number of train steps (default: 200)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 128)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"--lr",
metavar="LR",
default=0.001,
help="learning rate for single GPU (default: 0.001)",
)
parser.add_argument("--momentum", default=0.9, help="momentum (default: 0.9)")
parser.add_argument(
"--weight-decay", default=4e-5, help="weight decay (default: 4e-5)"
)
parser.add_argument(
"-p",
"--print-freq",
default=1,
type=int,
metavar="N",
help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp", "qat"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16\n"
"qat: quantization aware training"
)
parser.add_argument(
"--qat-config",
default="min_max",
type=str,
choices=["min_max", "ema", "ema_lowbit", "sync_ema", "tqt"],
help="quantization aware training config\n"
"min_max: min_max_fakequant_qconfig\n"
"ema: ema_fakequant_qconfig\n"
"ema_lowbit: ema_lowbit_fakequant_qconfig\n"
"sync_ema: sync_ema_fakequant_qconfig\n"
"tqt: tqt_qconfig"
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", type=int, default=0)
parser.add_argument("--world-size", type=int, default=None)
parser.add_argument("--rank", default=0)
parser.add_argument("--loader", default=False, action="store_true", help="whether use loader")
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
args = parser.parse_args()
if args.world_size is None:
args.world_size = args.ngpus
if args.world_size > 1:
# launch processes
train_func = dist.launcher(worker, master_ip=args.dist_addr, port=args.dist_port,
world_size=args.world_size, n_gpus=args.ngpus, rank_start=args.rank * args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
steps = args.steps
# build model
shufflenet = quantizable_snet_model if args.mode == "qat" else snet_model
model = shufflenet.__dict__[args.arch]()
if args.mode == "qat":
if args.qat_config == "sync_ema":
assert args.ngpus > 1, "sync_ema does not support ngpus={}".format(args.ngpus)
qconfig = get_qconifg(args.qat_config)
model = Q.quantize_qat(module=model, qconfig= qconfig)
model.train()
Q.enable_observer(model)
Q.enable_fake_quant(model)
# Sync parameters
if args.world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if args.world_size > 1 else None,
)
# Optimizer
params_wd = []
params_nwd = []
params_scale = []
for n, p in model.named_parameters():
if n.find("weight") >= 0 and len(p.shape) > 1:
params_wd.append(p)
elif n.find("scale") >= 0:
params_scale.append(p)
else:
params_nwd.append(p)
opt = optim.SGD(
[{"params": params_wd},
{"params": params_nwd, "weight_decay": 0},
],
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * args.world_size, # scale weight decay in "SUM" mode
)
# train and valid func
@amp.autocast(enabled=args.mode == "mp")
def train_step(image, label):
with gm:
logits = model(image)
loss = F.nn.cross_entropy(logits, label, label_smooth=0.1)
gm.backward(loss)
opt.step().clear_grad()
return loss
if args.trace:
if args.symbolic:
train_step = jit.trace(train_step, symbolic=True, sublinear_memory_config=jit.SublinearMemoryConfig(genetic_nr_iter=50), symbolic_shape=False)
else:
train_step = jit.trace(train_step, symbolic=False, symbolic_shape=False)
else:
assert not args.symbolic, "invalid arguments: trace=False, symbolic=True"
# start training
objs = AverageMeter("Loss")
clck = AverageMeter("Time")
if args.loader:
dataloader = iter(get_dataloader(args))
image,label = next(dataloader)
else:
image = np.random.randn(args.batch_size, 3, 224, 224).astype("float32")
label = np.random.randint(0, 1000, size=(args.batch_size,)).astype("int32")
# warm up
for step in range(10):
if args.loader:
image,label = next(dataloader)
if not args.preload:
image = | megengine.tensor(image, dtype="float32") | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import os
import time
import numpy as np
# pylint: disable=import-error
import model as snet_model
import quantizable_model as quantizable_snet_model
import megengine
import megengine.device as device
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
import megengine.jit as jit
import megengine.amp as amp
import megengine.quantization as Q
logging = megengine.logger.get_logger()
from dataset import get_dataloader
DEFAULT_QAT_CONFIG = {
"ema":Q.ema_fakequant_qconfig,
"ema_lowbi":Q.ema_lowbit_fakequant_qconfig,
"sync_ema":Q.sync_ema_fakequant_qconfig,
"min_max":Q.min_max_fakequant_qconfig,
"tqt":Q.tqt_qconfig
}
def get_qconifg(config_name: str):
return DEFAULT_QAT_CONFIG[config_name]
def main():
parser = argparse.ArgumentParser(description="shufflenet benchmark")
parser.add_argument(
"-a",
"--arch",
default="shufflenet_v2_x2_0",
help="model architecture (default: shufflenet_v2_x1_0)",
)
parser.add_argument(
"-n",
"--ngpus",
default=1,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"-s",
"--steps",
default=200,
type=int,
help="number of train steps (default: 200)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 128)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"--lr",
metavar="LR",
default=0.001,
help="learning rate for single GPU (default: 0.001)",
)
parser.add_argument("--momentum", default=0.9, help="momentum (default: 0.9)")
parser.add_argument(
"--weight-decay", default=4e-5, help="weight decay (default: 4e-5)"
)
parser.add_argument(
"-p",
"--print-freq",
default=1,
type=int,
metavar="N",
help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp", "qat"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16\n"
"qat: quantization aware training"
)
parser.add_argument(
"--qat-config",
default="min_max",
type=str,
choices=["min_max", "ema", "ema_lowbit", "sync_ema", "tqt"],
help="quantization aware training config\n"
"min_max: min_max_fakequant_qconfig\n"
"ema: ema_fakequant_qconfig\n"
"ema_lowbit: ema_lowbit_fakequant_qconfig\n"
"sync_ema: sync_ema_fakequant_qconfig\n"
"tqt: tqt_qconfig"
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", type=int, default=0)
parser.add_argument("--world-size", type=int, default=None)
parser.add_argument("--rank", default=0)
parser.add_argument("--loader", default=False, action="store_true", help="whether use loader")
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
args = parser.parse_args()
if args.world_size is None:
args.world_size = args.ngpus
if args.world_size > 1:
# launch processes
train_func = dist.launcher(worker, master_ip=args.dist_addr, port=args.dist_port,
world_size=args.world_size, n_gpus=args.ngpus, rank_start=args.rank * args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
steps = args.steps
# build model
shufflenet = quantizable_snet_model if args.mode == "qat" else snet_model
model = shufflenet.__dict__[args.arch]()
if args.mode == "qat":
if args.qat_config == "sync_ema":
assert args.ngpus > 1, "sync_ema does not support ngpus={}".format(args.ngpus)
qconfig = get_qconifg(args.qat_config)
model = Q.quantize_qat(module=model, qconfig= qconfig)
model.train()
Q.enable_observer(model)
Q.enable_fake_quant(model)
# Sync parameters
if args.world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if args.world_size > 1 else None,
)
# Optimizer
params_wd = []
params_nwd = []
params_scale = []
for n, p in model.named_parameters():
if n.find("weight") >= 0 and len(p.shape) > 1:
params_wd.append(p)
elif n.find("scale") >= 0:
params_scale.append(p)
else:
params_nwd.append(p)
opt = optim.SGD(
[{"params": params_wd},
{"params": params_nwd, "weight_decay": 0},
],
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * args.world_size, # scale weight decay in "SUM" mode
)
# train and valid func
@amp.autocast(enabled=args.mode == "mp")
def train_step(image, label):
with gm:
logits = model(image)
loss = F.nn.cross_entropy(logits, label, label_smooth=0.1)
gm.backward(loss)
opt.step().clear_grad()
return loss
if args.trace:
if args.symbolic:
train_step = jit.trace(train_step, symbolic=True, sublinear_memory_config=jit.SublinearMemoryConfig(genetic_nr_iter=50), symbolic_shape=False)
else:
train_step = jit.trace(train_step, symbolic=False, symbolic_shape=False)
else:
assert not args.symbolic, "invalid arguments: trace=False, symbolic=True"
# start training
objs = AverageMeter("Loss")
clck = AverageMeter("Time")
if args.loader:
dataloader = iter(get_dataloader(args))
image,label = next(dataloader)
else:
image = np.random.randn(args.batch_size, 3, 224, 224).astype("float32")
label = np.random.randint(0, 1000, size=(args.batch_size,)).astype("int32")
# warm up
for step in range(10):
if args.loader:
image,label = next(dataloader)
if not args.preload:
image = megengine.tensor(image, dtype="float32")
label = | megengine.tensor(label, dtype="int32") | megengine.tensor |
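# AverageMeter is used above but not defined in this snippet; a hypothetical minimal
# implementation consistent with the calls objs.update(loss.item()) and
# clck.update(time.time() - t) could look like this (name and fields are assumed).
class AverageMeter:
    def __init__(self, name):
        self.name = name
        self.sum = 0.0
        self.cnt = 0
    def update(self, value, n=1):
        self.sum += value * n
        self.cnt += n
    @property
    def avg(self):
        return self.sum / max(self.cnt, 1)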
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import os
import time
import numpy as np
# pylint: disable=import-error
import model as snet_model
import quantizable_model as quantizable_snet_model
import megengine
import megengine.device as device
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
import megengine.jit as jit
import megengine.amp as amp
import megengine.quantization as Q
logging = megengine.logger.get_logger()
from dataset import get_dataloader
DEFAULT_QAT_CONFIG = {
"ema":Q.ema_fakequant_qconfig,
"ema_lowbi":Q.ema_lowbit_fakequant_qconfig,
"sync_ema":Q.sync_ema_fakequant_qconfig,
"min_max":Q.min_max_fakequant_qconfig,
"tqt":Q.tqt_qconfig
}
def get_qconifg(config_name: str):
return DEFAULT_QAT_CONFIG[config_name]
def main():
parser = argparse.ArgumentParser(description="shufflenet benchmark")
parser.add_argument(
"-a",
"--arch",
default="shufflenet_v2_x2_0",
help="model architecture (default: shufflenet_v2_x1_0)",
)
parser.add_argument(
"-n",
"--ngpus",
default=1,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"-s",
"--steps",
default=200,
type=int,
help="number of train steps (default: 200)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 128)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"--lr",
metavar="LR",
default=0.001,
help="learning rate for single GPU (default: 0.001)",
)
parser.add_argument("--momentum", default=0.9, help="momentum (default: 0.9)")
parser.add_argument(
"--weight-decay", default=4e-5, help="weight decay (default: 4e-5)"
)
parser.add_argument(
"-p",
"--print-freq",
default=1,
type=int,
metavar="N",
help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp", "qat"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16\n"
"qat: quantization aware training"
)
parser.add_argument(
"--qat-config",
default="min_max",
type=str,
choices=["min_max", "ema", "ema_lowbit", "sync_ema", "tqt"],
help="quantization aware training config\n"
"min_max: min_max_fakequant_qconfig\n"
"ema: ema_fakequant_qconfig\n"
"ema_lowbit: ema_lowbit_fakequant_qconfig\n"
"sync_ema: sync_ema_fakequant_qconfig\n"
"tqt: tqt_qconfig"
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", type=int, default=0)
parser.add_argument("--world-size", type=int, default=None)
parser.add_argument("--rank", default=0)
parser.add_argument("--loader", default=False, action="store_true", help="whether use loader")
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
args = parser.parse_args()
if args.world_size is None:
args.world_size = args.ngpus
if args.world_size > 1:
# launch processes
train_func = dist.launcher(worker, master_ip=args.dist_addr, port=args.dist_port,
world_size=args.world_size, n_gpus=args.ngpus, rank_start=args.rank * args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
steps = args.steps
# build model
shufflenet = quantizable_snet_model if args.mode == "qat" else snet_model
model = shufflenet.__dict__[args.arch]()
if args.mode == "qat":
if args.qat_config == "sync_ema":
assert args.ngpus > 1, "sync_ema does not support ngpus={}".format(args.ngpus)
qconfig = get_qconifg(args.qat_config)
model = Q.quantize_qat(module=model, qconfig= qconfig)
model.train()
Q.enable_observer(model)
Q.enable_fake_quant(model)
# Sync parameters
if args.world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if args.world_size > 1 else None,
)
# Optimizer
params_wd = []
params_nwd = []
params_scale = []
for n, p in model.named_parameters():
if n.find("weight") >= 0 and len(p.shape) > 1:
params_wd.append(p)
elif n.find("scale") >= 0:
params_scale.append(p)
else:
params_nwd.append(p)
opt = optim.SGD(
[{"params": params_wd},
{"params": params_nwd, "weight_decay": 0},
],
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * args.world_size, # scale weight decay in "SUM" mode
)
# train and valid func
@amp.autocast(enabled=args.mode == "mp")
def train_step(image, label):
with gm:
logits = model(image)
loss = F.nn.cross_entropy(logits, label, label_smooth=0.1)
gm.backward(loss)
opt.step().clear_grad()
return loss
if args.trace:
if args.symbolic:
train_step = jit.trace(train_step, symbolic=True, sublinear_memory_config=jit.SublinearMemoryConfig(genetic_nr_iter=50), symbolic_shape=False)
else:
train_step = jit.trace(train_step, symbolic=False, symbolic_shape=False)
else:
assert not args.symbolic, "invalid arguments: trace=False, symbolic=True"
# start training
objs = AverageMeter("Loss")
clck = AverageMeter("Time")
if args.loader:
dataloader = iter(get_dataloader(args))
image,label = next(dataloader)
else:
image = np.random.randn(args.batch_size, 3, 224, 224).astype("float32")
label = np.random.randint(0, 1000, size=(args.batch_size,)).astype("int32")
# warm up
for step in range(10):
if args.loader:
image,label = next(dataloader)
if not args.preload:
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
else:
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
loss = train_step(image, label)
loss.item()
for step in range(0, steps):
t = time.time()
if args.loader:
image,label = next(dataloader)
if not args.preload:
image = | megengine.tensor(image, dtype="float32") | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import os
import time
import numpy as np
# pylint: disable=import-error
import model as snet_model
import quantizable_model as quantizable_snet_model
import megengine
import megengine.device as device
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
import megengine.jit as jit
import megengine.amp as amp
import megengine.quantization as Q
logging = megengine.logger.get_logger()
from dataset import get_dataloader
DEFAULT_QAT_CONFIG = {
"ema":Q.ema_fakequant_qconfig,
"ema_lowbi":Q.ema_lowbit_fakequant_qconfig,
"sync_ema":Q.sync_ema_fakequant_qconfig,
"min_max":Q.min_max_fakequant_qconfig,
"tqt":Q.tqt_qconfig
}
def get_qconifg(config_name: str):
return DEFAULT_QAT_CONFIG[config_name]
def main():
parser = argparse.ArgumentParser(description="shufflenet benchmark")
parser.add_argument(
"-a",
"--arch",
default="shufflenet_v2_x2_0",
help="model architecture (default: shufflenet_v2_x1_0)",
)
parser.add_argument(
"-n",
"--ngpus",
default=1,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"-s",
"--steps",
default=200,
type=int,
help="number of train steps (default: 200)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 128)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"--lr",
metavar="LR",
default=0.001,
help="learning rate for single GPU (default: 0.001)",
)
parser.add_argument("--momentum", default=0.9, help="momentum (default: 0.9)")
parser.add_argument(
"--weight-decay", default=4e-5, help="weight decay (default: 4e-5)"
)
parser.add_argument(
"-p",
"--print-freq",
default=1,
type=int,
metavar="N",
help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp", "qat"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16\n"
"qat: quantization aware training"
)
parser.add_argument(
"--qat-config",
default="min_max",
type=str,
choices=["min_max", "ema", "ema_lowbit", "sync_ema", "tqt"],
help="quantization aware training config\n"
"min_max: min_max_fakequant_qconfig\n"
"ema: ema_fakequant_qconfig\n"
"ema_lowbit: ema_lowbit_fakequant_qconfig\n"
"sync_ema: sync_ema_fakequant_qconfig\n"
"tqt: tqt_qconfig"
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", type=int, default=0)
parser.add_argument("--world-size", type=int, default=None)
parser.add_argument("--rank", default=0)
parser.add_argument("--loader", default=False, action="store_true", help="whether use loader")
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
args = parser.parse_args()
if args.world_size is None:
args.world_size = args.ngpus
if args.world_size > 1:
# launch processes
train_func = dist.launcher(worker, master_ip=args.dist_addr, port=args.dist_port,
world_size=args.world_size, n_gpus=args.ngpus, rank_start=args.rank * args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
steps = args.steps
# build model
shufflenet = quantizable_snet_model if args.mode == "qat" else snet_model
model = shufflenet.__dict__[args.arch]()
if args.mode == "qat":
if args.qat_config == "sync_ema":
assert args.ngpus > 1, "sync_ema does not support ngpus={}".format(args.ngpus)
qconfig = get_qconifg(args.qat_config)
model = Q.quantize_qat(module=model, qconfig= qconfig)
model.train()
Q.enable_observer(model)
Q.enable_fake_quant(model)
# Sync parameters
if args.world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if args.world_size > 1 else None,
)
# Optimizer
params_wd = []
params_nwd = []
params_scale = []
for n, p in model.named_parameters():
if n.find("weight") >= 0 and len(p.shape) > 1:
params_wd.append(p)
elif n.find("scale") >= 0:
params_scale.append(p)
else:
params_nwd.append(p)
opt = optim.SGD(
[{"params": params_wd},
{"params": params_nwd, "weight_decay": 0},
],
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * args.world_size, # scale weight decay in "SUM" mode
)
# train and valid func
@amp.autocast(enabled=args.mode == "mp")
def train_step(image, label):
with gm:
logits = model(image)
loss = F.nn.cross_entropy(logits, label, label_smooth=0.1)
gm.backward(loss)
opt.step().clear_grad()
return loss
if args.trace:
if args.symbolic:
train_step = jit.trace(train_step, symbolic=True, sublinear_memory_config=jit.SublinearMemoryConfig(genetic_nr_iter=50), symbolic_shape=False)
else:
train_step = jit.trace(train_step, symbolic=False, symbolic_shape=False)
else:
assert not args.symbolic, "invalid arguments: trace=False, symbolic=True"
# start training
objs = AverageMeter("Loss")
clck = AverageMeter("Time")
if args.loader:
dataloader = iter(get_dataloader(args))
image,label = next(dataloader)
else:
image = np.random.randn(args.batch_size, 3, 224, 224).astype("float32")
label = np.random.randint(0, 1000, size=(args.batch_size,)).astype("int32")
# warm up
for step in range(10):
if args.loader:
image,label = next(dataloader)
if not args.preload:
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
else:
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
loss = train_step(image, label)
loss.item()
for step in range(0, steps):
t = time.time()
if args.loader:
image,label = next(dataloader)
if not args.preload:
image = megengine.tensor(image, dtype="float32")
label = | megengine.tensor(label, dtype="int32") | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import os
import time
import numpy as np
# pylint: disable=import-error
import model as snet_model
import quantizable_model as quantizable_snet_model
import megengine
import megengine.device as device
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
import megengine.jit as jit
import megengine.amp as amp
import megengine.quantization as Q
logging = megengine.logger.get_logger()
from dataset import get_dataloader
DEFAULT_QAT_CONFIG = {
"ema":Q.ema_fakequant_qconfig,
"ema_lowbi":Q.ema_lowbit_fakequant_qconfig,
"sync_ema":Q.sync_ema_fakequant_qconfig,
"min_max":Q.min_max_fakequant_qconfig,
"tqt":Q.tqt_qconfig
}
def get_qconifg(config_name: str):
return DEFAULT_QAT_CONFIG[config_name]
def main():
parser = argparse.ArgumentParser(description="shufflenet benchmark")
parser.add_argument(
"-a",
"--arch",
default="shufflenet_v2_x2_0",
help="model architecture (default: shufflenet_v2_x1_0)",
)
parser.add_argument(
"-n",
"--ngpus",
default=1,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"-s",
"--steps",
default=200,
type=int,
help="number of train steps (default: 200)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 128)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"--lr",
metavar="LR",
default=0.001,
help="learning rate for single GPU (default: 0.001)",
)
parser.add_argument("--momentum", default=0.9, help="momentum (default: 0.9)")
parser.add_argument(
"--weight-decay", default=4e-5, help="weight decay (default: 4e-5)"
)
parser.add_argument(
"-p",
"--print-freq",
default=1,
type=int,
metavar="N",
help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp", "qat"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16\n"
"qat: quantization aware training"
)
parser.add_argument(
"--qat-config",
default="min_max",
type=str,
choices=["min_max", "ema", "ema_lowbit", "sync_ema", "tqt"],
help="quantization aware training config\n"
"min_max: min_max_fakequant_qconfig\n"
"ema: ema_fakequant_qconfig\n"
"ema_lowbit: ema_lowbit_fakequant_qconfig\n"
"sync_ema: sync_ema_fakequant_qconfig\n"
"tqt: tqt_qconfig"
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", type=int, default=0)
parser.add_argument("--world-size", type=int, default=None)
parser.add_argument("--rank", default=0)
parser.add_argument("--loader", default=False, action="store_true", help="whether use loader")
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
args = parser.parse_args()
if args.world_size is None:
args.world_size = args.ngpus
if args.world_size > 1:
# launch processes
train_func = dist.launcher(worker, master_ip=args.dist_addr, port=args.dist_port,
world_size=args.world_size, n_gpus=args.ngpus, rank_start=args.rank * args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
steps = args.steps
# build model
shufflenet = quantizable_snet_model if args.mode == "qat" else snet_model
model = shufflenet.__dict__[args.arch]()
if args.mode == "qat":
if args.qat_config == "sync_ema":
assert args.ngpus > 1, "sync_ema does not support ngpus={}".format(args.ngpus)
qconfig = get_qconifg(args.qat_config)
model = Q.quantize_qat(module=model, qconfig= qconfig)
model.train()
Q.enable_observer(model)
Q.enable_fake_quant(model)
# Sync parameters
if args.world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if args.world_size > 1 else None,
)
# Optimizer
params_wd = []
params_nwd = []
params_scale = []
for n, p in model.named_parameters():
if n.find("weight") >= 0 and len(p.shape) > 1:
params_wd.append(p)
elif n.find("scale") >= 0:
params_scale.append(p)
else:
params_nwd.append(p)
opt = optim.SGD(
[{"params": params_wd},
{"params": params_nwd, "weight_decay": 0},
],
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * args.world_size, # scale weight decay in "SUM" mode
)
# train and valid func
@amp.autocast(enabled=args.mode == "mp")
def train_step(image, label):
with gm:
logits = model(image)
loss = F.nn.cross_entropy(logits, label, label_smooth=0.1)
gm.backward(loss)
opt.step().clear_grad()
return loss
if args.trace:
if args.symbolic:
train_step = jit.trace(train_step, symbolic=True, sublinear_memory_config=jit.SublinearMemoryConfig(genetic_nr_iter=50), symbolic_shape=False)
else:
train_step = jit.trace(train_step, symbolic=False, symbolic_shape=False)
else:
assert not args.symbolic, "invalid arguments: trace=False, symbolic=True"
# start training
objs = AverageMeter("Loss")
clck = AverageMeter("Time")
if args.loader:
dataloader = iter(get_dataloader(args))
image,label = next(dataloader)
else:
image = np.random.randn(args.batch_size, 3, 224, 224).astype("float32")
label = np.random.randint(0, 1000, size=(args.batch_size,)).astype("int32")
# warm up
for step in range(10):
if args.loader:
image,label = next(dataloader)
if not args.preload:
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
else:
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
loss = train_step(image, label)
loss.item()
for step in range(0, steps):
t = time.time()
if args.loader:
image,label = next(dataloader)
if not args.preload:
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
else:
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
loss = train_step(image, label)
objs.update(loss.item())
clck.update(time.time() - t)
if step % args.print_freq == 0 and | dist.get_rank() | megengine.distributed.get_rank |
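# A minimal sketch of the rank-0 logging gate the loop above is building: every
# process executes the training loop, so periodic prints are restricted to rank 0.
import megengine.distributed as dist
def is_chief() -> bool:
    return dist.get_rank() == 0  # rank 0 conventionally owns logging/checkpointing
if is_chief():
    print("only rank 0 logs here")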
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import os
import time
import numpy as np
# pylint: disable=import-error
import model as snet_model
import quantizable_model as quantizable_snet_model
import megengine
import megengine.device as device
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
import megengine.jit as jit
import megengine.amp as amp
import megengine.quantization as Q
logging = megengine.logger.get_logger()
from dataset import get_dataloader
DEFAULT_QAT_CONFIG = {
"ema":Q.ema_fakequant_qconfig,
"ema_lowbi":Q.ema_lowbit_fakequant_qconfig,
"sync_ema":Q.sync_ema_fakequant_qconfig,
"min_max":Q.min_max_fakequant_qconfig,
"tqt":Q.tqt_qconfig
}
def get_qconifg(config_name: str):
return DEFAULT_QAT_CONFIG[config_name]
def main():
parser = argparse.ArgumentParser(description="shufflenet benchmark")
parser.add_argument(
"-a",
"--arch",
default="shufflenet_v2_x2_0",
help="model architecture (default: shufflenet_v2_x1_0)",
)
parser.add_argument(
"-n",
"--ngpus",
default=1,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"-s",
"--steps",
default=200,
type=int,
help="number of train steps (default: 200)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 128)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"--lr",
metavar="LR",
default=0.001,
help="learning rate for single GPU (default: 0.001)",
)
parser.add_argument("--momentum", default=0.9, help="momentum (default: 0.9)")
parser.add_argument(
"--weight-decay", default=4e-5, help="weight decay (default: 4e-5)"
)
parser.add_argument(
"-p",
"--print-freq",
default=1,
type=int,
metavar="N",
help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp", "qat"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16\n"
"qat: quantization aware training"
)
parser.add_argument(
"--qat-config",
default="min_max",
type=str,
choices=["min_max", "ema", "ema_lowbit", "sync_ema", "tqt"],
help="quantization aware training config\n"
"min_max: min_max_fakequant_qconfig\n"
"ema: ema_fakequant_qconfig\n"
"ema_lowbit: ema_lowbit_fakequant_qconfig\n"
"sync_ema: sync_ema_fakequant_qconfig\n"
"tqt: tqt_qconfig"
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", type=int, default=0)
parser.add_argument("--world-size", type=int, default=None)
parser.add_argument("--rank", default=0)
parser.add_argument("--loader", default=False, action="store_true", help="whether use loader")
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
args = parser.parse_args()
if args.world_size is None:
args.world_size = args.ngpus
if args.world_size > 1:
# launch processes
train_func = dist.launcher(worker, master_ip=args.dist_addr, port=args.dist_port,
world_size=args.world_size, n_gpus=args.ngpus, rank_start=args.rank * args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
steps = args.steps
# build model
shufflenet = quantizable_snet_model if args.mode == "qat" else snet_model
model = shufflenet.__dict__[args.arch]()
if args.mode == "qat":
if args.qat_config == "sync_ema":
assert args.ngpus > 1, "sync_ema does not support ngpus={}".format(args.ngpus)
qconfig = get_qconifg(args.qat_config)
model = Q.quantize_qat(module=model, qconfig= qconfig)
model.train()
Q.enable_observer(model)
Q.enable_fake_quant(model)
# Sync parameters
if args.world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if args.world_size > 1 else None,
)
# Optimizer
params_wd = []
params_nwd = []
params_scale = []
for n, p in model.named_parameters():
if n.find("weight") >= 0 and len(p.shape) > 1:
params_wd.append(p)
elif n.find("scale") >= 0:
params_scale.append(p)
else:
params_nwd.append(p)
opt = optim.SGD(
[{"params": params_wd},
{"params": params_nwd, "weight_decay": 0},
],
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * args.world_size, # scale weight decay in "SUM" mode
)
# train and valid func
@amp.autocast(enabled=args.mode == "mp")
def train_step(image, label):
with gm:
logits = model(image)
loss = F.nn.cross_entropy(logits, label, label_smooth=0.1)
gm.backward(loss)
opt.step().clear_grad()
return loss
if args.trace:
if args.symbolic:
train_step = jit.trace(train_step, symbolic=True, sublinear_memory_config= | jit.SublinearMemoryConfig(genetic_nr_iter=50) | megengine.jit.SublinearMemoryConfig |
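# A minimal sketch of the two tracing variants above: symbolic tracing with the
# sublinear-memory config (genetic_nr_iter=50, as configured in the benchmark)
# versus plain tracing; both keep symbolic_shape=False.
import megengine.jit as jit
def wrap_train_step(fn, symbolic):
    if symbolic:
        cfg = jit.SublinearMemoryConfig(genetic_nr_iter=50)
        return jit.trace(fn, symbolic=True, sublinear_memory_config=cfg, symbolic_shape=False)
    return jit.trace(fn, symbolic=False, symbolic_shape=False)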
import os.path as osp
from abc import ABCMeta, abstractmethod
import megengine as mge
import megengine.distributed as dist
from megengine.optimizer.optimizer import Optimizer
from megengine.module import Module
from edit.utils import mkdir_or_exist, build_from_cfg, get_root_logger
from ..hook import Hook, HOOKS, get_priority
module_ckpt_suffix = "_module.mge"
optim_ckpt_suffix = "_optim.mge"
class BaseRunner(metaclass=ABCMeta):
"""The base class of Runner, a training helper for Mge.
All subclasses should implement the following APIs:
- ``run()``
- ``train()``
- ``test()``
- ``save_checkpoint()``
- ``resume()``
Args:
model (:obj:`megengine.module.Module`): The model to be run.
optimizers_cfg (dict): optimizer configs
work_dir (str, optional): The working directory to save checkpoints and logs. Defaults to None.
"""
def __init__(self, model, optimizers_cfg=None, work_dir=None):
assert hasattr(model, 'train_step')
assert hasattr(model, 'test_step')
assert hasattr(model, 'create_gradmanager_and_optimizers')
assert hasattr(model, 'cal_for_eval')
self.model = model
self.optimizers_cfg = optimizers_cfg
self.logger = get_root_logger()
self.work_dir = work_dir
assert self.work_dir is not None
# get model name from the model class
self._model_name = self.model.__class__.__name__
self.mode = None
self._hooks = []
self._epoch = 0
self._iter = 0
self._inner_iter = 0
self._max_epochs = 0
self._max_iters = 0
@property
def model_name(self):
"""str: Name of the model, usually the module class name."""
return self._model_name
@property
def hooks(self):
"""list[:obj:`Hook`]: A list of registered hooks."""
return self._hooks
@property
def epoch(self):
"""int: Current epoch."""
return self._epoch
@property
def iter(self):
"""int: Current iteration."""
return self._iter
@property
def inner_iter(self):
"""int: Iteration in an epoch."""
return self._inner_iter
@property
def max_epochs(self):
"""int: Maximum training epochs."""
return self._max_epochs
@property
def max_iters(self):
"""int: Maximum training iterations."""
return self._max_iters
@abstractmethod
def train(self, data_loader):
pass
@abstractmethod
def test(self, data_loader):
pass
@abstractmethod
def run(self, data_loaders, workflow, max_iters):
pass
@abstractmethod
def save_checkpoint(self, out_dir, create_symlink=True):
pass
@abstractmethod
def resume(self, path2checkpoint):
pass
@abstractmethod
def register_training_hooks(self, lr_config, checkpoint_config, log_config):
"""Register default hooks for training.
Default hooks include:
- LrUpdaterHook
- CheckpointSaverHook
- log_config
"""
pass
def create_gradmanager_and_optimizers(self):
self.model.create_gradmanager_and_optimizers(self.optimizers_cfg)
def sync_model_params(self):
if | dist.is_distributed() | megengine.distributed.is_distributed |
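# A minimal sketch of the parameter-sync pattern sync_model_params implements above:
# broadcast the parameters from rank 0 to the whole group only when a distributed
# group has actually been initialized.
import megengine.distributed as dist
def sync_params(module):
    if dist.is_distributed():
        dist.bcast_list_(module.parameters(), dist.WORLD)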
import os.path as osp
from abc import ABCMeta, abstractmethod
import megengine as mge
import megengine.distributed as dist
from megengine.optimizer.optimizer import Optimizer
from megengine.module import Module
from edit.utils import mkdir_or_exist, build_from_cfg, get_root_logger
from ..hook import Hook, HOOKS, get_priority
module_ckpt_suffix = "_module.mge"
optim_ckpt_suffix = "_optim.mge"
class BaseRunner(metaclass=ABCMeta):
"""The base class of Runner, a training helper for Mge.
All subclasses should implement the following APIs:
- ``run()``
- ``train()``
- ``test()``
- ``save_checkpoint()``
- ``resume()``
Args:
model (:obj:`megengine.module.Module`): The model to be run.
optimizers_cfg (dict): optimizer configs
work_dir (str, optional): The working directory to save checkpoints and logs. Defaults to None.
"""
def __init__(self, model, optimizers_cfg=None, work_dir=None):
assert hasattr(model, 'train_step')
assert hasattr(model, 'test_step')
assert hasattr(model, 'create_gradmanager_and_optimizers')
assert hasattr(model, 'cal_for_eval')
self.model = model
self.optimizers_cfg = optimizers_cfg
self.logger = get_root_logger()
self.work_dir = work_dir
assert self.work_dir is not None
# get model name from the model class
self._model_name = self.model.__class__.__name__
self.mode = None
self._hooks = []
self._epoch = 0
self._iter = 0
self._inner_iter = 0
self._max_epochs = 0
self._max_iters = 0
@property
def model_name(self):
"""str: Name of the model, usually the module class name."""
return self._model_name
@property
def hooks(self):
"""list[:obj:`Hook`]: A list of registered hooks."""
return self._hooks
@property
def epoch(self):
"""int: Current epoch."""
return self._epoch
@property
def iter(self):
"""int: Current iteration."""
return self._iter
@property
def inner_iter(self):
"""int: Iteration in an epoch."""
return self._inner_iter
@property
def max_epochs(self):
"""int: Maximum training epochs."""
return self._max_epochs
@property
def max_iters(self):
"""int: Maximum training iterations."""
return self._max_iters
@abstractmethod
def train(self, data_loader):
pass
@abstractmethod
def test(self, data_loader):
pass
@abstractmethod
def run(self, data_loaders, workflow, max_iters):
pass
@abstractmethod
def save_checkpoint(self, out_dir, create_symlink=True):
pass
@abstractmethod
def resume(self, path2checkpoint):
pass
@abstractmethod
def register_training_hooks(self, lr_config, checkpoint_config, log_config):
"""Register default hooks for training.
Default hooks include:
- LrUpdaterHook
- CheckpointSaverHook
- log_config
"""
pass
def create_gradmanager_and_optimizers(self):
self.model.create_gradmanager_and_optimizers(self.optimizers_cfg)
def sync_model_params(self):
if dist.is_distributed():
self.logger.info("syncing the model's parameters...")
dist.bcast_list_(self.model.parameters(), dist.WORLD)
else:
pass # do nothing
def current_lr(self):
"""Get current learning rates.
Returns:
list[float] | dict[str, list[float]]: Current learning rates of all
param groups. If the runner has a dict of optimizers, this
method will return a dict.
"""
raise NotImplementedError("")
# if isinstance(self.optimizer, Optimizer):
# lr = [group['lr'] for group in self.optimizer.param_groups]
# elif isinstance(self.optimizer, dict):
# lr = dict()
# for name, optim in self.optimizer.items():
# lr[name] = [group['lr'] for group in optim.param_groups]
# else:
# raise RuntimeError('lr is not applicable because optimizer does not exist.')
# return lr
def current_momentum(self):
"""Get current momentums.
Returns:
list[float] | dict[str, list[float]]: Current momentums of all
param groups. If the runner has a dict of optimizers, this
method will return a dict.
"""
raise NotImplementedError("")
# def _get_momentum(optimizer):
# momentums = []
# for group in optimizer.param_groups:
# if 'momentum' in group.keys():
# momentums.append(group['momentum'])
# elif 'betas' in group.keys():
# momentums.append(group['betas'][0])
# else:
# momentums.append(0)
# return momentums
#
# if self.optimizer is None:
# raise RuntimeError('momentum is not applicable because optimizer does not exist.')
# elif isinstance(self.optimizer, Optimizer):
# momentums = _get_momentum(self.optimizer)
# elif isinstance(self.optimizer, dict):
# momentums = dict()
# for name, optim in self.optimizer.items():
# momentums[name] = _get_momentum(optim)
# return momentums
def register_hook(self, hook, priority='NORMAL'):
"""Register a hook into the hook list.
The hook will be inserted into a priority queue, with the specified
priority (See :class:`Priority` for details of priorities).
        Hooks with the same priority are triggered in the order in which they are registered.
Args:
hook (:obj:`Hook`): The hook to be registered.
priority (int or str or :obj:`Priority`): Hook priority.
Lower value means higher priority.
"""
assert isinstance(hook, Hook)
if hasattr(hook, 'priority'):
raise ValueError('"priority" is a reserved attribute for hook')
priority = get_priority(priority)
hook.priority = priority
# insert the hook to a sorted list
inserted = False
for i in range(len(self._hooks) - 1, -1, -1):
if priority >= self._hooks[i].priority:
self._hooks.insert(i + 1, hook)
inserted = True
break
if not inserted:
self._hooks.insert(0, hook)
def call_hook(self, fn_name):
"""Call all hooks.
Args:
fn_name (str): The function name in each hook to be called, such as
"before_train_epoch".
"""
for hook in self._hooks:
getattr(hook, fn_name)(self)
def load_checkpoint(self, path2checkpoint, load_optim=True):
"""
:param path2checkpoint: e.g. workdirs/xxxxx/checkpoint/epoch_10
:return: dict
"""
        assert osp.exists(path2checkpoint), "{} does not exist".format(path2checkpoint)
dirname = osp.split(path2checkpoint)[-1]
epoch, nums = dirname.split("_")
assert epoch in ("epoch", )
self.logger.info('load checkpoint from {}'.format(path2checkpoint))
        # iterate over all submodules of the model that have an optimizer configured, and load their weights
res = dict()
res['nums'] = int(nums)
for submodule_name in self.optimizers_cfg.keys():
submodule = getattr(self.model, submodule_name, None)
assert submodule is not None, "model should have submodule {}".format(submodule_name)
assert isinstance(submodule, Module), "submodule should be instance of mge.module.Module"
if | dist.get_rank() | megengine.distributed.get_rank |
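# A sketch of the per-submodule checkpoint loading implied by load_checkpoint above:
# one "<submodule>_module.mge" file per optimizer-configured submodule. The helper
# name and the rank-0-only log message are illustrative assumptions.
import os.path as osp
import megengine as mge
import megengine.distributed as dist

def load_submodule_weights(model, optimizers_cfg, path2checkpoint):
    for name in optimizers_cfg.keys():
        submodule = getattr(model, name)
        state = mge.load(osp.join(path2checkpoint, name + module_ckpt_suffix))
        submodule.load_state_dict(state)
        if dist.get_rank() == 0:
            print("loaded weights for submodule {}".format(name))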
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
"""VGG Series
VGG: `"Very Deep Convolutional Networks for Large-Scale Image Recognition"
<https://arxiv.org/abs/1409.1556>`_
"""
from typing import Any, Mapping, Sequence
import megengine as mge
import megengine.hub as hub
import megengine.module as M
from basecls.layers import activation, build_head, conv2d, init_weights, norm2d
from basecls.utils import recursive_update, registers
__all__ = ["VGGStage", "VGG"]
class VGGStage(M.Module):
"""VGG stage (sequence of blocks w/ the same output shape)."""
def __init__(self, w_in: int, w_out: int, depth: int, norm_name: str, act_name: str):
super().__init__()
self.depth = depth
for i in range(depth):
block = M.Sequential(
conv2d(w_in, w_out, 3), norm2d(norm_name, w_out), activation(act_name)
)
setattr(self, f"b{i + 1}", block)
w_in = w_out
self.max_pool = M.MaxPool2d(kernel_size=2, stride=2)
def __len__(self):
return self.depth
def forward(self, x: mge.Tensor) -> mge.Tensor:
for i in range(self.depth):
block = getattr(self, f"b{i + 1}")
x = block(x)
x = self.max_pool(x)
return x
@registers.models.register()
class VGG(M.Module):
"""VGG model.
Args:
depths: depth for each stage (number of blocks in the stage).
widths: width for each stage (width of each block in the stage).
norm_name: normalization function. Default: ``None``
act_name: activation function. Default: ``"relu"``
head: head args. Default: ``None``
"""
def __init__(
self,
depths: Sequence[int],
widths: Sequence[int],
norm_name: str = None,
act_name: str = "relu",
head: Mapping[str, Any] = None,
):
super().__init__()
self.depths = depths
model_args = [depths, widths]
prev_w = 3
for i, (d, w) in enumerate(zip(*model_args)):
stage = VGGStage(prev_w, w, d, norm_name, act_name)
setattr(self, f"s{i + 1}", stage)
prev_w = w
self.head = build_head(prev_w, head, None, act_name)
self.apply(init_weights)
def forward(self, x: mge.Tensor) -> mge.Tensor:
for i in range(len(self.depths)):
stage = getattr(self, f"s{i + 1}")
x = stage(x)
if getattr(self, "head", None) is not None:
x = self.head(x)
return x
def _build_vgg(**kwargs):
model_args = dict(head=dict(name="VGGHead", dropout_prob=0.5))
recursive_update(model_args, kwargs)
return VGG(**model_args)
@registers.models.register()
@ | hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg11/vgg11.pkl") | megengine.hub.pretrained |
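# A minimal sketch of the hub.pretrained pattern used for the builders in this file:
# the decorator attaches a weight URL to a builder function. The toy builder is a
# placeholder, and the behaviour of `pretrained=True` (download plus load_state_dict)
# is an assumption to verify against your MegEngine version.
import megengine.hub as hub
import megengine.module as M

@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg11/vgg11.pkl")
def toy_builder():
    return M.Linear(8, 2)

net = toy_builder()                    # plain construction, the URL is not touched
# net = toy_builder(pretrained=True)   # assumed to fetch and load the weights
# The registered vgg11/vgg13/... builders defined in this file follow the same pattern.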
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
"""VGG Series
VGG: `"Very Deep Convolutional Networks for Large-Scale Image Recognition"
<https://arxiv.org/abs/1409.1556>`_
"""
from typing import Any, Mapping, Sequence
import megengine as mge
import megengine.hub as hub
import megengine.module as M
from basecls.layers import activation, build_head, conv2d, init_weights, norm2d
from basecls.utils import recursive_update, registers
__all__ = ["VGGStage", "VGG"]
class VGGStage(M.Module):
"""VGG stage (sequence of blocks w/ the same output shape)."""
def __init__(self, w_in: int, w_out: int, depth: int, norm_name: str, act_name: str):
super().__init__()
self.depth = depth
for i in range(depth):
block = M.Sequential(
conv2d(w_in, w_out, 3), norm2d(norm_name, w_out), activation(act_name)
)
setattr(self, f"b{i + 1}", block)
w_in = w_out
self.max_pool = M.MaxPool2d(kernel_size=2, stride=2)
def __len__(self):
return self.depth
def forward(self, x: mge.Tensor) -> mge.Tensor:
for i in range(self.depth):
block = getattr(self, f"b{i + 1}")
x = block(x)
x = self.max_pool(x)
return x
@registers.models.register()
class VGG(M.Module):
"""VGG model.
Args:
depths: depth for each stage (number of blocks in the stage).
widths: width for each stage (width of each block in the stage).
norm_name: normalization function. Default: ``None``
act_name: activation function. Default: ``"relu"``
head: head args. Default: ``None``
"""
def __init__(
self,
depths: Sequence[int],
widths: Sequence[int],
norm_name: str = None,
act_name: str = "relu",
head: Mapping[str, Any] = None,
):
super().__init__()
self.depths = depths
model_args = [depths, widths]
prev_w = 3
for i, (d, w) in enumerate(zip(*model_args)):
stage = VGGStage(prev_w, w, d, norm_name, act_name)
setattr(self, f"s{i + 1}", stage)
prev_w = w
self.head = build_head(prev_w, head, None, act_name)
self.apply(init_weights)
def forward(self, x: mge.Tensor) -> mge.Tensor:
for i in range(len(self.depths)):
stage = getattr(self, f"s{i + 1}")
x = stage(x)
if getattr(self, "head", None) is not None:
x = self.head(x)
return x
def _build_vgg(**kwargs):
model_args = dict(head=dict(name="VGGHead", dropout_prob=0.5))
recursive_update(model_args, kwargs)
return VGG(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg11/vgg11.pkl")
def vgg11(**kwargs):
model_args = dict(depths=[1, 1, 2, 2, 2], widths=[64, 128, 256, 512, 512])
recursive_update(model_args, kwargs)
return _build_vgg(**model_args)
@registers.models.register()
@ | hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg11_bn/vgg11_bn.pkl") | megengine.hub.pretrained |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
"""VGG Series
VGG: `"Very Deep Convolutional Networks for Large-Scale Image Recognition"
<https://arxiv.org/abs/1409.1556>`_
"""
from typing import Any, Mapping, Sequence
import megengine as mge
import megengine.hub as hub
import megengine.module as M
from basecls.layers import activation, build_head, conv2d, init_weights, norm2d
from basecls.utils import recursive_update, registers
__all__ = ["VGGStage", "VGG"]
class VGGStage(M.Module):
"""VGG stage (sequence of blocks w/ the same output shape)."""
def __init__(self, w_in: int, w_out: int, depth: int, norm_name: str, act_name: str):
super().__init__()
self.depth = depth
for i in range(depth):
block = M.Sequential(
conv2d(w_in, w_out, 3), norm2d(norm_name, w_out), activation(act_name)
)
setattr(self, f"b{i + 1}", block)
w_in = w_out
self.max_pool = M.MaxPool2d(kernel_size=2, stride=2)
def __len__(self):
return self.depth
def forward(self, x: mge.Tensor) -> mge.Tensor:
for i in range(self.depth):
block = getattr(self, f"b{i + 1}")
x = block(x)
x = self.max_pool(x)
return x
@registers.models.register()
class VGG(M.Module):
"""VGG model.
Args:
depths: depth for each stage (number of blocks in the stage).
widths: width for each stage (width of each block in the stage).
norm_name: normalization function. Default: ``None``
act_name: activation function. Default: ``"relu"``
head: head args. Default: ``None``
"""
def __init__(
self,
depths: Sequence[int],
widths: Sequence[int],
norm_name: str = None,
act_name: str = "relu",
head: Mapping[str, Any] = None,
):
super().__init__()
self.depths = depths
model_args = [depths, widths]
prev_w = 3
for i, (d, w) in enumerate(zip(*model_args)):
stage = VGGStage(prev_w, w, d, norm_name, act_name)
setattr(self, f"s{i + 1}", stage)
prev_w = w
self.head = build_head(prev_w, head, None, act_name)
self.apply(init_weights)
def forward(self, x: mge.Tensor) -> mge.Tensor:
for i in range(len(self.depths)):
stage = getattr(self, f"s{i + 1}")
x = stage(x)
if getattr(self, "head", None) is not None:
x = self.head(x)
return x
def _build_vgg(**kwargs):
model_args = dict(head=dict(name="VGGHead", dropout_prob=0.5))
recursive_update(model_args, kwargs)
return VGG(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg11/vgg11.pkl")
def vgg11(**kwargs):
model_args = dict(depths=[1, 1, 2, 2, 2], widths=[64, 128, 256, 512, 512])
recursive_update(model_args, kwargs)
return _build_vgg(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg11_bn/vgg11_bn.pkl")
def vgg11_bn(**kwargs):
model_args = dict(norm_name="BN")
recursive_update(model_args, kwargs)
return vgg11(**model_args)
@registers.models.register()
@ | hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg13/vgg13.pkl") | megengine.hub.pretrained |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
"""VGG Series
VGG: `"Very Deep Convolutional Networks for Large-Scale Image Recognition"
<https://arxiv.org/abs/1409.1556>`_
"""
from typing import Any, Mapping, Sequence
import megengine as mge
import megengine.hub as hub
import megengine.module as M
from basecls.layers import activation, build_head, conv2d, init_weights, norm2d
from basecls.utils import recursive_update, registers
__all__ = ["VGGStage", "VGG"]
class VGGStage(M.Module):
"""VGG stage (sequence of blocks w/ the same output shape)."""
def __init__(self, w_in: int, w_out: int, depth: int, norm_name: str, act_name: str):
super().__init__()
self.depth = depth
for i in range(depth):
block = M.Sequential(
conv2d(w_in, w_out, 3), norm2d(norm_name, w_out), activation(act_name)
)
setattr(self, f"b{i + 1}", block)
w_in = w_out
self.max_pool = M.MaxPool2d(kernel_size=2, stride=2)
def __len__(self):
return self.depth
def forward(self, x: mge.Tensor) -> mge.Tensor:
for i in range(self.depth):
block = getattr(self, f"b{i + 1}")
x = block(x)
x = self.max_pool(x)
return x
@registers.models.register()
class VGG(M.Module):
"""VGG model.
Args:
depths: depth for each stage (number of blocks in the stage).
widths: width for each stage (width of each block in the stage).
norm_name: normalization function. Default: ``None``
act_name: activation function. Default: ``"relu"``
head: head args. Default: ``None``
"""
def __init__(
self,
depths: Sequence[int],
widths: Sequence[int],
norm_name: str = None,
act_name: str = "relu",
head: Mapping[str, Any] = None,
):
super().__init__()
self.depths = depths
model_args = [depths, widths]
prev_w = 3
for i, (d, w) in enumerate(zip(*model_args)):
stage = VGGStage(prev_w, w, d, norm_name, act_name)
setattr(self, f"s{i + 1}", stage)
prev_w = w
self.head = build_head(prev_w, head, None, act_name)
self.apply(init_weights)
def forward(self, x: mge.Tensor) -> mge.Tensor:
for i in range(len(self.depths)):
stage = getattr(self, f"s{i + 1}")
x = stage(x)
if getattr(self, "head", None) is not None:
x = self.head(x)
return x
def _build_vgg(**kwargs):
model_args = dict(head=dict(name="VGGHead", dropout_prob=0.5))
recursive_update(model_args, kwargs)
return VGG(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg11/vgg11.pkl")
def vgg11(**kwargs):
model_args = dict(depths=[1, 1, 2, 2, 2], widths=[64, 128, 256, 512, 512])
recursive_update(model_args, kwargs)
return _build_vgg(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg11_bn/vgg11_bn.pkl")
def vgg11_bn(**kwargs):
model_args = dict(norm_name="BN")
recursive_update(model_args, kwargs)
return vgg11(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg13/vgg13.pkl")
def vgg13(**kwargs):
model_args = dict(depths=[2, 2, 2, 2, 2], widths=[64, 128, 256, 512, 512])
recursive_update(model_args, kwargs)
return _build_vgg(**model_args)
@registers.models.register()
@ | hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg13_bn/vgg13_bn.pkl") | megengine.hub.pretrained |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
"""VGG Series
VGG: `"Very Deep Convolutional Networks for Large-Scale Image Recognition"
<https://arxiv.org/abs/1409.1556>`_
"""
from typing import Any, Mapping, Sequence
import megengine as mge
import megengine.hub as hub
import megengine.module as M
from basecls.layers import activation, build_head, conv2d, init_weights, norm2d
from basecls.utils import recursive_update, registers
__all__ = ["VGGStage", "VGG"]
class VGGStage(M.Module):
"""VGG stage (sequence of blocks w/ the same output shape)."""
def __init__(self, w_in: int, w_out: int, depth: int, norm_name: str, act_name: str):
super().__init__()
self.depth = depth
for i in range(depth):
block = M.Sequential(
conv2d(w_in, w_out, 3), norm2d(norm_name, w_out), activation(act_name)
)
setattr(self, f"b{i + 1}", block)
w_in = w_out
self.max_pool = M.MaxPool2d(kernel_size=2, stride=2)
def __len__(self):
return self.depth
def forward(self, x: mge.Tensor) -> mge.Tensor:
for i in range(self.depth):
block = getattr(self, f"b{i + 1}")
x = block(x)
x = self.max_pool(x)
return x
@registers.models.register()
class VGG(M.Module):
"""VGG model.
Args:
depths: depth for each stage (number of blocks in the stage).
widths: width for each stage (width of each block in the stage).
norm_name: normalization function. Default: ``None``
act_name: activation function. Default: ``"relu"``
head: head args. Default: ``None``
"""
def __init__(
self,
depths: Sequence[int],
widths: Sequence[int],
norm_name: str = None,
act_name: str = "relu",
head: Mapping[str, Any] = None,
):
super().__init__()
self.depths = depths
model_args = [depths, widths]
prev_w = 3
for i, (d, w) in enumerate(zip(*model_args)):
stage = VGGStage(prev_w, w, d, norm_name, act_name)
setattr(self, f"s{i + 1}", stage)
prev_w = w
self.head = build_head(prev_w, head, None, act_name)
self.apply(init_weights)
def forward(self, x: mge.Tensor) -> mge.Tensor:
for i in range(len(self.depths)):
stage = getattr(self, f"s{i + 1}")
x = stage(x)
if getattr(self, "head", None) is not None:
x = self.head(x)
return x
def _build_vgg(**kwargs):
model_args = dict(head=dict(name="VGGHead", dropout_prob=0.5))
recursive_update(model_args, kwargs)
return VGG(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg11/vgg11.pkl")
def vgg11(**kwargs):
model_args = dict(depths=[1, 1, 2, 2, 2], widths=[64, 128, 256, 512, 512])
recursive_update(model_args, kwargs)
return _build_vgg(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg11_bn/vgg11_bn.pkl")
def vgg11_bn(**kwargs):
model_args = dict(norm_name="BN")
recursive_update(model_args, kwargs)
return vgg11(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg13/vgg13.pkl")
def vgg13(**kwargs):
model_args = dict(depths=[2, 2, 2, 2, 2], widths=[64, 128, 256, 512, 512])
recursive_update(model_args, kwargs)
return _build_vgg(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg13_bn/vgg13_bn.pkl")
def vgg13_bn(**kwargs):
model_args = dict(norm_name="BN")
recursive_update(model_args, kwargs)
return vgg13(**model_args)
@registers.models.register()
@ | hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg16/vgg16.pkl") | megengine.hub.pretrained |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
"""VGG Series
VGG: `"Very Deep Convolutional Networks for Large-Scale Image Recognition"
<https://arxiv.org/abs/1409.1556>`_
"""
from typing import Any, Mapping, Sequence
import megengine as mge
import megengine.hub as hub
import megengine.module as M
from basecls.layers import activation, build_head, conv2d, init_weights, norm2d
from basecls.utils import recursive_update, registers
__all__ = ["VGGStage", "VGG"]
class VGGStage(M.Module):
"""VGG stage (sequence of blocks w/ the same output shape)."""
def __init__(self, w_in: int, w_out: int, depth: int, norm_name: str, act_name: str):
super().__init__()
self.depth = depth
for i in range(depth):
block = M.Sequential(
conv2d(w_in, w_out, 3), norm2d(norm_name, w_out), activation(act_name)
)
setattr(self, f"b{i + 1}", block)
w_in = w_out
self.max_pool = M.MaxPool2d(kernel_size=2, stride=2)
def __len__(self):
return self.depth
def forward(self, x: mge.Tensor) -> mge.Tensor:
for i in range(self.depth):
block = getattr(self, f"b{i + 1}")
x = block(x)
x = self.max_pool(x)
return x
@registers.models.register()
class VGG(M.Module):
"""VGG model.
Args:
depths: depth for each stage (number of blocks in the stage).
widths: width for each stage (width of each block in the stage).
norm_name: normalization function. Default: ``None``
act_name: activation function. Default: ``"relu"``
head: head args. Default: ``None``
"""
def __init__(
self,
depths: Sequence[int],
widths: Sequence[int],
norm_name: str = None,
act_name: str = "relu",
head: Mapping[str, Any] = None,
):
super().__init__()
self.depths = depths
model_args = [depths, widths]
prev_w = 3
for i, (d, w) in enumerate(zip(*model_args)):
stage = VGGStage(prev_w, w, d, norm_name, act_name)
setattr(self, f"s{i + 1}", stage)
prev_w = w
self.head = build_head(prev_w, head, None, act_name)
self.apply(init_weights)
def forward(self, x: mge.Tensor) -> mge.Tensor:
for i in range(len(self.depths)):
stage = getattr(self, f"s{i + 1}")
x = stage(x)
if getattr(self, "head", None) is not None:
x = self.head(x)
return x
def _build_vgg(**kwargs):
model_args = dict(head=dict(name="VGGHead", dropout_prob=0.5))
recursive_update(model_args, kwargs)
return VGG(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg11/vgg11.pkl")
def vgg11(**kwargs):
model_args = dict(depths=[1, 1, 2, 2, 2], widths=[64, 128, 256, 512, 512])
recursive_update(model_args, kwargs)
return _build_vgg(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg11_bn/vgg11_bn.pkl")
def vgg11_bn(**kwargs):
model_args = dict(norm_name="BN")
recursive_update(model_args, kwargs)
return vgg11(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg13/vgg13.pkl")
def vgg13(**kwargs):
model_args = dict(depths=[2, 2, 2, 2, 2], widths=[64, 128, 256, 512, 512])
recursive_update(model_args, kwargs)
return _build_vgg(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg13_bn/vgg13_bn.pkl")
def vgg13_bn(**kwargs):
model_args = dict(norm_name="BN")
recursive_update(model_args, kwargs)
return vgg13(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg16/vgg16.pkl")
def vgg16(**kwargs):
model_args = dict(depths=[2, 2, 3, 3, 3], widths=[64, 128, 256, 512, 512])
recursive_update(model_args, kwargs)
return _build_vgg(**model_args)
@registers.models.register()
@ | hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg16_bn/vgg16_bn.pkl") | megengine.hub.pretrained |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
"""VGG Series
VGG: `"Very Deep Convolutional Networks for Large-Scale Image Recognition"
<https://arxiv.org/abs/1409.1556>`_
"""
from typing import Any, Mapping, Sequence
import megengine as mge
import megengine.hub as hub
import megengine.module as M
from basecls.layers import activation, build_head, conv2d, init_weights, norm2d
from basecls.utils import recursive_update, registers
__all__ = ["VGGStage", "VGG"]
class VGGStage(M.Module):
"""VGG stage (sequence of blocks w/ the same output shape)."""
def __init__(self, w_in: int, w_out: int, depth: int, norm_name: str, act_name: str):
super().__init__()
self.depth = depth
for i in range(depth):
block = M.Sequential(
conv2d(w_in, w_out, 3), norm2d(norm_name, w_out), activation(act_name)
)
setattr(self, f"b{i + 1}", block)
w_in = w_out
self.max_pool = M.MaxPool2d(kernel_size=2, stride=2)
def __len__(self):
return self.depth
def forward(self, x: mge.Tensor) -> mge.Tensor:
for i in range(self.depth):
block = getattr(self, f"b{i + 1}")
x = block(x)
x = self.max_pool(x)
return x
@registers.models.register()
class VGG(M.Module):
"""VGG model.
Args:
depths: depth for each stage (number of blocks in the stage).
widths: width for each stage (width of each block in the stage).
norm_name: normalization function. Default: ``None``
act_name: activation function. Default: ``"relu"``
head: head args. Default: ``None``
"""
def __init__(
self,
depths: Sequence[int],
widths: Sequence[int],
norm_name: str = None,
act_name: str = "relu",
head: Mapping[str, Any] = None,
):
super().__init__()
self.depths = depths
model_args = [depths, widths]
prev_w = 3
for i, (d, w) in enumerate(zip(*model_args)):
stage = VGGStage(prev_w, w, d, norm_name, act_name)
setattr(self, f"s{i + 1}", stage)
prev_w = w
self.head = build_head(prev_w, head, None, act_name)
self.apply(init_weights)
def forward(self, x: mge.Tensor) -> mge.Tensor:
for i in range(len(self.depths)):
stage = getattr(self, f"s{i + 1}")
x = stage(x)
if getattr(self, "head", None) is not None:
x = self.head(x)
return x
def _build_vgg(**kwargs):
model_args = dict(head=dict(name="VGGHead", dropout_prob=0.5))
recursive_update(model_args, kwargs)
return VGG(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg11/vgg11.pkl")
def vgg11(**kwargs):
model_args = dict(depths=[1, 1, 2, 2, 2], widths=[64, 128, 256, 512, 512])
recursive_update(model_args, kwargs)
return _build_vgg(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg11_bn/vgg11_bn.pkl")
def vgg11_bn(**kwargs):
model_args = dict(norm_name="BN")
recursive_update(model_args, kwargs)
return vgg11(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg13/vgg13.pkl")
def vgg13(**kwargs):
model_args = dict(depths=[2, 2, 2, 2, 2], widths=[64, 128, 256, 512, 512])
recursive_update(model_args, kwargs)
return _build_vgg(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg13_bn/vgg13_bn.pkl")
def vgg13_bn(**kwargs):
model_args = dict(norm_name="BN")
recursive_update(model_args, kwargs)
return vgg13(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg16/vgg16.pkl")
def vgg16(**kwargs):
model_args = dict(depths=[2, 2, 3, 3, 3], widths=[64, 128, 256, 512, 512])
recursive_update(model_args, kwargs)
return _build_vgg(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg16_bn/vgg16_bn.pkl")
def vgg16_bn(**kwargs):
model_args = dict(norm_name="BN")
recursive_update(model_args, kwargs)
return vgg16(**model_args)
@registers.models.register()
@ | hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg19/vgg19.pkl") | megengine.hub.pretrained |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
"""VGG Series
VGG: `"Very Deep Convolutional Networks for Large-Scale Image Recognition"
<https://arxiv.org/abs/1409.1556>`_
"""
from typing import Any, Mapping, Sequence
import megengine as mge
import megengine.hub as hub
import megengine.module as M
from basecls.layers import activation, build_head, conv2d, init_weights, norm2d
from basecls.utils import recursive_update, registers
__all__ = ["VGGStage", "VGG"]
class VGGStage(M.Module):
"""VGG stage (sequence of blocks w/ the same output shape)."""
def __init__(self, w_in: int, w_out: int, depth: int, norm_name: str, act_name: str):
super().__init__()
self.depth = depth
for i in range(depth):
block = M.Sequential(
conv2d(w_in, w_out, 3), norm2d(norm_name, w_out), activation(act_name)
)
setattr(self, f"b{i + 1}", block)
w_in = w_out
self.max_pool = M.MaxPool2d(kernel_size=2, stride=2)
def __len__(self):
return self.depth
def forward(self, x: mge.Tensor) -> mge.Tensor:
for i in range(self.depth):
block = getattr(self, f"b{i + 1}")
x = block(x)
x = self.max_pool(x)
return x
@registers.models.register()
class VGG(M.Module):
"""VGG model.
Args:
depths: depth for each stage (number of blocks in the stage).
widths: width for each stage (width of each block in the stage).
norm_name: normalization function. Default: ``None``
act_name: activation function. Default: ``"relu"``
head: head args. Default: ``None``
"""
def __init__(
self,
depths: Sequence[int],
widths: Sequence[int],
norm_name: str = None,
act_name: str = "relu",
head: Mapping[str, Any] = None,
):
super().__init__()
self.depths = depths
model_args = [depths, widths]
prev_w = 3
for i, (d, w) in enumerate(zip(*model_args)):
stage = VGGStage(prev_w, w, d, norm_name, act_name)
setattr(self, f"s{i + 1}", stage)
prev_w = w
self.head = build_head(prev_w, head, None, act_name)
self.apply(init_weights)
def forward(self, x: mge.Tensor) -> mge.Tensor:
for i in range(len(self.depths)):
stage = getattr(self, f"s{i + 1}")
x = stage(x)
if getattr(self, "head", None) is not None:
x = self.head(x)
return x
def _build_vgg(**kwargs):
model_args = dict(head=dict(name="VGGHead", dropout_prob=0.5))
recursive_update(model_args, kwargs)
return VGG(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg11/vgg11.pkl")
def vgg11(**kwargs):
model_args = dict(depths=[1, 1, 2, 2, 2], widths=[64, 128, 256, 512, 512])
recursive_update(model_args, kwargs)
return _build_vgg(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg11_bn/vgg11_bn.pkl")
def vgg11_bn(**kwargs):
model_args = dict(norm_name="BN")
recursive_update(model_args, kwargs)
return vgg11(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg13/vgg13.pkl")
def vgg13(**kwargs):
model_args = dict(depths=[2, 2, 2, 2, 2], widths=[64, 128, 256, 512, 512])
recursive_update(model_args, kwargs)
return _build_vgg(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg13_bn/vgg13_bn.pkl")
def vgg13_bn(**kwargs):
model_args = dict(norm_name="BN")
recursive_update(model_args, kwargs)
return vgg13(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg16/vgg16.pkl")
def vgg16(**kwargs):
model_args = dict(depths=[2, 2, 3, 3, 3], widths=[64, 128, 256, 512, 512])
recursive_update(model_args, kwargs)
return _build_vgg(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg16_bn/vgg16_bn.pkl")
def vgg16_bn(**kwargs):
model_args = dict(norm_name="BN")
recursive_update(model_args, kwargs)
return vgg16(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg19/vgg19.pkl")
def vgg19(**kwargs):
model_args = dict(depths=[2, 2, 4, 4, 4], widths=[64, 128, 256, 512, 512])
recursive_update(model_args, kwargs)
return _build_vgg(**model_args)
@registers.models.register()
@ | hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg19_bn/vgg19_bn.pkl") | megengine.hub.pretrained |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
"""VGG Series
VGG: `"Very Deep Convolutional Networks for Large-Scale Image Recognition"
<https://arxiv.org/abs/1409.1556>`_
"""
from typing import Any, Mapping, Sequence
import megengine as mge
import megengine.hub as hub
import megengine.module as M
from basecls.layers import activation, build_head, conv2d, init_weights, norm2d
from basecls.utils import recursive_update, registers
__all__ = ["VGGStage", "VGG"]
class VGGStage(M.Module):
"""VGG stage (sequence of blocks w/ the same output shape)."""
def __init__(self, w_in: int, w_out: int, depth: int, norm_name: str, act_name: str):
super().__init__()
self.depth = depth
for i in range(depth):
block = M.Sequential(
conv2d(w_in, w_out, 3), norm2d(norm_name, w_out), activation(act_name)
)
setattr(self, f"b{i + 1}", block)
w_in = w_out
self.max_pool = | M.MaxPool2d(kernel_size=2, stride=2) | megengine.module.MaxPool2d |
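# A minimal sketch of the pooling layer completed above: a 2x2 max-pool with stride 2
# halves the spatial resolution, which is how each VGGStage downsamples its input.
import numpy as np
import megengine as mge
import megengine.module as M

pool = M.MaxPool2d(kernel_size=2, stride=2)
x = mge.tensor(np.random.rand(1, 64, 32, 32), dtype="float32")
print(pool(x).shape)  # (1, 64, 16, 16)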
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import numpy as np
from megengine import get_logger as mge_get_logger
mge_version = mge.__version__
if mge_version <= "0.6.0":
# pylint: disable=import-error, no-name-in-module
import megengine._internal as mgb
from megengine._internal import cgtools
else:
import megengine.core.tensor.megbrain_graph as G
import megengine.core._imperative_rt as rt
import megengine.utils.comp_graph_tools as cgtools
if mge_version <= "1.1.0":
from megengine.core.tensor.raw_tensor import ( # pylint: disable=no-name-in-module,import-error
as_raw_tensor as Tensor,
)
else:
from megengine.tensor import Tensor
def get_logger(*args):
return | mge_get_logger(*args) | megengine.get_logger |
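# A minimal usage sketch for the wrapper completed above: it simply forwards to
# megengine.get_logger, so it returns a standard logging.Logger instance.
log = get_logger(__name__)
log.info("running with MegEngine %s", mge_version)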
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import numpy as np
from megengine import get_logger as mge_get_logger
mge_version = mge.__version__
if mge_version <= "0.6.0":
# pylint: disable=import-error, no-name-in-module
import megengine._internal as mgb
from megengine._internal import cgtools
else:
import megengine.core.tensor.megbrain_graph as G
import megengine.core._imperative_rt as rt
import megengine.utils.comp_graph_tools as cgtools
if mge_version <= "1.1.0":
from megengine.core.tensor.raw_tensor import ( # pylint: disable=no-name-in-module,import-error
as_raw_tensor as Tensor,
)
else:
from megengine.tensor import Tensor
def get_logger(*args):
return mge_get_logger(*args)
def get_mge_version():
return mge_version
def get_symvar_value(sym_var):
if mge_version <= "0.6.0":
if sym_var.inferred_value is not None:
val = sym_var.inferred_value
return val
else:
cg = sym_var.owner_graph
func = cg.compile_outonly(sym_var)
val = func()
return val
else:
if sym_var.value is not None:
return sym_var.value
else:
out_node = G.ValueOutputNode(sym_var)
cg = out_node.outputs[0].graph
func = cg.compile(out_node.outputs)
func.execute()
return out_node.get_value()
def isnum(x):
return isinstance(x, (int, float))
def isconst(x):
return x.np_data is not None
def isvar(x):
return (
isinstance(x, mgb.SymbolVar)
if mge_version <= "0.6.0"
else isinstance(x, rt.VarNode) # pylint: disable=c-extension-no-member
)
def get_shape(x):
return x._get_imm_shape() if mge_version <= "0.6.0" else x.shape
def get_dep_vars(x, type=None):
return | cgtools.get_dep_vars(x, type) | megengine.utils.comp_graph_tools.get_dep_vars |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import numpy as np
from megengine import get_logger as mge_get_logger
mge_version = mge.__version__
if mge_version <= "0.6.0":
# pylint: disable=import-error, no-name-in-module
import megengine._internal as mgb
from megengine._internal import cgtools
else:
import megengine.core.tensor.megbrain_graph as G
import megengine.core._imperative_rt as rt
import megengine.utils.comp_graph_tools as cgtools
if mge_version <= "1.1.0":
from megengine.core.tensor.raw_tensor import ( # pylint: disable=no-name-in-module,import-error
as_raw_tensor as Tensor,
)
else:
from megengine.tensor import Tensor
def get_logger(*args):
return mge_get_logger(*args)
def get_mge_version():
return mge_version
def get_symvar_value(sym_var):
if mge_version <= "0.6.0":
if sym_var.inferred_value is not None:
val = sym_var.inferred_value
return val
else:
cg = sym_var.owner_graph
func = cg.compile_outonly(sym_var)
val = func()
return val
else:
if sym_var.value is not None:
return sym_var.value
else:
out_node = G.ValueOutputNode(sym_var)
cg = out_node.outputs[0].graph
func = cg.compile(out_node.outputs)
func.execute()
return out_node.get_value()
def isnum(x):
return isinstance(x, (int, float))
def isconst(x):
return x.np_data is not None
def isvar(x):
return (
isinstance(x, mgb.SymbolVar)
if mge_version <= "0.6.0"
else isinstance(x, rt.VarNode) # pylint: disable=c-extension-no-member
)
def get_shape(x):
return x._get_imm_shape() if mge_version <= "0.6.0" else x.shape
def get_dep_vars(x, type=None):
return cgtools.get_dep_vars(x, type)
def get_dtype_name(x):
return (
x.dtype.metadata["mgb_dtype"]["name"] if isinstance(x.dtype, np.dtype) else None
)
def get_opr_type(x):
return | cgtools.get_opr_type(x) | megengine.utils.comp_graph_tools.get_opr_type |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import numpy as np
from megengine import get_logger as mge_get_logger
mge_version = mge.__version__
if mge_version <= "0.6.0":
# pylint: disable=import-error, no-name-in-module
import megengine._internal as mgb
from megengine._internal import cgtools
else:
import megengine.core.tensor.megbrain_graph as G
import megengine.core._imperative_rt as rt
import megengine.utils.comp_graph_tools as cgtools
if mge_version <= "1.1.0":
from megengine.core.tensor.raw_tensor import ( # pylint: disable=no-name-in-module,import-error
as_raw_tensor as Tensor,
)
else:
from megengine.tensor import Tensor
def get_logger(*args):
return mge_get_logger(*args)
def get_mge_version():
return mge_version
def get_symvar_value(sym_var):
if mge_version <= "0.6.0":
if sym_var.inferred_value is not None:
val = sym_var.inferred_value
return val
else:
cg = sym_var.owner_graph
func = cg.compile_outonly(sym_var)
val = func()
return val
else:
if sym_var.value is not None:
return sym_var.value
else:
out_node = G.ValueOutputNode(sym_var)
cg = out_node.outputs[0].graph
func = cg.compile(out_node.outputs)
func.execute()
return out_node.get_value()
def isnum(x):
return isinstance(x, (int, float))
def isconst(x):
return x.np_data is not None
def isvar(x):
return (
isinstance(x, mgb.SymbolVar)
if mge_version <= "0.6.0"
else isinstance(x, rt.VarNode) # pylint: disable=c-extension-no-member
)
def get_shape(x):
return x._get_imm_shape() if mge_version <= "0.6.0" else x.shape
def get_dep_vars(x, type=None):
return cgtools.get_dep_vars(x, type)
def get_dtype_name(x):
return (
x.dtype.metadata["mgb_dtype"]["name"] if isinstance(x.dtype, np.dtype) else None
)
def get_opr_type(x):
return cgtools.get_opr_type(x)
def get_owner_opr_type(x):
if mge_version <= "0.6.0":
return cgtools.get_type(x._var)
else:
return cgtools.get_owner_opr_type(x._var)
def load_comp_graph_from_file(path):
if mge_version <= "0.6.0":
cg, _, outputs = mgb.load_comp_graph_from_file(path)
else:
ret = G.load_graph(path)
cg = ret.graph
outputs = ret.output_vars_list
return cg, outputs
def graph_traversal(outputs):
(
map_oprs,
map_vars,
var2oprs,
opr2receivers,
indegree2opr,
opr2indegree,
) = | cgtools.graph_traversal(outputs) | megengine.utils.comp_graph_tools.graph_traversal |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import numpy as np
from megengine import get_logger as mge_get_logger
mge_version = mge.__version__
if mge_version <= "0.6.0":
# pylint: disable=import-error, no-name-in-module
import megengine._internal as mgb
from megengine._internal import cgtools
else:
import megengine.core.tensor.megbrain_graph as G
import megengine.core._imperative_rt as rt
import megengine.utils.comp_graph_tools as cgtools
if mge_version <= "1.1.0":
from megengine.core.tensor.raw_tensor import ( # pylint: disable=no-name-in-module,import-error
as_raw_tensor as Tensor,
)
else:
from megengine.tensor import Tensor
def get_logger(*args):
return mge_get_logger(*args)
def get_mge_version():
return mge_version
def get_symvar_value(sym_var):
if mge_version <= "0.6.0":
if sym_var.inferred_value is not None:
val = sym_var.inferred_value
return val
else:
cg = sym_var.owner_graph
func = cg.compile_outonly(sym_var)
val = func()
return val
else:
if sym_var.value is not None:
return sym_var.value
else:
out_node = G.ValueOutputNode(sym_var)
cg = out_node.outputs[0].graph
func = cg.compile(out_node.outputs)
func.execute()
return out_node.get_value()
def isnum(x):
return isinstance(x, (int, float))
def isconst(x):
return x.np_data is not None
def isvar(x):
return (
isinstance(x, mgb.SymbolVar)
if mge_version <= "0.6.0"
else isinstance(x, rt.VarNode) # pylint: disable=c-extension-no-member
)
def get_shape(x):
return x._get_imm_shape() if mge_version <= "0.6.0" else x.shape
def get_dep_vars(x, type=None):
return cgtools.get_dep_vars(x, type)
def get_dtype_name(x):
return (
x.dtype.metadata["mgb_dtype"]["name"] if isinstance(x.dtype, np.dtype) else None
)
def get_opr_type(x):
return cgtools.get_opr_type(x)
def get_owner_opr_type(x):
if mge_version <= "0.6.0":
return cgtools.get_type(x._var)
else:
return cgtools.get_owner_opr_type(x._var)
def load_comp_graph_from_file(path):
if mge_version <= "0.6.0":
cg, _, outputs = mgb.load_comp_graph_from_file(path)
else:
ret = G.load_graph(path)
cg = ret.graph
outputs = ret.output_vars_list
return cg, outputs
def graph_traversal(outputs):
(
map_oprs,
map_vars,
var2oprs,
opr2receivers,
indegree2opr,
opr2indegree,
) = cgtools.graph_traversal(outputs)
return map_oprs, map_vars, var2oprs, opr2receivers, indegree2opr, opr2indegree
def get_oprs_seq(outputs, prune_reshape=True):
all_oprs = | cgtools.get_oprs_seq(outputs, prune_reshape=prune_reshape) | megengine.utils.comp_graph_tools.get_oprs_seq |
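# A usage sketch for the graph helpers above, assuming "model.mge" is a hypothetical
# path to a computing graph previously dumped from a traced MegEngine function.
_, outputs = load_comp_graph_from_file("model.mge")
for opr in get_oprs_seq(outputs, prune_reshape=False):
    print(get_opr_type(opr))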
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import numpy as np
from megengine import get_logger as mge_get_logger
mge_version = mge.__version__
if mge_version <= "0.6.0":
# pylint: disable=import-error, no-name-in-module
import megengine._internal as mgb
from megengine._internal import cgtools
else:
import megengine.core.tensor.megbrain_graph as G
import megengine.core._imperative_rt as rt
import megengine.utils.comp_graph_tools as cgtools
if mge_version <= "1.1.0":
from megengine.core.tensor.raw_tensor import ( # pylint: disable=no-name-in-module,import-error
as_raw_tensor as Tensor,
)
else:
from megengine.tensor import Tensor
def get_logger(*args):
return mge_get_logger(*args)
def get_mge_version():
return mge_version
def get_symvar_value(sym_var):
if mge_version <= "0.6.0":
if sym_var.inferred_value is not None:
val = sym_var.inferred_value
return val
else:
cg = sym_var.owner_graph
func = cg.compile_outonly(sym_var)
val = func()
return val
else:
if sym_var.value is not None:
return sym_var.value
else:
out_node = G.ValueOutputNode(sym_var)
cg = out_node.outputs[0].graph
func = cg.compile(out_node.outputs)
func.execute()
return out_node.get_value()
def isnum(x):
return isinstance(x, (int, float))
def isconst(x):
return x.np_data is not None
def isvar(x):
return (
isinstance(x, mgb.SymbolVar)
if mge_version <= "0.6.0"
else isinstance(x, rt.VarNode) # pylint: disable=c-extension-no-member
)
def get_shape(x):
return x._get_imm_shape() if mge_version <= "0.6.0" else x.shape
def get_dep_vars(x, type=None):
return cgtools.get_dep_vars(x, type)
def get_dtype_name(x):
return (
x.dtype.metadata["mgb_dtype"]["name"] if isinstance(x.dtype, np.dtype) else None
)
def get_opr_type(x):
return cgtools.get_opr_type(x)
def get_owner_opr_type(x):
if mge_version <= "0.6.0":
return cgtools.get_type(x._var)
else:
return cgtools.get_owner_opr_type(x._var)
def load_comp_graph_from_file(path):
if mge_version <= "0.6.0":
cg, _, outputs = mgb.load_comp_graph_from_file(path)
else:
ret = G.load_graph(path)
cg = ret.graph
outputs = ret.output_vars_list
return cg, outputs
def graph_traversal(outputs):
(
map_oprs,
map_vars,
var2oprs,
opr2receivers,
indegree2opr,
opr2indegree,
) = cgtools.graph_traversal(outputs)
return map_oprs, map_vars, var2oprs, opr2receivers, indegree2opr, opr2indegree
def get_oprs_seq(outputs, prune_reshape=True):
all_oprs = cgtools.get_oprs_seq(outputs, prune_reshape=prune_reshape)
return all_oprs
def eval_partial(inp, oup):
if not isinstance(oup, (list, tuple)):
oup = (oup,)
inputs = | cgtools.get_dep_vars(oup, "Host2DeviceCopy") | megengine.utils.comp_graph_tools.get_dep_vars |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import numpy as np
from megengine import get_logger as mge_get_logger
mge_version = mge.__version__
if mge_version <= "0.6.0":
# pylint: disable=import-error, no-name-in-module
import megengine._internal as mgb
from megengine._internal import cgtools
else:
import megengine.core.tensor.megbrain_graph as G
import megengine.core._imperative_rt as rt
import megengine.utils.comp_graph_tools as cgtools
if mge_version <= "1.1.0":
from megengine.core.tensor.raw_tensor import ( # pylint: disable=no-name-in-module,import-error
as_raw_tensor as Tensor,
)
else:
from megengine.tensor import Tensor
def get_logger(*args):
return mge_get_logger(*args)
def get_mge_version():
return mge_version
def get_symvar_value(sym_var):
if mge_version <= "0.6.0":
if sym_var.inferred_value is not None:
val = sym_var.inferred_value
return val
else:
cg = sym_var.owner_graph
func = cg.compile_outonly(sym_var)
val = func()
return val
else:
if sym_var.value is not None:
return sym_var.value
else:
out_node = G.ValueOutputNode(sym_var)
cg = out_node.outputs[0].graph
func = cg.compile(out_node.outputs)
func.execute()
return out_node.get_value()
def isnum(x):
return isinstance(x, (int, float))
def isconst(x):
return x.np_data is not None
def isvar(x):
return (
isinstance(x, mgb.SymbolVar)
if mge_version <= "0.6.0"
else isinstance(x, rt.VarNode) # pylint: disable=c-extension-no-member
)
def get_shape(x):
return x._get_imm_shape() if mge_version <= "0.6.0" else x.shape
def get_dep_vars(x, type=None):
return cgtools.get_dep_vars(x, type)
def get_dtype_name(x):
return (
x.dtype.metadata["mgb_dtype"]["name"] if isinstance(x.dtype, np.dtype) else None
)
def get_opr_type(x):
return cgtools.get_opr_type(x)
def get_owner_opr_type(x):
if mge_version <= "0.6.0":
return | cgtools.get_type(x._var) | megengine.utils.comp_graph_tools.get_type |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import numpy as np
from megengine import get_logger as mge_get_logger
mge_version = mge.__version__
if mge_version <= "0.6.0":
# pylint: disable=import-error, no-name-in-module
import megengine._internal as mgb
from megengine._internal import cgtools
else:
import megengine.core.tensor.megbrain_graph as G
import megengine.core._imperative_rt as rt
import megengine.utils.comp_graph_tools as cgtools
if mge_version <= "1.1.0":
from megengine.core.tensor.raw_tensor import ( # pylint: disable=no-name-in-module,import-error
as_raw_tensor as Tensor,
)
else:
from megengine.tensor import Tensor
def get_logger(*args):
return mge_get_logger(*args)
def get_mge_version():
return mge_version
def get_symvar_value(sym_var):
if mge_version <= "0.6.0":
if sym_var.inferred_value is not None:
val = sym_var.inferred_value
return val
else:
cg = sym_var.owner_graph
func = cg.compile_outonly(sym_var)
val = func()
return val
else:
if sym_var.value is not None:
return sym_var.value
else:
out_node = G.ValueOutputNode(sym_var)
cg = out_node.outputs[0].graph
func = cg.compile(out_node.outputs)
func.execute()
return out_node.get_value()
def isnum(x):
return isinstance(x, (int, float))
def isconst(x):
return x.np_data is not None
def isvar(x):
return (
isinstance(x, mgb.SymbolVar)
if mge_version <= "0.6.0"
else isinstance(x, rt.VarNode) # pylint: disable=c-extension-no-member
)
def get_shape(x):
return x._get_imm_shape() if mge_version <= "0.6.0" else x.shape
def get_dep_vars(x, type=None):
return cgtools.get_dep_vars(x, type)
def get_dtype_name(x):
return (
x.dtype.metadata["mgb_dtype"]["name"] if isinstance(x.dtype, np.dtype) else None
)
def get_opr_type(x):
return cgtools.get_opr_type(x)
def get_owner_opr_type(x):
if mge_version <= "0.6.0":
return cgtools.get_type(x._var)
else:
return | cgtools.get_owner_opr_type(x._var) | megengine.utils.comp_graph_tools.get_owner_opr_type |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import numpy as np
from megengine import get_logger as mge_get_logger
mge_version = mge.__version__
if mge_version <= "0.6.0":
# pylint: disable=import-error, no-name-in-module
import megengine._internal as mgb
from megengine._internal import cgtools
else:
import megengine.core.tensor.megbrain_graph as G
import megengine.core._imperative_rt as rt
import megengine.utils.comp_graph_tools as cgtools
if mge_version <= "1.1.0":
from megengine.core.tensor.raw_tensor import ( # pylint: disable=no-name-in-module,import-error
as_raw_tensor as Tensor,
)
else:
from megengine.tensor import Tensor
def get_logger(*args):
return mge_get_logger(*args)
def get_mge_version():
return mge_version
def get_symvar_value(sym_var):
if mge_version <= "0.6.0":
if sym_var.inferred_value is not None:
val = sym_var.inferred_value
return val
else:
cg = sym_var.owner_graph
func = cg.compile_outonly(sym_var)
val = func()
return val
else:
if sym_var.value is not None:
return sym_var.value
else:
out_node = G.ValueOutputNode(sym_var)
cg = out_node.outputs[0].graph
func = cg.compile(out_node.outputs)
func.execute()
return out_node.get_value()
def isnum(x):
return isinstance(x, (int, float))
def isconst(x):
return x.np_data is not None
def isvar(x):
return (
isinstance(x, mgb.SymbolVar)
if mge_version <= "0.6.0"
else isinstance(x, rt.VarNode) # pylint: disable=c-extension-no-member
)
def get_shape(x):
return x._get_imm_shape() if mge_version <= "0.6.0" else x.shape
def get_dep_vars(x, type=None):
return cgtools.get_dep_vars(x, type)
def get_dtype_name(x):
return (
x.dtype.metadata["mgb_dtype"]["name"] if isinstance(x.dtype, np.dtype) else None
)
def get_opr_type(x):
return cgtools.get_opr_type(x)
def get_owner_opr_type(x):
if mge_version <= "0.6.0":
return cgtools.get_type(x._var)
else:
return cgtools.get_owner_opr_type(x._var)
def load_comp_graph_from_file(path):
if mge_version <= "0.6.0":
cg, _, outputs = | mgb.load_comp_graph_from_file(path) | megengine._internal.load_comp_graph_from_file |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import numpy as np
from megengine import get_logger as mge_get_logger
mge_version = mge.__version__
if mge_version <= "0.6.0":
# pylint: disable=import-error, no-name-in-module
import megengine._internal as mgb
from megengine._internal import cgtools
else:
import megengine.core.tensor.megbrain_graph as G
import megengine.core._imperative_rt as rt
import megengine.utils.comp_graph_tools as cgtools
if mge_version <= "1.1.0":
from megengine.core.tensor.raw_tensor import ( # pylint: disable=no-name-in-module,import-error
as_raw_tensor as Tensor,
)
else:
from megengine.tensor import Tensor
def get_logger(*args):
return mge_get_logger(*args)
def get_mge_version():
return mge_version
def get_symvar_value(sym_var):
if mge_version <= "0.6.0":
if sym_var.inferred_value is not None:
val = sym_var.inferred_value
return val
else:
cg = sym_var.owner_graph
func = cg.compile_outonly(sym_var)
val = func()
return val
else:
if sym_var.value is not None:
return sym_var.value
else:
out_node = G.ValueOutputNode(sym_var)
cg = out_node.outputs[0].graph
func = cg.compile(out_node.outputs)
func.execute()
return out_node.get_value()
def isnum(x):
return isinstance(x, (int, float))
def isconst(x):
return x.np_data is not None
def isvar(x):
return (
isinstance(x, mgb.SymbolVar)
if mge_version <= "0.6.0"
else isinstance(x, rt.VarNode) # pylint: disable=c-extension-no-member
)
def get_shape(x):
return x._get_imm_shape() if mge_version <= "0.6.0" else x.shape
def get_dep_vars(x, type=None):
return cgtools.get_dep_vars(x, type)
def get_dtype_name(x):
return (
x.dtype.metadata["mgb_dtype"]["name"] if isinstance(x.dtype, np.dtype) else None
)
def get_opr_type(x):
return cgtools.get_opr_type(x)
def get_owner_opr_type(x):
if mge_version <= "0.6.0":
return cgtools.get_type(x._var)
else:
return cgtools.get_owner_opr_type(x._var)
def load_comp_graph_from_file(path):
if mge_version <= "0.6.0":
cg, _, outputs = mgb.load_comp_graph_from_file(path)
else:
ret = | G.load_graph(path) | megengine.core.tensor.megbrain_graph.load_graph |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import numpy as np
from megengine import get_logger as mge_get_logger
mge_version = mge.__version__
if mge_version <= "0.6.0":
# pylint: disable=import-error, no-name-in-module
import megengine._internal as mgb
from megengine._internal import cgtools
else:
import megengine.core.tensor.megbrain_graph as G
import megengine.core._imperative_rt as rt
import megengine.utils.comp_graph_tools as cgtools
if mge_version <= "1.1.0":
from megengine.core.tensor.raw_tensor import ( # pylint: disable=no-name-in-module,import-error
as_raw_tensor as Tensor,
)
else:
from megengine.tensor import Tensor
def get_logger(*args):
return mge_get_logger(*args)
def get_mge_version():
return mge_version
def get_symvar_value(sym_var):
if mge_version <= "0.6.0":
if sym_var.inferred_value is not None:
val = sym_var.inferred_value
return val
else:
cg = sym_var.owner_graph
func = cg.compile_outonly(sym_var)
val = func()
return val
else:
if sym_var.value is not None:
return sym_var.value
else:
out_node = G.ValueOutputNode(sym_var)
cg = out_node.outputs[0].graph
func = cg.compile(out_node.outputs)
func.execute()
return out_node.get_value()
def isnum(x):
return isinstance(x, (int, float))
def isconst(x):
return x.np_data is not None
def isvar(x):
return (
isinstance(x, mgb.SymbolVar)
if mge_version <= "0.6.0"
else isinstance(x, rt.VarNode) # pylint: disable=c-extension-no-member
)
def get_shape(x):
return x._get_imm_shape() if mge_version <= "0.6.0" else x.shape
def get_dep_vars(x, type=None):
return cgtools.get_dep_vars(x, type)
def get_dtype_name(x):
return (
x.dtype.metadata["mgb_dtype"]["name"] if isinstance(x.dtype, np.dtype) else None
)
def get_opr_type(x):
return cgtools.get_opr_type(x)
def get_owner_opr_type(x):
if mge_version <= "0.6.0":
return cgtools.get_type(x._var)
else:
return cgtools.get_owner_opr_type(x._var)
def load_comp_graph_from_file(path):
if mge_version <= "0.6.0":
cg, _, outputs = mgb.load_comp_graph_from_file(path)
else:
ret = G.load_graph(path)
cg = ret.graph
outputs = ret.output_vars_list
return cg, outputs
def graph_traversal(outputs):
(
map_oprs,
map_vars,
var2oprs,
opr2receivers,
indegree2opr,
opr2indegree,
) = cgtools.graph_traversal(outputs)
return map_oprs, map_vars, var2oprs, opr2receivers, indegree2opr, opr2indegree
def get_oprs_seq(outputs, prune_reshape=True):
all_oprs = cgtools.get_oprs_seq(outputs, prune_reshape=prune_reshape)
return all_oprs
def eval_partial(inp, oup):
if not isinstance(oup, (list, tuple)):
oup = (oup,)
inputs = cgtools.get_dep_vars(oup, "Host2DeviceCopy")
if mge_version <= "0.6.0":
cg = oup[0].owner_graph
outputs = list(map(mgb.copy_output, oup))
f = cg.compile(inputs, outputs)
result = f(inp)
else:
if not isinstance(inp, (list, tuple)):
inp = (inp,)
replace_dict = {}
inp_node_list = []
for i in inputs:
inp_node = G.InputNode(
device="xpux", dtype=inputs[0].dtype, graph=inputs[0].graph
)
replace_dict[i] = inp_node.outputs[0]
inp_node_list.append(inp_node)
new_out = | cgtools.replace_vars(oup, replace_dict) | megengine.utils.comp_graph_tools.replace_vars |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import numpy as np
from megengine import get_logger as mge_get_logger
mge_version = mge.__version__
if mge_version <= "0.6.0":
# pylint: disable=import-error, no-name-in-module
import megengine._internal as mgb
from megengine._internal import cgtools
else:
import megengine.core.tensor.megbrain_graph as G
import megengine.core._imperative_rt as rt
import megengine.utils.comp_graph_tools as cgtools
if mge_version <= "1.1.0":
from megengine.core.tensor.raw_tensor import ( # pylint: disable=no-name-in-module,import-error
as_raw_tensor as Tensor,
)
else:
from megengine.tensor import Tensor
def get_logger(*args):
return mge_get_logger(*args)
def get_mge_version():
return mge_version
def get_symvar_value(sym_var):
if mge_version <= "0.6.0":
if sym_var.inferred_value is not None:
val = sym_var.inferred_value
return val
else:
cg = sym_var.owner_graph
func = cg.compile_outonly(sym_var)
val = func()
return val
else:
if sym_var.value is not None:
return sym_var.value
else:
out_node = | G.ValueOutputNode(sym_var) | megengine.core.tensor.megbrain_graph.ValueOutputNode |
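Condensed sketch of the newer-API branch completed above: wrap the symbolic variable in a value-output node, compile, execute once, and read the value back on the host. All names follow the snippet itself.
out_node = G.ValueOutputNode(sym_var)                        # value-output wrapper
func = out_node.outputs[0].graph.compile(out_node.outputs)   # compile just this output
func.execute()                                               # run the graph once
host_value = out_node.get_value()                            # value back on the host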
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import numpy as np
from megengine import get_logger as mge_get_logger
mge_version = mge.__version__
if mge_version <= "0.6.0":
# pylint: disable=import-error, no-name-in-module
import megengine._internal as mgb
from megengine._internal import cgtools
else:
import megengine.core.tensor.megbrain_graph as G
import megengine.core._imperative_rt as rt
import megengine.utils.comp_graph_tools as cgtools
if mge_version <= "1.1.0":
from megengine.core.tensor.raw_tensor import ( # pylint: disable=no-name-in-module,import-error
as_raw_tensor as Tensor,
)
else:
from megengine.tensor import Tensor
def get_logger(*args):
return mge_get_logger(*args)
def get_mge_version():
return mge_version
def get_symvar_value(sym_var):
if mge_version <= "0.6.0":
if sym_var.inferred_value is not None:
val = sym_var.inferred_value
return val
else:
cg = sym_var.owner_graph
func = cg.compile_outonly(sym_var)
val = func()
return val
else:
if sym_var.value is not None:
return sym_var.value
else:
out_node = G.ValueOutputNode(sym_var)
cg = out_node.outputs[0].graph
func = cg.compile(out_node.outputs)
func.execute()
return out_node.get_value()
def isnum(x):
return isinstance(x, (int, float))
def isconst(x):
return x.np_data is not None
def isvar(x):
return (
isinstance(x, mgb.SymbolVar)
if mge_version <= "0.6.0"
else isinstance(x, rt.VarNode) # pylint: disable=c-extension-no-member
)
def get_shape(x):
return x._get_imm_shape() if mge_version <= "0.6.0" else x.shape
def get_dep_vars(x, type=None):
return cgtools.get_dep_vars(x, type)
def get_dtype_name(x):
return (
x.dtype.metadata["mgb_dtype"]["name"] if isinstance(x.dtype, np.dtype) else None
)
def get_opr_type(x):
return cgtools.get_opr_type(x)
def get_owner_opr_type(x):
if mge_version <= "0.6.0":
return cgtools.get_type(x._var)
else:
return cgtools.get_owner_opr_type(x._var)
def load_comp_graph_from_file(path):
if mge_version <= "0.6.0":
cg, _, outputs = mgb.load_comp_graph_from_file(path)
else:
ret = G.load_graph(path)
cg = ret.graph
outputs = ret.output_vars_list
return cg, outputs
def graph_traversal(outputs):
(
map_oprs,
map_vars,
var2oprs,
opr2receivers,
indegree2opr,
opr2indegree,
) = cgtools.graph_traversal(outputs)
return map_oprs, map_vars, var2oprs, opr2receivers, indegree2opr, opr2indegree
def get_oprs_seq(outputs, prune_reshape=True):
all_oprs = cgtools.get_oprs_seq(outputs, prune_reshape=prune_reshape)
return all_oprs
def eval_partial(inp, oup):
if not isinstance(oup, (list, tuple)):
oup = (oup,)
inputs = cgtools.get_dep_vars(oup, "Host2DeviceCopy")
if mge_version <= "0.6.0":
cg = oup[0].owner_graph
outputs = list(map(mgb.copy_output, oup))
f = cg.compile(inputs, outputs)
result = f(inp)
else:
if not isinstance(inp, (list, tuple)):
inp = (inp,)
replace_dict = {}
inp_node_list = []
for i in inputs:
inp_node = G.InputNode(
device="xpux", dtype=inputs[0].dtype, graph=inputs[0].graph
)
replace_dict[i] = inp_node.outputs[0]
inp_node_list.append(inp_node)
new_out = cgtools.replace_vars(oup, replace_dict)
out_node_list = [ | G.OutputNode(i) | megengine.core.tensor.megbrain_graph.OutputNode |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import numpy as np
from megengine import get_logger as mge_get_logger
mge_version = mge.__version__
if mge_version <= "0.6.0":
# pylint: disable=import-error, no-name-in-module
import megengine._internal as mgb
from megengine._internal import cgtools
else:
import megengine.core.tensor.megbrain_graph as G
import megengine.core._imperative_rt as rt
import megengine.utils.comp_graph_tools as cgtools
if mge_version <= "1.1.0":
from megengine.core.tensor.raw_tensor import ( # pylint: disable=no-name-in-module,import-error
as_raw_tensor as Tensor,
)
else:
from megengine.tensor import Tensor
def get_logger(*args):
return mge_get_logger(*args)
def get_mge_version():
return mge_version
def get_symvar_value(sym_var):
if mge_version <= "0.6.0":
if sym_var.inferred_value is not None:
val = sym_var.inferred_value
return val
else:
cg = sym_var.owner_graph
func = cg.compile_outonly(sym_var)
val = func()
return val
else:
if sym_var.value is not None:
return sym_var.value
else:
out_node = G.ValueOutputNode(sym_var)
cg = out_node.outputs[0].graph
func = cg.compile(out_node.outputs)
func.execute()
return out_node.get_value()
def isnum(x):
return isinstance(x, (int, float))
def isconst(x):
return x.np_data is not None
def isvar(x):
return (
isinstance(x, mgb.SymbolVar)
if mge_version <= "0.6.0"
else isinstance(x, rt.VarNode) # pylint: disable=c-extension-no-member
)
def get_shape(x):
return x._get_imm_shape() if mge_version <= "0.6.0" else x.shape
def get_dep_vars(x, type=None):
return cgtools.get_dep_vars(x, type)
def get_dtype_name(x):
return (
x.dtype.metadata["mgb_dtype"]["name"] if isinstance(x.dtype, np.dtype) else None
)
def get_opr_type(x):
return cgtools.get_opr_type(x)
def get_owner_opr_type(x):
if mge_version <= "0.6.0":
return cgtools.get_type(x._var)
else:
return cgtools.get_owner_opr_type(x._var)
def load_comp_graph_from_file(path):
if mge_version <= "0.6.0":
cg, _, outputs = mgb.load_comp_graph_from_file(path)
else:
ret = G.load_graph(path)
cg = ret.graph
outputs = ret.output_vars_list
return cg, outputs
def graph_traversal(outputs):
(
map_oprs,
map_vars,
var2oprs,
opr2receivers,
indegree2opr,
opr2indegree,
) = cgtools.graph_traversal(outputs)
return map_oprs, map_vars, var2oprs, opr2receivers, indegree2opr, opr2indegree
def get_oprs_seq(outputs, prune_reshape=True):
all_oprs = cgtools.get_oprs_seq(outputs, prune_reshape=prune_reshape)
return all_oprs
def eval_partial(inp, oup):
if not isinstance(oup, (list, tuple)):
oup = (oup,)
inputs = cgtools.get_dep_vars(oup, "Host2DeviceCopy")
if mge_version <= "0.6.0":
cg = oup[0].owner_graph
outputs = list(map(mgb.copy_output, oup))
f = cg.compile(inputs, outputs)
result = f(inp)
else:
if not isinstance(inp, (list, tuple)):
inp = (inp,)
replace_dict = {}
inp_node_list = []
for i in inputs:
inp_node = G.InputNode(
device="xpux", dtype=inputs[0].dtype, graph=inputs[0].graph
)
replace_dict[i] = inp_node.outputs[0]
inp_node_list.append(inp_node)
new_out = cgtools.replace_vars(oup, replace_dict)
out_node_list = [G.OutputNode(i) for i in new_out]
new_out_list = [i.outputs[0] for i in out_node_list]
cg = new_out_list[0].graph
func = cg.compile(new_out_list)
for node, value in zip(inp_node_list, inp):
node.set_value( | Tensor(value) | megengine.tensor.Tensor |
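Hedged usage sketch for eval_partial as defined above: one host array per Host2DeviceCopy input, one array per requested output. The checkpoint path and input shape are illustrative, and the function is assumed to finish by returning result.
import numpy as np

cg, outputs = load_comp_graph_from_file("model.mge")      # illustrative path
inp = np.random.rand(1, 3, 224, 224).astype(np.float32)   # illustrative input shape
result = eval_partial(inp, outputs[0])                    # one ndarray per output var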
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
from megengine.core import tensor
from megengine.module import BatchNorm1d, BatchNorm2d
from megengine.test import assertTensorClose
def test_batchnorm():
nr_chan = 8
data_shape = (3, nr_chan, 4)
momentum = 0.9
bn = | BatchNorm1d(nr_chan, momentum=momentum) | megengine.module.BatchNorm1d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
from megengine.core import tensor
from megengine.module import BatchNorm1d, BatchNorm2d
from megengine.test import assertTensorClose
def test_batchnorm():
nr_chan = 8
data_shape = (3, nr_chan, 4)
momentum = 0.9
bn = BatchNorm1d(nr_chan, momentum=momentum)
running_mean = np.zeros((1, nr_chan, 1), dtype=np.float32)
running_var = np.ones((1, nr_chan, 1), dtype=np.float32)
data = | tensor() | megengine.core.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
from megengine.core import tensor
from megengine.module import BatchNorm1d, BatchNorm2d
from megengine.test import assertTensorClose
def test_batchnorm():
nr_chan = 8
data_shape = (3, nr_chan, 4)
momentum = 0.9
bn = BatchNorm1d(nr_chan, momentum=momentum)
running_mean = np.zeros((1, nr_chan, 1), dtype=np.float32)
running_var = np.ones((1, nr_chan, 1), dtype=np.float32)
data = tensor()
for i in range(3):
xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
mean = np.mean(np.mean(xv, axis=0, keepdims=True), axis=2, keepdims=True)
xv_transposed = np.transpose(xv, [0, 2, 1]).reshape(
(data_shape[0] * data_shape[2], nr_chan)
)
var_biased = np.var(xv_transposed, axis=0).reshape((1, nr_chan, 1))
sd = np.sqrt(var_biased + bn.eps)
var_unbiased = np.var(xv_transposed, axis=0, ddof=1).reshape((1, nr_chan, 1))
running_mean = running_mean * momentum + mean * (1 - momentum)
running_var = running_var * momentum + var_unbiased * (1 - momentum)
data.set_value(xv)
yv = bn(data)
yv_expect = (xv - mean) / sd
assertTensorClose(yv_expect, yv.numpy(), max_err=5e-6)
assertTensorClose(
running_mean.reshape(-1), bn.running_mean.numpy().reshape(-1), max_err=5e-6
)
assertTensorClose(
running_var.reshape(-1), bn.running_var.numpy().reshape(-1), max_err=5e-6
)
# test set 'training' flag to False
mean_backup = bn.running_mean.numpy()
var_backup = bn.running_var.numpy()
bn.training = False
xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
data.set_value(xv)
yv1 = bn(data)
yv2 = bn(data)
assertTensorClose(yv1.numpy(), yv2.numpy(), max_err=0)
assertTensorClose(mean_backup, bn.running_mean.numpy(), max_err=0)
assertTensorClose(var_backup, bn.running_var.numpy(), max_err=0)
yv_expect = (xv - running_mean) / np.sqrt(running_var + bn.eps)
assertTensorClose(yv_expect, yv1.numpy(), max_err=5e-6)
def test_batchnorm2d():
nr_chan = 8
data_shape = (3, nr_chan, 16, 16)
momentum = 0.9
bn = | BatchNorm2d(nr_chan, momentum=momentum) | megengine.module.BatchNorm2d |
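The BatchNorm tests above hand-compute the statistics they compare against; as a plain-numpy reminder, the rule they verify updates the running buffers with the unbiased batch variance while normalizing with the biased one.
import numpy as np

m = 0.9                                # momentum, as in the tests
running_mean, running_var = 0.0, 1.0   # initial buffer values
mu_b, var_b_unbiased = 2.3, 0.8        # illustrative batch statistics
running_mean = m * running_mean + (1 - m) * mu_b
running_var = m * running_var + (1 - m) * var_b_unbiased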
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
from megengine.core import tensor
from megengine.module import BatchNorm1d, BatchNorm2d
from megengine.test import assertTensorClose
def test_batchnorm():
nr_chan = 8
data_shape = (3, nr_chan, 4)
momentum = 0.9
bn = BatchNorm1d(nr_chan, momentum=momentum)
running_mean = np.zeros((1, nr_chan, 1), dtype=np.float32)
running_var = np.ones((1, nr_chan, 1), dtype=np.float32)
data = tensor()
for i in range(3):
xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
mean = np.mean(np.mean(xv, axis=0, keepdims=True), axis=2, keepdims=True)
xv_transposed = np.transpose(xv, [0, 2, 1]).reshape(
(data_shape[0] * data_shape[2], nr_chan)
)
var_biased = np.var(xv_transposed, axis=0).reshape((1, nr_chan, 1))
sd = np.sqrt(var_biased + bn.eps)
var_unbiased = np.var(xv_transposed, axis=0, ddof=1).reshape((1, nr_chan, 1))
running_mean = running_mean * momentum + mean * (1 - momentum)
running_var = running_var * momentum + var_unbiased * (1 - momentum)
data.set_value(xv)
yv = bn(data)
yv_expect = (xv - mean) / sd
assertTensorClose(yv_expect, yv.numpy(), max_err=5e-6)
assertTensorClose(
running_mean.reshape(-1), bn.running_mean.numpy().reshape(-1), max_err=5e-6
)
assertTensorClose(
running_var.reshape(-1), bn.running_var.numpy().reshape(-1), max_err=5e-6
)
# test set 'training' flag to False
mean_backup = bn.running_mean.numpy()
var_backup = bn.running_var.numpy()
bn.training = False
xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
data.set_value(xv)
yv1 = bn(data)
yv2 = bn(data)
assertTensorClose(yv1.numpy(), yv2.numpy(), max_err=0)
assertTensorClose(mean_backup, bn.running_mean.numpy(), max_err=0)
assertTensorClose(var_backup, bn.running_var.numpy(), max_err=0)
yv_expect = (xv - running_mean) / np.sqrt(running_var + bn.eps)
assertTensorClose(yv_expect, yv1.numpy(), max_err=5e-6)
def test_batchnorm2d():
nr_chan = 8
data_shape = (3, nr_chan, 16, 16)
momentum = 0.9
bn = BatchNorm2d(nr_chan, momentum=momentum)
running_mean = np.zeros((1, nr_chan, 1, 1), dtype=np.float32)
running_var = np.ones((1, nr_chan, 1, 1), dtype=np.float32)
data = | tensor() | megengine.core.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
from megengine.core import tensor
from megengine.module import BatchNorm1d, BatchNorm2d
from megengine.test import assertTensorClose
def test_batchnorm():
nr_chan = 8
data_shape = (3, nr_chan, 4)
momentum = 0.9
bn = BatchNorm1d(nr_chan, momentum=momentum)
running_mean = np.zeros((1, nr_chan, 1), dtype=np.float32)
running_var = np.ones((1, nr_chan, 1), dtype=np.float32)
data = tensor()
for i in range(3):
xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
mean = np.mean(np.mean(xv, axis=0, keepdims=True), axis=2, keepdims=True)
xv_transposed = np.transpose(xv, [0, 2, 1]).reshape(
(data_shape[0] * data_shape[2], nr_chan)
)
var_biased = np.var(xv_transposed, axis=0).reshape((1, nr_chan, 1))
sd = np.sqrt(var_biased + bn.eps)
var_unbiased = np.var(xv_transposed, axis=0, ddof=1).reshape((1, nr_chan, 1))
running_mean = running_mean * momentum + mean * (1 - momentum)
running_var = running_var * momentum + var_unbiased * (1 - momentum)
data.set_value(xv)
yv = bn(data)
yv_expect = (xv - mean) / sd
assertTensorClose(yv_expect, yv.numpy(), max_err=5e-6)
assertTensorClose(
running_mean.reshape(-1), bn.running_mean.numpy().reshape(-1), max_err=5e-6
)
assertTensorClose(
running_var.reshape(-1), bn.running_var.numpy().reshape(-1), max_err=5e-6
)
# test set 'training' flag to False
mean_backup = bn.running_mean.numpy()
var_backup = bn.running_var.numpy()
bn.training = False
xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
data.set_value(xv)
yv1 = bn(data)
yv2 = bn(data)
assertTensorClose(yv1.numpy(), yv2.numpy(), max_err=0)
assertTensorClose(mean_backup, bn.running_mean.numpy(), max_err=0)
assertTensorClose(var_backup, bn.running_var.numpy(), max_err=0)
yv_expect = (xv - running_mean) / np.sqrt(running_var + bn.eps)
assertTensorClose(yv_expect, yv1.numpy(), max_err=5e-6)
def test_batchnorm2d():
nr_chan = 8
data_shape = (3, nr_chan, 16, 16)
momentum = 0.9
bn = BatchNorm2d(nr_chan, momentum=momentum)
running_mean = np.zeros((1, nr_chan, 1, 1), dtype=np.float32)
running_var = np.ones((1, nr_chan, 1, 1), dtype=np.float32)
data = tensor()
for i in range(3):
xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
xv_transposed = np.transpose(xv, [0, 2, 3, 1]).reshape(
(data_shape[0] * data_shape[2] * data_shape[3], nr_chan)
)
mean = np.mean(xv_transposed, axis=0).reshape(1, nr_chan, 1, 1)
var_biased = np.var(xv_transposed, axis=0).reshape((1, nr_chan, 1, 1))
sd = np.sqrt(var_biased + bn.eps)
var_unbiased = np.var(xv_transposed, axis=0, ddof=1).reshape((1, nr_chan, 1, 1))
running_mean = running_mean * momentum + mean * (1 - momentum)
running_var = running_var * momentum + var_unbiased * (1 - momentum)
data.set_value(xv)
yv = bn(data)
yv_expect = (xv - mean) / sd
assertTensorClose(yv_expect, yv.numpy(), max_err=5e-6)
assertTensorClose(running_mean, bn.running_mean.numpy(), max_err=5e-6)
assertTensorClose(running_var, bn.running_var.numpy(), max_err=5e-6)
# test set 'training' flag to False
mean_backup = bn.running_mean.numpy()
var_backup = bn.running_var.numpy()
bn.training = False
xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
data.set_value(xv)
yv1 = bn(data)
yv2 = bn(data)
assertTensorClose(yv1.numpy(), yv2.numpy(), max_err=0)
assertTensorClose(mean_backup, bn.running_mean.numpy(), max_err=0)
assertTensorClose(var_backup, bn.running_var.numpy(), max_err=0)
yv_expect = (xv - running_mean) / np.sqrt(running_var + bn.eps)
assertTensorClose(yv_expect, yv1.numpy(), max_err=5e-6)
def test_batchnorm_no_stats():
nr_chan = 8
data_shape = (3, nr_chan, 4)
bn = | BatchNorm1d(8, track_running_stats=False) | megengine.module.BatchNorm1d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
from megengine.core import tensor
from megengine.module import BatchNorm1d, BatchNorm2d
from megengine.test import assertTensorClose
def test_batchnorm():
nr_chan = 8
data_shape = (3, nr_chan, 4)
momentum = 0.9
bn = BatchNorm1d(nr_chan, momentum=momentum)
running_mean = np.zeros((1, nr_chan, 1), dtype=np.float32)
running_var = np.ones((1, nr_chan, 1), dtype=np.float32)
data = tensor()
for i in range(3):
xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
mean = np.mean(np.mean(xv, axis=0, keepdims=True), axis=2, keepdims=True)
xv_transposed = np.transpose(xv, [0, 2, 1]).reshape(
(data_shape[0] * data_shape[2], nr_chan)
)
var_biased = np.var(xv_transposed, axis=0).reshape((1, nr_chan, 1))
sd = np.sqrt(var_biased + bn.eps)
var_unbiased = np.var(xv_transposed, axis=0, ddof=1).reshape((1, nr_chan, 1))
running_mean = running_mean * momentum + mean * (1 - momentum)
running_var = running_var * momentum + var_unbiased * (1 - momentum)
data.set_value(xv)
yv = bn(data)
yv_expect = (xv - mean) / sd
assertTensorClose(yv_expect, yv.numpy(), max_err=5e-6)
assertTensorClose(
running_mean.reshape(-1), bn.running_mean.numpy().reshape(-1), max_err=5e-6
)
assertTensorClose(
running_var.reshape(-1), bn.running_var.numpy().reshape(-1), max_err=5e-6
)
# test set 'training' flag to False
mean_backup = bn.running_mean.numpy()
var_backup = bn.running_var.numpy()
bn.training = False
xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
data.set_value(xv)
yv1 = bn(data)
yv2 = bn(data)
assertTensorClose(yv1.numpy(), yv2.numpy(), max_err=0)
assertTensorClose(mean_backup, bn.running_mean.numpy(), max_err=0)
assertTensorClose(var_backup, bn.running_var.numpy(), max_err=0)
yv_expect = (xv - running_mean) / np.sqrt(running_var + bn.eps)
assertTensorClose(yv_expect, yv1.numpy(), max_err=5e-6)
def test_batchnorm2d():
nr_chan = 8
data_shape = (3, nr_chan, 16, 16)
momentum = 0.9
bn = BatchNorm2d(nr_chan, momentum=momentum)
running_mean = np.zeros((1, nr_chan, 1, 1), dtype=np.float32)
running_var = np.ones((1, nr_chan, 1, 1), dtype=np.float32)
data = tensor()
for i in range(3):
xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
xv_transposed = np.transpose(xv, [0, 2, 3, 1]).reshape(
(data_shape[0] * data_shape[2] * data_shape[3], nr_chan)
)
mean = np.mean(xv_transposed, axis=0).reshape(1, nr_chan, 1, 1)
var_biased = np.var(xv_transposed, axis=0).reshape((1, nr_chan, 1, 1))
sd = np.sqrt(var_biased + bn.eps)
var_unbiased = np.var(xv_transposed, axis=0, ddof=1).reshape((1, nr_chan, 1, 1))
running_mean = running_mean * momentum + mean * (1 - momentum)
running_var = running_var * momentum + var_unbiased * (1 - momentum)
data.set_value(xv)
yv = bn(data)
yv_expect = (xv - mean) / sd
assertTensorClose(yv_expect, yv.numpy(), max_err=5e-6)
assertTensorClose(running_mean, bn.running_mean.numpy(), max_err=5e-6)
assertTensorClose(running_var, bn.running_var.numpy(), max_err=5e-6)
# test set 'training' flag to False
mean_backup = bn.running_mean.numpy()
var_backup = bn.running_var.numpy()
bn.training = False
xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
data.set_value(xv)
yv1 = bn(data)
yv2 = bn(data)
assertTensorClose(yv1.numpy(), yv2.numpy(), max_err=0)
assertTensorClose(mean_backup, bn.running_mean.numpy(), max_err=0)
assertTensorClose(var_backup, bn.running_var.numpy(), max_err=0)
yv_expect = (xv - running_mean) / np.sqrt(running_var + bn.eps)
assertTensorClose(yv_expect, yv1.numpy(), max_err=5e-6)
def test_batchnorm_no_stats():
nr_chan = 8
data_shape = (3, nr_chan, 4)
bn = BatchNorm1d(8, track_running_stats=False)
data = | tensor() | megengine.core.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
from megengine.core import tensor
from megengine.module import BatchNorm1d, BatchNorm2d
from megengine.test import assertTensorClose
def test_batchnorm():
nr_chan = 8
data_shape = (3, nr_chan, 4)
momentum = 0.9
bn = BatchNorm1d(nr_chan, momentum=momentum)
running_mean = np.zeros((1, nr_chan, 1), dtype=np.float32)
running_var = np.ones((1, nr_chan, 1), dtype=np.float32)
data = tensor()
for i in range(3):
xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
mean = np.mean(np.mean(xv, axis=0, keepdims=True), axis=2, keepdims=True)
xv_transposed = np.transpose(xv, [0, 2, 1]).reshape(
(data_shape[0] * data_shape[2], nr_chan)
)
var_biased = np.var(xv_transposed, axis=0).reshape((1, nr_chan, 1))
sd = np.sqrt(var_biased + bn.eps)
var_unbiased = np.var(xv_transposed, axis=0, ddof=1).reshape((1, nr_chan, 1))
running_mean = running_mean * momentum + mean * (1 - momentum)
running_var = running_var * momentum + var_unbiased * (1 - momentum)
data.set_value(xv)
yv = bn(data)
yv_expect = (xv - mean) / sd
assertTensorClose(yv_expect, yv.numpy(), max_err=5e-6)
assertTensorClose(
running_mean.reshape(-1), bn.running_mean.numpy().reshape(-1), max_err=5e-6
)
assertTensorClose(
running_var.reshape(-1), bn.running_var.numpy().reshape(-1), max_err=5e-6
)
# test set 'training' flag to False
mean_backup = bn.running_mean.numpy()
var_backup = bn.running_var.numpy()
bn.training = False
xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
data.set_value(xv)
yv1 = bn(data)
yv2 = bn(data)
assertTensorClose(yv1.numpy(), yv2.numpy(), max_err=0)
assertTensorClose(mean_backup, bn.running_mean.numpy(), max_err=0)
assertTensorClose(var_backup, bn.running_var.numpy(), max_err=0)
yv_expect = (xv - running_mean) / np.sqrt(running_var + bn.eps)
assertTensorClose(yv_expect, yv1.numpy(), max_err=5e-6)
def test_batchnorm2d():
nr_chan = 8
data_shape = (3, nr_chan, 16, 16)
momentum = 0.9
bn = BatchNorm2d(nr_chan, momentum=momentum)
running_mean = np.zeros((1, nr_chan, 1, 1), dtype=np.float32)
running_var = np.ones((1, nr_chan, 1, 1), dtype=np.float32)
data = tensor()
for i in range(3):
xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
xv_transposed = np.transpose(xv, [0, 2, 3, 1]).reshape(
(data_shape[0] * data_shape[2] * data_shape[3], nr_chan)
)
mean = np.mean(xv_transposed, axis=0).reshape(1, nr_chan, 1, 1)
var_biased = np.var(xv_transposed, axis=0).reshape((1, nr_chan, 1, 1))
sd = np.sqrt(var_biased + bn.eps)
var_unbiased = np.var(xv_transposed, axis=0, ddof=1).reshape((1, nr_chan, 1, 1))
running_mean = running_mean * momentum + mean * (1 - momentum)
running_var = running_var * momentum + var_unbiased * (1 - momentum)
data.set_value(xv)
yv = bn(data)
yv_expect = (xv - mean) / sd
assertTensorClose(yv_expect, yv.numpy(), max_err=5e-6)
assertTensorClose(running_mean, bn.running_mean.numpy(), max_err=5e-6)
assertTensorClose(running_var, bn.running_var.numpy(), max_err=5e-6)
# test set 'training' flag to False
mean_backup = bn.running_mean.numpy()
var_backup = bn.running_var.numpy()
bn.training = False
xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
data.set_value(xv)
yv1 = bn(data)
yv2 = bn(data)
assertTensorClose(yv1.numpy(), yv2.numpy(), max_err=0)
assertTensorClose(mean_backup, bn.running_mean.numpy(), max_err=0)
assertTensorClose(var_backup, bn.running_var.numpy(), max_err=0)
yv_expect = (xv - running_mean) / np.sqrt(running_var + bn.eps)
assertTensorClose(yv_expect, yv1.numpy(), max_err=5e-6)
def test_batchnorm_no_stats():
nr_chan = 8
data_shape = (3, nr_chan, 4)
bn = BatchNorm1d(8, track_running_stats=False)
data = tensor()
for i in range(4):
if i == 2:
bn.training = False
xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
mean = np.mean(np.mean(xv, axis=0, keepdims=True), axis=2, keepdims=True)
var = np.var(
np.transpose(xv, [0, 2, 1]).reshape(
(data_shape[0] * data_shape[2], nr_chan)
),
axis=0,
).reshape((1, nr_chan, 1))
sd = np.sqrt(var + bn.eps)
data.set_value(xv)
yv = bn(data)
yv_expect = (xv - mean) / sd
assertTensorClose(yv_expect, yv.numpy(), max_err=5e-6)
def test_batchnorm2d_no_stats():
nr_chan = 8
data_shape = (3, nr_chan, 16, 16)
bn = | BatchNorm2d(8, track_running_stats=False) | megengine.module.BatchNorm2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
from megengine.core import tensor
from megengine.module import BatchNorm1d, BatchNorm2d
from megengine.test import assertTensorClose
def test_batchnorm():
nr_chan = 8
data_shape = (3, nr_chan, 4)
momentum = 0.9
bn = BatchNorm1d(nr_chan, momentum=momentum)
running_mean = np.zeros((1, nr_chan, 1), dtype=np.float32)
running_var = np.ones((1, nr_chan, 1), dtype=np.float32)
data = tensor()
for i in range(3):
xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
mean = np.mean(np.mean(xv, axis=0, keepdims=True), axis=2, keepdims=True)
xv_transposed = np.transpose(xv, [0, 2, 1]).reshape(
(data_shape[0] * data_shape[2], nr_chan)
)
var_biased = np.var(xv_transposed, axis=0).reshape((1, nr_chan, 1))
sd = np.sqrt(var_biased + bn.eps)
var_unbiased = np.var(xv_transposed, axis=0, ddof=1).reshape((1, nr_chan, 1))
running_mean = running_mean * momentum + mean * (1 - momentum)
running_var = running_var * momentum + var_unbiased * (1 - momentum)
data.set_value(xv)
yv = bn(data)
yv_expect = (xv - mean) / sd
assertTensorClose(yv_expect, yv.numpy(), max_err=5e-6)
assertTensorClose(
running_mean.reshape(-1), bn.running_mean.numpy().reshape(-1), max_err=5e-6
)
assertTensorClose(
running_var.reshape(-1), bn.running_var.numpy().reshape(-1), max_err=5e-6
)
# test set 'training' flag to False
mean_backup = bn.running_mean.numpy()
var_backup = bn.running_var.numpy()
bn.training = False
xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
data.set_value(xv)
yv1 = bn(data)
yv2 = bn(data)
assertTensorClose(yv1.numpy(), yv2.numpy(), max_err=0)
assertTensorClose(mean_backup, bn.running_mean.numpy(), max_err=0)
assertTensorClose(var_backup, bn.running_var.numpy(), max_err=0)
yv_expect = (xv - running_mean) / np.sqrt(running_var + bn.eps)
assertTensorClose(yv_expect, yv1.numpy(), max_err=5e-6)
def test_batchnorm2d():
nr_chan = 8
data_shape = (3, nr_chan, 16, 16)
momentum = 0.9
bn = BatchNorm2d(nr_chan, momentum=momentum)
running_mean = np.zeros((1, nr_chan, 1, 1), dtype=np.float32)
running_var = np.ones((1, nr_chan, 1, 1), dtype=np.float32)
data = tensor()
for i in range(3):
xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
xv_transposed = np.transpose(xv, [0, 2, 3, 1]).reshape(
(data_shape[0] * data_shape[2] * data_shape[3], nr_chan)
)
mean = np.mean(xv_transposed, axis=0).reshape(1, nr_chan, 1, 1)
var_biased = np.var(xv_transposed, axis=0).reshape((1, nr_chan, 1, 1))
sd = np.sqrt(var_biased + bn.eps)
var_unbiased = np.var(xv_transposed, axis=0, ddof=1).reshape((1, nr_chan, 1, 1))
running_mean = running_mean * momentum + mean * (1 - momentum)
running_var = running_var * momentum + var_unbiased * (1 - momentum)
data.set_value(xv)
yv = bn(data)
yv_expect = (xv - mean) / sd
assertTensorClose(yv_expect, yv.numpy(), max_err=5e-6)
assertTensorClose(running_mean, bn.running_mean.numpy(), max_err=5e-6)
assertTensorClose(running_var, bn.running_var.numpy(), max_err=5e-6)
# test set 'training' flag to False
mean_backup = bn.running_mean.numpy()
var_backup = bn.running_var.numpy()
bn.training = False
xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
data.set_value(xv)
yv1 = bn(data)
yv2 = bn(data)
assertTensorClose(yv1.numpy(), yv2.numpy(), max_err=0)
assertTensorClose(mean_backup, bn.running_mean.numpy(), max_err=0)
assertTensorClose(var_backup, bn.running_var.numpy(), max_err=0)
yv_expect = (xv - running_mean) / np.sqrt(running_var + bn.eps)
assertTensorClose(yv_expect, yv1.numpy(), max_err=5e-6)
def test_batchnorm_no_stats():
nr_chan = 8
data_shape = (3, nr_chan, 4)
bn = BatchNorm1d(8, track_running_stats=False)
data = tensor()
for i in range(4):
if i == 2:
bn.training = False
xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
mean = np.mean(np.mean(xv, axis=0, keepdims=True), axis=2, keepdims=True)
var = np.var(
np.transpose(xv, [0, 2, 1]).reshape(
(data_shape[0] * data_shape[2], nr_chan)
),
axis=0,
).reshape((1, nr_chan, 1))
sd = np.sqrt(var + bn.eps)
data.set_value(xv)
yv = bn(data)
yv_expect = (xv - mean) / sd
assertTensorClose(yv_expect, yv.numpy(), max_err=5e-6)
def test_batchnorm2d_no_stats():
nr_chan = 8
data_shape = (3, nr_chan, 16, 16)
bn = BatchNorm2d(8, track_running_stats=False)
data = | tensor() | megengine.core.tensor |
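Sketch of the expectation encoded by the no-stats tests above: with track_running_stats=False the layer keeps normalizing with the current batch's biased statistics, even after bn.training is set to False. The eps value here is an assumption; the tests read it from bn.eps.
import numpy as np

nr_chan, eps = 8, 1e-5                                    # eps assumed, not from the source
xv = np.random.normal(loc=2.3, size=(3, nr_chan, 4)).astype(np.float32)
mean = xv.mean(axis=(0, 2), keepdims=True)
var = np.transpose(xv, [0, 2, 1]).reshape(-1, nr_chan).var(axis=0).reshape(1, nr_chan, 1)
expected = (xv - mean) / np.sqrt(var + eps)               # what bn(data) should return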
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
import megengine as mge
import megengine.module as M
from models.yolo_fpn import YOLOFPN
from models.yolo_head import YOLOXHead
from models.yolo_pafpn import YOLOPAFPN
from models.yolox import YOLOX
def build_yolox(name="yolox-s"):
num_classes = 80
# value meaning: depth, width
param_dict = {
"yolox-nano": (0.33, 0.25),
"yolox-tiny": (0.33, 0.375),
"yolox-s": (0.33, 0.50),
"yolox-m": (0.67, 0.75),
"yolox-l": (1.0, 1.0),
"yolox-x": (1.33, 1.25),
}
if name == "yolov3":
depth = 1.0
width = 1.0
backbone = YOLOFPN()
head = YOLOXHead(num_classes, width, in_channels=[128, 256, 512], act="lrelu")
model = YOLOX(backbone, head)
else:
assert name in param_dict
kwargs = {}
depth, width = param_dict[name]
if name == "yolox-nano":
kwargs["depthwise"] = True
in_channels = [256, 512, 1024]
backbone = YOLOPAFPN(depth, width, in_channels=in_channels, **kwargs)
head = YOLOXHead(num_classes, width, in_channels=in_channels, **kwargs)
model = YOLOX(backbone, head)
for m in model.modules():
if isinstance(m, M.BatchNorm2d):
m.eps = 1e-3
return model
def build_and_load(weight_file, name="yolox-s"):
model = build_yolox(name)
model_weights = | mge.load(weight_file) | megengine.load |
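A hedged sketch of how build_and_load is typically finished and used; the checkpoint filename, the "state_dict" key lookup, the strict=False flag, and the eval() call are assumptions rather than content of the truncated source above.
model = build_yolox("yolox-s")
model_weights = mge.load("yolox_s.pkl")                     # illustrative checkpoint path
state_dict = model_weights.get("state_dict", model_weights) # assumed checkpoint layout
model.load_state_dict(state_dict, strict=False)             # assumed loading call
model.eval()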
import numpy as np
import argparse
from datetime import datetime
import time
import model as resnet_model
import megengine as mge
import megengine.autodiff as ad
import megengine.functional as F
import megengine.optimizer as optim
parser = argparse.ArgumentParser(description="MegEngine ResNet Training")
parser.add_argument(
"-a",
"--arch",
default="resnet50",
help="model architecture (default: resnet50)",
)
parser.add_argument(
"--steps",
default=10,
type=int,
help="number of total steps to run (default: 10)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 64)",
)
parser.add_argument(
"--enable-dtr",
dest="enable_dtr",
action="store_true",
help="Enable DTR")
parser.add_argument(
"--memory-budget",
dest="mem_budget",
default=5,
type=int,
help="memory budget for DTR, measured in GB (default: 5)",
)
args = parser.parse_args()
if args.enable_dtr:
from megengine.utils.dtr import DTR
ds = | DTR(memory_budget=args.mem_budget*1024**3) | megengine.utils.dtr.DTR |
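Minimal sketch of the DTR setup above: the script constructs the helper with a byte budget before any large tensors exist and keeps the reference alive for the rest of the run; no other call is made to enable it in this snippet.
from megengine.utils.dtr import DTR

ds = DTR(memory_budget=5 * 1024 ** 3)   # 5 GB, mirroring the script's default budget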
import numpy as np
import argparse
from datetime import datetime
import time
import model as resnet_model
import megengine as mge
import megengine.autodiff as ad
import megengine.functional as F
import megengine.optimizer as optim
parser = argparse.ArgumentParser(description="MegEngine ResNet Training")
parser.add_argument(
"-a",
"--arch",
default="resnet50",
help="model architecture (default: resnet50)",
)
parser.add_argument(
"--steps",
default=10,
type=int,
help="number of total steps to run (default: 10)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 64)",
)
parser.add_argument(
"--enable-dtr",
dest="enable_dtr",
action="store_true",
help="Enable DTR")
parser.add_argument(
"--memory-budget",
dest="mem_budget",
default=5,
type=int,
help="memory budget for DTR, measured in GB (default: 5)",
)
args = parser.parse_args()
if args.enable_dtr:
from megengine.utils.dtr import DTR
ds = DTR(memory_budget=args.mem_budget*1024**3)
batch_size = args.batch_size
image = mge.tensor(np.random.random((batch_size, 3, 224, 224)))
label = mge.tensor(np.random.randint(100, size=(batch_size,)))
#model = resnet_model.__dict__["resnet50"]()
model = resnet_model.__dict__[args.arch]()
gm= | ad.GradManager() | megengine.autodiff.GradManager |
import numpy as np
import argparse
from datetime import datetime
import time
import model as resnet_model
import megengine as mge
import megengine.autodiff as ad
import megengine.functional as F
import megengine.optimizer as optim
parser = argparse.ArgumentParser(description="MegEngine ResNet Training")
parser.add_argument(
"-a",
"--arch",
default="resnet50",
help="model architecture (default: resnet50)",
)
parser.add_argument(
"--steps",
default=10,
type=int,
help="number of total steps to run (default: 10)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 64)",
)
parser.add_argument(
"--enable-dtr",
dest="enable_dtr",
action="store_true",
help="Enable DTR")
parser.add_argument(
"--memory-budget",
dest="mem_budget",
default=5,
type=int,
help="memory budget for DTR, measured in GB (default: 5)",
)
args = parser.parse_args()
if args.enable_dtr:
from megengine.utils.dtr import DTR
ds = DTR(memory_budget=args.mem_budget*1024**3)
batch_size = args.batch_size
image = mge.tensor(np.random.random((batch_size, 3, 224, 224)))
label = mge.tensor(np.random.randint(100, size=(batch_size,)))
#model = resnet_model.__dict__["resnet50"]()
model = resnet_model.__dict__[args.arch]()
gm = ad.GradManager().attach(model.parameters())
opt = optim.SGD(model.parameters(), lr=0.0125, momentum=0.9, weight_decay=1e-4)
# milliseconds
print(datetime.now().timetz())
time_list = []
cur_time = int(round(time.time()*1000))
for i in range(args.steps):
with gm:
logits=model(image)
loss= | F.nn.cross_entropy(logits, label) | megengine.functional.nn.cross_entropy |
import numpy as np
import argparse
from datetime import datetime
import time
import model as resnet_model
import megengine as mge
import megengine.autodiff as ad
import megengine.functional as F
import megengine.optimizer as optim
parser = argparse.ArgumentParser(description="MegEngine ResNet Training")
parser.add_argument(
"-a",
"--arch",
default="resnet50",
help="model architecture (default: resnet50)",
)
parser.add_argument(
"--steps",
default=10,
type=int,
help="number of total steps to run (default: 10)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 64)",
)
parser.add_argument(
"--enable-dtr",
dest="enable_dtr",
action="store_true",
help="Enable DTR")
parser.add_argument(
"--memory-budget",
dest="mem_budget",
default=5,
type=int,
help="memory budget for DTR, measured in GB (default: 5)",
)
args = parser.parse_args()
if args.enable_dtr:
from megengine.utils.dtr import DTR
ds = DTR(memory_budget=args.mem_budget*1024**3)
batch_size = args.batch_size
image = mge.tensor(np.random.random((batch_size, 3, 224, 224)))
label = mge.tensor(np.random.randint(100, size=(batch_size,)))
#model = resnet_model.__dict__["resnet50"]()
model = resnet_model.__dict__[args.arch]()
gm = ad.GradManager().attach(model.parameters())
opt = optim.SGD(model.parameters(), lr=0.0125, momentum=0.9, weight_decay=1e-4)
# milliseconds
print(datetime.now().timetz())
time_list = []
cur_time = int(round(time.time()*1000))
for i in range(args.steps):
with gm:
logits=model(image)
loss=F.nn.cross_entropy(logits, label)
gm.backward(loss)
total, free = | mge.get_mem_status_bytes() | megengine.get_mem_status_bytes |
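Hedged sketch of how such a step loop is usually closed out, since the original is truncated above: apply the optimizer update, clear the gradients, and record per-step wall time in milliseconds. The chained step().clear_grad() idiom is an assumption about this MegEngine version.
for i in range(args.steps):
    with gm:
        logits = model(image)
        loss = F.nn.cross_entropy(logits, label)
        gm.backward(loss)
    opt.step().clear_grad()                    # assumed optimizer API
    now = int(round(time.time() * 1000))
    time_list.append(now - cur_time)
    cur_time = now
print("avg step time (ms):", sum(time_list) / len(time_list))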
import numpy as np
import megengine.functional as F
import megengine.module as M
from config import config
from .anchors_generator import AnchorGenerator
from .find_top_rpn_proposals import find_top_rpn_proposals
from .fpn_anchor_target import fpn_anchor_target, fpn_rpn_reshape
from det_opr.loss_opr import softmax_loss, smooth_l1_loss_rpn
import pdb
class RPN(M.Module):
def __init__(self, rpn_channel=256):
super().__init__()
self.anchors_generator = AnchorGenerator(
config.anchor_base_size,
config.anchor_aspect_ratios,
config.anchor_base_scale)
self.rpn_conv = | M.Conv2d(256, rpn_channel, kernel_size=3, stride=1, padding=1) | megengine.module.Conv2d |
import numpy as np
import megengine.functional as F
import megengine.module as M
from config import config
from .anchors_generator import AnchorGenerator
from .find_top_rpn_proposals import find_top_rpn_proposals
from .fpn_anchor_target import fpn_anchor_target, fpn_rpn_reshape
from det_opr.loss_opr import softmax_loss, smooth_l1_loss_rpn
import pdb
class RPN(M.Module):
def __init__(self, rpn_channel=256):
super().__init__()
self.anchors_generator = AnchorGenerator(
config.anchor_base_size,
config.anchor_aspect_ratios,
config.anchor_base_scale)
self.rpn_conv = M.Conv2d(256, rpn_channel, kernel_size=3, stride=1, padding=1)
self.rpn_cls_score = | M.Conv2d(rpn_channel, config.num_cell_anchors * 2, kernel_size=1, stride=1) | megengine.module.Conv2d |
import numpy as np
import megengine.functional as F
import megengine.module as M
from config import config
from .anchors_generator import AnchorGenerator
from .find_top_rpn_proposals import find_top_rpn_proposals
from .fpn_anchor_target import fpn_anchor_target, fpn_rpn_reshape
from det_opr.loss_opr import softmax_loss, smooth_l1_loss_rpn
import pdb
class RPN(M.Module):
def __init__(self, rpn_channel=256):
super().__init__()
self.anchors_generator = AnchorGenerator(
config.anchor_base_size,
config.anchor_aspect_ratios,
config.anchor_base_scale)
self.rpn_conv = M.Conv2d(256, rpn_channel, kernel_size=3, stride=1, padding=1)
self.rpn_cls_score = M.Conv2d(rpn_channel, config.num_cell_anchors * 2, kernel_size=1, stride=1)
self.rpn_bbox_offsets = | M.Conv2d(rpn_channel, config.num_cell_anchors * 4, kernel_size=1, stride=1) | megengine.module.Conv2d |
import numpy as np
import megengine.functional as F
import megengine.module as M
from config import config
from .anchors_generator import AnchorGenerator
from .find_top_rpn_proposals import find_top_rpn_proposals
from .fpn_anchor_target import fpn_anchor_target, fpn_rpn_reshape
from det_opr.loss_opr import softmax_loss, smooth_l1_loss_rpn
import pdb
class RPN(M.Module):
def __init__(self, rpn_channel=256):
super().__init__()
self.anchors_generator = AnchorGenerator(
config.anchor_base_size,
config.anchor_aspect_ratios,
config.anchor_base_scale)
self.rpn_conv = M.Conv2d(256, rpn_channel, kernel_size=3, stride=1, padding=1)
self.rpn_cls_score = M.Conv2d(rpn_channel, config.num_cell_anchors * 2, kernel_size=1, stride=1)
self.rpn_bbox_offsets = M.Conv2d(rpn_channel, config.num_cell_anchors * 4, kernel_size=1, stride=1)
for l in [self.rpn_conv, self.rpn_cls_score, self.rpn_bbox_offsets]:
| M.init.normal_(l.weight, std=0.01) | megengine.module.init.normal_ |
import numpy as np
import megengine.functional as F
import megengine.module as M
from config import config
from .anchors_generator import AnchorGenerator
from .find_top_rpn_proposals import find_top_rpn_proposals
from .fpn_anchor_target import fpn_anchor_target, fpn_rpn_reshape
from det_opr.loss_opr import softmax_loss, smooth_l1_loss_rpn
import pdb
class RPN(M.Module):
def __init__(self, rpn_channel=256):
super().__init__()
self.anchors_generator = AnchorGenerator(
config.anchor_base_size,
config.anchor_aspect_ratios,
config.anchor_base_scale)
self.rpn_conv = M.Conv2d(256, rpn_channel, kernel_size=3, stride=1, padding=1)
self.rpn_cls_score = M.Conv2d(rpn_channel, config.num_cell_anchors * 2, kernel_size=1, stride=1)
self.rpn_bbox_offsets = M.Conv2d(rpn_channel, config.num_cell_anchors * 4, kernel_size=1, stride=1)
for l in [self.rpn_conv, self.rpn_cls_score, self.rpn_bbox_offsets]:
M.init.normal_(l.weight, std=0.01)
| M.init.fill_(l.bias, 0) | megengine.module.init.fill_ |
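Condensed, self-contained version of the head-initialization pattern used by RPN above: each conv head gets N(0, 0.01) weights and a zero-filled bias.
import megengine.module as M

conv = M.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
score = M.Conv2d(256, 2, kernel_size=1, stride=1)
for l in (conv, score):
    M.init.normal_(l.weight, std=0.01)
    M.init.fill_(l.bias, 0)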
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import itertools
import os
from typing import Callable
import numpy as np
import pytest
import megengine as mge
import megengine.module.init as init
from megengine.core import tensor
from megengine.functional import cross_entropy_with_softmax, relu
from megengine.jit import trace
from megengine.module import Linear, Module
from megengine.optimizer import SGD, Optimizer
from megengine.test import assertTensorClose
batch_size = 64
data_shape = (batch_size, 2)
label_shape = (batch_size,)
def minibatch_generator():
while True:
inp_data = np.zeros((batch_size, 2))
label = np.zeros(batch_size, dtype=np.int32)
for i in range(batch_size):
# [x0, x1], sampled from U[-1, 1]
inp_data[i, :] = np.random.rand(2) * 2 - 1
label[i] = 0 if np.prod(inp_data[i]) < 0 else 1
yield inp_data.astype(np.float32), label.astype(np.int32)
class SimpleNet(Module):
def __init__(self):
self.mid_layers = 14
self.num_class = 2
super().__init__()
self.fc0 = Linear(self.num_class, self.mid_layers, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc0.weight)
init.normal_(self.fc0.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc0.bias)
self.fc1 = Linear(self.mid_layers, self.mid_layers, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc1.weight)
init.normal_(self.fc1.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc1.bias)
self.fc2 = Linear(self.mid_layers, self.num_class, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc2.weight)
init.normal_(self.fc2.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc2.bias)
def forward(self, x):
x = self.fc0(x)
x = relu(x) # Should use tanh but it's not stable now.
x = self.fc1(x)
x = relu(x) # Should use tanh but it's not stable now.
x = self.fc2(x)
return x
def generate_eager_step(net: Module, opt_factory: Callable[[Module], Optimizer]):
data_inp = tensor(np.zeros(data_shape), dtype=np.float32)
label_inp = tensor(np.zeros(label_shape), dtype=np.int32)
opt = opt_factory(net)
def step(data, label):
opt.zero_grad()
data_inp.set_value(data)
label_inp.set_value(label)
pred = net(data_inp)
loss = cross_entropy_with_softmax(pred, label_inp)
opt.backward(loss)
opt.step()
return loss.numpy()[0]
return step
def generate_static_step(net: Module, opt_factory: Callable[[Module], Optimizer]):
data = tensor(np.zeros(data_shape), dtype=np.float32)
label = tensor(np.zeros(label_shape), dtype=np.int32)
opt = opt_factory(net)
# Save state to reset parameters later.
state = copy.deepcopy(net.state_dict())
# Evaluate network in eager mode once.
pred = net(data)
loss = | cross_entropy_with_softmax(pred, label) | megengine.functional.cross_entropy_with_softmax |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import itertools
import os
from typing import Callable
import numpy as np
import pytest
import megengine as mge
import megengine.module.init as init
from megengine.core import tensor
from megengine.functional import cross_entropy_with_softmax, relu
from megengine.jit import trace
from megengine.module import Linear, Module
from megengine.optimizer import SGD, Optimizer
from megengine.test import assertTensorClose
batch_size = 64
data_shape = (batch_size, 2)
label_shape = (batch_size,)
def minibatch_generator():
while True:
inp_data = np.zeros((batch_size, 2))
label = np.zeros(batch_size, dtype=np.int32)
for i in range(batch_size):
# [x0, x1], sampled from U[-1, 1]
inp_data[i, :] = np.random.rand(2) * 2 - 1
label[i] = 0 if np.prod(inp_data[i]) < 0 else 1
yield inp_data.astype(np.float32), label.astype(np.int32)
class SimpleNet(Module):
def __init__(self):
self.mid_layers = 14
self.num_class = 2
super().__init__()
self.fc0 = Linear(self.num_class, self.mid_layers, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc0.weight)
init.normal_(self.fc0.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc0.bias)
self.fc1 = Linear(self.mid_layers, self.mid_layers, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc1.weight)
init.normal_(self.fc1.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc1.bias)
self.fc2 = Linear(self.mid_layers, self.num_class, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc2.weight)
init.normal_(self.fc2.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc2.bias)
def forward(self, x):
x = self.fc0(x)
x = relu(x) # Should use tanh but it's not stable now.
x = self.fc1(x)
x = relu(x) # Should use tanh but it's not stable now.
x = self.fc2(x)
return x
def generate_eager_step(net: Module, opt_factory: Callable[[Module], Optimizer]):
data_inp = tensor(np.zeros(data_shape), dtype=np.float32)
label_inp = tensor(np.zeros(label_shape), dtype=np.int32)
opt = opt_factory(net)
def step(data, label):
opt.zero_grad()
data_inp.set_value(data)
label_inp.set_value(label)
pred = net(data_inp)
loss = cross_entropy_with_softmax(pred, label_inp)
opt.backward(loss)
opt.step()
return loss.numpy()[0]
return step
def generate_static_step(net: Module, opt_factory: Callable[[Module], Optimizer]):
data = tensor(np.zeros(data_shape), dtype=np.float32)
label = tensor(np.zeros(label_shape), dtype=np.int32)
opt = opt_factory(net)
# Save state to reset parameters later.
state = copy.deepcopy(net.state_dict())
# Evaluate network in eager mode once.
pred = net(data)
loss = cross_entropy_with_softmax(pred, label)
opt.zero_grad()
grads = opt.backward(loss)
f = | mge.graph.compile(loss, grads) | megengine.graph.compile |
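Hedged usage sketch for the step factories above: build the toy network, wrap it with an SGD factory via generate_eager_step, and feed it batches from minibatch_generator; the learning rate and step count are illustrative.
net = SimpleNet()
step = generate_eager_step(net, lambda m: SGD(m.parameters(), lr=0.01))
batches = minibatch_generator()
for _ in range(5):
    data, label = next(batches)
    loss = step(data, label)      # scalar loss for this minibatch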
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import itertools
import os
from typing import Callable
import numpy as np
import pytest
import megengine as mge
import megengine.module.init as init
from megengine.core import tensor
from megengine.functional import cross_entropy_with_softmax, relu
from megengine.jit import trace
from megengine.module import Linear, Module
from megengine.optimizer import SGD, Optimizer
from megengine.test import assertTensorClose
batch_size = 64
data_shape = (batch_size, 2)
label_shape = (batch_size,)
def minibatch_generator():
while True:
inp_data = np.zeros((batch_size, 2))
label = np.zeros(batch_size, dtype=np.int32)
for i in range(batch_size):
# [x0, x1], sampled from U[-1, 1]
inp_data[i, :] = np.random.rand(2) * 2 - 1
label[i] = 0 if np.prod(inp_data[i]) < 0 else 1
yield inp_data.astype(np.float32), label.astype(np.int32)
class SimpleNet(Module):
def __init__(self):
self.mid_layers = 14
self.num_class = 2
super().__init__()
self.fc0 = | Linear(self.num_class, self.mid_layers, bias=True) | megengine.module.Linear |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import itertools
import os
from typing import Callable
import numpy as np
import pytest
import megengine as mge
import megengine.module.init as init
from megengine.core import tensor
from megengine.functional import cross_entropy_with_softmax, relu
from megengine.jit import trace
from megengine.module import Linear, Module
from megengine.optimizer import SGD, Optimizer
from megengine.test import assertTensorClose
batch_size = 64
data_shape = (batch_size, 2)
label_shape = (batch_size,)
def minibatch_generator():
while True:
inp_data = np.zeros((batch_size, 2))
label = np.zeros(batch_size, dtype=np.int32)
for i in range(batch_size):
# [x0, x1], sampled from U[-1, 1]
inp_data[i, :] = np.random.rand(2) * 2 - 1
label[i] = 0 if np.prod(inp_data[i]) < 0 else 1
yield inp_data.astype(np.float32), label.astype(np.int32)
class SimpleNet(Module):
def __init__(self):
self.mid_layers = 14
self.num_class = 2
super().__init__()
self.fc0 = Linear(self.num_class, self.mid_layers, bias=True)
fan_in, _ = | init.calculate_fan_in_and_fan_out(self.fc0.weight) | megengine.module.init.calculate_fan_in_and_fan_out |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import itertools
import os
from typing import Callable
import numpy as np
import pytest
import megengine as mge
import megengine.module.init as init
from megengine.core import tensor
from megengine.functional import cross_entropy_with_softmax, relu
from megengine.jit import trace
from megengine.module import Linear, Module
from megengine.optimizer import SGD, Optimizer
from megengine.test import assertTensorClose
batch_size = 64
data_shape = (batch_size, 2)
label_shape = (batch_size,)
def minibatch_generator():
while True:
inp_data = np.zeros((batch_size, 2))
label = np.zeros(batch_size, dtype=np.int32)
for i in range(batch_size):
# [x0, x1], sampled from U[-1, 1]
inp_data[i, :] = np.random.rand(2) * 2 - 1
label[i] = 0 if np.prod(inp_data[i]) < 0 else 1
yield inp_data.astype(np.float32), label.astype(np.int32)
class SimpleNet(Module):
def __init__(self):
self.mid_layers = 14
self.num_class = 2
super().__init__()
self.fc0 = Linear(self.num_class, self.mid_layers, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc0.weight)
init.normal_(self.fc0.weight, std=np.sqrt(float(1.0) / fan_in))
| init.zeros_(self.fc0.bias) | megengine.module.init.zeros_ |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import itertools
import os
from typing import Callable
import numpy as np
import pytest
import megengine as mge
import megengine.module.init as init
from megengine.core import tensor
from megengine.functional import cross_entropy_with_softmax, relu
from megengine.jit import trace
from megengine.module import Linear, Module
from megengine.optimizer import SGD, Optimizer
from megengine.test import assertTensorClose
batch_size = 64
data_shape = (batch_size, 2)
label_shape = (batch_size,)
def minibatch_generator():
while True:
inp_data = np.zeros((batch_size, 2))
label = np.zeros(batch_size, dtype=np.int32)
for i in range(batch_size):
# [x0, x1], sampled from U[-1, 1]
inp_data[i, :] = np.random.rand(2) * 2 - 1
label[i] = 0 if np.prod(inp_data[i]) < 0 else 1
yield inp_data.astype(np.float32), label.astype(np.int32)
class SimpleNet(Module):
def __init__(self):
self.mid_layers = 14
self.num_class = 2
super().__init__()
self.fc0 = Linear(self.num_class, self.mid_layers, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc0.weight)
init.normal_(self.fc0.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc0.bias)
self.fc1 = | Linear(self.mid_layers, self.mid_layers, bias=True) | megengine.module.Linear |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import itertools
import os
from typing import Callable
import numpy as np
import pytest
import megengine as mge
import megengine.module.init as init
from megengine.core import tensor
from megengine.functional import cross_entropy_with_softmax, relu
from megengine.jit import trace
from megengine.module import Linear, Module
from megengine.optimizer import SGD, Optimizer
from megengine.test import assertTensorClose
batch_size = 64
data_shape = (batch_size, 2)
label_shape = (batch_size,)
def minibatch_generator():
while True:
inp_data = np.zeros((batch_size, 2))
label = np.zeros(batch_size, dtype=np.int32)
for i in range(batch_size):
# [x0, x1], sampled from U[-1, 1]
inp_data[i, :] = np.random.rand(2) * 2 - 1
label[i] = 0 if np.prod(inp_data[i]) < 0 else 1
yield inp_data.astype(np.float32), label.astype(np.int32)
class SimpleNet(Module):
def __init__(self):
self.mid_layers = 14
self.num_class = 2
super().__init__()
self.fc0 = Linear(self.num_class, self.mid_layers, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc0.weight)
init.normal_(self.fc0.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc0.bias)
self.fc1 = Linear(self.mid_layers, self.mid_layers, bias=True)
fan_in, _ = | init.calculate_fan_in_and_fan_out(self.fc1.weight) | megengine.module.init.calculate_fan_in_and_fan_out |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import itertools
import os
from typing import Callable
import numpy as np
import pytest
import megengine as mge
import megengine.module.init as init
from megengine.core import tensor
from megengine.functional import cross_entropy_with_softmax, relu
from megengine.jit import trace
from megengine.module import Linear, Module
from megengine.optimizer import SGD, Optimizer
from megengine.test import assertTensorClose
batch_size = 64
data_shape = (batch_size, 2)
label_shape = (batch_size,)
def minibatch_generator():
while True:
inp_data = np.zeros((batch_size, 2))
label = np.zeros(batch_size, dtype=np.int32)
for i in range(batch_size):
# [x0, x1], sampled from U[-1, 1]
inp_data[i, :] = np.random.rand(2) * 2 - 1
label[i] = 0 if np.prod(inp_data[i]) < 0 else 1
yield inp_data.astype(np.float32), label.astype(np.int32)
class SimpleNet(Module):
def __init__(self):
self.mid_layers = 14
self.num_class = 2
super().__init__()
self.fc0 = Linear(self.num_class, self.mid_layers, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc0.weight)
init.normal_(self.fc0.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc0.bias)
self.fc1 = Linear(self.mid_layers, self.mid_layers, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc1.weight)
init.normal_(self.fc1.weight, std=np.sqrt(float(1.0) / fan_in))
| init.zeros_(self.fc1.bias) | megengine.module.init.zeros_ |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import itertools
import os
from typing import Callable
import numpy as np
import pytest
import megengine as mge
import megengine.module.init as init
from megengine.core import tensor
from megengine.functional import cross_entropy_with_softmax, relu
from megengine.jit import trace
from megengine.module import Linear, Module
from megengine.optimizer import SGD, Optimizer
from megengine.test import assertTensorClose
batch_size = 64
data_shape = (batch_size, 2)
label_shape = (batch_size,)
def minibatch_generator():
while True:
inp_data = np.zeros((batch_size, 2))
label = np.zeros(batch_size, dtype=np.int32)
for i in range(batch_size):
# [x0, x1], sampled from U[-1, 1]
inp_data[i, :] = np.random.rand(2) * 2 - 1
label[i] = 0 if np.prod(inp_data[i]) < 0 else 1
yield inp_data.astype(np.float32), label.astype(np.int32)
class SimpleNet(Module):
def __init__(self):
self.mid_layers = 14
self.num_class = 2
super().__init__()
self.fc0 = Linear(self.num_class, self.mid_layers, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc0.weight)
init.normal_(self.fc0.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc0.bias)
self.fc1 = Linear(self.mid_layers, self.mid_layers, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc1.weight)
init.normal_(self.fc1.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc1.bias)
self.fc2 = | Linear(self.mid_layers, self.num_class, bias=True) | megengine.module.Linear |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import itertools
import os
from typing import Callable
import numpy as np
import pytest
import megengine as mge
import megengine.module.init as init
from megengine.core import tensor
from megengine.functional import cross_entropy_with_softmax, relu
from megengine.jit import trace
from megengine.module import Linear, Module
from megengine.optimizer import SGD, Optimizer
from megengine.test import assertTensorClose
batch_size = 64
data_shape = (batch_size, 2)
label_shape = (batch_size,)
def minibatch_generator():
while True:
inp_data = np.zeros((batch_size, 2))
label = np.zeros(batch_size, dtype=np.int32)
for i in range(batch_size):
# [x0, x1], sampled from U[-1, 1]
inp_data[i, :] = np.random.rand(2) * 2 - 1
label[i] = 0 if np.prod(inp_data[i]) < 0 else 1
yield inp_data.astype(np.float32), label.astype(np.int32)
class SimpleNet(Module):
def __init__(self):
self.mid_layers = 14
self.num_class = 2
super().__init__()
self.fc0 = Linear(self.num_class, self.mid_layers, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc0.weight)
init.normal_(self.fc0.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc0.bias)
self.fc1 = Linear(self.mid_layers, self.mid_layers, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc1.weight)
init.normal_(self.fc1.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc1.bias)
self.fc2 = Linear(self.mid_layers, self.num_class, bias=True)
fan_in, _ = | init.calculate_fan_in_and_fan_out(self.fc2.weight) | megengine.module.init.calculate_fan_in_and_fan_out |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import itertools
import os
from typing import Callable
import numpy as np
import pytest
import megengine as mge
import megengine.module.init as init
from megengine.core import tensor
from megengine.functional import cross_entropy_with_softmax, relu
from megengine.jit import trace
from megengine.module import Linear, Module
from megengine.optimizer import SGD, Optimizer
from megengine.test import assertTensorClose
batch_size = 64
data_shape = (batch_size, 2)
label_shape = (batch_size,)
def minibatch_generator():
while True:
inp_data = np.zeros((batch_size, 2))
label = np.zeros(batch_size, dtype=np.int32)
for i in range(batch_size):
# [x0, x1], sampled from U[-1, 1]
inp_data[i, :] = np.random.rand(2) * 2 - 1
label[i] = 0 if np.prod(inp_data[i]) < 0 else 1
yield inp_data.astype(np.float32), label.astype(np.int32)
class SimpleNet(Module):
def __init__(self):
self.mid_layers = 14
self.num_class = 2
super().__init__()
self.fc0 = Linear(self.num_class, self.mid_layers, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc0.weight)
init.normal_(self.fc0.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc0.bias)
self.fc1 = Linear(self.mid_layers, self.mid_layers, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc1.weight)
init.normal_(self.fc1.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc1.bias)
self.fc2 = Linear(self.mid_layers, self.num_class, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc2.weight)
init.normal_(self.fc2.weight, std=np.sqrt(float(1.0) / fan_in))
| init.zeros_(self.fc2.bias) | megengine.module.init.zeros_ |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import itertools
import os
from typing import Callable
import numpy as np
import pytest
import megengine as mge
import megengine.module.init as init
from megengine.core import tensor
from megengine.functional import cross_entropy_with_softmax, relu
from megengine.jit import trace
from megengine.module import Linear, Module
from megengine.optimizer import SGD, Optimizer
from megengine.test import assertTensorClose
batch_size = 64
data_shape = (batch_size, 2)
label_shape = (batch_size,)
def minibatch_generator():
while True:
inp_data = np.zeros((batch_size, 2))
label = np.zeros(batch_size, dtype=np.int32)
for i in range(batch_size):
# [x0, x1], sampled from U[-1, 1]
inp_data[i, :] = np.random.rand(2) * 2 - 1
label[i] = 0 if np.prod(inp_data[i]) < 0 else 1
yield inp_data.astype(np.float32), label.astype(np.int32)
class SimpleNet(Module):
def __init__(self):
self.mid_layers = 14
self.num_class = 2
super().__init__()
self.fc0 = Linear(self.num_class, self.mid_layers, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc0.weight)
init.normal_(self.fc0.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc0.bias)
self.fc1 = Linear(self.mid_layers, self.mid_layers, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc1.weight)
init.normal_(self.fc1.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc1.bias)
self.fc2 = Linear(self.mid_layers, self.num_class, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc2.weight)
init.normal_(self.fc2.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc2.bias)
def forward(self, x):
x = self.fc0(x)
x = | relu(x) | megengine.functional.relu |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import itertools
import os
from typing import Callable
import numpy as np
import pytest
import megengine as mge
import megengine.module.init as init
from megengine.core import tensor
from megengine.functional import cross_entropy_with_softmax, relu
from megengine.jit import trace
from megengine.module import Linear, Module
from megengine.optimizer import SGD, Optimizer
from megengine.test import assertTensorClose
batch_size = 64
data_shape = (batch_size, 2)
label_shape = (batch_size,)
def minibatch_generator():
while True:
inp_data = np.zeros((batch_size, 2))
label = np.zeros(batch_size, dtype=np.int32)
for i in range(batch_size):
# [x0, x1], sampled from U[-1, 1]
inp_data[i, :] = np.random.rand(2) * 2 - 1
label[i] = 0 if np.prod(inp_data[i]) < 0 else 1
yield inp_data.astype(np.float32), label.astype(np.int32)
class SimpleNet(Module):
def __init__(self):
self.mid_layers = 14
self.num_class = 2
super().__init__()
self.fc0 = Linear(self.num_class, self.mid_layers, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc0.weight)
init.normal_(self.fc0.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc0.bias)
self.fc1 = Linear(self.mid_layers, self.mid_layers, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc1.weight)
init.normal_(self.fc1.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc1.bias)
self.fc2 = Linear(self.mid_layers, self.num_class, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc2.weight)
init.normal_(self.fc2.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc2.bias)
def forward(self, x):
x = self.fc0(x)
x = relu(x) # Should use tanh but it's not stable now.
x = self.fc1(x)
x = | relu(x) | megengine.functional.relu |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import itertools
import os
from typing import Callable
import numpy as np
import pytest
import megengine as mge
import megengine.module.init as init
from megengine.core import tensor
from megengine.functional import cross_entropy_with_softmax, relu
from megengine.jit import trace
from megengine.module import Linear, Module
from megengine.optimizer import SGD, Optimizer
from megengine.test import assertTensorClose
batch_size = 64
data_shape = (batch_size, 2)
label_shape = (batch_size,)
def minibatch_generator():
while True:
inp_data = np.zeros((batch_size, 2))
label = np.zeros(batch_size, dtype=np.int32)
for i in range(batch_size):
# [x0, x1], sampled from U[-1, 1]
inp_data[i, :] = np.random.rand(2) * 2 - 1
label[i] = 0 if np.prod(inp_data[i]) < 0 else 1
yield inp_data.astype(np.float32), label.astype(np.int32)
class SimpleNet(Module):
def __init__(self):
self.mid_layers = 14
self.num_class = 2
super().__init__()
self.fc0 = Linear(self.num_class, self.mid_layers, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc0.weight)
init.normal_(self.fc0.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc0.bias)
self.fc1 = Linear(self.mid_layers, self.mid_layers, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc1.weight)
init.normal_(self.fc1.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc1.bias)
self.fc2 = Linear(self.mid_layers, self.num_class, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc2.weight)
init.normal_(self.fc2.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc2.bias)
def forward(self, x):
x = self.fc0(x)
x = relu(x) # Should use tanh but it's not stable now.
x = self.fc1(x)
x = relu(x) # Should use tanh but it's not stable now.
x = self.fc2(x)
return x
def generate_eager_step(net: Module, opt_factory: Callable[[Module], Optimizer]):
data_inp = tensor(np.zeros(data_shape), dtype=np.float32)
label_inp = tensor(np.zeros(label_shape), dtype=np.int32)
opt = opt_factory(net)
def step(data, label):
opt.zero_grad()
data_inp.set_value(data)
label_inp.set_value(label)
pred = net(data_inp)
loss = | cross_entropy_with_softmax(pred, label_inp) | megengine.functional.cross_entropy_with_softmax |
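cross_entropy_with_softmax fuses the softmax with the cross-entropy loss. For intuition only, a plain NumPy reference computation of the same quantity; the function and variable names here are assumptions, not MegEngine API:

import numpy as np

def softmax_xent(logits, labels):
    # subtract the row-wise max for numerical stability
    shifted = logits - logits.max(axis=1, keepdims=True)
    log_probs = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
    # mean negative log-likelihood of the true classes
    return -log_probs[np.arange(len(labels)), labels].mean()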
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import itertools
import os
from typing import Callable
import numpy as np
import pytest
import megengine as mge
import megengine.module.init as init
from megengine.core import tensor
from megengine.functional import cross_entropy_with_softmax, relu
from megengine.jit import trace
from megengine.module import Linear, Module
from megengine.optimizer import SGD, Optimizer
from megengine.test import assertTensorClose
batch_size = 64
data_shape = (batch_size, 2)
label_shape = (batch_size,)
def minibatch_generator():
while True:
inp_data = np.zeros((batch_size, 2))
label = np.zeros(batch_size, dtype=np.int32)
for i in range(batch_size):
# [x0, x1], sampled from U[-1, 1]
inp_data[i, :] = np.random.rand(2) * 2 - 1
label[i] = 0 if np.prod(inp_data[i]) < 0 else 1
yield inp_data.astype(np.float32), label.astype(np.int32)
class SimpleNet(Module):
def __init__(self):
self.mid_layers = 14
self.num_class = 2
super().__init__()
self.fc0 = Linear(self.num_class, self.mid_layers, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc0.weight)
init.normal_(self.fc0.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc0.bias)
self.fc1 = Linear(self.mid_layers, self.mid_layers, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc1.weight)
init.normal_(self.fc1.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc1.bias)
self.fc2 = Linear(self.mid_layers, self.num_class, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc2.weight)
init.normal_(self.fc2.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc2.bias)
def forward(self, x):
x = self.fc0(x)
x = relu(x) # Should use tanh but it's not stable now.
x = self.fc1(x)
x = relu(x) # Should use tanh but it's not stable now.
x = self.fc2(x)
return x
def generate_eager_step(net: Module, opt_factory: Callable[[Module], Optimizer]):
data_inp = tensor(np.zeros(data_shape), dtype=np.float32)
label_inp = tensor(np.zeros(label_shape), dtype=np.int32)
opt = opt_factory(net)
def step(data, label):
opt.zero_grad()
data_inp.set_value(data)
label_inp.set_value(label)
pred = net(data_inp)
loss = cross_entropy_with_softmax(pred, label_inp)
opt.backward(loss)
opt.step()
return loss.numpy()[0]
return step
def generate_static_step(net: Module, opt_factory: Callable[[Module], Optimizer]):
data = tensor(np.zeros(data_shape), dtype=np.float32)
label = tensor(np.zeros(label_shape), dtype=np.int32)
opt = opt_factory(net)
# Save state to reset parameters later.
state = copy.deepcopy(net.state_dict())
# Evaluate network in eager mode once.
pred = net(data)
loss = cross_entropy_with_softmax(pred, label)
opt.zero_grad()
grads = opt.backward(loss)
f = mge.graph.compile(loss, grads)
def step(data, label):
opt.zero_grad()
out = f(data=data, label=label)
opt.step()
loss = out[0][0]
return loss
# Reset parameters.
net.load_state_dict(state)
return step
def generate_trace_step(
net: Module, opt_factory: Callable[[Module], Optimizer], enable: bool
):
opt = opt_factory(net)
@trace
def train(data, label):
pred = net(data)
loss = | cross_entropy_with_softmax(pred, label) | megengine.functional.cross_entropy_with_softmax |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import itertools
import os
from typing import Callable
import numpy as np
import pytest
import megengine as mge
import megengine.module.init as init
from megengine.core import tensor
from megengine.functional import cross_entropy_with_softmax, relu
from megengine.jit import trace
from megengine.module import Linear, Module
from megengine.optimizer import SGD, Optimizer
from megengine.test import assertTensorClose
batch_size = 64
data_shape = (batch_size, 2)
label_shape = (batch_size,)
def minibatch_generator():
while True:
inp_data = np.zeros((batch_size, 2))
label = np.zeros(batch_size, dtype=np.int32)
for i in range(batch_size):
# [x0, x1], sampled from U[-1, 1]
inp_data[i, :] = np.random.rand(2) * 2 - 1
label[i] = 0 if np.prod(inp_data[i]) < 0 else 1
yield inp_data.astype(np.float32), label.astype(np.int32)
class SimpleNet(Module):
def __init__(self):
self.mid_layers = 14
self.num_class = 2
super().__init__()
self.fc0 = Linear(self.num_class, self.mid_layers, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc0.weight)
init.normal_(self.fc0.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc0.bias)
self.fc1 = Linear(self.mid_layers, self.mid_layers, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc1.weight)
init.normal_(self.fc1.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc1.bias)
self.fc2 = Linear(self.mid_layers, self.num_class, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc2.weight)
init.normal_(self.fc2.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc2.bias)
def forward(self, x):
x = self.fc0(x)
x = relu(x) # Should use tanh but it's not stable now.
x = self.fc1(x)
x = relu(x) # Should use tanh but it's not stable now.
x = self.fc2(x)
return x
def generate_eager_step(net: Module, opt_factory: Callable[[Module], Optimizer]):
data_inp = tensor(np.zeros(data_shape), dtype=np.float32)
label_inp = tensor(np.zeros(label_shape), dtype=np.int32)
opt = opt_factory(net)
def step(data, label):
opt.zero_grad()
data_inp.set_value(data)
label_inp.set_value(label)
pred = net(data_inp)
loss = cross_entropy_with_softmax(pred, label_inp)
opt.backward(loss)
opt.step()
return loss.numpy()[0]
return step
def generate_static_step(net: Module, opt_factory: Callable[[Module], Optimizer]):
data = tensor(np.zeros(data_shape), dtype=np.float32)
label = tensor(np.zeros(label_shape), dtype=np.int32)
opt = opt_factory(net)
# Save state to reset parameters later.
state = copy.deepcopy(net.state_dict())
# Evaluate network in eager mode once.
pred = net(data)
loss = cross_entropy_with_softmax(pred, label)
opt.zero_grad()
grads = opt.backward(loss)
f = mge.graph.compile(loss, grads)
def step(data, label):
opt.zero_grad()
out = f(data=data, label=label)
opt.step()
loss = out[0][0]
return loss
# Reset parameters.
net.load_state_dict(state)
return step
def generate_trace_step(
net: Module, opt_factory: Callable[[Module], Optimizer], enable: bool
):
opt = opt_factory(net)
@trace
def train(data, label):
pred = net(data)
loss = cross_entropy_with_softmax(pred, label)
opt.zero_grad()
opt.backward(loss)
return loss
train.enabled = enable
def step(data, label):
out = train(data, label)
opt.step()
loss = out[0][0]
return loss
return step
def assert_network_equvilence(nets):
net_state = [net.state_dict() for net in nets]
for state in net_state[1:]:
assert len(net_state[0]) == len(state)
for k, v in net_state[0].items():
for state in net_state[1:]:
assert k in state
assertTensorClose(v, state[k])
@pytest.mark.slow
def test_eager_equvilence():
eager_net = SimpleNet()
trace_enable_net = copy.deepcopy(eager_net)
trace_disable_net = copy.deepcopy(eager_net)
opt_factory = lambda net: SGD(
net.parameters(requires_grad=True), lr=0.01, momentum=0.01
)
estep = generate_eager_step(eager_net, opt_factory)
te_step = generate_trace_step(trace_enable_net, opt_factory, True)
td_step = generate_trace_step(trace_disable_net, opt_factory, False)
assert_network_equvilence([eager_net, trace_enable_net, trace_disable_net])
    # Use a hard-coded number as the limit; may increase if needed.
for data, label in itertools.islice(minibatch_generator(), 200):
eloss = estep(data, label)
te_loss = te_step(data, label)
td_loss = td_step(data, label)
| assertTensorClose(eloss, te_loss) | megengine.test.assertTensorClose |
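Each Linear layer in SimpleNet above is initialised with a zero-mean normal whose standard deviation is sqrt(1 / fan_in), plus a zero bias. A stand-alone NumPy sketch of that rule, using an assumed (fan_in, fan_out) pair purely for illustration:

import numpy as np

fan_in, fan_out = 14, 2                      # e.g. fc2: mid_layers -> num_class
std = np.sqrt(1.0 / fan_in)                  # same std as init.normal_ above
weight = np.random.normal(0.0, std, size=(fan_out, fan_in)).astype(np.float32)
bias = np.zeros(fan_out, dtype=np.float32)   # counterpart of init.zeros_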
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import itertools
import os
from typing import Callable
import numpy as np
import pytest
import megengine as mge
import megengine.module.init as init
from megengine.core import tensor
from megengine.functional import cross_entropy_with_softmax, relu
from megengine.jit import trace
from megengine.module import Linear, Module
from megengine.optimizer import SGD, Optimizer
from megengine.test import assertTensorClose
batch_size = 64
data_shape = (batch_size, 2)
label_shape = (batch_size,)
def minibatch_generator():
while True:
inp_data = np.zeros((batch_size, 2))
label = np.zeros(batch_size, dtype=np.int32)
for i in range(batch_size):
# [x0, x1], sampled from U[-1, 1]
inp_data[i, :] = np.random.rand(2) * 2 - 1
label[i] = 0 if np.prod(inp_data[i]) < 0 else 1
yield inp_data.astype(np.float32), label.astype(np.int32)
class SimpleNet(Module):
def __init__(self):
self.mid_layers = 14
self.num_class = 2
super().__init__()
self.fc0 = Linear(self.num_class, self.mid_layers, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc0.weight)
init.normal_(self.fc0.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc0.bias)
self.fc1 = Linear(self.mid_layers, self.mid_layers, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc1.weight)
init.normal_(self.fc1.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc1.bias)
self.fc2 = Linear(self.mid_layers, self.num_class, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc2.weight)
init.normal_(self.fc2.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc2.bias)
def forward(self, x):
x = self.fc0(x)
x = relu(x) # Should use tanh but it's not stable now.
x = self.fc1(x)
x = relu(x) # Should use tanh but it's not stable now.
x = self.fc2(x)
return x
def generate_eager_step(net: Module, opt_factory: Callable[[Module], Optimizer]):
data_inp = tensor(np.zeros(data_shape), dtype=np.float32)
label_inp = tensor(np.zeros(label_shape), dtype=np.int32)
opt = opt_factory(net)
def step(data, label):
opt.zero_grad()
data_inp.set_value(data)
label_inp.set_value(label)
pred = net(data_inp)
loss = cross_entropy_with_softmax(pred, label_inp)
opt.backward(loss)
opt.step()
return loss.numpy()[0]
return step
def generate_static_step(net: Module, opt_factory: Callable[[Module], Optimizer]):
data = tensor(np.zeros(data_shape), dtype=np.float32)
label = tensor(np.zeros(label_shape), dtype=np.int32)
opt = opt_factory(net)
# Save state to reset parameters later.
state = copy.deepcopy(net.state_dict())
# Evaluate network in eager mode once.
pred = net(data)
loss = cross_entropy_with_softmax(pred, label)
opt.zero_grad()
grads = opt.backward(loss)
f = mge.graph.compile(loss, grads)
def step(data, label):
opt.zero_grad()
out = f(data=data, label=label)
opt.step()
loss = out[0][0]
return loss
# Reset parameters.
net.load_state_dict(state)
return step
def generate_trace_step(
net: Module, opt_factory: Callable[[Module], Optimizer], enable: bool
):
opt = opt_factory(net)
@trace
def train(data, label):
pred = net(data)
loss = cross_entropy_with_softmax(pred, label)
opt.zero_grad()
opt.backward(loss)
return loss
train.enabled = enable
def step(data, label):
out = train(data, label)
opt.step()
loss = out[0][0]
return loss
return step
def assert_network_equvilence(nets):
net_state = [net.state_dict() for net in nets]
for state in net_state[1:]:
assert len(net_state[0]) == len(state)
for k, v in net_state[0].items():
for state in net_state[1:]:
assert k in state
assertTensorClose(v, state[k])
@pytest.mark.slow
def test_eager_equvilence():
eager_net = SimpleNet()
trace_enable_net = copy.deepcopy(eager_net)
trace_disable_net = copy.deepcopy(eager_net)
opt_factory = lambda net: SGD(
net.parameters(requires_grad=True), lr=0.01, momentum=0.01
)
estep = generate_eager_step(eager_net, opt_factory)
te_step = generate_trace_step(trace_enable_net, opt_factory, True)
td_step = generate_trace_step(trace_disable_net, opt_factory, False)
assert_network_equvilence([eager_net, trace_enable_net, trace_disable_net])
    # Use a hard-coded number as the limit; may increase if needed.
for data, label in itertools.islice(minibatch_generator(), 200):
eloss = estep(data, label)
te_loss = te_step(data, label)
td_loss = td_step(data, label)
assertTensorClose(eloss, te_loss)
| assertTensorClose(eloss, td_loss) | megengine.test.assertTensorClose |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import itertools
import os
from typing import Callable
import numpy as np
import pytest
import megengine as mge
import megengine.module.init as init
from megengine.core import tensor
from megengine.functional import cross_entropy_with_softmax, relu
from megengine.jit import trace
from megengine.module import Linear, Module
from megengine.optimizer import SGD, Optimizer
from megengine.test import assertTensorClose
batch_size = 64
data_shape = (batch_size, 2)
label_shape = (batch_size,)
def minibatch_generator():
while True:
inp_data = np.zeros((batch_size, 2))
label = np.zeros(batch_size, dtype=np.int32)
for i in range(batch_size):
# [x0, x1], sampled from U[-1, 1]
inp_data[i, :] = np.random.rand(2) * 2 - 1
label[i] = 0 if np.prod(inp_data[i]) < 0 else 1
yield inp_data.astype(np.float32), label.astype(np.int32)
class SimpleNet(Module):
def __init__(self):
self.mid_layers = 14
self.num_class = 2
super().__init__()
self.fc0 = Linear(self.num_class, self.mid_layers, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc0.weight)
init.normal_(self.fc0.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc0.bias)
self.fc1 = Linear(self.mid_layers, self.mid_layers, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc1.weight)
init.normal_(self.fc1.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc1.bias)
self.fc2 = Linear(self.mid_layers, self.num_class, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc2.weight)
init.normal_(self.fc2.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc2.bias)
def forward(self, x):
x = self.fc0(x)
x = relu(x) # Should use tanh but it's not stable now.
x = self.fc1(x)
x = relu(x) # Should use tanh but it's not stable now.
x = self.fc2(x)
return x
def generate_eager_step(net: Module, opt_factory: Callable[[Module], Optimizer]):
data_inp = tensor(np.zeros(data_shape), dtype=np.float32)
label_inp = tensor(np.zeros(label_shape), dtype=np.int32)
opt = opt_factory(net)
def step(data, label):
opt.zero_grad()
data_inp.set_value(data)
label_inp.set_value(label)
pred = net(data_inp)
loss = cross_entropy_with_softmax(pred, label_inp)
opt.backward(loss)
opt.step()
return loss.numpy()[0]
return step
def generate_static_step(net: Module, opt_factory: Callable[[Module], Optimizer]):
data = tensor(np.zeros(data_shape), dtype=np.float32)
label = tensor(np.zeros(label_shape), dtype=np.int32)
opt = opt_factory(net)
# Save state to reset parameters later.
state = copy.deepcopy(net.state_dict())
# Evaluate network in eager mode once.
pred = net(data)
loss = cross_entropy_with_softmax(pred, label)
opt.zero_grad()
grads = opt.backward(loss)
f = mge.graph.compile(loss, grads)
def step(data, label):
opt.zero_grad()
out = f(data=data, label=label)
opt.step()
loss = out[0][0]
return loss
# Reset parameters.
net.load_state_dict(state)
return step
def generate_trace_step(
net: Module, opt_factory: Callable[[Module], Optimizer], enable: bool
):
opt = opt_factory(net)
@trace
def train(data, label):
pred = net(data)
loss = cross_entropy_with_softmax(pred, label)
opt.zero_grad()
opt.backward(loss)
return loss
train.enabled = enable
def step(data, label):
out = train(data, label)
opt.step()
loss = out[0][0]
return loss
return step
def assert_network_equvilence(nets):
net_state = [net.state_dict() for net in nets]
for state in net_state[1:]:
assert len(net_state[0]) == len(state)
for k, v in net_state[0].items():
for state in net_state[1:]:
assert k in state
| assertTensorClose(v, state[k]) | megengine.test.assertTensorClose |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.
import argparse
import megengine as mge
import numpy as np
from megengine import jit
from ..build import build_and_load
def make_parser():
parser = argparse.ArgumentParser("YOLOX Demo Dump")
parser.add_argument("-n", "--name", type=str, default="yolox-s", help="model name")
parser.add_argument("-c", "--ckpt", default=None, type=str, help="ckpt for eval")
parser.add_argument(
"--dump_path", default="model.mge", help="path to save the dumped model"
)
return parser
def dump_static_graph(model, graph_name="model.mge"):
model.eval()
model.head.decode_in_inference = False
data = mge.Tensor(np.random.random((1, 3, 640, 640)))
@ | jit.trace(capture_as_const=True) | megengine.jit.trace |
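The row above stops right at the decorator. In the usual MegEngine dump pattern, the traced function wraps a single forward pass, is executed once so the static graph gets captured, and is then written to disk. A hedged sketch of how dump_static_graph typically continues; the arg_names and optimize_for_inference arguments are assumptions, not taken from this file:

    @jit.trace(capture_as_const=True)
    def pred_func(data):
        outputs = model(data)
        return outputs

    pred_func(data)  # run once so the graph is captured
    pred_func.dump(graph_name, arg_names=["data"], optimize_for_inference=True)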
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import json
import os
import subprocess
import sys
import time
import numpy as np
from resnet50 import Resnet50
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine._internal.plugin import CompGraphProfiler
from megengine.core import Graph, tensor
from megengine.core.graph import get_default_graph
from megengine.functional.debug_param import (
get_conv_execution_strategy,
set_conv_execution_strategy,
)
from megengine.jit import trace
from megengine.module import BatchNorm2d, Conv2d, Linear, MaxPool2d, Module
from megengine.optimizer import SGD
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", "..", "examples"))
def init_profiler(comp_graph= | get_default_graph() | megengine.core.graph.get_default_graph |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import json
import os
import subprocess
import sys
import time
import numpy as np
from resnet50 import Resnet50
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine._internal.plugin import CompGraphProfiler
from megengine.core import Graph, tensor
from megengine.core.graph import get_default_graph
from megengine.functional.debug_param import (
get_conv_execution_strategy,
set_conv_execution_strategy,
)
from megengine.jit import trace
from megengine.module import BatchNorm2d, Conv2d, Linear, MaxPool2d, Module
from megengine.optimizer import SGD
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", "..", "examples"))
def init_profiler(comp_graph=get_default_graph()):
profiler = | CompGraphProfiler(comp_graph) | megengine._internal.plugin.CompGraphProfiler |
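One detail worth flagging in the signature above: a default argument like comp_graph=get_default_graph() is evaluated once, at function-definition time, not on every call. If a fresh default graph were wanted per call, the common pattern is a None sentinel; a small sketch under that assumption (init_profiler_alt is a hypothetical name):

def init_profiler_alt(comp_graph=None):
    # resolve the default lazily, at call time
    if comp_graph is None:
        comp_graph = get_default_graph()
    return CompGraphProfiler(comp_graph)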