# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform

import numpy as np
import pytest

import megengine
import megengine.autodiff as ad
import megengine.distributed as dist
import megengine.optimizer as optimizer
from megengine import Parameter, tensor
from megengine.module import Module
from megengine.optimizer import SGD


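# Toy module: ten parameters, each multiplied elementwise into the input.
# With input and parameters all initialized to ones, the gradient of
# ``loss.sum()`` with respect to every parameter is a tensor of ones.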
class Simple(Module):
    def __init__(self, param_shape):
        super().__init__()
        self.params = [
            Parameter(np.ones(param_shape), dtype=np.float32) for i in range(10)
        ]

    def forward(self, x):
        for p in self.params:
            x = x * p
        return x


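# The ``threshold`` values exercise three packing regimes: 0 effectively
# disables packing ("no_pack"), 128 packs only the smallest gradients
# ("small_pack"), and None keeps the callback's default threshold, which the
# "large_pack" id suggests is large enough to pack everything.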
@pytest.mark.require_ngpu(2)
@pytest.mark.isolated_distributed
@pytest.mark.parametrize(
    "threshold", [0, 128, None], ids=["no_pack", "small_pack", "large_pack"]
)
@pytest.mark.parametrize("param_shape", [(16,), (128, 256), (2, 1024, 1024)])
def test_param_pack(param_shape, threshold, n_iters=100):
    data = np.ones(param_shape, dtype="float32")

    @dist.launcher(n_gpus=2)
    def worker():
        net = Simple(param_shape)
        opt = SGD(net.parameters(), lr=0.1)

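        # Average each gradient across the two workers. ``_param_pack_thd``
        # is a private attribute of the callback; overriding it pins the
        # packing size threshold for this run.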
        allreduce_cb = dist.make_allreduce_cb("MEAN", dist.WORLD)
        if threshold is not None:
            allreduce_cb._param_pack_thd = threshold
        gm = ad.GradManager().attach(net.parameters(), callbacks=[allreduce_cb])

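        # One step without ``opt.step()``: parameters stay at their initial
        # ones, so the gradients recomputed each iteration never change.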
        def run():
            opt.clear_grad()
            with gm:
                x = tensor(data)
                loss = net(x)
                loss = loss.sum()
                gm.backward(loss)

        for i in range(n_iters):
            run()

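        # d(sum(x * p1 * ... * p10)) / dpi is the product of the input and
        # the other nine parameters (all ones), and the MEAN all-reduce of
        # identical gradients leaves them unchanged, so every gradient is
        # exactly ones.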
        for p in net.params:
            np.testing.assert_equal(p.grad.numpy(), np.ones_like(p.grad.numpy()))

    worker()
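

# A sketch of how to run just this test, assuming a 2-GPU machine and that the
# repo's pytest configuration registers the ``require_ngpu`` and
# ``isolated_distributed`` markers:
#
#     pytest -v test_param_pack.py -k small_pack
#
# Independent of the distributed machinery, the gradient math can be verified
# in a single process. A minimal sketch (``_single_process_grad_check`` is a
# hypothetical helper, not collected by pytest):
def _single_process_grad_check(param_shape=(16,)):
    net = Simple(param_shape)
    # No all-reduce callback: plain autodiff over the ten parameters.
    gm = ad.GradManager().attach(net.parameters())
    with gm:
        loss = net(tensor(np.ones(param_shape, dtype="float32"))).sum()
        gm.backward(loss)
    # Same expectation as the distributed test: every gradient is exactly ones.
    for p in net.params:
        np.testing.assert_equal(p.grad.numpy(), np.ones_like(p.grad.numpy()))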