
write DCGAN example with different frontend languages #23

Open

QiJune opened this issue Jul 29, 2020 · 6 comments

QiJune commented Jul 29, 2020

We want to compare DCGAN examples implemented in different frontend languages: Python, C++, Go, and Go+.

Python version: https://github.com/pytorch/examples/blob/master/dcgan/main.py
C++ version: https://github.com/pytorch/examples/blob/master/cpp/dcgan/dcgan.cpp

We will add the Go version and Go+ version later.


QiJune commented Jul 29, 2020

Model definition part:

Go+ version:

func weightInit(m *nn.Module) {
	className := m.ClassName
	if strings.Index(className, "Conv") != -1 {
		torch.nn.init.Normal_(m.Weight, 0.0, 0.02)
	} else if strings.Index(className, "BatchNorm") != -1 {
		torch.nn.init.Normal_(m.Weight, 1.0, 0.02)
		torch.nn.init.Zeros_(m.Bias)
	}
}

type Generator struct {
	nn.Module
	NGPU int
	Main *nn.Sequential
}

func NewGenerator(ngpu int) *Generator {
	return &Generator{
		NGPU: ngpu,
		Main: nn.NewSequential(
			nn.ConvTranspose2d(nz, ngf*8, 4, 1, 0, false),
			nn.BatchNorm2d(ngf*8),
			nn.ReLU(true),
			nn.ConvTranspose2d(ngf*8, ngf*4, 4, 2, 1, false),
			nn.BatchNorm2d(ngf*4),
			nn.ReLU(true),
			nn.ConvTranspose2d(ngf*4, ngf*2, 4, 2, 1, false),
			nn.BatchNorm2d(ngf*2),
			nn.ReLU(true),
			nn.ConvTranspose2d(ngf*2, ngf, 4, 2, 1, false),
			nn.BatchNorm2d(ngf),
			nn.ReLU(true),
			nn.ConvTranspose2d(ngf, nc, 4, 2, 1, false),
			nn.Tanh(),
		),
	}
}

func (g *Generator) Forward(x *torch.Tensor) *torch.Tensor {
	// Declare output up front: a := inside each branch would not be
	// visible after the if/else in Go.
	var output *torch.Tensor
	if x.IsCuda && g.NGPU > 1 {
		output = nn.parallel.DataParallel(g.Main, x, g.NGPU)
	} else {
		output = g.Main.Forward(x)
	}
	return output
}

netG := NewGenerator(ngpu).To(device)
netG.Apply(weightInit)

// {LR: ..., Betas: ...} relies on the untyped struct-literal syntax
// proposed for Go+ in the discussion below.
optimizerG := optim.Adam(netG.Parameters(), {LR: lr, Betas: [beta1, 0.999]})

Python version:

def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        torch.nn.init.normal_(m.weight, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        torch.nn.init.normal_(m.weight, 1.0, 0.02)
        torch.nn.init.zeros_(m.bias)

class Generator(nn.Module):
    def __init__(self, ngpu):
        super(Generator, self).__init__()
        self.ngpu = ngpu
        self.main = nn.Sequential(
            nn.ConvTranspose2d(     nz, ngf * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf * 2,     ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf),
            nn.ReLU(True),
            nn.ConvTranspose2d(    ngf,      nc, 4, 2, 1, bias=False),
            nn.Tanh()
        )

    def forward(self, input):
        if input.is_cuda and self.ngpu > 1:
            output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
        else:
            output = self.main(input)
        return output
    
netG = Generator(ngpu).to(device)
netG.apply(weights_init)

optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))


QiJune commented Jul 29, 2020

Training loop part:

Go+ version:

for epoch := 0; epoch < niter; epoch++ {
	for i, data := range dataloader {
		// Update D with a real batch.
		netD.ZeroGrad()
		realCpu := data[0].To(device)
		batchSize := realCpu.Size(0)
		label := torch.Full([]int{batchSize}, realLabel, realCpu.Dtype, device)
		output := netD.Forward(realCpu)
		errDReal := criterion(output, label)
		errDReal.Backward()
		DX := output.Mean().Item()

		// Update D with a fake batch.
		noise := torch.RandN(batchSize, nz, 1, 1, device)
		fake := netG.Forward(noise)
		label.Fill_(fakeLabel)
		output = netD.Forward(fake.Detach())
		errDFake := criterion(output, label)
		errDFake.Backward()
		DGZ1 := output.Mean().Item()
		errD := errDReal.Add(errDFake)
		optimizerD.Step()

		// Update G.
		netG.ZeroGrad()
		label.Fill_(realLabel) // fake labels are real for generator cost
		output = netD.Forward(fake)
		errG := criterion(output, label)
		errG.Backward()
		DGZ2 := output.Mean().Item()
		optimizerG.Step()

		// Note: i, DX, DGZ1, errD, and DGZ2 feed the progress log in
		// the full example; Go rejects them as unused here (see the
		// discussion of unused variables below).
	}
}

Python version:

for epoch in range(opt.niter):
    for i, data in enumerate(dataloader, 0):
        netD.zero_grad()
        real_cpu = data[0].to(device)
        batch_size = real_cpu.size(0)
        label = torch.full((batch_size,), real_label, dtype=real_cpu.dtype, device=device)
        output = netD(real_cpu)
        errD_real = criterion(output, label)
        errD_real.backward()
        D_x = output.mean().item()

        noise = torch.randn(batch_size, nz, 1, 1, device=device)
        fake = netG(noise)
        label.fill_(fake_label)
        output = netD(fake.detach())
        errD_fake = criterion(output, label)
        errD_fake.backward()
        D_G_z1 = output.mean().item()
        errD = errD_real + errD_fake
        optimizerD.step()

        netG.zero_grad()
        label.fill_(real_label)  # fake labels are real for generator cost
        output = netD(fake)
        errG = criterion(output, label)
        errG.backward()
        D_G_z2 = output.mean().item()
        optimizerG.step()


QiJune commented Jul 29, 2020

Go+ syntax suggestions:

  1. Defining a struct is somewhat verbose compared with Python: in Go the struct definition and its method definitions are separate, while in Python they live together, and member variables need not be declared in advance.
  2. Python allows variables to be defined but never used, which Go rejects at compile time; researchers coming from Python may find this jarring at first.
  3. for i in range(10) is still more concise than for i := 0; i < 10; i++; hopefully Go+ supports the former as well.
  4. How should Python's tuple type be supported in Go+? Go has no tuples.
  5. How should a parameter-passing style like nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False) be supported in Go+? (See the sketch after this list for one possibility in plain Go.)
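For suggestions 4 and 5, here is a minimal plain-Go sketch of one possible mapping, assuming a hypothetical binding: tuples become fixed-size arrays, and keyword arguments become a named options struct. ConvOptions and this ConvTranspose2d signature are illustrative assumptions, not an existing API.

package main

import "fmt"

// ConvOptions plays the role of Python keyword arguments such as bias=False.
type ConvOptions struct {
	Stride  int
	Padding int
	Bias    bool
}

// ConvTranspose2d is a stand-in that just reports its configuration.
func ConvTranspose2d(in, out, kernel int, opt ConvOptions) string {
	return fmt.Sprintf("ConvTranspose2d(%d, %d, %d, stride=%d, padding=%d, bias=%v)",
		in, out, kernel, opt.Stride, opt.Padding, opt.Bias)
}

func main() {
	ngf, nz := 64, 100
	// Field names make the call site read like Python's keyword arguments.
	fmt.Println(ConvTranspose2d(nz, ngf*8, 4, ConvOptions{Stride: 1, Bias: false}))

	// A Python tuple such as (beta1, 0.999) becomes a fixed-size array.
	betas := [2]float64{0.5, 0.999}
	fmt.Println(betas)
}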

wangkuiyi commented:

  1. Defining a struct is somewhat verbose compared with Python: in Go the struct definition and its method definitions are separate, while in Python they live together, and member variables need not be declared in advance.

This is indeed a problem, and we need to think about how to handle it. Could the Monad pattern that Yu suggested earlier solve it? (A strawman sketch follows.)
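As a strawman, here is what a chainable builder in plain Go might look like; it removes the per-model struct, constructor, and Forward boilerplate by composing layers with chained method calls. All names are hypothetical, and whether this is the Monad pattern Yu had in mind is an open question.

package main

import "fmt"

// Layer is a toy stand-in for a network layer.
type Layer func(x float64) float64

// Net accumulates layers; each Add returns the receiver so calls chain.
type Net struct{ layers []Layer }

func Sequential() *Net { return &Net{} }

func (n *Net) Add(l Layer) *Net {
	n.layers = append(n.layers, l)
	return n
}

func (n *Net) Forward(x float64) float64 {
	for _, l := range n.layers {
		x = l(x)
	}
	return x
}

func main() {
	// No Generator struct, NewGenerator constructor, or Forward method to declare.
	net := Sequential().
		Add(func(x float64) float64 { return 2 * x }).
		Add(func(x float64) float64 { return x + 1 })
	fmt.Println(net.Forward(3)) // prints 7
}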

  2. Python allows variables to be defined but never used, which Go rejects at compile time; researchers coming from Python may find this jarring at first.

I actually see this as an advantage of strongly typed languages; careful Python programmers should welcome it. It is also part of our argument that Go+ is better suited for research.
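A two-line illustration of the trade-off: the Go compiler rejects the program below if the blank assignment is removed, while the equivalent Python runs silently.

package main

func main() {
	unused := 42
	_ = unused // remove this line and the compiler reports an error such as "declared and not used: unused"
}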

  3. for i in range(10) is still more concise than for i := 0; i < 10; i++; hopefully Go+ supports the former as well.

Good point. I checked https://github.com/goplus/gop/blob/master/tutorial/12-For-loop/for.gop, and Go+ indeed does not yet support syntax like x := range(10). I suggest @QiJune file an issue with Go+ asking whether this can be supported, and cc https://github.com/xushiwei.

  4. How should Python's tuple type be supported in Go+? Go has no tuples.

Where in the example above do we need a tuple?

  5. How should a parameter-passing style like nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False) be supported in Go+?

This is also worth filing as an issue. Xu and I discussed it on WeChat earlier, and he suggested adding a syntax capability like

torch.RandN(5, 4, {RequireGrad: true})

Go+ already supports

torch.RandN(5, 4, {"RequireGrad": true})
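For reference, something close to that second form already works in plain Go if the library accepts a trailing map; a minimal sketch, assuming a hypothetical RandN binding rather than a real GoTorch API:

package main

import "fmt"

// RandN is a stand-in whose last parameter mimics Go+'s {"RequireGrad": true}.
func RandN(rows, cols int, opts map[string]interface{}) string {
	requireGrad := false
	if v, ok := opts["RequireGrad"].(bool); ok {
		requireGrad = v
	}
	return fmt.Sprintf("randn(%d, %d), requires_grad=%v", rows, cols, requireGrad)
}

func main() {
	fmt.Println(RandN(5, 4, map[string]interface{}{"RequireGrad": true}))
}

The Go+ sugar would merely let the caller drop the map[string]interface{} type from the literal.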

wangkuiyi commented:

@QiJune Let's make implementing DCGAN our first milestone. This example is a typical case of unified training and inference. If it runs on a Raspberry Pi with code as concise as Python's, that should be reasonably convincing.


QiJune commented Jul 30, 2020

@wangkuiyi

Where in the example above do we need a tuple?

Here: in optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999)), the betas argument is a tuple.
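Since Go has no tuple type, one possibility is to carry betas as a fixed-size array inside an options struct; AdamOptions here is a hypothetical name for illustration, not an existing API.

package main

import "fmt"

// AdamOptions mirrors the keyword arguments of optim.Adam.
type AdamOptions struct {
	LR    float64
	Betas [2]float64 // stands in for Python's (beta1, 0.999) tuple
}

func main() {
	opt := AdamOptions{LR: 0.0002, Betas: [2]float64{0.5, 0.999}}
	fmt.Printf("lr=%v betas=%v\n", opt.LR, opt.Betas)
}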
