AI Digital Human: Network Architecture and Training Code for a Silicon-Based Digital Human Model

CSDN, 2024-09-14

The idea: a mask is applied to the face input during training, and at output time the result is composited according to the same mask, pasting the generated pixels straight back into the original frame along the masked region (see the sketch below).
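A minimal sketch of that paste-back step (the function name, NumPy types, and array layout are illustrative, not from the original code; `mask` is assumed to be a single-channel float map in [0, 1] marking the generated region):

import numpy as np

def paste_back(original_frame, generated_face, mask):
    # Blend the generated pixels into the original frame along the mask:
    # mask == 1 keeps the generated pixel, mask == 0 keeps the original one.
    m = mask.astype(np.float32)[..., None]  # H x W x 1
    blended = m * generated_face.astype(np.float32) \
        + (1.0 - m) * original_frame.astype(np.float32)
    return blended.astype(original_frame.dtype)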

conv.py

import torch
from torch import nn
from torch.nn import functional as F


class DepthwiseSeparableConv2d(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0):
        super(DepthwiseSeparableConv2d, self).__init__()
        self.depthwise = nn.Sequential(
            nn.Conv2d(in_channels, in_channels, kernel_size=kernel_size, stride=stride,
                      padding=padding, groups=in_channels),  # depthwise convolution
            nn.BatchNorm2d(in_channels),
            nn.ReLU6(inplace=True))
        self.pointwise = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=1),  # pointwise (1x1) convolution
            nn.BatchNorm2d(out_channels),
            nn.ReLU6(inplace=True))

    def forward(self, x):
        x = self.depthwise(x)
        x = self.pointwise(x)
        return x

class Conv2d(nn.Module):
    def __init__(self, cin, cout, kernel_size, stride, padding, residual=False, depth_wise=False, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.residual = residual
        self.depth_wise = depth_wise
        if depth_wise:
            self.conv_block = DepthwiseSeparableConv2d(cin, cout, kernel_size, stride, padding)
        else:
            self.conv_block = nn.Sequential(
                nn.Conv2d(cin, cout, kernel_size, stride, padding),
                nn.BatchNorm2d(cout))
        self.act = nn.ReLU6()

    def forward(self, x):
        out = self.conv_block(x)
        if self.residual:
            out += x
        return out if self.depth_wise else self.act(out)

class nonorm_Conv2d(nn.Module):
    def __init__(self, cin, cout, kernel_size, stride, padding, residual=False, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.conv_block = nn.Sequential(
            nn.Conv2d(cin, cout, kernel_size, stride, padding),
        )
        self.act = nn.LeakyReLU(0.01, inplace=True)

    def forward(self, x):
        out = self.conv_block(x)
        return self.act(out)

class Conv2dTranspose(nn.Module):
    def __init__(self, cin, cout, kernel_size, stride, padding, output_padding=0, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.conv_block = nn.Sequential(
            nn.ConvTranspose2d(cin, cout, kernel_size, stride, padding, output_padding),
            nn.BatchNorm2d(cout)
        )
        self.act = nn.ReLU()

    def forward(self, x):
        out = self.conv_block(x)
        return self.act(out)
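As a quick illustration of why depth_wise=True is used throughout the generator, the check below compares parameter counts against a dense convolution (this snippet is not part of the original file; the import path mirrors the one used in wav2lip.py):

import torch
from models.conv import DepthwiseSeparableConv2d

x = torch.randn(1, 64, 48, 48)
dw = DepthwiseSeparableConv2d(64, 128, kernel_size=3, stride=1, padding=1)
dense = torch.nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1)
print(dw(x).shape)                                 # torch.Size([1, 128, 48, 48])
print(sum(p.numel() for p in dw.parameters()))     # ~9.3K: 64*9 depthwise + 64*128 pointwise + biases/BN
print(sum(p.numel() for p in dense.parameters()))  # ~73.9K: 64*128*9 + biases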

wav2lip.py

import torch
from torch import nn
from torch.nn import functional as F

from models.conv import Conv2dTranspose, Conv2d, nonorm_Conv2d

class Wav2Lip(nn.Module):
    def __init__(self):
        super(Wav2Lip, self).__init__()

        # Spatial-size comments list three input configurations: 96x96 / 288x192 / 144x96.
        self.face_encoder_blocks = nn.ModuleList([
            nn.Sequential(Conv2d(6, 16, kernel_size=7, stride=1, padding=3, depth_wise=True)),  # 96,96 # 288,192 # 144,96

            nn.Sequential(Conv2d(16, 32, kernel_size=3, stride=2, padding=1, depth_wise=True),  # 48,48 # 144,96 # 72,48
                          Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True, depth_wise=True),
                          Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True, depth_wise=True)),

            nn.Sequential(Conv2d(32, 64, kernel_size=3, stride=2, padding=1, depth_wise=True),  # 24,24 # 72,48 # 36,24
                          Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True, depth_wise=True),
                          Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True, depth_wise=True),
                          Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True, depth_wise=True)),

            nn.Sequential(Conv2d(64, 128, kernel_size=3, stride=2, padding=1, depth_wise=True),  # 12,12 # 36,24 # 18,12
                          Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True, depth_wise=True),
                          Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True, depth_wise=True)),

            nn.Sequential(Conv2d(128, 256, kernel_size=3, stride=2, padding=1, depth_wise=True),  # 6,6 # 18,12 # 9,6
                          Conv2d(256, 256, kernel_size=3, stride=1, padding=1, residual=True, depth_wise=True),
                          Conv2d(256, 256, kernel_size=3, stride=1, padding=1, residual=True, depth_wise=True)),

            # Deeper 512-channel stages, disabled but kept for reference:
            # nn.Sequential(Conv2d(256, 512, kernel_size=3, stride=2, padding=1, depth_wise=True),  # 6,6 # 9,6
            #               Conv2d(512, 512, kernel_size=3, stride=1, padding=1, residual=True, depth_wise=True),
            #               Conv2d(512, 512, kernel_size=3, stride=1, padding=1, residual=True, depth_wise=True)),
            # nn.Sequential(Conv2d(512, 512, kernel_size=3, stride=2, padding=1, depth_wise=True),  # 3,3 # 5,3
            #               Conv2d(512, 512, kernel_size=3, stride=1, padding=1, residual=True, depth_wise=True)),
            # nn.Sequential(Conv2d(512, 512, kernel_size=3, stride=(2, 1), padding=1, depth_wise=True),  # 3,3 # 3,3
            #               Conv2d(512, 512, kernel_size=3, stride=1, padding=1, residual=True, depth_wise=True)),
            # nn.Sequential(Conv2d(512, 512, kernel_size=3, stride=1, padding=0, depth_wise=True),  # 1,1
            #               Conv2d(512, 512, kernel_size=1, stride=1, padding=0, depth_wise=True)),
        ])

        self.audio_encoder = nn.Sequential(
            Conv2d(1, 32, kernel_size=3, stride=1, padding=1, depth_wise=False),  # 80,16
            Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True, depth_wise=False),
            Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True, depth_wise=False),

            Conv2d(32, 64, kernel_size=3, stride=(3, 1), padding=1, depth_wise=False),  # 27,16
            Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True, depth_wise=False),
            Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True, depth_wise=False),

            Conv2d(64, 128, kernel_size=3, stride=3, padding=1, depth_wise=False),  # 9,6
            Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True, depth_wise=False),
            Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True, depth_wise=False),

            Conv2d(128, 256, kernel_size=3, stride=1, padding=1, depth_wise=False),  # 3,3
            Conv2d(256, 256, kernel_size=3, stride=1, padding=1, residual=False, depth_wise=False),
            # Conv2d(256, 512, kernel_size=1, stride=1, padding=0, depth_wise=False),  # 1,1
            # Conv2d(512, 512, kernel_size=1, stride=1, padding=0, depth_wise=False),  # 1,1
        )

        self.face_decoder_blocks = nn.ModuleList([
            nn.Sequential(Conv2d(256, 256, kernel_size=1, stride=1, padding=0, depth_wise=False)),

            # Decoder stages for the disabled 512-channel encoder, kept for reference:
            # nn.Sequential(Conv2dTranspose(1024, 512, kernel_size=3, stride=1, padding=0),  # 3,3
            #               Conv2d(512, 512, kernel_size=3, stride=1, padding=1, residual=True, depth_wise=True)),
            # nn.Sequential(Conv2dTranspose(1024, 512, kernel_size=3, stride=(2, 1), padding=1),  # 5,3
            #               Conv2d(512, 512, kernel_size=3, stride=1, padding=1, residual=True, depth_wise=True)),
            # nn.Sequential(Conv2dTranspose(1024, 512, kernel_size=3, stride=2, padding=1, output_padding=(0, 1)),  # 9,6
            #               Conv2d(512, 512, kernel_size=3, stride=1, padding=1, residual=True, depth_wise=True),
            #               Conv2d(512, 512, kernel_size=3, stride=1, padding=1, residual=True, depth_wise=True)),
            # nn.Sequential(Conv2dTranspose(1024, 512, kernel_size=3, stride=2, padding=1, output_padding=1),  # 18,12
            #               Conv2d(512, 512, kernel_size=3, stride=1, padding=1, residual=True, depth_wise=True),
            #               Conv2d(512, 512, kernel_size=3, stride=1, padding=1, residual=True, depth_wise=True)),

            nn.Sequential(Conv2dTranspose(512, 384, kernel_size=3, stride=2, padding=1, output_padding=1),
                          Conv2d(384, 384, kernel_size=3, stride=1, padding=1, residual=True, depth_wise=True),
                          Conv2d(384, 384, kernel_size=3, stride=1, padding=1, residual=True, depth_wise=True)),  # 36,24

            nn.Sequential(Conv2dTranspose(512, 256, kernel_size=3, stride=2, padding=1, output_padding=1),
                          Conv2d(256, 256, kernel_size=3, stride=1, padding=1, residual=True, depth_wise=True),
                          Conv2d(256, 256, kernel_size=3, stride=1, padding=1, residual=True, depth_wise=True)),  # 72,48

            nn.Sequential(Conv2dTranspose(320, 128, kernel_size=3, stride=2, padding=1, output_padding=1),
                          Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True, depth_wise=True),
                          Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True, depth_wise=True)),  # 144,96

            nn.Sequential(Conv2dTranspose(160, 64, kernel_size=3, stride=2, padding=1, output_padding=1),
                          Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True, depth_wise=True),
                          Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True, depth_wise=True)),  # 288,192
        ])

        self.output_block = nn.Sequential(
            Conv2d(80, 32, kernel_size=3, stride=1, padding=1, depth_wise=True),
            nn.Conv2d(32, 3, kernel_size=1, stride=1, padding=0),
            nn.Sigmoid())

    def forward(self, audio_sequences, face_sequences):
        # audio_sequences: (B, T, 1, 80, 16)
        B = audio_sequences.size(0)

        input_dim_size = len(face_sequences.size())
        if input_dim_size > 4:
            # Fold the time dimension into the batch: (B, T, ...) -> (B*T, ...)
            audio_sequences = torch.cat([audio_sequences[:, i] for i in range(audio_sequences.size(1))], dim=0)
            face_sequences = torch.cat([face_sequences[:, :, i] for i in range(face_sequences.size(2))], dim=0)

        # (B*T, 256, 9, 6) with the truncated encoder above; the original full encoder ended at B, 512, 1, 1
        audio_embedding = self.audio_encoder(audio_sequences)

        feats = []
        x = face_sequences
        for f in self.face_encoder_blocks:
            x = f(x)
            feats.append(x)

        x = audio_embedding
        for f in self.face_decoder_blocks:
            x = f(x)
            try:
                x = torch.cat((x, feats[-1]), dim=1)  # U-Net skip connection
            except Exception as e:
                print(x.size())
                print(feats[-1].size())
                raise e

            feats.pop()

        x = self.output_block(x)

        if input_dim_size > 4:
            x = torch.split(x, B, dim=0)  # [(B, C, H, W)]
            outputs = torch.stack(x, dim=2)  # (B, C, T, H, W)
        else:
            outputs = x

        return outputs
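Before the discriminator, a hedged usage sketch for the generator above (batch size and the T=5 window are illustrative; with the deeper 512-channel stages commented out, the skip connections only line up spatially for a 144x96 face crop, the third size listed in the encoder comments):

import torch

model = Wav2Lip()
audio = torch.randn(2, 5, 1, 80, 16)   # (B, T, 1, mel_bins, mel_frames)
faces = torch.randn(2, 6, 5, 144, 96)  # (B, 6, T, H, W): masked target + reference frame stacked on channels
out = model(audio, faces)              # (B, 3, T, H, W), values in [0, 1] after the sigmoid
print(out.shape)                       # torch.Size([2, 3, 5, 144, 96])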

class Wav2Lip_disc_qual(nn.Module):
    def __init__(self):
        super(Wav2Lip_disc_qual, self).__init__()

        self.face_encoder_blocks = nn.ModuleList([
            nn.Sequential(nonorm_Conv2d(3, 32, kernel_size=7, stride=1, padding=3)),  # 144,192

            nn.Sequential(nonorm_Conv2d(32, 64, kernel_size=5, stride=(1, 2), padding=2),  # 144,96
                          nonorm_Conv2d(64, 64, kernel_size=5, stride=1, padding=2)),

            nn.Sequential(nonorm_Conv2d(64, 128, kernel_size=5, stride=2, padding=2),  # 72,48
                          nonorm_Conv2d(128, 128, kernel_size=5, stride=1, padding=2)),

            nn.Sequential(nonorm_Conv2d(128, 256, kernel_size=5, stride=2, padding=2),  # 36,24
                          nonorm_Conv2d(256, 256, kernel_size=5, stride=1, padding=2)),

            nn.Sequential(nonorm_Conv2d(256, 512, kernel_size=3, stride=2, padding=1),  # 18,12
                          nonorm_Conv2d(512, 512, kernel_size=3, stride=1, padding=1)),

            nn.Sequential(nonorm_Conv2d(512, 512, kernel_size=3, stride=2, padding=1),  # 9,6
                          nonorm_Conv2d(512, 512, kernel_size=3, stride=1, padding=1)),

            nn.Sequential(nonorm_Conv2d(512, 512, kernel_size=3, stride=2, padding=1),  # 5,3
                          nonorm_Conv2d(512, 512, kernel_size=3, stride=1, padding=1)),

            nn.Sequential(nonorm_Conv2d(512, 512, kernel_size=3, stride=(2, 1), padding=1),  # 3,3
                          nonorm_Conv2d(512, 512, kernel_size=3, stride=1, padding=1)),

            nn.Sequential(nonorm_Conv2d(512, 512, kernel_size=3, stride=1, padding=0),  # 1,1
                          nonorm_Conv2d(512, 512, kernel_size=1, stride=1, padding=0)),
        ])

        self.binary_pred = nn.Sequential(nn.Conv2d(512, 1, kernel_size=1, stride=1, padding=0), nn.Sigmoid())
        self.label_noise = .0

    def get_lower_half(self, face_sequences):
        # Only the lower half of the face (the mouth region) is judged.
        return face_sequences[:, :, face_sequences.size(2) // 2:]

    def to_2d(self, face_sequences):
        # Fold the time dimension into the batch: (B, C, T, H, W) -> (B*T, C, H, W)
        face_sequences = torch.cat([face_sequences[:, :, i] for i in range(face_sequences.size(2))], dim=0)
        return face_sequences

    def perceptual_forward(self, false_face_sequences):
        false_face_sequences = self.to_2d(false_face_sequences)
        false_face_sequences = self.get_lower_half(false_face_sequences)

        false_feats = false_face_sequences
        for f in self.face_encoder_blocks:
            false_feats = f(false_feats)

        # Generator-side loss: push generated frames toward the "real" label.
        # Note the target tensor is created on the GPU via the hard-coded .cuda().
        false_pred_loss = F.binary_cross_entropy(
            self.binary_pred(false_feats).view(len(false_feats), -1),
            torch.ones((len(false_feats), 1)).cuda())

        return false_pred_loss

    def forward(self, face_sequences):
        face_sequences = self.to_2d(face_sequences)
        face_sequences = self.get_lower_half(face_sequences)

        x = face_sequences
        for f in self.face_encoder_blocks:
            x = f(x)

        return self.binary_pred(x).view(len(x), -1)
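Finally, a sketch of how the quality discriminator is typically wired in (loss weights omitted and the import path assumed; note that its strides line up for a 288x192 crop, matching the # 144,192 comments, and that perceptual_forward hard-codes .cuda(), so the generator-side call assumes a GPU):

import torch
from torch.nn import functional as F
from models.wav2lip import Wav2Lip_disc_qual

disc = Wav2Lip_disc_qual()
real = torch.rand(2, 3, 5, 288, 192)  # ground-truth window (B, 3, T, H, W)
fake = torch.rand(2, 3, 5, 288, 192)  # generator output, same layout

# Discriminator step: real frames -> 1, generated frames -> 0 (lower face half only).
pred_real = disc(real)                # (B*T, 1) sigmoid scores
pred_fake = disc(fake.detach())
d_loss = F.binary_cross_entropy(pred_real, torch.ones_like(pred_real)) \
    + F.binary_cross_entropy(pred_fake, torch.zeros_like(pred_fake))

# Generator step (on GPU, because of the hard-coded .cuda() above):
# g_gan_loss = disc.perceptual_forward(fake)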
