Building dual models in PyTorch
Part 1: building the "se_resnet152" + "DPN92()" dual model
import numpy as np
from functools import partial
import torch
from torch import nn
import torch.nn.functional as F
from torch.optim import SGD, Adam
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
from torch.optim.optimizer import Optimizer
import torchvision
from torchvision import models
import pretrainedmodels
from pretrainedmodels.models import *
from torchvision import transforms as T
import random

# Fix the random seeds for reproducibility
random.seed(2050)
np.random.seed(2050)
torch.manual_seed(2050)
torch.cuda.manual_seed_all(2050)

class FCViewer(nn.Module):
    """Flattens (N, C, 1, 1) feature maps to (N, C)."""
    def forward(self, x):
        return x.view(x.size(0), -1)
'''Dual Path Networks in PyTorch.'''
class Bottleneck(nn.Module):
    def __init__(self, last_planes, in_planes, out_planes, dense_depth, stride, first_layer):
        super(Bottleneck, self).__init__()
        self.out_planes = out_planes
        self.dense_depth = dense_depth
        self.conv1 = nn.Conv2d(last_planes, in_planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv2 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride, padding=1, groups=32, bias=False)
        self.bn2 = nn.BatchNorm2d(in_planes)
        self.conv3 = nn.Conv2d(in_planes, out_planes + dense_depth, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes + dense_depth)
        self.shortcut = nn.Sequential()
        if first_layer:
            self.shortcut = nn.Sequential(
                nn.Conv2d(last_planes, out_planes + dense_depth, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_planes + dense_depth)
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        x = self.shortcut(x)
        d = self.out_planes
        # Dual path: the first d channels are added (residual path),
        # the remaining channels are concatenated (dense path).
        out = torch.cat([x[:, :d, :, :] + out[:, :d, :, :], x[:, d:, :, :], out[:, d:, :, :]], 1)
        out = F.relu(out)
        return out
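As a quick shape check (not part of the original post), a single first-layer block grows the channel count to out_planes + 2 * dense_depth: the first out_planes channels are summed as a residual path, while dense_depth new channels from both the shortcut and the block output are concatenated. The parameter values below are chosen purely for illustration:

# Hypothetical shape check for one Bottleneck block.
block = Bottleneck(last_planes=64, in_planes=96, out_planes=256, dense_depth=16,
                   stride=1, first_layer=True)
y = block(torch.zeros(2, 64, 26, 24))
print(y.shape)  # expected: torch.Size([2, 288, 26, 24]), i.e. 256 + 2 * 16 channels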
class DPN(nn.Module):
    def __init__(self, cfg):
        super(DPN, self).__init__()
        in_planes, out_planes = cfg['in_planes'], cfg['out_planes']
        num_blocks, dense_depth = cfg['num_blocks'], cfg['dense_depth']
        # 7 input channels: the visit data is a 7 x 26 x 24 tensor.
        self.conv1 = nn.Conv2d(7, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.last_planes = 64
        self.layer1 = self._make_layer(in_planes[0], out_planes[0], num_blocks[0], dense_depth[0], stride=1)
        self.layer2 = self._make_layer(in_planes[1], out_planes[1], num_blocks[1], dense_depth[1], stride=2)
        self.layer3 = self._make_layer(in_planes[2], out_planes[2], num_blocks[2], dense_depth[2], stride=2)
        self.layer4 = self._make_layer(in_planes[3], out_planes[3], num_blocks[3], dense_depth[3], stride=2)
        self.linear = nn.Linear(out_planes[3] + (num_blocks[3] + 1) * dense_depth[3], 64)
        self.bn2 = nn.BatchNorm1d(64)

    def _make_layer(self, in_planes, out_planes, num_blocks, dense_depth, stride):
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for i, stride in enumerate(strides):
            layers.append(Bottleneck(self.last_planes, in_planes, out_planes, dense_depth, stride, i == 0))
            self.last_planes = out_planes + (i + 2) * dense_depth
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        # Global average pooling; adaptive pooling handles the 4 x 3 feature map
        # produced by a 26 x 24 input (a fixed 4 x 4 kernel would fail here).
        out = F.adaptive_avg_pool2d(out, 1)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        out = F.relu(self.bn2(out))
        return out
def DPN26():
    cfg = {
        'in_planes': (96, 192, 384, 768),
        'out_planes': (256, 512, 1024, 2048),
        'num_blocks': (2, 2, 2, 2),
        'dense_depth': (16, 32, 24, 128)
    }
    return DPN(cfg)

def DPN92():
    cfg = {
        'in_planes': (96, 192, 384, 768),
        'out_planes': (256, 512, 1024, 2048),
        'num_blocks': (3, 4, 20, 3),
        'dense_depth': (16, 32, 24, 128)
    }
    return DPN(cfg)
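As another quick check (not in the original post; it relies on the adaptive pooling used in DPN.forward above), the DPN26 visit branch maps an (N, 7, 26, 24) tensor to an (N, 64) feature vector:

# Hypothetical sanity check of the visit branch alone.
visit_branch = DPN26()
dummy_visit = torch.zeros(8, 7, 26, 24)
print(visit_branch(dummy_visit).shape)  # expected: torch.Size([8, 64])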
class MultiModalNet(nn.Module):
    def __init__(self, backbone1, backbone2, drop, pretrained=True):
        super().__init__()
        if pretrained:
            img_model = pretrainedmodels.__dict__[backbone1](num_classes=1000, pretrained='imagenet')  # seresnext101
        else:
            img_model = pretrainedmodels.__dict__[backbone1](num_classes=1000, pretrained=None)
        # Note: backbone2 is accepted but not used; the visit branch is hard-coded to DPN26().
        self.visit_model = DPN26()
        # Image branch: the backbone without its pooling/classifier head, plus global average pooling.
        self.img_encoder = list(img_model.children())[:-2]
        self.img_encoder.append(nn.AdaptiveAvgPool2d(1))
        self.img_encoder = nn.Sequential(*self.img_encoder)
        if drop > 0:
            self.img_fc = nn.Sequential(FCViewer(),
                                        nn.Dropout(drop),
                                        nn.Linear(img_model.last_linear.in_features, 512),
                                        nn.BatchNorm1d(512))
        else:
            self.img_fc = nn.Sequential(FCViewer(),
                                        nn.BatchNorm1d(img_model.last_linear.in_features),
                                        nn.Linear(img_model.last_linear.in_features, 512))
        # 512 image features + 64 visit features = 576
        self.bn = nn.BatchNorm1d(576)
        self.cls = nn.Linear(576, 9)

    def forward(self, x_img, x_vis):
        x_img = self.img_encoder(x_img)
        x_img = self.img_fc(x_img)
        x_vis = self.visit_model(x_vis)
        x_cat = torch.cat((x_img, x_vis), 1)
        x_cat = F.relu(self.bn(x_cat))
        x_cat = self.cls(x_cat)
        return x_cat
# Quick shape check with dummy inputs: a 7 x 26 x 24 visit tensor and a 3 x 224 x 224 image.
test_x = Variable(torch.zeros(64, 7, 26, 24))
test_x1 = Variable(torch.zeros(64, 3, 224, 224))
model = MultiModalNet("se_resnet152", "DPN92()", 0.1)
out = model(test_x1, test_x)
print(model._modules.keys())
print(model)
print(out.shape)
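To show how the two inputs are consumed together during training, here is a minimal single-step training sketch (not from the original post; it uses random placeholder tensors and pretrained=False so no ImageNet weights are downloaded):

# Hedged sketch of one optimisation step for the dual-input model.
model = MultiModalNet("se_resnet152", "DPN92()", drop=0.1, pretrained=False)
criterion = nn.CrossEntropyLoss()
optimizer = SGD(model.parameters(), lr=0.01, momentum=0.9)

imgs = torch.randn(8, 3, 224, 224)    # image branch input (placeholder)
visits = torch.randn(8, 7, 26, 24)    # visit branch input (placeholder)
labels = torch.randint(0, 9, (8,))    # 9 classes, matching self.cls

model.train()
optimizer.zero_grad()
logits = model(imgs, visits)          # shape: (8, 9)
loss = criterion(logits, labels)
loss.backward()
optimizer.step()
print(loss.item())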
Part 2: building a densenet201 single model
# encoding:utf-8
import torchvision.models as models
import torch
import pretrainedmodels
from torch import nn
from torch.autograd import Variable

#model = models.resnet18(pretrained=True)
#print(model)
#print(model._modules.keys())
#feature = torch.nn.Sequential(*list(model.children())[:-2])  # model structure
#print(feature)
'''
class FCViewer(nn.Module):
    def forward(self, x):
        return x.view(x.size(0), -1)

class M(nn.Module):
    def __init__(self, backbone1, drop, pretrained=True):
        super(M, self).__init__()
        if pretrained:
            img_model = pretrainedmodels.__dict__[backbone1](num_classes=1000, pretrained='imagenet')
        else:
            img_model = pretrainedmodels.__dict__[backbone1](num_classes=1000, pretrained=None)
        self.img_encoder = list(img_model.children())[:-1]
        self.img_encoder.append(nn.AdaptiveAvgPool2d(1))
        self.img_encoder = nn.Sequential(*self.img_encoder)
        if drop > 0:
            self.img_fc = nn.Sequential(FCViewer(),
                                        nn.Dropout(drop),
                                        nn.Linear(img_model.last_linear.in_features, 236))
        else:
            self.img_fc = nn.Sequential(FCViewer(),
                                        nn.Linear(img_model.last_linear.in_features, 236))
        self.cls = nn.Linear(236, 9)

    def forward(self, x_img):
        x_img = self.img_encoder(x_img)
        x_img = self.img_fc(x_img)
        return x_img

model1 = M('densenet201', 0, pretrained=True)
print(model1)
print(model1._modules.keys())
feature = torch.nn.Sequential(*list(model1.children())[:-2])  # model structure
feature1 = torch.nn.Sequential(*list(model1.children())[:])
#print(feature)
#print(feature1)
test_x = Variable(torch.zeros(1, 3, 100, 100))
out = feature(test_x)
print(out.shape)
'''
'''
import torch.nn.functional as F

class LenetNet(nn.Module):
    def __init__(self):
        super(LenetNet, self).__init__()
        self.conv1 = nn.Conv2d(7, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(144, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        x = x.view(x.size()[0], -1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

model1 = LenetNet()
#print(model1)
#print(model1._modules.keys())
feature = torch.nn.Sequential(*list(model1.children())[:-3])  # model structure
#feature1 = torch.nn.Sequential(*list(model1.children())[:])
print(feature)
#print(feature1)
test_x = Variable(torch.zeros(1, 7, 27, 24))
out = model1(test_x)
print(out.shape)

class FCViewer(nn.Module):
    def forward(self, x):
        return x.view(x.size(0), -1)

class M(nn.Module):
    def __init__(self):
        super(M, self).__init__()
        img_model = model1
        self.img_encoder = list(img_model.children())[:-3]
        self.img_encoder.append(nn.AdaptiveAvgPool2d(1))
        self.img_encoder = nn.Sequential(*self.img_encoder)
        self.img_fc = nn.Sequential(FCViewer(),
                                    nn.Linear(16, 236))
        self.cls = nn.Linear(236, 9)

    def forward(self, x_img):
        x_img = self.img_encoder(x_img)
        x_img = self.img_fc(x_img)
        return x_img

model2 = M()
test_x = Variable(torch.zeros(1, 7, 27, 24))
out = model2(test_x)
print(out.shape)
'''
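Since the Part 2 code above is left commented out in the original post, the following is a minimal, uncommented sketch of the same densenet201 single model. The class name DenseNetSingle is made up here; unlike the commented-out class M, its forward also applies self.cls so the output is the 9-class logits. Pass pretrained=True only if downloading the ImageNet weights is acceptable.

# Hedged, runnable sketch of the densenet201 single model described above.
import torch
import pretrainedmodels
from torch import nn

class FCViewer(nn.Module):
    def forward(self, x):
        return x.view(x.size(0), -1)

class DenseNetSingle(nn.Module):  # hypothetical name, not from the original post
    def __init__(self, backbone='densenet201', drop=0.0, pretrained=False):
        super(DenseNetSingle, self).__init__()
        weights = 'imagenet' if pretrained else None
        img_model = pretrainedmodels.__dict__[backbone](num_classes=1000, pretrained=weights)
        encoder = list(img_model.children())[:-1]     # drop last_linear, keep the features
        encoder.append(nn.AdaptiveAvgPool2d(1))
        self.img_encoder = nn.Sequential(*encoder)
        layers = [FCViewer()]
        if drop > 0:
            layers.append(nn.Dropout(drop))
        layers.append(nn.Linear(img_model.last_linear.in_features, 236))
        self.img_fc = nn.Sequential(*layers)
        self.cls = nn.Linear(236, 9)

    def forward(self, x_img):
        x = self.img_encoder(x_img)
        x = self.img_fc(x)
        return self.cls(x)    # 9-class logits

model = DenseNetSingle(pretrained=False)
out = model(torch.zeros(2, 3, 224, 224))
print(out.shape)  # expected: torch.Size([2, 9])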
That's all for this example of building multiple models in PyTorch; I hope it serves as a useful reference.