first commit
midas/__init__.py (Normal file, 1 line)
@@ -0,0 +1 @@
from .model_loader import load_model, default_models
midas/backbones/beit.py (Normal file, 196 lines)
@@ -0,0 +1,196 @@
import timm
import torch
import types

import numpy as np
import torch.nn.functional as F

from .utils import forward_adapted_unflatten, make_backbone_default
from timm.models.beit import gen_relative_position_index
from torch.utils.checkpoint import checkpoint
from typing import Optional


def forward_beit(pretrained, x):
    return forward_adapted_unflatten(pretrained, x, "forward_features")


def patch_embed_forward(self, x):
    """
    Modification of timm.models.layers.patch_embed.py: PatchEmbed.forward to support arbitrary window sizes.
    """
    x = self.proj(x)
    if self.flatten:
        x = x.flatten(2).transpose(1, 2)
    x = self.norm(x)
    return x


def _get_rel_pos_bias(self, window_size):
    """
    Modification of timm.models.beit.py: Attention._get_rel_pos_bias to support arbitrary window sizes.
    """
    old_height = 2 * self.window_size[0] - 1
    old_width = 2 * self.window_size[1] - 1

    new_height = 2 * window_size[0] - 1
    new_width = 2 * window_size[1] - 1

    old_relative_position_bias_table = self.relative_position_bias_table

    old_num_relative_distance = self.num_relative_distance
    new_num_relative_distance = new_height * new_width + 3

    old_sub_table = old_relative_position_bias_table[:old_num_relative_distance - 3]

    # Interpolate the pretrained bias table to the new window size; the last 3 rows
    # (cls-token interactions) are kept unchanged.
    old_sub_table = old_sub_table.reshape(1, old_width, old_height, -1).permute(0, 3, 1, 2)
    new_sub_table = F.interpolate(old_sub_table, size=(new_height, new_width), mode="bilinear")
    new_sub_table = new_sub_table.permute(0, 2, 3, 1).reshape(new_num_relative_distance - 3, -1)

    new_relative_position_bias_table = torch.cat(
        [new_sub_table, old_relative_position_bias_table[old_num_relative_distance - 3:]])

    # Cache the relative position index per window size, since computing it is expensive.
    key = str(window_size[1]) + "," + str(window_size[0])
    if key not in self.relative_position_indices.keys():
        self.relative_position_indices[key] = gen_relative_position_index(window_size)

    relative_position_bias = new_relative_position_bias_table[
        self.relative_position_indices[key].view(-1)].view(
        window_size[0] * window_size[1] + 1,
        window_size[0] * window_size[1] + 1, -1)  # Wh*Ww,Wh*Ww,nH
    relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
    return relative_position_bias.unsqueeze(0)


def attention_forward(self, x, resolution, shared_rel_pos_bias: Optional[torch.Tensor] = None):
    """
    Modification of timm.models.beit.py: Attention.forward to support arbitrary window sizes.
    """
    B, N, C = x.shape

    qkv_bias = torch.cat((self.q_bias, self.k_bias, self.v_bias)) if self.q_bias is not None else None
    qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
    qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
    q, k, v = qkv.unbind(0)  # make torchscript happy (cannot use tensor as tuple)

    q = q * self.scale
    attn = (q @ k.transpose(-2, -1))

    if self.relative_position_bias_table is not None:
        window_size = tuple(np.array(resolution) // 16)
        attn = attn + self._get_rel_pos_bias(window_size)
    if shared_rel_pos_bias is not None:
        attn = attn + shared_rel_pos_bias

    attn = attn.softmax(dim=-1)
    attn = self.attn_drop(attn)

    x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
    x = self.proj(x)
    x = self.proj_drop(x)
    return x


def block_forward(self, x, resolution, shared_rel_pos_bias: Optional[torch.Tensor] = None):
    """
    Modification of timm.models.beit.py: Block.forward to support arbitrary window sizes.
    """
    if self.gamma_1 is None:
        x = x + self.drop_path(self.attn(self.norm1(x), resolution, shared_rel_pos_bias=shared_rel_pos_bias))
        x = x + self.drop_path(self.mlp(self.norm2(x)))
    else:
        x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), resolution,
                                                        shared_rel_pos_bias=shared_rel_pos_bias))
        x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
    return x


def beit_forward_features(self, x):
    """
    Modification of timm.models.beit.py: Beit.forward_features to support arbitrary window sizes.
    """
    resolution = x.shape[2:]

    x = self.patch_embed(x)
    x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1)
    if self.pos_embed is not None:
        x = x + self.pos_embed
    x = self.pos_drop(x)

    rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
    for blk in self.blocks:
        if self.grad_checkpointing and not torch.jit.is_scripting():
            # Pass resolution through, since the patched block_forward requires it.
            x = checkpoint(blk, x, resolution, shared_rel_pos_bias=rel_pos_bias)
        else:
            x = blk(x, resolution, shared_rel_pos_bias=rel_pos_bias)
    x = self.norm(x)
    return x


def _make_beit_backbone(
        model,
        features=[96, 192, 384, 768],
        size=[384, 384],
        hooks=[0, 4, 8, 11],
        vit_features=768,
        use_readout="ignore",
        start_index=1,
        start_index_readout=1,
):
    backbone = make_backbone_default(model, features, size, hooks, vit_features, use_readout, start_index,
                                     start_index_readout)

    backbone.model.patch_embed.forward = types.MethodType(patch_embed_forward, backbone.model.patch_embed)
    backbone.model.forward_features = types.MethodType(beit_forward_features, backbone.model)

    for block in backbone.model.blocks:
        attn = block.attn
        attn._get_rel_pos_bias = types.MethodType(_get_rel_pos_bias, attn)
        attn.forward = types.MethodType(attention_forward, attn)
        attn.relative_position_indices = {}

        block.forward = types.MethodType(block_forward, block)

    return backbone


def _make_pretrained_beitl16_512(pretrained, use_readout="ignore", hooks=None):
    model = timm.create_model("beit_large_patch16_512", pretrained=pretrained)

    hooks = [5, 11, 17, 23] if hooks is None else hooks

    features = [256, 512, 1024, 1024]

    return _make_beit_backbone(
        model,
        features=features,
        size=[512, 512],
        hooks=hooks,
        vit_features=1024,
        use_readout=use_readout,
    )


def _make_pretrained_beitl16_384(pretrained, use_readout="ignore", hooks=None):
    model = timm.create_model("beit_large_patch16_384", pretrained=pretrained)

    hooks = [5, 11, 17, 23] if hooks is None else hooks
    return _make_beit_backbone(
        model,
        features=[256, 512, 1024, 1024],
        hooks=hooks,
        vit_features=1024,
        use_readout=use_readout,
    )


def _make_pretrained_beitb16_384(pretrained, use_readout="ignore", hooks=None):
    model = timm.create_model("beit_base_patch16_384", pretrained=pretrained)

    hooks = [2, 5, 8, 11] if hooks is None else hooks
    return _make_beit_backbone(
        model,
        features=[96, 192, 384, 768],
        hooks=hooks,
        use_readout=use_readout,
    )
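The point of these overrides is that the BEiT backbones accept inputs whose size differs from the training resolution. A minimal sketch of that, assuming a timm version compatible with this code (it imports gen_relative_position_index from timm.models.beit) and using a randomly initialized backbone (pretrained=False) so no weights are downloaded; the input sides only need to be multiples of the 16-pixel patch size:

import torch

backbone = _make_pretrained_beitb16_384(pretrained=False)
x = torch.randn(1, 3, 288, 416)   # not 384x384; multiples of 16 suffice
layers = forward_beit(backbone, x)
print([l.shape for l in layers])  # four feature maps at 1/4, 1/8, 1/16, 1/32 of the input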
midas/backbones/levit.py (Normal file, 106 lines)
@@ -0,0 +1,106 @@
import timm
import torch
import torch.nn as nn
import numpy as np

from .utils import activations, get_activation, Transpose


def forward_levit(pretrained, x):
    pretrained.model.forward_features(x)

    layer_1 = pretrained.activations["1"]
    layer_2 = pretrained.activations["2"]
    layer_3 = pretrained.activations["3"]

    layer_1 = pretrained.act_postprocess1(layer_1)
    layer_2 = pretrained.act_postprocess2(layer_2)
    layer_3 = pretrained.act_postprocess3(layer_3)

    return layer_1, layer_2, layer_3


def _make_levit_backbone(
        model,
        hooks=[3, 11, 21],
        patch_grid=[14, 14]
):
    pretrained = nn.Module()

    pretrained.model = model
    pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1"))
    pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2"))
    pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3"))

    pretrained.activations = activations

    patch_grid_size = np.array(patch_grid, dtype=int)

    # LeViT halves the token grid twice over its stages, hence the /2 and /4 below.
    pretrained.act_postprocess1 = nn.Sequential(
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size(patch_grid_size.tolist()))
    )
    pretrained.act_postprocess2 = nn.Sequential(
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size((np.ceil(patch_grid_size / 2).astype(int)).tolist()))
    )
    pretrained.act_postprocess3 = nn.Sequential(
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size((np.ceil(patch_grid_size / 4).astype(int)).tolist()))
    )

    return pretrained


class ConvTransposeNorm(nn.Sequential):
    """
    Modification of
    https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/levit.py: ConvNorm
    such that ConvTranspose2d is used instead of Conv2d.
    """

    def __init__(
            self, in_chs, out_chs, kernel_size=1, stride=1, pad=0, dilation=1,
            groups=1, bn_weight_init=1):
        super().__init__()
        self.add_module('c',
                        nn.ConvTranspose2d(in_chs, out_chs, kernel_size, stride, pad, dilation, groups, bias=False))
        self.add_module('bn', nn.BatchNorm2d(out_chs))

        nn.init.constant_(self.bn.weight, bn_weight_init)

    @torch.no_grad()
    def fuse(self):
        # Fold the batch norm into the convolution weights (inherited from timm's ConvNorm).
        c, bn = self._modules.values()
        w = bn.weight / (bn.running_var + bn.eps) ** 0.5
        w = c.weight * w[:, None, None, None]
        b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5
        m = nn.ConvTranspose2d(
            w.size(1), w.size(0), w.shape[2:], stride=self.c.stride,
            padding=self.c.padding, dilation=self.c.dilation, groups=self.c.groups)
        m.weight.data.copy_(w)
        m.bias.data.copy_(b)
        return m


def stem_b4_transpose(in_chs, out_chs, activation):
    """
    Modification of
    https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/levit.py: stem_b16
    such that ConvTranspose2d is used instead of Conv2d and the stem is also reduced to half.
    """
    return nn.Sequential(
        ConvTransposeNorm(in_chs, out_chs, 3, 2, 1),
        activation(),
        ConvTransposeNorm(out_chs, out_chs // 2, 3, 2, 1),
        activation())


def _make_pretrained_levit_384(pretrained, hooks=None):
    model = timm.create_model("levit_384", pretrained=pretrained)

    hooks = [3, 11, 21] if hooks is None else hooks
    return _make_levit_backbone(
        model,
        hooks=hooks
    )
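What the transposed stem does in practice: it trades channels for resolution twice. A minimal sketch with the arguments used by the DPT decoder below (get_act_layer is timm's standard activation lookup); shapes follow standard ConvTranspose2d arithmetic:

import torch
from timm.models.layers import get_act_layer

stem = stem_b4_transpose(256, 128, get_act_layer("hard_swish"))
x = torch.randn(1, 256, 7, 7)
# Each ConvTransposeNorm(3, 2, 1) maps H -> 2H - 1, so 7 -> 13 -> 25,
# while channels go 256 -> 128 -> 64.
print(stem(x).shape)  # torch.Size([1, 64, 25, 25])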
midas/backbones/next_vit.py (Normal file, 39 lines)
@@ -0,0 +1,39 @@
import timm

import torch.nn as nn

from pathlib import Path
from .utils import activations, forward_default, get_activation

# Star import registers the Next-ViT models with timm.
from ..external.next_vit.classification.nextvit import *


def forward_next_vit(pretrained, x):
    return forward_default(pretrained, x, "forward")


def _make_next_vit_backbone(
        model,
        hooks=[2, 6, 36, 39],
):
    pretrained = nn.Module()

    pretrained.model = model
    pretrained.model.features[hooks[0]].register_forward_hook(get_activation("1"))
    pretrained.model.features[hooks[1]].register_forward_hook(get_activation("2"))
    pretrained.model.features[hooks[2]].register_forward_hook(get_activation("3"))
    pretrained.model.features[hooks[3]].register_forward_hook(get_activation("4"))

    pretrained.activations = activations

    return pretrained


def _make_pretrained_next_vit_large_6m(hooks=None):
    model = timm.create_model("nextvit_large")

    hooks = [2, 6, 36, 39] if hooks is None else hooks
    return _make_next_vit_backbone(
        model,
        hooks=hooks,
    )
midas/backbones/swin.py (Normal file, 13 lines)
@@ -0,0 +1,13 @@
import timm

from .swin_common import _make_swin_backbone


def _make_pretrained_swinl12_384(pretrained, hooks=None):
    model = timm.create_model("swin_large_patch4_window12_384", pretrained=pretrained)

    hooks = [1, 1, 17, 1] if hooks is None else hooks
    return _make_swin_backbone(
        model,
        hooks=hooks
    )
midas/backbones/swin2.py (Normal file, 34 lines)
@@ -0,0 +1,34 @@
import timm

from .swin_common import _make_swin_backbone


def _make_pretrained_swin2l24_384(pretrained, hooks=None):
    model = timm.create_model("swinv2_large_window12to24_192to384_22kft1k", pretrained=pretrained)

    hooks = [1, 1, 17, 1] if hooks is None else hooks
    return _make_swin_backbone(
        model,
        hooks=hooks
    )


def _make_pretrained_swin2b24_384(pretrained, hooks=None):
    model = timm.create_model("swinv2_base_window12to24_192to384_22kft1k", pretrained=pretrained)

    hooks = [1, 1, 17, 1] if hooks is None else hooks
    return _make_swin_backbone(
        model,
        hooks=hooks
    )


def _make_pretrained_swin2t16_256(pretrained, hooks=None):
    model = timm.create_model("swinv2_tiny_window16_256", pretrained=pretrained)

    hooks = [1, 1, 5, 1] if hooks is None else hooks
    return _make_swin_backbone(
        model,
        hooks=hooks,
        patch_grid=[64, 64]
    )
midas/backbones/swin_common.py (Normal file, 52 lines)
@@ -0,0 +1,52 @@
import torch

import torch.nn as nn
import numpy as np

from .utils import activations, forward_default, get_activation, Transpose


def forward_swin(pretrained, x):
    return forward_default(pretrained, x)


def _make_swin_backbone(
        model,
        hooks=[1, 1, 17, 1],
        patch_grid=[96, 96]
):
    pretrained = nn.Module()

    pretrained.model = model
    pretrained.model.layers[0].blocks[hooks[0]].register_forward_hook(get_activation("1"))
    pretrained.model.layers[1].blocks[hooks[1]].register_forward_hook(get_activation("2"))
    pretrained.model.layers[2].blocks[hooks[2]].register_forward_hook(get_activation("3"))
    pretrained.model.layers[3].blocks[hooks[3]].register_forward_hook(get_activation("4"))

    pretrained.activations = activations

    if hasattr(model, "patch_grid"):
        used_patch_grid = model.patch_grid
    else:
        used_patch_grid = patch_grid

    patch_grid_size = np.array(used_patch_grid, dtype=int)

    # Swin halves the token grid after every stage, hence the //2, //4, //8 below.
    pretrained.act_postprocess1 = nn.Sequential(
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size(patch_grid_size.tolist()))
    )
    pretrained.act_postprocess2 = nn.Sequential(
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size((patch_grid_size // 2).tolist()))
    )
    pretrained.act_postprocess3 = nn.Sequential(
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size((patch_grid_size // 4).tolist()))
    )
    pretrained.act_postprocess4 = nn.Sequential(
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size((patch_grid_size // 8).tolist()))
    )

    return pretrained
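The act_postprocess modules only reshape: they turn a (batch, tokens, channels) sequence from a Swin stage into a (batch, channels, height, width) feature map on that stage's token grid. A minimal self-contained sketch, with sizes matching the first stage of the 384-pixel models:

import torch
import torch.nn as nn

tokens = torch.randn(2, 96 * 96, 192)  # (B, N, C) activation from the first stage
post = nn.Sequential(Transpose(1, 2), nn.Unflatten(2, torch.Size([96, 96])))
print(post(tokens).shape)  # torch.Size([2, 192, 96, 96])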
midas/backbones/utils.py (Normal file, 249 lines)
@@ -0,0 +1,249 @@
import torch

import torch.nn as nn


class Slice(nn.Module):
    def __init__(self, start_index=1):
        super(Slice, self).__init__()
        self.start_index = start_index

    def forward(self, x):
        return x[:, self.start_index:]


class AddReadout(nn.Module):
    def __init__(self, start_index=1):
        super(AddReadout, self).__init__()
        self.start_index = start_index

    def forward(self, x):
        if self.start_index == 2:
            readout = (x[:, 0] + x[:, 1]) / 2
        else:
            readout = x[:, 0]
        return x[:, self.start_index:] + readout.unsqueeze(1)


class ProjectReadout(nn.Module):
    def __init__(self, in_features, start_index=1):
        super(ProjectReadout, self).__init__()
        self.start_index = start_index

        self.project = nn.Sequential(nn.Linear(2 * in_features, in_features), nn.GELU())

    def forward(self, x):
        readout = x[:, 0].unsqueeze(1).expand_as(x[:, self.start_index:])
        features = torch.cat((x[:, self.start_index:], readout), -1)

        return self.project(features)


class Transpose(nn.Module):
    def __init__(self, dim0, dim1):
        super(Transpose, self).__init__()
        self.dim0 = dim0
        self.dim1 = dim1

    def forward(self, x):
        x = x.transpose(self.dim0, self.dim1)
        return x


# Module-level store that the forward hooks below write into.
activations = {}


def get_activation(name):
    def hook(model, input, output):
        activations[name] = output

    return hook


def forward_default(pretrained, x, function_name="forward_features"):
    getattr(pretrained.model, function_name)(x)

    layer_1 = pretrained.activations["1"]
    layer_2 = pretrained.activations["2"]
    layer_3 = pretrained.activations["3"]
    layer_4 = pretrained.activations["4"]

    if hasattr(pretrained, "act_postprocess1"):
        layer_1 = pretrained.act_postprocess1(layer_1)
    if hasattr(pretrained, "act_postprocess2"):
        layer_2 = pretrained.act_postprocess2(layer_2)
    if hasattr(pretrained, "act_postprocess3"):
        layer_3 = pretrained.act_postprocess3(layer_3)
    if hasattr(pretrained, "act_postprocess4"):
        layer_4 = pretrained.act_postprocess4(layer_4)

    return layer_1, layer_2, layer_3, layer_4


def forward_adapted_unflatten(pretrained, x, function_name="forward_features"):
    b, c, h, w = x.shape

    getattr(pretrained.model, function_name)(x)

    layer_1 = pretrained.activations["1"]
    layer_2 = pretrained.activations["2"]
    layer_3 = pretrained.activations["3"]
    layer_4 = pretrained.activations["4"]

    # Apply the readout operation and transpose (the first two postprocess stages).
    layer_1 = pretrained.act_postprocess1[0:2](layer_1)
    layer_2 = pretrained.act_postprocess2[0:2](layer_2)
    layer_3 = pretrained.act_postprocess3[0:2](layer_3)
    layer_4 = pretrained.act_postprocess4[0:2](layer_4)

    # Unflatten to the grid implied by the actual input size instead of the
    # fixed grid baked into the postprocess modules (stage index 2 is skipped below).
    unflatten = nn.Sequential(
        nn.Unflatten(
            2,
            torch.Size(
                [
                    h // pretrained.model.patch_size[1],
                    w // pretrained.model.patch_size[0],
                ]
            ),
        )
    )

    if layer_1.ndim == 3:
        layer_1 = unflatten(layer_1)
    if layer_2.ndim == 3:
        layer_2 = unflatten(layer_2)
    if layer_3.ndim == 3:
        layer_3 = unflatten(layer_3)
    if layer_4.ndim == 3:
        layer_4 = unflatten(layer_4)

    layer_1 = pretrained.act_postprocess1[3: len(pretrained.act_postprocess1)](layer_1)
    layer_2 = pretrained.act_postprocess2[3: len(pretrained.act_postprocess2)](layer_2)
    layer_3 = pretrained.act_postprocess3[3: len(pretrained.act_postprocess3)](layer_3)
    layer_4 = pretrained.act_postprocess4[3: len(pretrained.act_postprocess4)](layer_4)

    return layer_1, layer_2, layer_3, layer_4


def get_readout_oper(vit_features, features, use_readout, start_index=1):
    if use_readout == "ignore":
        readout_oper = [Slice(start_index)] * len(features)
    elif use_readout == "add":
        readout_oper = [AddReadout(start_index)] * len(features)
    elif use_readout == "project":
        readout_oper = [
            ProjectReadout(vit_features, start_index) for out_feat in features
        ]
    else:
        raise ValueError(
            "wrong operation for readout token, use_readout can be 'ignore', 'add', or 'project'"
        )

    return readout_oper


def make_backbone_default(
        model,
        features=[96, 192, 384, 768],
        size=[384, 384],
        hooks=[2, 5, 8, 11],
        vit_features=768,
        use_readout="ignore",
        start_index=1,
        start_index_readout=1,
):
    pretrained = nn.Module()

    pretrained.model = model
    pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1"))
    pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2"))
    pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3"))
    pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4"))

    pretrained.activations = activations

    readout_oper = get_readout_oper(vit_features, features, use_readout, start_index_readout)

    pretrained.act_postprocess1 = nn.Sequential(
        readout_oper[0],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[0],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
        nn.ConvTranspose2d(
            in_channels=features[0],
            out_channels=features[0],
            kernel_size=4,
            stride=4,
            padding=0,
            bias=True,
            dilation=1,
            groups=1,
        ),
    )

    pretrained.act_postprocess2 = nn.Sequential(
        readout_oper[1],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[1],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
        nn.ConvTranspose2d(
            in_channels=features[1],
            out_channels=features[1],
            kernel_size=2,
            stride=2,
            padding=0,
            bias=True,
            dilation=1,
            groups=1,
        ),
    )

    pretrained.act_postprocess3 = nn.Sequential(
        readout_oper[2],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[2],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
    )

    pretrained.act_postprocess4 = nn.Sequential(
        readout_oper[3],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[3],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
        nn.Conv2d(
            in_channels=features[3],
            out_channels=features[3],
            kernel_size=3,
            stride=2,
            padding=1,
        ),
    )

    pretrained.model.start_index = start_index
    pretrained.model.patch_size = [16, 16]

    return pretrained
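How the activation registry works in practice: get_activation returns a closure that writes a module's output into the shared activations dict under a fixed key. A small self-contained sketch (toy module, not a MiDaS backbone):

import torch
import torch.nn as nn

net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
net[1].register_forward_hook(get_activation("relu_out"))
net(torch.randn(3, 4))
print(activations["relu_out"].shape)  # torch.Size([3, 8])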
midas/backbones/vit.py (Normal file, 221 lines)
@@ -0,0 +1,221 @@
import torch
import torch.nn as nn
import timm
import types
import math
import torch.nn.functional as F

from .utils import (activations, forward_adapted_unflatten, get_activation, get_readout_oper,
                    make_backbone_default, Transpose)


def forward_vit(pretrained, x):
    return forward_adapted_unflatten(pretrained, x, "forward_flex")


def _resize_pos_embed(self, posemb, gs_h, gs_w):
    posemb_tok, posemb_grid = (
        posemb[:, : self.start_index],
        posemb[0, self.start_index:],
    )

    gs_old = int(math.sqrt(len(posemb_grid)))

    # Bilinearly interpolate the grid part of the position embedding to the new grid size.
    posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
    posemb_grid = F.interpolate(posemb_grid, size=(gs_h, gs_w), mode="bilinear")
    posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_h * gs_w, -1)

    posemb = torch.cat([posemb_tok, posemb_grid], dim=1)

    return posemb


def forward_flex(self, x):
    b, c, h, w = x.shape

    pos_embed = self._resize_pos_embed(
        self.pos_embed, h // self.patch_size[1], w // self.patch_size[0]
    )

    B = x.shape[0]

    if hasattr(self.patch_embed, "backbone"):
        x = self.patch_embed.backbone(x)
        if isinstance(x, (list, tuple)):
            x = x[-1]  # last feature if backbone outputs list/tuple of features

    x = self.patch_embed.proj(x).flatten(2).transpose(1, 2)

    if getattr(self, "dist_token", None) is not None:
        cls_tokens = self.cls_token.expand(
            B, -1, -1
        )  # stole cls_tokens impl from Phil Wang, thanks
        dist_token = self.dist_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, dist_token, x), dim=1)
    else:
        if self.no_embed_class:
            x = x + pos_embed
        cls_tokens = self.cls_token.expand(
            B, -1, -1
        )  # stole cls_tokens impl from Phil Wang, thanks
        x = torch.cat((cls_tokens, x), dim=1)

    if not self.no_embed_class:
        x = x + pos_embed
    x = self.pos_drop(x)

    for blk in self.blocks:
        x = blk(x)

    x = self.norm(x)

    return x


def _make_vit_b16_backbone(
        model,
        features=[96, 192, 384, 768],
        size=[384, 384],
        hooks=[2, 5, 8, 11],
        vit_features=768,
        use_readout="ignore",
        start_index=1,
        start_index_readout=1,
):
    pretrained = make_backbone_default(model, features, size, hooks, vit_features, use_readout, start_index,
                                       start_index_readout)

    # We inject these functions into the VisionTransformer instances so that
    # we can use them with interpolated position embeddings without modifying the library source.
    pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model)
    pretrained.model._resize_pos_embed = types.MethodType(
        _resize_pos_embed, pretrained.model
    )

    return pretrained


def _make_pretrained_vitl16_384(pretrained, use_readout="ignore", hooks=None):
    model = timm.create_model("vit_large_patch16_384", pretrained=pretrained)

    hooks = [5, 11, 17, 23] if hooks is None else hooks
    return _make_vit_b16_backbone(
        model,
        features=[256, 512, 1024, 1024],
        hooks=hooks,
        vit_features=1024,
        use_readout=use_readout,
    )


def _make_pretrained_vitb16_384(pretrained, use_readout="ignore", hooks=None):
    model = timm.create_model("vit_base_patch16_384", pretrained=pretrained)

    hooks = [2, 5, 8, 11] if hooks is None else hooks
    return _make_vit_b16_backbone(
        model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout
    )


def _make_vit_b_rn50_backbone(
        model,
        features=[256, 512, 768, 768],
        size=[384, 384],
        hooks=[0, 1, 8, 11],
        vit_features=768,
        patch_size=[16, 16],
        number_stages=2,
        use_vit_only=False,
        use_readout="ignore",
        start_index=1,
):
    pretrained = nn.Module()

    pretrained.model = model

    used_number_stages = 0 if use_vit_only else number_stages
    for s in range(used_number_stages):
        pretrained.model.patch_embed.backbone.stages[s].register_forward_hook(
            get_activation(str(s + 1))
        )
    for s in range(used_number_stages, 4):
        pretrained.model.blocks[hooks[s]].register_forward_hook(get_activation(str(s + 1)))

    pretrained.activations = activations

    readout_oper = get_readout_oper(vit_features, features, use_readout, start_index)

    for s in range(used_number_stages):
        value = nn.Sequential(nn.Identity(), nn.Identity(), nn.Identity())
        setattr(pretrained, f"act_postprocess{s + 1}", value)
    for s in range(used_number_stages, 4):
        if s < number_stages:
            final_layer = nn.ConvTranspose2d(
                in_channels=features[s],
                out_channels=features[s],
                kernel_size=4 // (2 ** s),
                stride=4 // (2 ** s),
                padding=0,
                bias=True,
                dilation=1,
                groups=1,
            )
        elif s > number_stages:
            final_layer = nn.Conv2d(
                in_channels=features[3],
                out_channels=features[3],
                kernel_size=3,
                stride=2,
                padding=1,
            )
        else:
            final_layer = None

        layers = [
            readout_oper[s],
            Transpose(1, 2),
            nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
            nn.Conv2d(
                in_channels=vit_features,
                out_channels=features[s],
                kernel_size=1,
                stride=1,
                padding=0,
            ),
        ]
        if final_layer is not None:
            layers.append(final_layer)

        value = nn.Sequential(*layers)
        setattr(pretrained, f"act_postprocess{s + 1}", value)

    pretrained.model.start_index = start_index
    pretrained.model.patch_size = patch_size

    # We inject these functions into the VisionTransformer instances so that
    # we can use them with interpolated position embeddings without modifying the library source.
    pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model)
    pretrained.model._resize_pos_embed = types.MethodType(
        _resize_pos_embed, pretrained.model
    )

    return pretrained


def _make_pretrained_vitb_rn50_384(
        pretrained, use_readout="ignore", hooks=None, use_vit_only=False
):
    model = timm.create_model("vit_base_resnet50_384", pretrained=pretrained)

    hooks = [0, 1, 8, 11] if hooks is None else hooks
    return _make_vit_b_rn50_backbone(
        model,
        features=[256, 512, 768, 768],
        size=[384, 384],
        hooks=hooks,
        use_vit_only=use_vit_only,
        use_readout=use_readout,
    )
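_resize_pos_embed is what lets the plain ViT backbones run at resolutions other than 384x384: the grid part of the position embedding is bilinearly resized to the new token grid while the leading class-token embedding is kept. A sketch, assuming timm is installed (pretrained=False, so only the model definition is created):

import torch

backbone = _make_pretrained_vitl16_384(pretrained=False)
posemb = backbone.model.pos_embed  # (1, 1 + 24*24, 1024) for ViT-L/16 at 384
resized = backbone.model._resize_pos_embed(posemb, 20, 27)  # e.g. for a 320x432 input
print(resized.shape)  # torch.Size([1, 1 + 20*27, 1024]) == (1, 541, 1024)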
midas/base_model.py (Normal file, 16 lines)
@@ -0,0 +1,16 @@
import torch


class BaseModel(torch.nn.Module):
    def load(self, path):
        """Load model from file.

        Args:
            path (str): file path
        """
        parameters = torch.load(path, map_location=torch.device('cpu'))

        if "optimizer" in parameters:
            parameters = parameters["model"]

        self.load_state_dict(parameters)
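load() accepts either a bare state dict or a training checkpoint that wraps one; the presence of an "optimizer" key is what signals the latter. A toy sketch of that convention (the Tiny module is hypothetical, for illustration only):

import torch
import torch.nn as nn

class Tiny(BaseModel):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(2, 2)

net = Tiny()
torch.save({"model": net.state_dict(), "optimizer": {}}, "ckpt.pt")
net.load("ckpt.pt")  # unwraps the "model" entry before load_state_dict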
midas/blocks.py (Normal file, 439 lines)
@@ -0,0 +1,439 @@
import torch
import torch.nn as nn

from .backbones.beit import (
    _make_pretrained_beitl16_512,
    _make_pretrained_beitl16_384,
    _make_pretrained_beitb16_384,
    forward_beit,
)
from .backbones.swin_common import (
    forward_swin,
)
from .backbones.swin2 import (
    _make_pretrained_swin2l24_384,
    _make_pretrained_swin2b24_384,
    _make_pretrained_swin2t16_256,
)
from .backbones.swin import (
    _make_pretrained_swinl12_384,
)
from .backbones.levit import (
    _make_pretrained_levit_384,
    forward_levit,
)
from .backbones.vit import (
    _make_pretrained_vitb_rn50_384,
    _make_pretrained_vitl16_384,
    _make_pretrained_vitb16_384,
    forward_vit,
)


def _make_encoder(backbone, features, use_pretrained, groups=1, expand=False, exportable=True, hooks=None,
                  use_vit_only=False, use_readout="ignore", in_features=[96, 256, 512, 1024]):
    if backbone == "beitl16_512":
        pretrained = _make_pretrained_beitl16_512(
            use_pretrained, hooks=hooks, use_readout=use_readout
        )
        scratch = _make_scratch(
            [256, 512, 1024, 1024], features, groups=groups, expand=expand
        )  # BEiT_512-L (backbone)
    elif backbone == "beitl16_384":
        pretrained = _make_pretrained_beitl16_384(
            use_pretrained, hooks=hooks, use_readout=use_readout
        )
        scratch = _make_scratch(
            [256, 512, 1024, 1024], features, groups=groups, expand=expand
        )  # BEiT_384-L (backbone)
    elif backbone == "beitb16_384":
        pretrained = _make_pretrained_beitb16_384(
            use_pretrained, hooks=hooks, use_readout=use_readout
        )
        scratch = _make_scratch(
            [96, 192, 384, 768], features, groups=groups, expand=expand
        )  # BEiT_384-B (backbone)
    elif backbone == "swin2l24_384":
        pretrained = _make_pretrained_swin2l24_384(
            use_pretrained, hooks=hooks
        )
        scratch = _make_scratch(
            [192, 384, 768, 1536], features, groups=groups, expand=expand
        )  # Swin2-L/12to24 (backbone)
    elif backbone == "swin2b24_384":
        pretrained = _make_pretrained_swin2b24_384(
            use_pretrained, hooks=hooks
        )
        scratch = _make_scratch(
            [128, 256, 512, 1024], features, groups=groups, expand=expand
        )  # Swin2-B/12to24 (backbone)
    elif backbone == "swin2t16_256":
        pretrained = _make_pretrained_swin2t16_256(
            use_pretrained, hooks=hooks
        )
        scratch = _make_scratch(
            [96, 192, 384, 768], features, groups=groups, expand=expand
        )  # Swin2-T/16 (backbone)
    elif backbone == "swinl12_384":
        pretrained = _make_pretrained_swinl12_384(
            use_pretrained, hooks=hooks
        )
        scratch = _make_scratch(
            [192, 384, 768, 1536], features, groups=groups, expand=expand
        )  # Swin-L/12 (backbone)
    elif backbone == "next_vit_large_6m":
        from .backbones.next_vit import _make_pretrained_next_vit_large_6m
        pretrained = _make_pretrained_next_vit_large_6m(hooks=hooks)
        scratch = _make_scratch(
            in_features, features, groups=groups, expand=expand
        )  # Next-ViT-L on ImageNet-1K-6M (backbone)
    elif backbone == "levit_384":
        pretrained = _make_pretrained_levit_384(
            use_pretrained, hooks=hooks
        )
        scratch = _make_scratch(
            [384, 512, 768], features, groups=groups, expand=expand
        )  # LeViT 384 (backbone)
    elif backbone == "vitl16_384":
        pretrained = _make_pretrained_vitl16_384(
            use_pretrained, hooks=hooks, use_readout=use_readout
        )
        scratch = _make_scratch(
            [256, 512, 1024, 1024], features, groups=groups, expand=expand
        )  # ViT-L/16 - 85.0% Top1 (backbone)
    elif backbone == "vitb_rn50_384":
        pretrained = _make_pretrained_vitb_rn50_384(
            use_pretrained,
            hooks=hooks,
            use_vit_only=use_vit_only,
            use_readout=use_readout,
        )
        scratch = _make_scratch(
            [256, 512, 768, 768], features, groups=groups, expand=expand
        )  # ViT-B/16 + ResNet-50 hybrid (backbone)
    elif backbone == "vitb16_384":
        pretrained = _make_pretrained_vitb16_384(
            use_pretrained, hooks=hooks, use_readout=use_readout
        )
        scratch = _make_scratch(
            [96, 192, 384, 768], features, groups=groups, expand=expand
        )  # ViT-B/16 - 84.6% Top1 (backbone)
    elif backbone == "resnext101_wsl":
        pretrained = _make_pretrained_resnext101_wsl(use_pretrained)
        scratch = _make_scratch([256, 512, 1024, 2048], features, groups=groups, expand=expand)  # resnext101_wsl
    elif backbone == "efficientnet_lite3":
        pretrained = _make_pretrained_efficientnet_lite3(use_pretrained, exportable=exportable)
        scratch = _make_scratch([32, 48, 136, 384], features, groups=groups, expand=expand)  # efficientnet_lite3
    else:
        raise NotImplementedError(f"Backbone '{backbone}' not implemented")

    return pretrained, scratch


def _make_scratch(in_shape, out_shape, groups=1, expand=False):
    scratch = nn.Module()

    out_shape1 = out_shape
    out_shape2 = out_shape
    out_shape3 = out_shape
    if len(in_shape) >= 4:
        out_shape4 = out_shape

    if expand:
        out_shape1 = out_shape
        out_shape2 = out_shape * 2
        out_shape3 = out_shape * 4
        if len(in_shape) >= 4:
            out_shape4 = out_shape * 8

    scratch.layer1_rn = nn.Conv2d(
        in_shape[0], out_shape1, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
    )
    scratch.layer2_rn = nn.Conv2d(
        in_shape[1], out_shape2, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
    )
    scratch.layer3_rn = nn.Conv2d(
        in_shape[2], out_shape3, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
    )
    if len(in_shape) >= 4:
        scratch.layer4_rn = nn.Conv2d(
            in_shape[3], out_shape4, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
        )

    return scratch


def _make_pretrained_efficientnet_lite3(use_pretrained, exportable=False):
    efficientnet = torch.hub.load(
        "rwightman/gen-efficientnet-pytorch",
        "tf_efficientnet_lite3",
        pretrained=use_pretrained,
        exportable=exportable
    )
    return _make_efficientnet_backbone(efficientnet)


def _make_efficientnet_backbone(effnet):
    pretrained = nn.Module()

    pretrained.layer1 = nn.Sequential(
        effnet.conv_stem, effnet.bn1, effnet.act1, *effnet.blocks[0:2]
    )
    pretrained.layer2 = nn.Sequential(*effnet.blocks[2:3])
    pretrained.layer3 = nn.Sequential(*effnet.blocks[3:5])
    pretrained.layer4 = nn.Sequential(*effnet.blocks[5:9])

    return pretrained


def _make_resnet_backbone(resnet):
    pretrained = nn.Module()
    pretrained.layer1 = nn.Sequential(
        resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1
    )

    pretrained.layer2 = resnet.layer2
    pretrained.layer3 = resnet.layer3
    pretrained.layer4 = resnet.layer4

    return pretrained


def _make_pretrained_resnext101_wsl(use_pretrained):
    resnet = torch.hub.load("facebookresearch/WSL-Images", "resnext101_32x8d_wsl")
    return _make_resnet_backbone(resnet)


class Interpolate(nn.Module):
    """Interpolation module.
    """

    def __init__(self, scale_factor, mode, align_corners=False):
        """Init.

        Args:
            scale_factor (float): scaling
            mode (str): interpolation mode
        """
        super(Interpolate, self).__init__()

        self.interp = nn.functional.interpolate
        self.scale_factor = scale_factor
        self.mode = mode
        self.align_corners = align_corners

    def forward(self, x):
        """Forward pass.

        Args:
            x (tensor): input

        Returns:
            tensor: interpolated data
        """

        x = self.interp(
            x, scale_factor=self.scale_factor, mode=self.mode, align_corners=self.align_corners
        )

        return x


class ResidualConvUnit(nn.Module):
    """Residual convolution module.
    """

    def __init__(self, features):
        """Init.

        Args:
            features (int): number of features
        """
        super().__init__()

        self.conv1 = nn.Conv2d(
            features, features, kernel_size=3, stride=1, padding=1, bias=True
        )

        self.conv2 = nn.Conv2d(
            features, features, kernel_size=3, stride=1, padding=1, bias=True
        )

        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        """Forward pass.

        Args:
            x (tensor): input

        Returns:
            tensor: output
        """
        out = self.relu(x)
        out = self.conv1(out)
        out = self.relu(out)
        out = self.conv2(out)

        return out + x


class FeatureFusionBlock(nn.Module):
    """Feature fusion block.
    """

    def __init__(self, features):
        """Init.

        Args:
            features (int): number of features
        """
        super(FeatureFusionBlock, self).__init__()

        self.resConfUnit1 = ResidualConvUnit(features)
        self.resConfUnit2 = ResidualConvUnit(features)

    def forward(self, *xs):
        """Forward pass.

        Returns:
            tensor: output
        """
        output = xs[0]

        if len(xs) == 2:
            output += self.resConfUnit1(xs[1])

        output = self.resConfUnit2(output)

        output = nn.functional.interpolate(
            output, scale_factor=2, mode="bilinear", align_corners=True
        )

        return output


class ResidualConvUnit_custom(nn.Module):
    """Residual convolution module.
    """

    def __init__(self, features, activation, bn):
        """Init.

        Args:
            features (int): number of features
        """
        super().__init__()

        self.bn = bn

        self.groups = 1

        self.conv1 = nn.Conv2d(
            features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
        )

        self.conv2 = nn.Conv2d(
            features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
        )

        if self.bn:
            self.bn1 = nn.BatchNorm2d(features)
            self.bn2 = nn.BatchNorm2d(features)

        self.activation = activation

        self.skip_add = nn.quantized.FloatFunctional()

    def forward(self, x):
        """Forward pass.

        Args:
            x (tensor): input

        Returns:
            tensor: output
        """

        out = self.activation(x)
        out = self.conv1(out)
        if self.bn:
            out = self.bn1(out)

        out = self.activation(out)
        out = self.conv2(out)
        if self.bn:
            out = self.bn2(out)

        if self.groups > 1:
            # Note: self.groups is fixed to 1 above, so this merge branch is never taken.
            out = self.conv_merge(out)

        return self.skip_add.add(out, x)


class FeatureFusionBlock_custom(nn.Module):
    """Feature fusion block.
    """

    def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True, size=None):
        """Init.

        Args:
            features (int): number of features
        """
        super(FeatureFusionBlock_custom, self).__init__()

        self.deconv = deconv
        self.align_corners = align_corners

        self.groups = 1

        self.expand = expand
        out_features = features
        if self.expand:
            out_features = features // 2

        self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1)

        self.resConfUnit1 = ResidualConvUnit_custom(features, activation, bn)
        self.resConfUnit2 = ResidualConvUnit_custom(features, activation, bn)

        self.skip_add = nn.quantized.FloatFunctional()

        self.size = size

    def forward(self, *xs, size=None):
        """Forward pass.

        Returns:
            tensor: output
        """
        output = xs[0]

        if len(xs) == 2:
            res = self.resConfUnit1(xs[1])
            output = self.skip_add.add(output, res)

        output = self.resConfUnit2(output)

        # Upsample by 2 unless an explicit output size was given (here or at init).
        if (size is None) and (self.size is None):
            modifier = {"scale_factor": 2}
        elif size is None:
            modifier = {"size": self.size}
        else:
            modifier = {"size": size}

        output = nn.functional.interpolate(
            output, **modifier, mode="bilinear", align_corners=self.align_corners
        )

        output = self.out_conv(output)

        return output
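How a fusion block is used by the decoders below: the first argument is the coarser decoder path, the optional second argument is the skip connection from the encoder, and the result is upsampled by 2 (or to an explicit size). A minimal sketch with random tensors:

import torch
import torch.nn as nn

block = FeatureFusionBlock_custom(64, nn.ReLU(False), bn=False)
coarse = torch.randn(1, 64, 12, 12)  # decoder path
skip = torch.randn(1, 64, 12, 12)    # encoder skip connection
out = block(coarse, skip)
print(out.shape)  # torch.Size([1, 64, 24, 24])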
midas/dpt_depth.py (Normal file, 166 lines)
@@ -0,0 +1,166 @@
import torch
import torch.nn as nn

from .base_model import BaseModel
from .blocks import (
    FeatureFusionBlock_custom,
    Interpolate,
    _make_encoder,
    forward_beit,
    forward_swin,
    forward_levit,
    forward_vit,
)
from .backbones.levit import stem_b4_transpose
from timm.models.layers import get_act_layer


def _make_fusion_block(features, use_bn, size=None):
    return FeatureFusionBlock_custom(
        features,
        nn.ReLU(False),
        deconv=False,
        bn=use_bn,
        expand=False,
        align_corners=True,
        size=size,
    )


class DPT(BaseModel):
    def __init__(
            self,
            head,
            features=256,
            backbone="vitb_rn50_384",
            readout="project",
            channels_last=False,
            use_bn=False,
            **kwargs
    ):

        super(DPT, self).__init__()

        self.channels_last = channels_last

        # For the Swin, Swin 2, LeViT and Next-ViT transformers, the hierarchical architectures
        # prevent setting the hooks freely. Instead, the hooks have to be chosen according to the
        # ranges specified in the comments.
        hooks = {
            "beitl16_512": [5, 11, 17, 23],
            "beitl16_384": [5, 11, 17, 23],
            "beitb16_384": [2, 5, 8, 11],
            "swin2l24_384": [1, 1, 17, 1],  # Allowed ranges: [0, 1], [0, 1], [0, 17], [0, 1]
            "swin2b24_384": [1, 1, 17, 1],  # [0, 1], [0, 1], [0, 17], [0, 1]
            "swin2t16_256": [1, 1, 5, 1],   # [0, 1], [0, 1], [0, 5],  [0, 1]
            "swinl12_384": [1, 1, 17, 1],   # [0, 1], [0, 1], [0, 17], [0, 1]
            "next_vit_large_6m": [2, 6, 36, 39],  # [0, 2], [3, 6], [7, 36], [37, 39]
            "levit_384": [3, 11, 21],       # [0, 3], [6, 11], [14, 21]
            "vitb_rn50_384": [0, 1, 8, 11],
            "vitb16_384": [2, 5, 8, 11],
            "vitl16_384": [5, 11, 17, 23],
        }[backbone]

        if "next_vit" in backbone:
            in_features = {
                "next_vit_large_6m": [96, 256, 512, 1024],
            }[backbone]
        else:
            in_features = None

        # Instantiate backbone and reassemble blocks
        self.pretrained, self.scratch = _make_encoder(
            backbone,
            features,
            False,  # Set to true if you want to train from scratch, uses ImageNet weights
            groups=1,
            expand=False,
            exportable=False,
            hooks=hooks,
            use_readout=readout,
            in_features=in_features,
        )

        self.number_layers = len(hooks) if hooks is not None else 4
        size_refinenet3 = None
        self.scratch.stem_transpose = None

        if "beit" in backbone:
            self.forward_transformer = forward_beit
        elif "swin" in backbone:
            self.forward_transformer = forward_swin
        elif "next_vit" in backbone:
            from .backbones.next_vit import forward_next_vit
            self.forward_transformer = forward_next_vit
        elif "levit" in backbone:
            self.forward_transformer = forward_levit
            size_refinenet3 = 7
            self.scratch.stem_transpose = stem_b4_transpose(256, 128, get_act_layer("hard_swish"))
        else:
            self.forward_transformer = forward_vit

        self.scratch.refinenet1 = _make_fusion_block(features, use_bn)
        self.scratch.refinenet2 = _make_fusion_block(features, use_bn)
        self.scratch.refinenet3 = _make_fusion_block(features, use_bn, size_refinenet3)
        if self.number_layers >= 4:
            self.scratch.refinenet4 = _make_fusion_block(features, use_bn)

        self.scratch.output_conv = head

    def forward(self, x):
        if self.channels_last:
            # contiguous() is not in-place; the result has to be assigned.
            x = x.contiguous(memory_format=torch.channels_last)

        layers = self.forward_transformer(self.pretrained, x)
        if self.number_layers == 3:
            layer_1, layer_2, layer_3 = layers
        else:
            layer_1, layer_2, layer_3, layer_4 = layers

        layer_1_rn = self.scratch.layer1_rn(layer_1)
        layer_2_rn = self.scratch.layer2_rn(layer_2)
        layer_3_rn = self.scratch.layer3_rn(layer_3)
        if self.number_layers >= 4:
            layer_4_rn = self.scratch.layer4_rn(layer_4)

        if self.number_layers == 3:
            path_3 = self.scratch.refinenet3(layer_3_rn, size=layer_2_rn.shape[2:])
        else:
            path_4 = self.scratch.refinenet4(layer_4_rn, size=layer_3_rn.shape[2:])
            path_3 = self.scratch.refinenet3(path_4, layer_3_rn, size=layer_2_rn.shape[2:])
        path_2 = self.scratch.refinenet2(path_3, layer_2_rn, size=layer_1_rn.shape[2:])
        path_1 = self.scratch.refinenet1(path_2, layer_1_rn)

        if self.scratch.stem_transpose is not None:
            path_1 = self.scratch.stem_transpose(path_1)

        out = self.scratch.output_conv(path_1)

        return out


class DPTDepthModel(DPT):
    def __init__(self, path=None, non_negative=True, **kwargs):
        features = kwargs["features"] if "features" in kwargs else 256
        head_features_1 = kwargs["head_features_1"] if "head_features_1" in kwargs else features
        head_features_2 = kwargs["head_features_2"] if "head_features_2" in kwargs else 32
        kwargs.pop("head_features_1", None)
        kwargs.pop("head_features_2", None)

        head = nn.Sequential(
            nn.Conv2d(head_features_1, head_features_1 // 2, kernel_size=3, stride=1, padding=1),
            Interpolate(scale_factor=2, mode="bilinear", align_corners=True),
            nn.Conv2d(head_features_1 // 2, head_features_2, kernel_size=3, stride=1, padding=1),
            nn.ReLU(True),
            nn.Conv2d(head_features_2, 1, kernel_size=1, stride=1, padding=0),
            nn.ReLU(True) if non_negative else nn.Identity(),
            nn.Identity(),
        )

        super().__init__(head, **kwargs)

        if path is not None:
            self.load(path)

    def forward(self, x):
        return super().forward(x).squeeze(dim=1)
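A minimal usage sketch, assuming a compatible timm version is installed (the encoder is created with pretrained weights disabled, so this runs with random initialization and no checkpoint file):

import torch

model = DPTDepthModel(backbone="vitb16_384", non_negative=True)  # randomly initialized
model.eval()
with torch.no_grad():
    depth = model(torch.randn(1, 3, 384, 384))
print(depth.shape)  # torch.Size([1, 384, 384]) -- a depth map at input resolution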
midas/midas_net.py (Normal file, 76 lines)
@@ -0,0 +1,76 @@
"""MidasNet: Network for monocular depth estimation trained by mixing several datasets.
This file contains code that is adapted from
https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py
"""
import torch
import torch.nn as nn

from .base_model import BaseModel
from .blocks import FeatureFusionBlock, Interpolate, _make_encoder


class MidasNet(BaseModel):
    """Network for monocular depth estimation.
    """

    def __init__(self, path=None, features=256, non_negative=True):
        """Init.

        Args:
            path (str, optional): Path to saved model. Defaults to None.
            features (int, optional): Number of features. Defaults to 256.
        """
        print("Loading weights: ", path)

        super(MidasNet, self).__init__()

        use_pretrained = False if path is None else True

        self.pretrained, self.scratch = _make_encoder(backbone="resnext101_wsl", features=features,
                                                      use_pretrained=use_pretrained)

        self.scratch.refinenet4 = FeatureFusionBlock(features)
        self.scratch.refinenet3 = FeatureFusionBlock(features)
        self.scratch.refinenet2 = FeatureFusionBlock(features)
        self.scratch.refinenet1 = FeatureFusionBlock(features)

        self.scratch.output_conv = nn.Sequential(
            nn.Conv2d(features, 128, kernel_size=3, stride=1, padding=1),
            Interpolate(scale_factor=2, mode="bilinear"),
            nn.Conv2d(128, 32, kernel_size=3, stride=1, padding=1),
            nn.ReLU(True),
            nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
            nn.ReLU(True) if non_negative else nn.Identity(),
        )

        if path:
            self.load(path)

    def forward(self, x):
        """Forward pass.

        Args:
            x (tensor): input data (image)

        Returns:
            tensor: depth
        """

        layer_1 = self.pretrained.layer1(x)
        layer_2 = self.pretrained.layer2(layer_1)
        layer_3 = self.pretrained.layer3(layer_2)
        layer_4 = self.pretrained.layer4(layer_3)

        layer_1_rn = self.scratch.layer1_rn(layer_1)
        layer_2_rn = self.scratch.layer2_rn(layer_2)
        layer_3_rn = self.scratch.layer3_rn(layer_3)
        layer_4_rn = self.scratch.layer4_rn(layer_4)

        path_4 = self.scratch.refinenet4(layer_4_rn)
        path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
        path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
        path_1 = self.scratch.refinenet1(path_2, layer_1_rn)

        out = self.scratch.output_conv(path_1)

        return torch.squeeze(out, dim=1)
128
midas/midas_net_custom.py
Normal file
128
midas/midas_net_custom.py
Normal file
@@ -0,0 +1,128 @@
|
||||
"""MidashNet: Network for monocular depth estimation trained by mixing several datasets.
|
||||
This file contains code that is adapted from
|
||||
https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py
|
||||
"""
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
|
||||
from .base_model import BaseModel
|
||||
from .blocks import FeatureFusionBlock, FeatureFusionBlock_custom, Interpolate, _make_encoder
|
||||
|
||||
|
||||
class MidasNet_small(BaseModel):
|
||||
"""Network for monocular depth estimation.
|
||||
"""
|
||||
|
||||
def __init__(self, path=None, features=64, backbone="efficientnet_lite3", non_negative=True, exportable=True, channels_last=False, align_corners=True,
|
||||
blocks={'expand': True}):
|
||||
"""Init.
|
||||
|
||||
Args:
|
||||
path (str, optional): Path to saved model. Defaults to None.
|
||||
features (int, optional): Number of features. Defaults to 256.
|
||||
backbone (str, optional): Backbone network for encoder. Defaults to resnet50
|
||||
"""
|
||||
print("Loading weights: ", path)
|
||||
|
||||
super(MidasNet_small, self).__init__()
|
||||
|
||||
use_pretrained = False if path else True
|
||||
|
||||
self.channels_last = channels_last
|
||||
self.blocks = blocks
|
||||
self.backbone = backbone
|
||||
|
||||
self.groups = 1
|
||||
|
||||
features1=features
|
||||
features2=features
|
||||
features3=features
|
||||
features4=features
|
||||
self.expand = False
|
||||
if "expand" in self.blocks and self.blocks['expand'] == True:
|
||||
self.expand = True
|
||||
features1=features
|
||||
features2=features*2
|
||||
features3=features*4
|
||||
features4=features*8
|
||||
|
||||
self.pretrained, self.scratch = _make_encoder(self.backbone, features, use_pretrained, groups=self.groups, expand=self.expand, exportable=exportable)
|
||||
|
||||
self.scratch.activation = nn.ReLU(False)
|
||||
|
||||
self.scratch.refinenet4 = FeatureFusionBlock_custom(features4, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
|
||||
self.scratch.refinenet3 = FeatureFusionBlock_custom(features3, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
|
||||
self.scratch.refinenet2 = FeatureFusionBlock_custom(features2, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
|
||||
self.scratch.refinenet1 = FeatureFusionBlock_custom(features1, self.scratch.activation, deconv=False, bn=False, align_corners=align_corners)
|
||||
|
||||
|
||||
self.scratch.output_conv = nn.Sequential(
|
||||
nn.Conv2d(features, features//2, kernel_size=3, stride=1, padding=1, groups=self.groups),
|
||||
Interpolate(scale_factor=2, mode="bilinear"),
|
||||
nn.Conv2d(features//2, 32, kernel_size=3, stride=1, padding=1),
|
||||
self.scratch.activation,
|
||||
nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
|
||||
nn.ReLU(True) if non_negative else nn.Identity(),
|
||||
nn.Identity(),
|
||||
)
|
||||
|
||||
if path:
|
||||
self.load(path)
|
||||
|
||||
|
||||
def forward(self, x):
|
||||
"""Forward pass.
|
||||
|
||||
Args:
|
||||
x (tensor): input data (image)
|
||||
|
||||
Returns:
|
||||
tensor: depth
|
||||
"""
|
||||
if self.channels_last==True:
|
||||
print("self.channels_last = ", self.channels_last)
|
||||
x.contiguous(memory_format=torch.channels_last)
|
||||
|
||||
|
||||
layer_1 = self.pretrained.layer1(x)
|
||||
layer_2 = self.pretrained.layer2(layer_1)
|
||||
layer_3 = self.pretrained.layer3(layer_2)
|
||||
layer_4 = self.pretrained.layer4(layer_3)
|
||||
|
||||
layer_1_rn = self.scratch.layer1_rn(layer_1)
|
||||
layer_2_rn = self.scratch.layer2_rn(layer_2)
|
||||
layer_3_rn = self.scratch.layer3_rn(layer_3)
|
||||
layer_4_rn = self.scratch.layer4_rn(layer_4)
|
||||
|
||||
|
||||
path_4 = self.scratch.refinenet4(layer_4_rn)
|
||||
path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
|
||||
path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
|
||||
path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
|
||||
|
||||
out = self.scratch.output_conv(path_1)
|
||||
|
||||
return torch.squeeze(out, dim=1)
|
||||
|
||||
|
||||
|
||||
def fuse_model(m):
|
||||
prev_previous_type = nn.Identity()
|
||||
prev_previous_name = ''
|
||||
previous_type = nn.Identity()
|
||||
previous_name = ''
|
||||
for name, module in m.named_modules():
|
||||
if prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d and type(module) == nn.ReLU:
|
||||
# print("FUSED ", prev_previous_name, previous_name, name)
|
||||
torch.quantization.fuse_modules(m, [prev_previous_name, previous_name, name], inplace=True)
|
||||
elif prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d:
|
||||
# print("FUSED ", prev_previous_name, previous_name)
|
||||
torch.quantization.fuse_modules(m, [prev_previous_name, previous_name], inplace=True)
|
||||
# elif previous_type == nn.Conv2d and type(module) == nn.ReLU:
|
||||
# print("FUSED ", previous_name, name)
|
||||
# torch.quantization.fuse_modules(m, [previous_name, name], inplace=True)
|
||||
|
||||
prev_previous_type = previous_type
|
||||
prev_previous_name = previous_name
|
||||
previous_type = type(module)
|
||||
previous_name = name
|
||||
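fuse_model keeps a two-module history while walking named_modules() so it can fold Conv2d -> BatchNorm2d (-> ReLU) runs into single fused modules with torch.quantization.fuse_modules, the usual preparation step before post-training quantization. A minimal, illustrative sketch of applying it (assumptions: instantiating MidasNet_small without a path downloads pretrained encoder weights via timm, and fusion only fires where such Conv/BN/ReLU runs actually occur in module order):

import torch

from midas.midas_net_custom import MidasNet_small, fuse_model

model = MidasNet_small(features=64, backbone="efficientnet_lite3")
model.eval()  # Conv+BN folding is only valid in eval mode

fuse_model(model)  # fuses matching Conv2d/BatchNorm2d(/ReLU) runs in-place

with torch.no_grad():
    depth = model(torch.rand(1, 3, 256, 256))  # expected shape: (1, 256, 256)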
242
midas/model_loader.py
Normal file
@@ -0,0 +1,242 @@
import cv2
import torch

from midas.dpt_depth import DPTDepthModel
from midas.midas_net import MidasNet
from midas.midas_net_custom import MidasNet_small
from midas.transforms import Resize, NormalizeImage, PrepareForNet

from torchvision.transforms import Compose

default_models = {
    "dpt_beit_large_512": "weights/dpt_beit_large_512.pt",
    "dpt_beit_large_384": "weights/dpt_beit_large_384.pt",
    "dpt_beit_base_384": "weights/dpt_beit_base_384.pt",
    "dpt_swin2_large_384": "weights/dpt_swin2_large_384.pt",
    "dpt_swin2_base_384": "weights/dpt_swin2_base_384.pt",
    "dpt_swin2_tiny_256": "weights/dpt_swin2_tiny_256.pt",
    "dpt_swin_large_384": "weights/dpt_swin_large_384.pt",
    "dpt_next_vit_large_384": "weights/dpt_next_vit_large_384.pt",
    "dpt_levit_224": "weights/dpt_levit_224.pt",
    "dpt_large_384": "weights/dpt_large_384.pt",
    "dpt_hybrid_384": "weights/dpt_hybrid_384.pt",
    "midas_v21_384": "weights/midas_v21_384.pt",
    "midas_v21_small_256": "weights/midas_v21_small_256.pt",
    "openvino_midas_v21_small_256": "weights/openvino_midas_v21_small_256.xml",
}


def load_model(device, model_path, model_type="dpt_large_384", optimize=True, height=None, square=False):
    """Load the specified network.

    Args:
        device (device): the torch device used
        model_path (str): path to saved model
        model_type (str): the type of the model to be loaded
        optimize (bool): optimize the model to half-floats on CUDA?
        height (int): inference encoder image height
        square (bool): resize to a square resolution?

    Returns:
        The loaded network, the transform which prepares images as input to the network and the dimensions of the
        network input
    """
    if "openvino" in model_type:
        from openvino.runtime import Core

    keep_aspect_ratio = not square

    if model_type == "dpt_beit_large_512":
        model = DPTDepthModel(
            path=model_path,
            backbone="beitl16_512",
            non_negative=True,
        )
        net_w, net_h = 512, 512
        resize_mode = "minimal"
        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    elif model_type == "dpt_beit_large_384":
        model = DPTDepthModel(
            path=model_path,
            backbone="beitl16_384",
            non_negative=True,
        )
        net_w, net_h = 384, 384
        resize_mode = "minimal"
        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    elif model_type == "dpt_beit_base_384":
        model = DPTDepthModel(
            path=model_path,
            backbone="beitb16_384",
            non_negative=True,
        )
        net_w, net_h = 384, 384
        resize_mode = "minimal"
        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    elif model_type == "dpt_swin2_large_384":
        model = DPTDepthModel(
            path=model_path,
            backbone="swin2l24_384",
            non_negative=True,
        )
        net_w, net_h = 384, 384
        keep_aspect_ratio = False
        resize_mode = "minimal"
        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    elif model_type == "dpt_swin2_base_384":
        model = DPTDepthModel(
            path=model_path,
            backbone="swin2b24_384",
            non_negative=True,
        )
        net_w, net_h = 384, 384
        keep_aspect_ratio = False
        resize_mode = "minimal"
        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    elif model_type == "dpt_swin2_tiny_256":
        model = DPTDepthModel(
            path=model_path,
            backbone="swin2t16_256",
            non_negative=True,
        )
        net_w, net_h = 256, 256
        keep_aspect_ratio = False
        resize_mode = "minimal"
        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    elif model_type == "dpt_swin_large_384":
        model = DPTDepthModel(
            path=model_path,
            backbone="swinl12_384",
            non_negative=True,
        )
        net_w, net_h = 384, 384
        keep_aspect_ratio = False
        resize_mode = "minimal"
        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    elif model_type == "dpt_next_vit_large_384":
        model = DPTDepthModel(
            path=model_path,
            backbone="next_vit_large_6m",
            non_negative=True,
        )
        net_w, net_h = 384, 384
        resize_mode = "minimal"
        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    # We change the notation from dpt_levit_224 (MiDaS notation) to levit_384 (timm notation) here, where the 224
    # refers to the resolution 224x224 used by LeViT and 384 is the first entry of the embed_dim, see _cfg and
    # model_cfgs of
    # https://github.com/rwightman/pytorch-image-models/blob/main/timm/models/levit.py
    # (commit id: 927f031293a30afb940fff0bee34b85d9c059b0e)
    elif model_type == "dpt_levit_224":
        model = DPTDepthModel(
            path=model_path,
            backbone="levit_384",
            non_negative=True,
            head_features_1=64,
            head_features_2=8,
        )
        net_w, net_h = 224, 224
        keep_aspect_ratio = False
        resize_mode = "minimal"
        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    elif model_type == "dpt_large_384":
        model = DPTDepthModel(
            path=model_path,
            backbone="vitl16_384",
            non_negative=True,
        )
        net_w, net_h = 384, 384
        resize_mode = "minimal"
        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    elif model_type == "dpt_hybrid_384":
        model = DPTDepthModel(
            path=model_path,
            backbone="vitb_rn50_384",
            non_negative=True,
        )
        net_w, net_h = 384, 384
        resize_mode = "minimal"
        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    elif model_type == "midas_v21_384":
        model = MidasNet(model_path, non_negative=True)
        net_w, net_h = 384, 384
        resize_mode = "upper_bound"
        normalization = NormalizeImage(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
        )

    elif model_type == "midas_v21_small_256":
        model = MidasNet_small(model_path, features=64, backbone="efficientnet_lite3", exportable=True,
                               non_negative=True, blocks={'expand': True})
        net_w, net_h = 256, 256
        resize_mode = "upper_bound"
        normalization = NormalizeImage(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
        )

    elif model_type == "openvino_midas_v21_small_256":
        ie = Core()
        uncompiled_model = ie.read_model(model=model_path)
        model = ie.compile_model(uncompiled_model, "CPU")
        net_w, net_h = 256, 256
        resize_mode = "upper_bound"
        normalization = NormalizeImage(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
        )

    else:
        print(f"model_type '{model_type}' not implemented, use: --model_type large")
        assert False

    if not "openvino" in model_type:
        print("Model loaded, number of parameters = {:.0f}M".format(sum(p.numel() for p in model.parameters()) / 1e6))
    else:
        print("Model loaded, optimized with OpenVINO")

    if "openvino" in model_type:
        keep_aspect_ratio = False

    if height is not None:
        net_w, net_h = height, height

    transform = Compose(
        [
            Resize(
                net_w,
                net_h,
                resize_target=None,
                keep_aspect_ratio=keep_aspect_ratio,
                ensure_multiple_of=32,
                resize_method=resize_mode,
                image_interpolation_method=cv2.INTER_CUBIC,
            ),
            normalization,
            PrepareForNet(),
        ]
    )

    if not "openvino" in model_type:
        model.eval()

    if optimize and (device == torch.device("cuda")):
        if not "openvino" in model_type:
            model = model.to(memory_format=torch.channels_last)
            model = model.half()
        else:
            print("Error: OpenVINO models are already optimized. No optimization to half-float possible.")
            exit()

    if not "openvino" in model_type:
        model.to(device)

    return model, transform, net_w, net_h
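A minimal end-to-end sketch of driving load_model, following the transform contract above (the input filename is hypothetical; images must be RGB floats in [0, 1] before the transform, and optimize=False keeps the model in full precision):

import cv2
import torch

from midas.model_loader import load_model, default_models

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_type = "dpt_large_384"
model, transform, net_w, net_h = load_model(device, default_models[model_type],
                                            model_type, optimize=False)

# Hypothetical input file; convert BGR (OpenCV default) to RGB and scale to [0, 1].
img = cv2.cvtColor(cv2.imread("input.jpg"), cv2.COLOR_BGR2RGB) / 255.0

sample = torch.from_numpy(transform({"image": img})["image"]).to(device).unsqueeze(0)
with torch.no_grad():
    prediction = model.forward(sample)  # relative inverse depth, shape (1, H', W')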
234
midas/transforms.py
Normal file
@@ -0,0 +1,234 @@
import numpy as np
import cv2
import math


def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA):
    """Resize the sample to ensure the given size. Keeps aspect ratio.

    Args:
        sample (dict): sample
        size (tuple): image size

    Returns:
        tuple: new size (or the unchanged sample if it already meets the minimum size)
    """
    shape = list(sample["disparity"].shape)

    if shape[0] >= size[0] and shape[1] >= size[1]:
        return sample

    scale = [0, 0]
    scale[0] = size[0] / shape[0]
    scale[1] = size[1] / shape[1]

    scale = max(scale)

    shape[0] = math.ceil(scale * shape[0])
    shape[1] = math.ceil(scale * shape[1])

    # resize
    sample["image"] = cv2.resize(
        sample["image"], tuple(shape[::-1]), interpolation=image_interpolation_method
    )

    sample["disparity"] = cv2.resize(
        sample["disparity"], tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST
    )
    sample["mask"] = cv2.resize(
        sample["mask"].astype(np.float32),
        tuple(shape[::-1]),
        interpolation=cv2.INTER_NEAREST,
    )
    sample["mask"] = sample["mask"].astype(bool)

    return tuple(shape)


class Resize(object):
    """Resize sample to given size (width, height).
    """

    def __init__(
        self,
        width,
        height,
        resize_target=True,
        keep_aspect_ratio=False,
        ensure_multiple_of=1,
        resize_method="lower_bound",
        image_interpolation_method=cv2.INTER_AREA,
    ):
        """Init.

        Args:
            width (int): desired output width
            height (int): desired output height
            resize_target (bool, optional):
                True: Resize the full sample (image, mask, target).
                False: Resize image only.
                Defaults to True.
            keep_aspect_ratio (bool, optional):
                True: Keep the aspect ratio of the input sample.
                Output sample might not have the given width and height, and
                resize behaviour depends on the parameter 'resize_method'.
                Defaults to False.
            ensure_multiple_of (int, optional):
                Output width and height are constrained to be multiples of this parameter.
                Defaults to 1.
            resize_method (str, optional):
                "lower_bound": Output will be at least as large as the given size.
                "upper_bound": Output will be at most as large as the given size. (Output size might be
                    smaller than given size.)
                "minimal": Scale as little as possible. (Output size might be smaller than given size.)
                Defaults to "lower_bound".
        """
        self.__width = width
        self.__height = height

        self.__resize_target = resize_target
        self.__keep_aspect_ratio = keep_aspect_ratio
        self.__multiple_of = ensure_multiple_of
        self.__resize_method = resize_method
        self.__image_interpolation_method = image_interpolation_method

    def constrain_to_multiple_of(self, x, min_val=0, max_val=None):
        y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)

        if max_val is not None and y > max_val:
            y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int)

        if y < min_val:
            y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int)

        return y

    def get_size(self, width, height):
        # determine new height and width
        scale_height = self.__height / height
        scale_width = self.__width / width

        if self.__keep_aspect_ratio:
            if self.__resize_method == "lower_bound":
                # scale such that output size is lower bound
                if scale_width > scale_height:
                    # fit width
                    scale_height = scale_width
                else:
                    # fit height
                    scale_width = scale_height
            elif self.__resize_method == "upper_bound":
                # scale such that output size is upper bound
                if scale_width < scale_height:
                    # fit width
                    scale_height = scale_width
                else:
                    # fit height
                    scale_width = scale_height
            elif self.__resize_method == "minimal":
                # scale as little as possible
                if abs(1 - scale_width) < abs(1 - scale_height):
                    # fit width
                    scale_height = scale_width
                else:
                    # fit height
                    scale_width = scale_height
            else:
                raise ValueError(
                    f"resize_method {self.__resize_method} not implemented"
                )

        if self.__resize_method == "lower_bound":
            new_height = self.constrain_to_multiple_of(
                scale_height * height, min_val=self.__height
            )
            new_width = self.constrain_to_multiple_of(
                scale_width * width, min_val=self.__width
            )
        elif self.__resize_method == "upper_bound":
            new_height = self.constrain_to_multiple_of(
                scale_height * height, max_val=self.__height
            )
            new_width = self.constrain_to_multiple_of(
                scale_width * width, max_val=self.__width
            )
        elif self.__resize_method == "minimal":
            new_height = self.constrain_to_multiple_of(scale_height * height)
            new_width = self.constrain_to_multiple_of(scale_width * width)
        else:
            raise ValueError(f"resize_method {self.__resize_method} not implemented")

        return (new_width, new_height)

    def __call__(self, sample):
        width, height = self.get_size(
            sample["image"].shape[1], sample["image"].shape[0]
        )

        # resize sample
        sample["image"] = cv2.resize(
            sample["image"],
            (width, height),
            interpolation=self.__image_interpolation_method,
        )

        if self.__resize_target:
            if "disparity" in sample:
                sample["disparity"] = cv2.resize(
                    sample["disparity"],
                    (width, height),
                    interpolation=cv2.INTER_NEAREST,
                )

            if "depth" in sample:
                sample["depth"] = cv2.resize(
                    sample["depth"], (width, height), interpolation=cv2.INTER_NEAREST
                )

            sample["mask"] = cv2.resize(
                sample["mask"].astype(np.float32),
                (width, height),
                interpolation=cv2.INTER_NEAREST,
            )
            sample["mask"] = sample["mask"].astype(bool)

        return sample


class NormalizeImage(object):
    """Normalize image by given mean and std.
    """

    def __init__(self, mean, std):
        self.__mean = mean
        self.__std = std

    def __call__(self, sample):
        sample["image"] = (sample["image"] - self.__mean) / self.__std

        return sample


class PrepareForNet(object):
    """Prepare sample for usage as network input.
    """

    def __init__(self):
        pass

    def __call__(self, sample):
        image = np.transpose(sample["image"], (2, 0, 1))
        sample["image"] = np.ascontiguousarray(image).astype(np.float32)

        if "mask" in sample:
            sample["mask"] = sample["mask"].astype(np.float32)
            sample["mask"] = np.ascontiguousarray(sample["mask"])

        if "disparity" in sample:
            disparity = sample["disparity"].astype(np.float32)
            sample["disparity"] = np.ascontiguousarray(disparity)

        if "depth" in sample:
            depth = sample["depth"].astype(np.float32)
            sample["depth"] = np.ascontiguousarray(depth)

        return sample
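The three resize_method modes of Resize interact with keep_aspect_ratio and ensure_multiple_of in ways that are easiest to see on a concrete size. A small sketch (the printed sizes follow from get_size and constrain_to_multiple_of above, worked out by hand for a 640x480 input against a 384x384 target):

from midas.transforms import Resize

# A 640x480 (width x height) input against a 384x384 target, constrained to multiples of 32.
for method in ("lower_bound", "upper_bound", "minimal"):
    resize = Resize(384, 384, keep_aspect_ratio=True, ensure_multiple_of=32, resize_method=method)
    print(method, resize.get_size(640, 480))  # (new_width, new_height)

# lower_bound (512, 384)  -- both sides at least 384
# upper_bound (384, 288)  -- both sides at most 384
# minimal     (512, 384)  -- the rescale factor closest to 1 that hits one target side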