Rethinking Semantic Segmentation from a Sequence-to-Sequence Perspective with Transformers
The advent of ViT brought a pure Transformer architecture into image classification and achieved strong results on ImageNet. Because the Transformer is built on self-attention, every layer has a global receptive field and can model long-range dependencies directly. CNN backbones, by contrast, usually downsample the input to 1/8 or even 1/32 of its original resolution, which inevitably discards information, whereas a Transformer can extract features without such aggressive downsampling and therefore preserves more of the image's information.
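Recall the standard scaled dot-product self-attention: every output token is a weighted combination of all input tokens, so the receptive field is global from the very first layer,

$$
\mathrm{Attention}(Q, K, V) = \mathrm{softmax}\!\left(\frac{QK^{\top}}{\sqrt{d_k}}\right)V
$$

where $Q$, $K$ and $V$ are linear projections of the token sequence and $d_k$ is the per-head key dimension.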
In this paper the authors propose SETR, which adopts ViT as the encoder of an encoder-decoder segmentation architecture to extract image features, and uses a conventional convolutional decoder to produce the segmentation result.
The SETR encoder is a ViT. For an image of size $H \times W \times 3$, feeding every pixel into the Transformer as a token would be prohibitively expensive, so the image is first divided into a grid of $\frac{H}{16} \times \frac{W}{16}$ patches of size $16 \times 16$ each, which amounts to a 16× spatial downsampling. Each patch is flattened, linearly projected into an embedding, and combined with a position embedding to form the input sequence. For example, with a $256 \times 256$ input and an embedding dimension of 1024 (the setting assumed by the decoder code below), the ViT output reshapes to a $[1024, 16, 16]$ feature map, which then has to be upsampled back to the original resolution. The authors design two upsampling schemes (a worked shape example follows the list):
- Progressive UPsampling (PUP): alternate a convolution with a 2× upsampling, repeating the pair step by step until the full resolution is recovered (Fig. b).
- Multi-Level feature Aggregation (MLA): take the outputs of several intermediate Transformer layers, aggregate them, and then upsample by 4×.
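As a concrete shape walk-through (assuming a $256 \times 256$ input, patch size 16, and embedding dimension 1024, matching the code below): the image is split into $\frac{256}{16} \times \frac{256}{16} = 16 \times 16 = 256$ patches; each patch holds $16 \times 16 \times 3 = 768$ values and is linearly projected to a 1024-dimensional token, giving a sequence of shape $[256, 1024]$. After the Transformer, this sequence is reshaped into a $[1024, 16, 16]$ feature map, so the PUP head needs four 2× stages ($2^4 = 16$) to get back to $256 \times 256$; the MLA path likewise has to recover the full 16× factor in total.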
SETR's ViT is implemented as follows. It builds the token sequence with 16× downsampling (patch size 16) and returns the features of layers 10, 15, 20 and 24 (indices 9, 14, 19 and 23 when counting from 0):
import torch
from torch import nn
from einops import rearrange, repeat
from einops.layers.torch import Rearrange

class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(self.norm(x), **kwargs)
class FeedForward(nn.Module):
def __init__(self, dim, hidden_dim, dropout = 0.):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, hidden_dim),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(hidden_dim, dim),
nn.Dropout(dropout)
)
def forward(self, x):
return self.net(x)
class Attention(nn.Module):
def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.):
super().__init__()
inner_dim = dim_head * heads
project_out = not (heads == 1 and dim_head == dim)
self.heads = heads
self.scale = dim_head ** -0.5
self.attend = nn.Softmax(dim = -1)
self.dropout = nn.Dropout(dropout)
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim),
nn.Dropout(dropout)
) if project_out else nn.Identity()
def forward(self, x):
qkv = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv)
dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale
attn = self.attend(dots)
attn = self.dropout(attn)
out = torch.matmul(attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
class Transformer(nn.Module):
def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0., out_indices = (9, 14, 19, 23)):
super().__init__()
        self.out_indices = out_indices  # 0-indexed layers whose outputs will be returned
assert self.out_indices[-1] == depth - 1
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
PreNorm(dim, Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout)),
PreNorm(dim, FeedForward(dim, mlp_dim, dropout = dropout))
]))
def forward(self, x):
out = []
for index, (attn, ff) in enumerate(self.layers):
x = attn(x) + x
x = ff(x) + x
if index in self.out_indices:
out.append(x)
return out
class ViT(nn.Module):
def __init__(self, *, image_size, patch_size, dim, depth, heads, mlp_dim, channels = 3, dim_head = 64, dropout = 0., emb_dropout = 0., out_indices = (9, 14, 19, 23)):
super().__init__()
image_height, image_width = (image_size, image_size)
patch_height, patch_width = (patch_size, patch_size)
assert image_height % patch_height == 0 and image_width % patch_width == 0, 'Image dimensions must be divisible by the patch size.'
num_patches = (image_height // patch_height) * (image_width // patch_width)
patch_dim = channels * patch_height * patch_width
self.to_patch_embedding = nn.Sequential(
Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_height, p2 = patch_width),
nn.Linear(patch_dim, dim),
)
self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim))
self.cls_token = nn.Parameter(torch.randn(1, 1, dim))
self.dropout = nn.Dropout(emb_dropout)
self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout, out_indices=out_indices)
        self.out = Rearrange("b (h w) c -> b c h w", h=image_height // patch_height, w=image_width // patch_width)  # tokens -> 2D feature map
def forward(self, img):
x = self.to_patch_embedding(img)
b, n, _ = x.shape
cls_tokens = repeat(self.cls_token, '1 1 d -> b 1 d', b = b)
x = torch.cat((cls_tokens, x), dim=1)
x += self.pos_embedding[:, :(n + 1)]
x = self.dropout(x)
out = self.transformer(x)
for index, transformer_out in enumerate(out):
# delete cls_tokens and transform output to [b, c, h, w]
out[index] = self.out(transformer_out[:,1:,:])
return out
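A quick shape check of the encoder alone (a minimal sketch; the hyperparameters are illustrative, roughly ViT-Large):

vit = ViT(image_size=256, patch_size=16, dim=1024, depth=24, heads=16,
          mlp_dim=4096, out_indices=(9, 14, 19, 23))
feats = vit(torch.randn(1, 3, 256, 256))
# feats is a list of four tensors, one per selected layer, each of shape [1, 1024, 16, 16]
print([f.shape for f in feats])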
The SETR decoder is implemented as follows, using progressive upsampling (PUP) to produce the segmentation result:
class PUPHead(nn.Module):
def __init__(self, num_classes):
super(PUPHead, self).__init__()
self.UP_stage_1 = nn.Sequential(
nn.Conv2d(1024, 256, 3, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True)
)
self.UP_stage_2 = nn.Sequential(
nn.Conv2d(256, 256, 3, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True)
)
        self.UP_stage_3 = nn.Sequential(
nn.Conv2d(256, 256, 3, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True)
)
        self.UP_stage_4 = nn.Sequential(
nn.Conv2d(256, 256, 3, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True)
)
self.cls_seg = nn.Conv2d(256, num_classes, 3, padding=1)
def forward(self, x):
x = self.UP_stage_1(x)
x = self.UP_stage_2(x)
x = self.UP_stage_3(x)
x = self.UP_stage_4(x)
x = self.cls_seg(x)
return x
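# For comparison with the PUP head above, the MLA decoder described earlier can be
# sketched roughly as follows. This is only a simplified illustration of the idea
# (the paper's MLA head also uses top-down fusion between the streams); MLAHead and
# mid_dim are names chosen here for illustration, not taken from the original code.
class MLAHead(nn.Module):
    def __init__(self, num_classes, in_dim=1024, mid_dim=256, num_streams=4):
        super().__init__()
        # one conv + 4x bilinear upsampling branch per selected transformer layer
        self.streams = nn.ModuleList([
            nn.Sequential(
                nn.Conv2d(in_dim, mid_dim, 3, padding=1),
                nn.BatchNorm2d(mid_dim),
                nn.ReLU(),
                nn.Upsample(scale_factor=4, mode="bilinear", align_corners=True)
            )
            for _ in range(num_streams)
        ])
        # concatenate the streams, predict the classes, and upsample the remaining 4x
        self.cls_seg = nn.Sequential(
            nn.Conv2d(mid_dim * num_streams, num_classes, 3, padding=1),
            nn.Upsample(scale_factor=4, mode="bilinear", align_corners=True)
        )
    def forward(self, feats):
        # feats: list of [b, in_dim, h, w] feature maps from different layers
        feats = [stream(f) for stream, f in zip(self.streams, feats)]
        return self.cls_seg(torch.cat(feats, dim=1))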
class SETR(nn.Module):
def __init__(self, num_classes, image_size, patch_size, dim, depth, heads, mlp_dim, channels = 3, dim_head = 64, dropout = 0., emb_dropout = 0., out_indices = (9, 14, 19, 23)):
super(SETR, self).__init__()
self.out_indices = out_indices
self.num_classes = num_classes
self.VIT = ViT( image_size=image_size, patch_size=patch_size, dim=dim, depth=depth, heads=heads, mlp_dim=mlp_dim,
channels = channels, dim_head = dim_head, dropout = dropout, emb_dropout = emb_dropout, out_indices = out_indices)
self.Head = nn.ModuleDict()
for index, indices in enumerate(self.out_indices):
self.Head["Head"+str(indices)] = PUPHead(num_classes)
def forward(self, x):
VIT_OUT = self.VIT(x)
out = []
for index, indices in enumerate(self.out_indices):
            # the last element is the output of the final transformer layer
out.append(self.Head["Head"+str(indices)](VIT_OUT[index]))
return out
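A minimal usage sketch (the hyperparameters are illustrative, roughly ViT-Large on a $256 \times 256$ input; num_classes=21 is just an example):

model = SETR(num_classes=21, image_size=256, patch_size=16, dim=1024, depth=24,
             heads=16, mlp_dim=4096, out_indices=(9, 14, 19, 23))
outs = model(torch.randn(1, 3, 256, 256))
# one prediction per selected layer, each of shape [1, 21, 256, 256]; the last comes
# from the final transformer layer, and the earlier ones can serve as auxiliary outputs
for o in outs:
    print(o.shape)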