xpixelgroup/hat:ad47b01e
Input
Run this model in Node.js with one line of code.
First, install Replicate’s Node.js client library:
npm install replicate
Then set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import Replicate from "replicate";
const replicate = new Replicate({
auth: process.env.REPLICATE_API_TOKEN,
});
Run xpixelgroup/hat using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
"xpixelgroup/hat:ad47b01e4923c19fed424451d925d7e95b743be1df19e9b3b0dbb9ed8685ed6b",
{
input: {
image: "https://replicate.delivery/mgxm/0722e88d-4d62-42c9-b775-9007aade21ba/butterflyx4.png"
}
}
);
console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library:
pip install replicate
Then set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import replicate
Run xpixelgroup/hat using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
"xpixelgroup/hat:ad47b01e4923c19fed424451d925d7e95b743be1df19e9b3b0dbb9ed8685ed6b",
input={
"image": "https://replicate.delivery/mgxm/0722e88d-4d62-42c9-b775-9007aade21ba/butterflyx4.png"
}
)
print(output)
To learn more, take a look at the guide on getting started with Python.
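The output for this model version is a URL to the upscaled image (see the example prediction under Output below). Here is a minimal sketch for saving that result to disk from Python, assuming the output is a single URL string; the filename upscaled.png is just an example:
import urllib.request
import replicate

# Run the model; for this version the result is a URL to the upscaled PNG.
output = replicate.run(
    "xpixelgroup/hat:ad47b01e4923c19fed424451d925d7e95b743be1df19e9b3b0dbb9ed8685ed6b",
    input={
        "image": "https://replicate.delivery/mgxm/0722e88d-4d62-42c9-b775-9007aade21ba/butterflyx4.png"
    }
)

# Download the returned file (str() covers clients that wrap the URL in a file object).
urllib.request.urlretrieve(str(output), "upscaled.png")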
To call the HTTP API directly, set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run xpixelgroup/hat using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
-H "Authorization: Bearer $REPLICATE_API_TOKEN" \
-H "Content-Type: application/json" \
-H "Prefer: wait" \
-d $'{
"version": "ad47b01e4923c19fed424451d925d7e95b743be1df19e9b3b0dbb9ed8685ed6b",
"input": {
"image": "https://replicate.delivery/mgxm/0722e88d-4d62-42c9-b775-9007aade21ba/butterflyx4.png"
}
}' \
https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
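The same request can be sent from any HTTP client. Below is a minimal Python sketch using only the standard library, assuming REPLICATE_API_TOKEN is set as above; with the Prefer: wait header the API should hold the connection until the prediction finishes, so the response may already contain the output URL (otherwise, poll the urls.get endpoint returned in the response):
import json
import os
import urllib.request

token = os.environ["REPLICATE_API_TOKEN"]

body = json.dumps({
    "version": "ad47b01e4923c19fed424451d925d7e95b743be1df19e9b3b0dbb9ed8685ed6b",
    "input": {
        "image": "https://replicate.delivery/mgxm/0722e88d-4d62-42c9-b775-9007aade21ba/butterflyx4.png"
    }
}).encode()

req = urllib.request.Request(
    "https://api.replicate.com/v1/predictions",
    data=body,
    headers={
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
        "Prefer": "wait",
    },
)

# Create the prediction and print its status and output (a URL once it has succeeded).
with urllib.request.urlopen(req) as resp:
    prediction = json.load(resp)

print(prediction["status"], prediction.get("output"))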
To run this model locally, first install Cog:
brew install cog
If you don’t have Homebrew, there are other installation options available.
Run this to download the model and run it in your local environment:
cog predict r8.im/xpixelgroup/hat@sha256:ad47b01e4923c19fed424451d925d7e95b743be1df19e9b3b0dbb9ed8685ed6b \
-i 'image="https://replicate.delivery/mgxm/0722e88d-4d62-42c9-b775-9007aade21ba/butterflyx4.png"'
To learn more, take a look at the Cog documentation.
Run this to download the model and run it in your local environment:
docker run -d -p 5000:5000 --gpus=all r8.im/xpixelgroup/hat@sha256:ad47b01e4923c19fed424451d925d7e95b743be1df19e9b3b0dbb9ed8685ed6b
curl -s -X POST \
-H "Content-Type: application/json" \
-d $'{
"input": {
"image": "https://replicate.delivery/mgxm/0722e88d-4d62-42c9-b775-9007aade21ba/butterflyx4.png"
}
}' \
http://localhost:5000/predictions
To learn more, take a look at the Cog documentation.
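Once the container is running, the local endpoint can also be called from Python. This is a minimal sketch using the standard library, assuming the container started by the docker run command above is listening on localhost:5000; the exact shape of the response JSON (and how the output image is encoded) depends on the Cog version baked into the image, so the response is simply printed here:
import json
import urllib.request

body = json.dumps({
    "input": {
        "image": "https://replicate.delivery/mgxm/0722e88d-4d62-42c9-b775-9007aade21ba/butterflyx4.png"
    }
}).encode()

req = urllib.request.Request(
    "http://localhost:5000/predictions",
    data=body,
    headers={"Content-Type": "application/json"},
)

with urllib.request.urlopen(req) as resp:
    result = json.load(resp)

# Print a preview of the response; the "output" field holds the upscaled image.
print(json.dumps(result, indent=2)[:800])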
Each run costs approximately $0.034. Alternatively, try out our featured models for free.
Output
{
"completed_at": "2022-08-28T18:09:44.858968Z",
"created_at": "2022-08-28T18:08:05.647286Z",
"data_removed": false,
"error": null,
"id": "5u3kz6kl65bzfl6yxz7tahml7u",
"input": {
"image": "https://replicate.delivery/mgxm/0722e88d-4d62-42c9-b775-9007aade21ba/butterflyx4.png"
},
"logs": "Disable distributed.\n2022-08-28 18:09:38,591 INFO:\n ____ _ _____ ____\n / __ ) ____ _ _____ (_)_____/ ___/ / __ \\\n / __ |/ __ `// ___// // ___/\\__ \\ / /_/ /\n / /_/ // /_/ /(__ )/ // /__ ___/ // _, _/\n /_____/ \\__,_//____//_/ \\___//____//_/ |_|\n ______ __ __ __ __\n / ____/____ ____ ____/ / / / __ __ _____ / /__ / /\n / / __ / __ \\ / __ \\ / __ / / / / / / // ___// //_/ / /\n / /_/ // /_/ // /_/ // /_/ / / /___/ /_/ // /__ / /< /_/\n \\____/ \\____/ \\____/ \\____/ /_____/\\____/ \\___//_/|_| (_)\n\nVersion Information:\n\tBasicSR: 1.3.4.9\n\tPyTorch: 1.9.1+cu102\n\tTorchVision: 0.10.1+cu102\n2022-08-28 18:09:38,592 INFO:\n name: HAT_SRx4_ImageNet-LR\n model_type: HATModel\n scale: 4\n num_gpu: 1\n manual_seed: 0\n datasets:[\n test_1:[\n name: custom\n type: SingleImageDataset\n dataroot_lq: input_dir\n io_backend:[\n type: disk\n ]\n phase: test\n scale: 4\n ]\n ]\n network_g:[\n type: HAT\n upscale: 4\n in_chans: 3\n img_size: 64\n window_size: 16\n compress_ratio: 3\n squeeze_factor: 30\n conv_scale: 0.01\n overlap_ratio: 0.5\n img_range: 1.0\n depths: [6, 6, 6, 6, 6, 6]\n embed_dim: 180\n num_heads: [6, 6, 6, 6, 6, 6]\n mlp_ratio: 2\n upsampler: pixelshuffle\n resi_connection: 1conv\n ]\n path:[\n pretrain_network_g: experiments/pretrained_models/HAT_SRx4_ImageNet-pretrain.pth\n strict_load_g: True\n param_key_g: params_ema\n results_root: /src/results/HAT_SRx4_ImageNet-LR\n log: /src/results/HAT_SRx4_ImageNet-LR\n visualization: /src/results/HAT_SRx4_ImageNet-LR/visualization\n ]\n val:[\n save_img: True\n suffix: None\n ]\n dist: False\n rank: 0\n world_size: 1\n auto_resume: False\n is_train: False\n\n2022-08-28 18:09:38,592 INFO: Dataset [SingleImageDataset] - custom is built.\n2022-08-28 18:09:38,592 INFO: Number of test images in custom: 1\n2022-08-28 18:09:38,943 INFO: Network [HAT] is created.\n2022-08-28 18:09:42,882 INFO: Network: HAT, with parameters: 20,772,507\n2022-08-28 18:09:42,882 INFO: HAT(\n (conv_first): Conv2d(3, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (patch_embed): PatchEmbed(\n (norm): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n )\n (patch_unembed): PatchUnEmbed()\n (pos_drop): Dropout(p=0.0, inplace=False)\n (layers): ModuleList(\n (0): RHAG(\n (residual_group): AttenBlocks(\n (blocks): ModuleList(\n (0): HAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (attn): WindowAttention(\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n (softmax): Softmax(dim=-1)\n )\n (conv_block): CAB(\n (cab): Sequential(\n (0): Conv2d(180, 60, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): GELU()\n (2): Conv2d(60, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): ChannelAttention(\n (attention): Sequential(\n (0): AdaptiveAvgPool2d(output_size=1)\n (1): Conv2d(180, 6, kernel_size=(1, 1), stride=(1, 1))\n (2): ReLU(inplace=True)\n (3): Conv2d(6, 180, kernel_size=(1, 1), stride=(1, 1))\n (4): Sigmoid()\n )\n )\n )\n )\n (drop_path): Identity()\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (1): HAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (attn): WindowAttention(\n (qkv): 
Linear(in_features=180, out_features=540, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n (softmax): Softmax(dim=-1)\n )\n (conv_block): CAB(\n (cab): Sequential(\n (0): Conv2d(180, 60, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): GELU()\n (2): Conv2d(60, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): ChannelAttention(\n (attention): Sequential(\n (0): AdaptiveAvgPool2d(output_size=1)\n (1): Conv2d(180, 6, kernel_size=(1, 1), stride=(1, 1))\n (2): ReLU(inplace=True)\n (3): Conv2d(6, 180, kernel_size=(1, 1), stride=(1, 1))\n (4): Sigmoid()\n )\n )\n )\n )\n (drop_path): DropPath()\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (2): HAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (attn): WindowAttention(\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n (softmax): Softmax(dim=-1)\n )\n (conv_block): CAB(\n (cab): Sequential(\n (0): Conv2d(180, 60, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): GELU()\n (2): Conv2d(60, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): ChannelAttention(\n (attention): Sequential(\n (0): AdaptiveAvgPool2d(output_size=1)\n (1): Conv2d(180, 6, kernel_size=(1, 1), stride=(1, 1))\n (2): ReLU(inplace=True)\n (3): Conv2d(6, 180, kernel_size=(1, 1), stride=(1, 1))\n (4): Sigmoid()\n )\n )\n )\n )\n (drop_path): DropPath()\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (3): HAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (attn): WindowAttention(\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n (softmax): Softmax(dim=-1)\n )\n (conv_block): CAB(\n (cab): Sequential(\n (0): Conv2d(180, 60, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): GELU()\n (2): Conv2d(60, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): ChannelAttention(\n (attention): Sequential(\n (0): AdaptiveAvgPool2d(output_size=1)\n (1): Conv2d(180, 6, kernel_size=(1, 1), stride=(1, 1))\n (2): ReLU(inplace=True)\n (3): Conv2d(6, 180, kernel_size=(1, 1), stride=(1, 1))\n (4): Sigmoid()\n )\n )\n )\n )\n (drop_path): DropPath()\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (4): HAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (attn): WindowAttention(\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n 
(softmax): Softmax(dim=-1)\n )\n (conv_block): CAB(\n (cab): Sequential(\n (0): Conv2d(180, 60, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): GELU()\n (2): Conv2d(60, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): ChannelAttention(\n (attention): Sequential(\n (0): AdaptiveAvgPool2d(output_size=1)\n (1): Conv2d(180, 6, kernel_size=(1, 1), stride=(1, 1))\n (2): ReLU(inplace=True)\n (3): Conv2d(6, 180, kernel_size=(1, 1), stride=(1, 1))\n (4): Sigmoid()\n )\n )\n )\n )\n (drop_path): DropPath()\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (5): HAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (attn): WindowAttention(\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n (softmax): Softmax(dim=-1)\n )\n (conv_block): CAB(\n (cab): Sequential(\n (0): Conv2d(180, 60, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): GELU()\n (2): Conv2d(60, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): ChannelAttention(\n (attention): Sequential(\n (0): AdaptiveAvgPool2d(output_size=1)\n (1): Conv2d(180, 6, kernel_size=(1, 1), stride=(1, 1))\n (2): ReLU(inplace=True)\n (3): Conv2d(6, 180, kernel_size=(1, 1), stride=(1, 1))\n (4): Sigmoid()\n )\n )\n )\n )\n (drop_path): DropPath()\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n )\n (overlap_attn): OCAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (unfold): Unfold(kernel_size=(24, 24), dilation=1, padding=4, stride=16)\n (softmax): Softmax(dim=-1)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n )\n (conv): Conv2d(180, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (patch_embed): PatchEmbed()\n (patch_unembed): PatchUnEmbed()\n )\n (1): RHAG(\n (residual_group): AttenBlocks(\n (blocks): ModuleList(\n (0): HAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (attn): WindowAttention(\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n (softmax): Softmax(dim=-1)\n )\n (conv_block): CAB(\n (cab): Sequential(\n (0): Conv2d(180, 60, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): GELU()\n (2): Conv2d(60, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): ChannelAttention(\n (attention): Sequential(\n (0): AdaptiveAvgPool2d(output_size=1)\n (1): Conv2d(180, 6, kernel_size=(1, 1), stride=(1, 1))\n (2): ReLU(inplace=True)\n (3): Conv2d(6, 180, kernel_size=(1, 1), stride=(1, 1))\n (4): Sigmoid()\n )\n )\n )\n )\n (drop_path): 
DropPath()\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (1): HAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (attn): WindowAttention(\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n (softmax): Softmax(dim=-1)\n )\n (conv_block): CAB(\n (cab): Sequential(\n (0): Conv2d(180, 60, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): GELU()\n (2): Conv2d(60, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): ChannelAttention(\n (attention): Sequential(\n (0): AdaptiveAvgPool2d(output_size=1)\n (1): Conv2d(180, 6, kernel_size=(1, 1), stride=(1, 1))\n (2): ReLU(inplace=True)\n (3): Conv2d(6, 180, kernel_size=(1, 1), stride=(1, 1))\n (4): Sigmoid()\n )\n )\n )\n )\n (drop_path): DropPath()\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (2): HAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (attn): WindowAttention(\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n (softmax): Softmax(dim=-1)\n )\n (conv_block): CAB(\n (cab): Sequential(\n (0): Conv2d(180, 60, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): GELU()\n (2): Conv2d(60, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): ChannelAttention(\n (attention): Sequential(\n (0): AdaptiveAvgPool2d(output_size=1)\n (1): Conv2d(180, 6, kernel_size=(1, 1), stride=(1, 1))\n (2): ReLU(inplace=True)\n (3): Conv2d(6, 180, kernel_size=(1, 1), stride=(1, 1))\n (4): Sigmoid()\n )\n )\n )\n )\n (drop_path): DropPath()\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (3): HAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (attn): WindowAttention(\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n (softmax): Softmax(dim=-1)\n )\n (conv_block): CAB(\n (cab): Sequential(\n (0): Conv2d(180, 60, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): GELU()\n (2): Conv2d(60, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): ChannelAttention(\n (attention): Sequential(\n (0): AdaptiveAvgPool2d(output_size=1)\n (1): Conv2d(180, 6, kernel_size=(1, 1), stride=(1, 1))\n (2): ReLU(inplace=True)\n (3): Conv2d(6, 180, kernel_size=(1, 1), stride=(1, 1))\n (4): Sigmoid()\n )\n )\n )\n )\n (drop_path): DropPath()\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, 
out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (4): HAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (attn): WindowAttention(\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n (softmax): Softmax(dim=-1)\n )\n (conv_block): CAB(\n (cab): Sequential(\n (0): Conv2d(180, 60, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): GELU()\n (2): Conv2d(60, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): ChannelAttention(\n (attention): Sequential(\n (0): AdaptiveAvgPool2d(output_size=1)\n (1): Conv2d(180, 6, kernel_size=(1, 1), stride=(1, 1))\n (2): ReLU(inplace=True)\n (3): Conv2d(6, 180, kernel_size=(1, 1), stride=(1, 1))\n (4): Sigmoid()\n )\n )\n )\n )\n (drop_path): DropPath()\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (5): HAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (attn): WindowAttention(\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n (softmax): Softmax(dim=-1)\n )\n (conv_block): CAB(\n (cab): Sequential(\n (0): Conv2d(180, 60, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): GELU()\n (2): Conv2d(60, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): ChannelAttention(\n (attention): Sequential(\n (0): AdaptiveAvgPool2d(output_size=1)\n (1): Conv2d(180, 6, kernel_size=(1, 1), stride=(1, 1))\n (2): ReLU(inplace=True)\n (3): Conv2d(6, 180, kernel_size=(1, 1), stride=(1, 1))\n (4): Sigmoid()\n )\n )\n )\n )\n (drop_path): DropPath()\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n )\n (overlap_attn): OCAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (unfold): Unfold(kernel_size=(24, 24), dilation=1, padding=4, stride=16)\n (softmax): Softmax(dim=-1)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n )\n (conv): Conv2d(180, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (patch_embed): PatchEmbed()\n (patch_unembed): PatchUnEmbed()\n )\n (2): RHAG(\n (residual_group): AttenBlocks(\n (blocks): ModuleList(\n (0): HAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (attn): WindowAttention(\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n (softmax): Softmax(dim=-1)\n )\n (conv_block): CAB(\n (cab): Sequential(\n (0): Conv2d(180, 60, kernel_size=(3, 3), 
stride=(1, 1), padding=(1, 1))\n (1): GELU()\n (2): Conv2d(60, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): ChannelAttention(\n (attention): Sequential(\n (0): AdaptiveAvgPool2d(output_size=1)\n (1): Conv2d(180, 6, kernel_size=(1, 1), stride=(1, 1))\n (2): ReLU(inplace=True)\n (3): Conv2d(6, 180, kernel_size=(1, 1), stride=(1, 1))\n (4): Sigmoid()\n )\n )\n )\n )\n (drop_path): DropPath()\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (1): HAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (attn): WindowAttention(\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n (softmax): Softmax(dim=-1)\n )\n (conv_block): CAB(\n (cab): Sequential(\n (0): Conv2d(180, 60, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): GELU()\n (2): Conv2d(60, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): ChannelAttention(\n (attention): Sequential(\n (0): AdaptiveAvgPool2d(output_size=1)\n (1): Conv2d(180, 6, kernel_size=(1, 1), stride=(1, 1))\n (2): ReLU(inplace=True)\n (3): Conv2d(6, 180, kernel_size=(1, 1), stride=(1, 1))\n (4): Sigmoid()\n )\n )\n )\n )\n (drop_path): DropPath()\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (2): HAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (attn): WindowAttention(\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n (softmax): Softmax(dim=-1)\n )\n (conv_block): CAB(\n (cab): Sequential(\n (0): Conv2d(180, 60, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): GELU()\n (2): Conv2d(60, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): ChannelAttention(\n (attention): Sequential(\n (0): AdaptiveAvgPool2d(output_size=1)\n (1): Conv2d(180, 6, kernel_size=(1, 1), stride=(1, 1))\n (2): ReLU(inplace=True)\n (3): Conv2d(6, 180, kernel_size=(1, 1), stride=(1, 1))\n (4): Sigmoid()\n )\n )\n )\n )\n (drop_path): DropPath()\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (3): HAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (attn): WindowAttention(\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n (softmax): Softmax(dim=-1)\n )\n (conv_block): CAB(\n (cab): Sequential(\n (0): Conv2d(180, 60, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): GELU()\n (2): Conv2d(60, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): ChannelAttention(\n (attention): Sequential(\n (0): AdaptiveAvgPool2d(output_size=1)\n 
(1): Conv2d(180, 6, kernel_size=(1, 1), stride=(1, 1))\n (2): ReLU(inplace=True)\n (3): Conv2d(6, 180, kernel_size=(1, 1), stride=(1, 1))\n (4): Sigmoid()\n )\n )\n )\n )\n (drop_path): DropPath()\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (4): HAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (attn): WindowAttention(\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n (softmax): Softmax(dim=-1)\n )\n (conv_block): CAB(\n (cab): Sequential(\n (0): Conv2d(180, 60, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): GELU()\n (2): Conv2d(60, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): ChannelAttention(\n (attention): Sequential(\n (0): AdaptiveAvgPool2d(output_size=1)\n (1): Conv2d(180, 6, kernel_size=(1, 1), stride=(1, 1))\n (2): ReLU(inplace=True)\n (3): Conv2d(6, 180, kernel_size=(1, 1), stride=(1, 1))\n (4): Sigmoid()\n )\n )\n )\n )\n (drop_path): DropPath()\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (5): HAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (attn): WindowAttention(\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n (softmax): Softmax(dim=-1)\n )\n (conv_block): CAB(\n (cab): Sequential(\n (0): Conv2d(180, 60, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): GELU()\n (2): Conv2d(60, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): ChannelAttention(\n (attention): Sequential(\n (0): AdaptiveAvgPool2d(output_size=1)\n (1): Conv2d(180, 6, kernel_size=(1, 1), stride=(1, 1))\n (2): ReLU(inplace=True)\n (3): Conv2d(6, 180, kernel_size=(1, 1), stride=(1, 1))\n (4): Sigmoid()\n )\n )\n )\n )\n (drop_path): DropPath()\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n )\n (overlap_attn): OCAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (unfold): Unfold(kernel_size=(24, 24), dilation=1, padding=4, stride=16)\n (softmax): Softmax(dim=-1)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n )\n (conv): Conv2d(180, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (patch_embed): PatchEmbed()\n (patch_unembed): PatchUnEmbed()\n )\n (3): RHAG(\n (residual_group): AttenBlocks(\n (blocks): ModuleList(\n (0): HAB(\n (norm1): LayerNorm((180,), eps=1e-05, 
elementwise_affine=True)\n (attn): WindowAttention(\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n (softmax): Softmax(dim=-1)\n )\n (conv_block): CAB(\n (cab): Sequential(\n (0): Conv2d(180, 60, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): GELU()\n (2): Conv2d(60, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): ChannelAttention(\n (attention): Sequential(\n (0): AdaptiveAvgPool2d(output_size=1)\n (1): Conv2d(180, 6, kernel_size=(1, 1), stride=(1, 1))\n (2): ReLU(inplace=True)\n (3): Conv2d(6, 180, kernel_size=(1, 1), stride=(1, 1))\n (4): Sigmoid()\n )\n )\n )\n )\n (drop_path): DropPath()\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (1): HAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (attn): WindowAttention(\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n (softmax): Softmax(dim=-1)\n )\n (conv_block): CAB(\n (cab): Sequential(\n (0): Conv2d(180, 60, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): GELU()\n (2): Conv2d(60, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): ChannelAttention(\n (attention): Sequential(\n (0): AdaptiveAvgPool2d(output_size=1)\n (1): Conv2d(180, 6, kernel_size=(1, 1), stride=(1, 1))\n (2): ReLU(inplace=True)\n (3): Conv2d(6, 180, kernel_size=(1, 1), stride=(1, 1))\n (4): Sigmoid()\n )\n )\n )\n )\n (drop_path): DropPath()\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (2): HAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (attn): WindowAttention(\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n (softmax): Softmax(dim=-1)\n )\n (conv_block): CAB(\n (cab): Sequential(\n (0): Conv2d(180, 60, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): GELU()\n (2): Conv2d(60, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): ChannelAttention(\n (attention): Sequential(\n (0): AdaptiveAvgPool2d(output_size=1)\n (1): Conv2d(180, 6, kernel_size=(1, 1), stride=(1, 1))\n (2): ReLU(inplace=True)\n (3): Conv2d(6, 180, kernel_size=(1, 1), stride=(1, 1))\n (4): Sigmoid()\n )\n )\n )\n )\n (drop_path): DropPath()\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (3): HAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (attn): WindowAttention(\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=180, out_features=180, 
bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n (softmax): Softmax(dim=-1)\n )\n (conv_block): CAB(\n (cab): Sequential(\n (0): Conv2d(180, 60, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): GELU()\n (2): Conv2d(60, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): ChannelAttention(\n (attention): Sequential(\n (0): AdaptiveAvgPool2d(output_size=1)\n (1): Conv2d(180, 6, kernel_size=(1, 1), stride=(1, 1))\n (2): ReLU(inplace=True)\n (3): Conv2d(6, 180, kernel_size=(1, 1), stride=(1, 1))\n (4): Sigmoid()\n )\n )\n )\n )\n (drop_path): DropPath()\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (4): HAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (attn): WindowAttention(\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n (softmax): Softmax(dim=-1)\n )\n (conv_block): CAB(\n (cab): Sequential(\n (0): Conv2d(180, 60, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): GELU()\n (2): Conv2d(60, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): ChannelAttention(\n (attention): Sequential(\n (0): AdaptiveAvgPool2d(output_size=1)\n (1): Conv2d(180, 6, kernel_size=(1, 1), stride=(1, 1))\n (2): ReLU(inplace=True)\n (3): Conv2d(6, 180, kernel_size=(1, 1), stride=(1, 1))\n (4): Sigmoid()\n )\n )\n )\n )\n (drop_path): DropPath()\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (5): HAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (attn): WindowAttention(\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n (softmax): Softmax(dim=-1)\n )\n (conv_block): CAB(\n (cab): Sequential(\n (0): Conv2d(180, 60, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): GELU()\n (2): Conv2d(60, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): ChannelAttention(\n (attention): Sequential(\n (0): AdaptiveAvgPool2d(output_size=1)\n (1): Conv2d(180, 6, kernel_size=(1, 1), stride=(1, 1))\n (2): ReLU(inplace=True)\n (3): Conv2d(6, 180, kernel_size=(1, 1), stride=(1, 1))\n (4): Sigmoid()\n )\n )\n )\n )\n (drop_path): DropPath()\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n )\n (overlap_attn): OCAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (unfold): Unfold(kernel_size=(24, 24), dilation=1, padding=4, stride=16)\n (softmax): Softmax(dim=-1)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): 
GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n )\n (conv): Conv2d(180, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (patch_embed): PatchEmbed()\n (patch_unembed): PatchUnEmbed()\n )\n (4): RHAG(\n (residual_group): AttenBlocks(\n (blocks): ModuleList(\n (0): HAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (attn): WindowAttention(\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n (softmax): Softmax(dim=-1)\n )\n (conv_block): CAB(\n (cab): Sequential(\n (0): Conv2d(180, 60, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): GELU()\n (2): Conv2d(60, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): ChannelAttention(\n (attention): Sequential(\n (0): AdaptiveAvgPool2d(output_size=1)\n (1): Conv2d(180, 6, kernel_size=(1, 1), stride=(1, 1))\n (2): ReLU(inplace=True)\n (3): Conv2d(6, 180, kernel_size=(1, 1), stride=(1, 1))\n (4): Sigmoid()\n )\n )\n )\n )\n (drop_path): DropPath()\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (1): HAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (attn): WindowAttention(\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n (softmax): Softmax(dim=-1)\n )\n (conv_block): CAB(\n (cab): Sequential(\n (0): Conv2d(180, 60, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): GELU()\n (2): Conv2d(60, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): ChannelAttention(\n (attention): Sequential(\n (0): AdaptiveAvgPool2d(output_size=1)\n (1): Conv2d(180, 6, kernel_size=(1, 1), stride=(1, 1))\n (2): ReLU(inplace=True)\n (3): Conv2d(6, 180, kernel_size=(1, 1), stride=(1, 1))\n (4): Sigmoid()\n )\n )\n )\n )\n (drop_path): DropPath()\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (2): HAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (attn): WindowAttention(\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n (softmax): Softmax(dim=-1)\n )\n (conv_block): CAB(\n (cab): Sequential(\n (0): Conv2d(180, 60, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): GELU()\n (2): Conv2d(60, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): ChannelAttention(\n (attention): Sequential(\n (0): AdaptiveAvgPool2d(output_size=1)\n (1): Conv2d(180, 6, kernel_size=(1, 1), stride=(1, 1))\n (2): ReLU(inplace=True)\n (3): Conv2d(6, 180, kernel_size=(1, 1), stride=(1, 1))\n (4): Sigmoid()\n )\n )\n )\n )\n (drop_path): DropPath()\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n 
(act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (3): HAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (attn): WindowAttention(\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n (softmax): Softmax(dim=-1)\n )\n (conv_block): CAB(\n (cab): Sequential(\n (0): Conv2d(180, 60, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): GELU()\n (2): Conv2d(60, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): ChannelAttention(\n (attention): Sequential(\n (0): AdaptiveAvgPool2d(output_size=1)\n (1): Conv2d(180, 6, kernel_size=(1, 1), stride=(1, 1))\n (2): ReLU(inplace=True)\n (3): Conv2d(6, 180, kernel_size=(1, 1), stride=(1, 1))\n (4): Sigmoid()\n )\n )\n )\n )\n (drop_path): DropPath()\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (4): HAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (attn): WindowAttention(\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n (softmax): Softmax(dim=-1)\n )\n (conv_block): CAB(\n (cab): Sequential(\n (0): Conv2d(180, 60, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): GELU()\n (2): Conv2d(60, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): ChannelAttention(\n (attention): Sequential(\n (0): AdaptiveAvgPool2d(output_size=1)\n (1): Conv2d(180, 6, kernel_size=(1, 1), stride=(1, 1))\n (2): ReLU(inplace=True)\n (3): Conv2d(6, 180, kernel_size=(1, 1), stride=(1, 1))\n (4): Sigmoid()\n )\n )\n )\n )\n (drop_path): DropPath()\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (5): HAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (attn): WindowAttention(\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n (softmax): Softmax(dim=-1)\n )\n (conv_block): CAB(\n (cab): Sequential(\n (0): Conv2d(180, 60, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): GELU()\n (2): Conv2d(60, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): ChannelAttention(\n (attention): Sequential(\n (0): AdaptiveAvgPool2d(output_size=1)\n (1): Conv2d(180, 6, kernel_size=(1, 1), stride=(1, 1))\n (2): ReLU(inplace=True)\n (3): Conv2d(6, 180, kernel_size=(1, 1), stride=(1, 1))\n (4): Sigmoid()\n )\n )\n )\n )\n (drop_path): DropPath()\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n )\n (overlap_attn): OCAB(\n (norm1): LayerNorm((180,), eps=1e-05, 
elementwise_affine=True)\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (unfold): Unfold(kernel_size=(24, 24), dilation=1, padding=4, stride=16)\n (softmax): Softmax(dim=-1)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n )\n (conv): Conv2d(180, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (patch_embed): PatchEmbed()\n (patch_unembed): PatchUnEmbed()\n )\n (5): RHAG(\n (residual_group): AttenBlocks(\n (blocks): ModuleList(\n (0): HAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (attn): WindowAttention(\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n (softmax): Softmax(dim=-1)\n )\n (conv_block): CAB(\n (cab): Sequential(\n (0): Conv2d(180, 60, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): GELU()\n (2): Conv2d(60, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): ChannelAttention(\n (attention): Sequential(\n (0): AdaptiveAvgPool2d(output_size=1)\n (1): Conv2d(180, 6, kernel_size=(1, 1), stride=(1, 1))\n (2): ReLU(inplace=True)\n (3): Conv2d(6, 180, kernel_size=(1, 1), stride=(1, 1))\n (4): Sigmoid()\n )\n )\n )\n )\n (drop_path): DropPath()\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (1): HAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (attn): WindowAttention(\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n (softmax): Softmax(dim=-1)\n )\n (conv_block): CAB(\n (cab): Sequential(\n (0): Conv2d(180, 60, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): GELU()\n (2): Conv2d(60, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): ChannelAttention(\n (attention): Sequential(\n (0): AdaptiveAvgPool2d(output_size=1)\n (1): Conv2d(180, 6, kernel_size=(1, 1), stride=(1, 1))\n (2): ReLU(inplace=True)\n (3): Conv2d(6, 180, kernel_size=(1, 1), stride=(1, 1))\n (4): Sigmoid()\n )\n )\n )\n )\n (drop_path): DropPath()\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (2): HAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (attn): WindowAttention(\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n (softmax): Softmax(dim=-1)\n )\n (conv_block): CAB(\n (cab): Sequential(\n (0): Conv2d(180, 60, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): GELU()\n (2): Conv2d(60, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): ChannelAttention(\n 
(attention): Sequential(\n (0): AdaptiveAvgPool2d(output_size=1)\n (1): Conv2d(180, 6, kernel_size=(1, 1), stride=(1, 1))\n (2): ReLU(inplace=True)\n (3): Conv2d(6, 180, kernel_size=(1, 1), stride=(1, 1))\n (4): Sigmoid()\n )\n )\n )\n )\n (drop_path): DropPath()\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (3): HAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (attn): WindowAttention(\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n (softmax): Softmax(dim=-1)\n )\n (conv_block): CAB(\n (cab): Sequential(\n (0): Conv2d(180, 60, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): GELU()\n (2): Conv2d(60, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): ChannelAttention(\n (attention): Sequential(\n (0): AdaptiveAvgPool2d(output_size=1)\n (1): Conv2d(180, 6, kernel_size=(1, 1), stride=(1, 1))\n (2): ReLU(inplace=True)\n (3): Conv2d(6, 180, kernel_size=(1, 1), stride=(1, 1))\n (4): Sigmoid()\n )\n )\n )\n )\n (drop_path): DropPath()\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (4): HAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (attn): WindowAttention(\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n (softmax): Softmax(dim=-1)\n )\n (conv_block): CAB(\n (cab): Sequential(\n (0): Conv2d(180, 60, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): GELU()\n (2): Conv2d(60, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): ChannelAttention(\n (attention): Sequential(\n (0): AdaptiveAvgPool2d(output_size=1)\n (1): Conv2d(180, 6, kernel_size=(1, 1), stride=(1, 1))\n (2): ReLU(inplace=True)\n (3): Conv2d(6, 180, kernel_size=(1, 1), stride=(1, 1))\n (4): Sigmoid()\n )\n )\n )\n )\n (drop_path): DropPath()\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (5): HAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (attn): WindowAttention(\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n (softmax): Softmax(dim=-1)\n )\n (conv_block): CAB(\n (cab): Sequential(\n (0): Conv2d(180, 60, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): GELU()\n (2): Conv2d(60, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): ChannelAttention(\n (attention): Sequential(\n (0): AdaptiveAvgPool2d(output_size=1)\n (1): Conv2d(180, 6, kernel_size=(1, 1), stride=(1, 1))\n (2): ReLU(inplace=True)\n (3): Conv2d(6, 180, kernel_size=(1, 1), stride=(1, 1))\n (4): 
Sigmoid()\n )\n )\n )\n )\n (drop_path): DropPath()\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n )\n (overlap_attn): OCAB(\n (norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (qkv): Linear(in_features=180, out_features=540, bias=True)\n (unfold): Unfold(kernel_size=(24, 24), dilation=1, padding=4, stride=16)\n (softmax): Softmax(dim=-1)\n (proj): Linear(in_features=180, out_features=180, bias=True)\n (norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=180, out_features=360, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=360, out_features=180, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n )\n (conv): Conv2d(180, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (patch_embed): PatchEmbed()\n (patch_unembed): PatchUnEmbed()\n )\n )\n (norm): LayerNorm((180,), eps=1e-05, elementwise_affine=True)\n (conv_after_body): Conv2d(180, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (conv_before_upsample): Sequential(\n (0): Conv2d(180, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): LeakyReLU(negative_slope=0.01, inplace=True)\n )\n (upsample): Upsample(\n (0): Conv2d(64, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): PixelShuffle(upscale_factor=2)\n (2): Conv2d(64, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): PixelShuffle(upscale_factor=2)\n )\n (conv_last): Conv2d(64, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n)\n2022-08-28 18:09:42,955 INFO: Loading HAT model from experiments/pretrained_models/HAT_SRx4_ImageNet-pretrain.pth, with param key: [params_ema].\n2022-08-28 18:09:43,225 INFO: Model [HATModel] is created.\n2022-08-28 18:09:43,226 INFO: Testing custom...",
"metrics": {
"predict_time": 7.268437,
"total_time": 99.211682
},
"output": "https://replicate.delivery/mgxm/83101122-3778-41ff-80be-4f7a1a59b727/output.png",
"started_at": "2022-08-28T18:09:37.590531Z",
"status": "succeeded",
"urls": {
"get": "https://api.replicate.com/v1/predictions/5u3kz6kl65bzfl6yxz7tahml7u",
"cancel": "https://api.replicate.com/v1/predictions/5u3kz6kl65bzfl6yxz7tahml7u/cancel"
},
"version": "5ba193b469df4777e9ffa041d775326935bbd1e572cde69b0454f68a7d29997b"
}
Disable distributed.
2022-08-28 18:09:38,591 INFO:
____ _ _____ ____
/ __ ) ____ _ _____ (_)_____/ ___/ / __ \
/ __ |/ __ `// ___// // ___/\__ \ / /_/ /
/ /_/ // /_/ /(__ )/ // /__ ___/ // _, _/
/_____/ \__,_//____//_/ \___//____//_/ |_|
______ __ __ __ __
/ ____/____ ____ ____/ / / / __ __ _____ / /__ / /
/ / __ / __ \ / __ \ / __ / / / / / / // ___// //_/ / /
/ /_/ // /_/ // /_/ // /_/ / / /___/ /_/ // /__ / /< /_/
\____/ \____/ \____/ \____/ /_____/\____/ \___//_/|_| (_)
Version Information:
BasicSR: 1.3.4.9
PyTorch: 1.9.1+cu102
TorchVision: 0.10.1+cu102
2022-08-28 18:09:38,592 INFO:
name: HAT_SRx4_ImageNet-LR
model_type: HATModel
scale: 4
num_gpu: 1
manual_seed: 0
datasets:[
test_1:[
name: custom
type: SingleImageDataset
dataroot_lq: input_dir
io_backend:[
type: disk
]
phase: test
scale: 4
]
]
network_g:[
type: HAT
upscale: 4
in_chans: 3
img_size: 64
window_size: 16
compress_ratio: 3
squeeze_factor: 30
conv_scale: 0.01
overlap_ratio: 0.5
img_range: 1.0
depths: [6, 6, 6, 6, 6, 6]
embed_dim: 180
num_heads: [6, 6, 6, 6, 6, 6]
mlp_ratio: 2
upsampler: pixelshuffle
resi_connection: 1conv
]
path:[
pretrain_network_g: experiments/pretrained_models/HAT_SRx4_ImageNet-pretrain.pth
strict_load_g: True
param_key_g: params_ema
results_root: /src/results/HAT_SRx4_ImageNet-LR
log: /src/results/HAT_SRx4_ImageNet-LR
visualization: /src/results/HAT_SRx4_ImageNet-LR/visualization
]
val:[
save_img: True
suffix: None
]
dist: False
rank: 0
world_size: 1
auto_resume: False
is_train: False
2022-08-28 18:09:38,592 INFO: Dataset [SingleImageDataset] - custom is built.
2022-08-28 18:09:38,592 INFO: Number of test images in custom: 1
2022-08-28 18:09:38,943 INFO: Network [HAT] is created.
2022-08-28 18:09:42,882 INFO: Network: HAT, with parameters: 20,772,507
2022-08-28 18:09:42,882 INFO: HAT(
(conv_first): Conv2d(3, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(patch_embed): PatchEmbed(
(norm): LayerNorm((180,), eps=1e-05, elementwise_affine=True)
)
(patch_unembed): PatchUnEmbed()
(pos_drop): Dropout(p=0.0, inplace=False)
(layers): ModuleList(
(0): RHAG(
(residual_group): AttenBlocks(
(blocks): ModuleList(
(0): HAB(
(norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)
(attn): WindowAttention(
(qkv): Linear(in_features=180, out_features=540, bias=True)
(attn_drop): Dropout(p=0.0, inplace=False)
(proj): Linear(in_features=180, out_features=180, bias=True)
(proj_drop): Dropout(p=0.0, inplace=False)
(softmax): Softmax(dim=-1)
)
(conv_block): CAB(
(cab): Sequential(
(0): Conv2d(180, 60, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): GELU()
(2): Conv2d(60, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(3): ChannelAttention(
(attention): Sequential(
(0): AdaptiveAvgPool2d(output_size=1)
(1): Conv2d(180, 6, kernel_size=(1, 1), stride=(1, 1))
(2): ReLU(inplace=True)
(3): Conv2d(6, 180, kernel_size=(1, 1), stride=(1, 1))
(4): Sigmoid()
)
)
)
)
(drop_path): Identity()
(norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)
(mlp): Mlp(
(fc1): Linear(in_features=180, out_features=360, bias=True)
(act): GELU()
(fc2): Linear(in_features=360, out_features=180, bias=True)
(drop): Dropout(p=0.0, inplace=False)
)
)
(1): HAB(
(norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)
(attn): WindowAttention(
(qkv): Linear(in_features=180, out_features=540, bias=True)
(attn_drop): Dropout(p=0.0, inplace=False)
(proj): Linear(in_features=180, out_features=180, bias=True)
(proj_drop): Dropout(p=0.0, inplace=False)
(softmax): Softmax(dim=-1)
)
(conv_block): CAB(
(cab): Sequential(
(0): Conv2d(180, 60, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): GELU()
(2): Conv2d(60, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(3): ChannelAttention(
(attention): Sequential(
(0): AdaptiveAvgPool2d(output_size=1)
(1): Conv2d(180, 6, kernel_size=(1, 1), stride=(1, 1))
(2): ReLU(inplace=True)
(3): Conv2d(6, 180, kernel_size=(1, 1), stride=(1, 1))
(4): Sigmoid()
)
)
)
)
(drop_path): DropPath()
(norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)
(mlp): Mlp(
(fc1): Linear(in_features=180, out_features=360, bias=True)
(act): GELU()
(fc2): Linear(in_features=360, out_features=180, bias=True)
(drop): Dropout(p=0.0, inplace=False)
)
)
(2)-(5): HAB(...)   [four further HAB blocks, identical in structure to (1) above]
)
(overlap_attn): OCAB(
(norm1): LayerNorm((180,), eps=1e-05, elementwise_affine=True)
(qkv): Linear(in_features=180, out_features=540, bias=True)
(unfold): Unfold(kernel_size=(24, 24), dilation=1, padding=4, stride=16)
(softmax): Softmax(dim=-1)
(proj): Linear(in_features=180, out_features=180, bias=True)
(norm2): LayerNorm((180,), eps=1e-05, elementwise_affine=True)
(mlp): Mlp(
(fc1): Linear(in_features=180, out_features=360, bias=True)
(act): GELU()
(fc2): Linear(in_features=360, out_features=180, bias=True)
(drop): Dropout(p=0.0, inplace=False)
)
)
)
(conv): Conv2d(180, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(patch_embed): PatchEmbed()
(patch_unembed): PatchUnEmbed()
)
(1)-(5): RHAG(...)   [five further residual groups, identical in structure to (0) above; in these groups every HAB, including the first, uses (drop_path): DropPath()]
)
(norm): LayerNorm((180,), eps=1e-05, elementwise_affine=True)
(conv_after_body): Conv2d(180, 180, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(conv_before_upsample): Sequential(
(0): Conv2d(180, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): LeakyReLU(negative_slope=0.01, inplace=True)
)
(upsample): Upsample(
(0): Conv2d(64, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): PixelShuffle(upscale_factor=2)
(2): Conv2d(64, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(3): PixelShuffle(upscale_factor=2)
)
(conv_last): Conv2d(64, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
)
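Each HAB listed above pairs window attention with a convolutional branch: the CAB compresses 180 channels to 60 with a 3x3 convolution, applies GELU, expands back to 180, and then re-weights channels with a squeeze-and-excitation style gate that bottlenecks 180 down to 6 before the sigmoid. A minimal sketch that reproduces those shapes (class and argument names here are illustrative, not the repository's exact code); in the full block this branch is added to the window-attention path inside the residual connection:

import torch
from torch import nn

class ChannelAttention(nn.Module):
    # Global average pool -> 1x1 squeeze conv -> ReLU -> 1x1 expand conv -> sigmoid gate,
    # matching the 180 -> 6 -> 180 shapes in the printout above.
    def __init__(self, channels: int = 180, squeeze_factor: int = 30):
        super().__init__()
        self.attention = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(channels, channels // squeeze_factor, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(channels // squeeze_factor, channels, 1),
            nn.Sigmoid(),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x * self.attention(x)

class CAB(nn.Module):
    # 3x3 conv compress (180 -> 60), GELU, 3x3 conv expand (60 -> 180), channel attention.
    def __init__(self, channels: int = 180, compress_ratio: int = 3, squeeze_factor: int = 30):
        super().__init__()
        self.cab = nn.Sequential(
            nn.Conv2d(channels, channels // compress_ratio, 3, 1, 1),
            nn.GELU(),
            nn.Conv2d(channels // compress_ratio, channels, 3, 1, 1),
            ChannelAttention(channels, squeeze_factor),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.cab(x)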
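The OCAB at the end of each residual group attends from regular windows to enlarged, overlapping ones: the Unfold above extracts 24x24 patches at stride 16 with 4 pixels of padding, which is what a 16-pixel base window with 50% extra context works out to. A small sketch of how those unfold arguments follow from the window geometry (numbers only, not the repository's code):

from torch import nn

window_size = 16                                       # base window; equals the unfold stride above
overlap_ratio = 0.5                                    # assumed fraction of extra context
overlap_win = int(window_size * (1 + overlap_ratio))   # 24
padding = (overlap_win - window_size) // 2             # 4

# One overlapping 24x24 patch per 16x16 base window, matching
# Unfold(kernel_size=(24, 24), dilation=1, padding=4, stride=16) in the printout.
unfold = nn.Unfold(kernel_size=(overlap_win, overlap_win),
                   stride=window_size, padding=padding)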
2022-08-28 18:09:42,955 INFO: Loading HAT model from experiments/pretrained_models/HAT_SRx4_ImageNet-pretrain.pth, with param key: [params_ema].
2022-08-28 18:09:43,225 INFO: Model [HATModel] is created.
2022-08-28 18:09:43,226 INFO: Testing custom...
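The single test image is pushed through the reconstruction tail shown at the bottom of the printout: features are reduced from 180 to 64 channels, upscaled twice by a 3x3 conv to 256 channels followed by PixelShuffle(2) (x4 overall), and mapped back to RGB by a final 3x3 conv. A self-contained sketch of that tail with the same shapes (illustrative only):

import torch
from torch import nn

# 180 -> 64, two x2 pixel-shuffle stages, 64 -> 3, as in the printout above.
tail = nn.Sequential(
    nn.Conv2d(180, 64, 3, 1, 1),
    nn.LeakyReLU(negative_slope=0.01, inplace=True),
    nn.Conv2d(64, 256, 3, 1, 1),
    nn.PixelShuffle(2),      # 256 channels -> 64 channels, H x W -> 2H x 2W
    nn.Conv2d(64, 256, 3, 1, 1),
    nn.PixelShuffle(2),      # second x2 stage, x4 overall
    nn.Conv2d(64, 3, 3, 1, 1),
)

features = torch.randn(1, 180, 64, 64)
print(tail(features).shape)  # torch.Size([1, 3, 256, 256])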
This example was created by a different version, xpixelgroup/hat:5ba193b4.