| python_code | repo_name | file_path |
|---|---|---|
# Copyright (c) OpenMMLab. All rights reserved.
# dataset settings
dataset_type = 'CocoPanopticDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadPanopticAnnotation... | ViT-Adapter-main | wsdm2023/configs/_base_/datasets/coco_panoptic.py |
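The `img_norm_cfg` values above recur throughout these configs: they are the standard ImageNet per-channel mean/std, and `to_rgb=True` flips the BGR channel order produced by OpenCV-based loaders. A minimal sketch of the equivalent normalization (mmcv's `Normalize` transform does this internally; the helper name here is hypothetical):

```python
import numpy as np

def normalize_image(img_bgr: np.ndarray) -> np.ndarray:
    """Hypothetical helper mirroring mmcv's Normalize transform with the
    ImageNet statistics used throughout these configs."""
    mean = np.array([123.675, 116.28, 103.53], dtype=np.float32)
    std = np.array([58.395, 57.12, 57.375], dtype=np.float32)
    img = img_bgr[..., ::-1].astype(np.float32)  # to_rgb=True: BGR -> RGB
    return (img - mean) / std
```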
# Copyright (c) OpenMMLab. All rights reserved.
# dataset settings
dataset_type = 'VGDataset'
data_root = 'data/refcoco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=T... | ViT-Adapter-main | wsdm2023/configs/_base_/datasets/refcoco.py |
# Copyright (c) OpenMMLab. All rights reserved.
# dataset settings
dataset_type = 'VOCDataset'
data_root = 'data/VOCdevkit/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbo... | ViT-Adapter-main | wsdm2023/configs/_base_/datasets/voc0712.py |
# Copyright (c) OpenMMLab. All rights reserved.
# dataset settings
dataset_type = 'DeepFashionDataset'
data_root = 'data/DeepFashion/In-shop/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnno... | ViT-Adapter-main | wsdm2023/configs/_base_/datasets/deepfashion.py |
# model settings
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
type='FasterRCNN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(2, ),
frozen_stages=1,
norm_cfg=norm_cfg,
... | ViT-Adapter-main | wsdm2023/configs/_base_/models/faster_rcnn_r50_caffe_c4.py |
# model settings
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
type='MaskRCNN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(2, ),
frozen_stages=1,
norm_cfg=norm_cfg,
... | ViT-Adapter-main | wsdm2023/configs/_base_/models/mask_rcnn_r50_caffe_c4.py |
# model settings
input_size = 300
model = dict(
type='SingleStageDetector',
backbone=dict(
type='SSDVGG',
depth=16,
with_last_pool=False,
ceil_mode=True,
out_indices=(3, 4),
out_feature_indices=(22, 34),
init_cfg=dict(
type='Pretrained', checkp... | ViT-Adapter-main | wsdm2023/configs/_base_/models/ssd300.py |
# model settings
model = dict(
type='CascadeRCNN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict... | ViT-Adapter-main | wsdm2023/configs/_base_/models/cascade_rcnn_r50_fpn.py |
# model settings
model = dict(
type='FastRCNN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(ty... | ViT-Adapter-main | wsdm2023/configs/_base_/models/fast_rcnn_r50_fpn.py |
# model settings
model = dict(
type='RPN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='P... | ViT-Adapter-main | wsdm2023/configs/_base_/models/rpn_r50_fpn.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# model settings
model = dict(
type='MaskRCNN',
pretrained=None,
backbone=dict(
type='ConvNeXt',
... | ViT-Adapter-main | wsdm2023/configs/_base_/models/mask_rcnn_convnext_fpn.py |
# model settings
model = dict(
type='RetinaNet',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(t... | ViT-Adapter-main | wsdm2023/configs/_base_/models/retinanet_r50_fpn.py |
# model settings
model = dict(
type='FasterRCNN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(... | ViT-Adapter-main | wsdm2023/configs/_base_/models/faster_rcnn_r50_fpn.py |
# model settings
model = dict(
type='RPN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(2, ),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
... | ViT-Adapter-main | wsdm2023/configs/_base_/models/rpn_r50_caffe_c4.py |
# model settings
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
type='FasterRCNN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
strides=(1, 2, 2, 1),
dilations=(1, 1, 1, 2),
out_indices=(3, ),
frozen_stages=1,
norm_cfg=norm_... | ViT-Adapter-main | wsdm2023/configs/_base_/models/faster_rcnn_r50_caffe_dc5.py |
# model settings
model = dict(
type='CascadeRCNN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict... | ViT-Adapter-main | wsdm2023/configs/_base_/models/cascade_mask_rcnn_r50_fpn.py |
# model settings
model = dict(
type='MaskRCNN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(ty... | ViT-Adapter-main | wsdm2023/configs/_base_/models/mask_rcnn_r50_fpn.py |
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[8, 11])
runner = dict(type='EpochBasedRunner', max_epochs=12)
| ViT-Adapter-main | wsdm2023/configs/_base_/schedules/schedule_1x.py |
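For reference, the `step` policy in these schedule files multiplies the base LR by 0.1 at each listed epoch milestone, after a linear warmup over the first `warmup_iters` iterations. A standalone sketch of the resulting schedule for `schedule_1x` (not the actual MMCV hook, but it follows MMCV's linear-warmup formula):

```python
def lr_at(epoch: int, iteration: int, base_lr: float = 0.02,
          warmup_iters: int = 500, warmup_ratio: float = 0.001,
          steps: tuple = (8, 11), gamma: float = 0.1) -> float:
    """Sketch of MMCV's linear warmup + step decay for schedule_1x."""
    # Step decay: scale by gamma once per milestone already passed.
    lr = base_lr * gamma ** sum(epoch >= s for s in steps)
    if iteration < warmup_iters:
        # Linear warmup from base_lr * warmup_ratio up to the regular lr.
        k = (1 - iteration / warmup_iters) * (1 - warmup_ratio)
        lr = lr * (1 - k)
    return lr
```

The 2x/3x/6x/20e variants below differ only in `warmup_iters`, the milestone epochs, and `max_epochs`.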
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
| ViT-Adapter-main | wsdm2023/configs/_base_/schedules/schedule_2x.py |
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[27, 33])
runner = dict(type='EpochBasedRunner', max_epochs=36)
| ViT-Adapter-main | wsdm2023/configs/_base_/schedules/schedule_3x.py |
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=2000,
warmup_ratio=0.001,
step=[62, 68])
runner = dict(type='EpochBasedRunner', max_epochs=72)... | ViT-Adapter-main | wsdm2023/configs/_base_/schedules/schedule_6x.py |
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[16, 19])
runner = dict(type='EpochBasedRunner', max_epochs=20)
| ViT-Adapter-main | wsdm2023/configs/_base_/schedules/schedule_20e.py |
# Copyright (c) OpenMMLab. All rights reserved.
import asyncio
from argparse import ArgumentParser
from mmdet.apis import (async_inference_detector, inference_detector,
init_detector, show_result_pyplot)
import mmcv
import mmcv_custom # noqa: F401,F403
import mmdet_custom # noqa: F401,F403
im... | ViT-Adapter-main | detection/image_demo.py |
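The demo scripts use the standard mmdet 2.x inference API; importing `mmcv_custom` and `mmdet_custom` registers the adapter backbones before the config is parsed. A minimal usage sketch (the config and checkpoint paths are placeholders):

```python
from mmdet.apis import inference_detector, init_detector, show_result_pyplot
import mmcv_custom   # noqa: F401,F403  (registers custom hooks/loaders)
import mmdet_custom  # noqa: F401,F403  (registers adapter backbones)

# Placeholder paths: substitute a real config file and checkpoint.
model = init_detector('configs/some_config.py', 'checkpoint.pth', device='cuda:0')
result = inference_detector(model, 'demo.jpg')
show_result_pyplot(model, 'demo.jpg', result, score_thr=0.3)
```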
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import time
import warnings
import mmcv
import mmcv_custom # noqa: F401,F403
import mmdet_custom # noqa: F401,F403
import torch
from mmcv import Config, DictAction
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel impo... | ViT-Adapter-main | detection/test.py |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import copy
import os
import os.path as osp
import time
import warnings
import mmcv
import mmcv_custom # noqa: F401,F403
import mmdet_custom # noqa: F401,F403
import torch
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_d... | ViT-Adapter-main | detection/train.py |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
from mmdet.apis import inference_detector, init_detector
import mmcv_custom # noqa: F401,F403
import mmdet_custom # noqa: F401,F403
def parse_args():
parser = argparse.ArgumentParser(description='MMDetection video demo')
... | ViT-Adapter-main | detection/video_demo.py |
import torch
import argparse
import torch.nn.functional as F
parser = argparse.ArgumentParser(description='Hyperparams')
parser.add_argument('filename', nargs='?', type=str, default=None)
args = parser.parse_args()
model = torch.load(args.filename, map_location=torch.device('cpu'))
# resize patch embedding from 14x... | ViT-Adapter-main | detection/convert_14to16.py |
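The truncated comment refers to resizing a 14x14 patch-embedding kernel to 16x16 so that patch-14 pretrained weights can initialize a patch-16 model. A hedged sketch of the usual approach, bicubic interpolation of the conv kernel (the key name `patch_embed.proj.weight` follows the timm convention and may differ per checkpoint):

```python
import torch.nn.functional as F

def resize_patch_embed(state_dict, new_size=16):
    """Sketch: interpolate a patch-embed conv kernel, e.g. 14x14 -> 16x16."""
    key = 'patch_embed.proj.weight'  # timm naming; adjust per checkpoint
    if key in state_dict:
        w = state_dict[key]  # shape (embed_dim, 3, 14, 14)
        state_dict[key] = F.interpolate(
            w.float(), size=(new_size, new_size),
            mode='bicubic', align_corners=False)
    return state_dict
```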
# Copyright (c) Shanghai AI Lab. All rights reserved.
from .models import * # noqa: F401,F403
| ViT-Adapter-main | detection/mmdet_custom/__init__.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
from .backbones import * # noqa: F401,F403
from .necks import * # noqa: F401,F403
from .detectors import * # noqa: F401,F403 | ViT-Adapter-main | detection/mmdet_custom/models/__init__.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from mmdet.models.builder import NECKS
@NECKS.register_module()
class ChannelMapperWithPooling(BaseModule):
r"""Channel Mapper to reduce/increase channels of backbone feat... | ViT-Adapter-main | detection/mmdet_custom/models/necks/channel_mapper.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
from .channel_mapper import ChannelMapperWithPooling
from .extra_attention import ExtraAttention
__all__ = ['ExtraAttention', 'ChannelMapperWithPooling']
| ViT-Adapter-main | detection/mmdet_custom/models/necks/__init__.py |
import torch.nn as nn
from mmcv.runner import BaseModule, auto_fp16
from mmdet.models.builder import NECKS
from timm.models.layers import trunc_normal_, DropPath
import math
import torch
import torch.utils.checkpoint as cp
class Mlp(nn.Module):
""" MLP as used in Vision Transformer, MLP-Mixer and related networks... | ViT-Adapter-main | detection/mmdet_custom/models/necks/extra_attention.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
from .beit_adapter import BEiTAdapter
from .uniperceiver_adapter import UniPerceiverAdapter
from .vit_adapter import ViTAdapter
from .vit_baseline import ViTBaseline
__all__ = ['UniPerceiverAdapter', 'ViTAdapter', 'ViTBaseline', 'BEiTAdapter']
| ViT-Adapter-main | detection/mmdet_custom/models/backbones/__init__.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
import logging
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmdet.models.builder import BACKBONES
from ops.modules import MSDeformAttn
from timm.models.layers import DropPath, trunc_normal_
from torch.nn.init import normal_
f... | ViT-Adapter-main | detection/mmdet_custom/models/backbones/vit_adapter.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
import logging
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmdet.models.builder import BACKBONES
from ops.modules import MSDeformAttn
from timm.models.layers import trunc_normal_
from torch.nn.init import normal_
from .base.... | ViT-Adapter-main | detection/mmdet_custom/models/backbones/beit_adapter.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
import logging
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmdet.models.builder import BACKBONES
from ops.modules import MSDeformAttn
from timm.models.layers import DropPath, trunc_normal_
from torch.nn.init import normal_
f... | ViT-Adapter-main | detection/mmdet_custom/models/backbones/uniperceiver_adapter.py |
import logging
from functools import partial
import torch
import torch.nn as nn
from ops.modules import MSDeformAttn
from timm.models.layers import DropPath
import torch.utils.checkpoint as cp
_logger = logging.getLogger(__name__)
def get_reference_points(spatial_shapes, device):
reference_points_list = []
... | ViT-Adapter-main | detection/mmdet_custom/models/backbones/adapter_modules.py |
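`get_reference_points` builds the normalized sampling locations that `MSDeformAttn` expects, one (x, y) grid of pixel centers per feature level. A sketch consistent with the Deformable-DETR convention (coordinates normalized to (0, 1)):

```python
import torch

def get_reference_points(spatial_shapes, device):
    """Sketch: normalized pixel-center grids for each feature level."""
    reference_points_list = []
    for (H_, W_) in spatial_shapes:
        ref_y, ref_x = torch.meshgrid(
            torch.linspace(0.5, H_ - 0.5, H_, dtype=torch.float32, device=device),
            torch.linspace(0.5, W_ - 0.5, W_, dtype=torch.float32, device=device))
        ref_y = ref_y.reshape(-1)[None] / H_
        ref_x = ref_x.reshape(-1)[None] / W_
        reference_points_list.append(torch.stack((ref_x, ref_y), -1))
    reference_points = torch.cat(reference_points_list, 1)
    return reference_points[:, :, None]  # (1, sum(H*W), 1, 2)
```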
# Copyright (c) Shanghai AI Lab. All rights reserved.
import logging
import math
import torch.nn as nn
import torch.nn.functional as F
from mmdet.models.builder import BACKBONES
from timm.models.layers import trunc_normal_
from .base.vit import TIMMVisionTransformer
from .base.vit import ResBottleneckBlock
_logger = ... | ViT-Adapter-main | detection/mmdet_custom/models/backbones/vit_baseline.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# B... | ViT-Adapter-main | detection/mmdet_custom/models/backbones/base/beit.py |
import logging
import math
import torch
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from mmcv.runner import load_checkpoint
from mmdet.utils import get_root_logger
from timm.models.layers import DropPath
from torch import nn
def window_partition(x, window_size):
"""
Args:
x: (... | ViT-Adapter-main | detection/mmdet_custom/models/backbones/base/uniperceiver.py |
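`window_partition` is the standard Swin-style utility that tiles a feature map into non-overlapping windows for windowed attention. A sketch of the conventional implementation (this version assumes H and W are divisible by `window_size`; the actual file may pad first):

```python
def window_partition(x, window_size):
    """Sketch: (B, H, W, C) -> (num_windows * B, window_size, window_size, C)."""
    B, H, W, C = x.shape
    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
    windows = x.permute(0, 1, 3, 2, 4, 5).contiguous()
    return windows.view(-1, window_size, window_size, C)
```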
"""Vision Transformer (ViT) in PyTorch.
A PyTorch implement of Vision Transformers as described in:
'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale'
- https://arxiv.org/abs/2010.11929
`How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers`
- https:... | ViT-Adapter-main | detection/mmdet_custom/models/backbones/base/vit.py |
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.builder import DETECTORS
from mmdet.models.detectors.cascade_rcnn import CascadeRCNN
from mmdet.core import (bbox2result, bbox_mapping_back, multiclass_nms,
bbox2roi, merge_aug_masks, bbox_mapping)
import torch
import numpy as np
... | ViT-Adapter-main | detection/mmdet_custom/models/detectors/htc_aug.py |
from .htc_aug import HybridTaskCascadeAug
__all__ = ['HybridTaskCascadeAug'] | ViT-Adapter-main | detection/mmdet_custom/models/detectors/__init__.py |
# Copyright (c) ByteDance, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Mostly copy-paste from BEiT library:
https://github.com/microsoft/unilm/blob/master/beit/semantic_segmentation/mmcv_cus... | ViT-Adapter-main | detection/mmcv_custom/layer_decay_optimizer_constructor.py |
# Copyright (c) Open-MMLab. All rights reserved.
import io
import math
import os
import os.path as osp
import pkgutil
import time
import warnings
from collections import OrderedDict
from importlib import import_module
from tempfile import TemporaryDirectory
import mmcv
import numpy as np
import torch
import torchvisio... | ViT-Adapter-main | detection/mmcv_custom/checkpoint.py |
import os.path as osp
import pkgutil
import time
from collections import OrderedDict
from importlib import import_module
import mmcv
import torch
from torch.utils import model_zoo
open_mmlab_model_urls = {
'vgg16_caffe': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/vgg16_caffe-292e1171... | ViT-Adapter-main | detection/mmcv_custom/my_checkpoint.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
from .checkpoint import load_checkpoint
from .customized_text import CustomizedTextLoggerHook
from .layer_decay_optimizer_constructor import LayerDecayOptimizerConstructor
from .my_checkpoint import my_load_checkpoint
__all__ = [
'LayerDecayOptimizerConstructor... | ViT-Adapter-main | detection/mmcv_custom/__init__.py |
import torch
checkpoint = torch.load("../pretrained/uni-perceiver-large-L24-H1024-224size-pretrained.pth",
map_location=torch.device('cpu'))
checkpoint = checkpoint['model']
new_checkpoint = {}
for k, v in checkpoint.items():
new_k = k.replace("fused_encoder.", "")
new_k = new_k.replace... | ViT-Adapter-main | detection/mmcv_custom/uniperceiver_converter.py |
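The converter renames checkpoint keys so Uni-Perceiver weights match the adapter backbone's layout. A hedged sketch of the remap-and-save pattern; the replacement rules beyond `fused_encoder.` are elided in the snippet above, so only that one is reproduced, and both file paths below are illustrative:

```python
import torch

ckpt = torch.load('uni-perceiver.pth', map_location='cpu')['model']
# Strip the encoder prefix; further renames are elided in the original file.
new_ckpt = {k.replace('fused_encoder.', ''): v for k, v in ckpt.items()}
torch.save(new_ckpt, 'uni-perceiver-converted.pth')  # illustrative output path
```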
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import datetime
from collections import OrderedDict
import torch
from mmcv.runner import HOOKS, TextLoggerHook
@HOOKS.... | ViT-Adapter-main | detection/mmcv_custom/customized_text.py |
# Copyright (c) OpenMMLab. All rights reserved.
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
custom_hooks = [dict(type='NumClassCheckHook')]
# evaluation = dict(s... | ViT-Adapter-main | detection/configs/_base_/default_runtime.py |
# Copyright (c) OpenMMLab. All rights reserved.
# dataset settings
dataset_type = 'WIDERFaceDataset'
data_root = 'data/WIDERFace/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with... | ViT-Adapter-main | detection/configs/_base_/datasets/wider_face.py |
# Copyright (c) OpenMMLab. All rights reserved.
# dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', ... | ViT-Adapter-main | detection/configs/_base_/datasets/cityscapes_instance.py |
# Copyright (c) OpenMMLab. All rights reserved.
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=Tr... | ViT-Adapter-main | detection/configs/_base_/datasets/coco_detection.py |
# Copyright (c) OpenMMLab. All rights reserved.
# dataset settings
_base_ = 'coco_instance.py'
dataset_type = 'LVISV05Dataset'
data_root = 'data/lvis_v0.5/'
data = dict(samples_per_gpu=2,
workers_per_gpu=2,
train=dict(_delete_=True,
type='ClassBalancedDataset',
... | ViT-Adapter-main | detection/configs/_base_/datasets/lvis_v0.5_instance.py |
# Copyright (c) OpenMMLab. All rights reserved.
# dataset settings
_base_ = 'coco_instance.py'
dataset_type = 'LVISV1Dataset'
data_root = 'data/lvis_v1/'
data = dict(samples_per_gpu=2,
workers_per_gpu=2,
train=dict(_delete_=True,
type='ClassBalancedDataset',
... | ViT-Adapter-main | detection/configs/_base_/datasets/lvis_v1_instance.py |
# Copyright (c) OpenMMLab. All rights reserved.
# dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', ... | ViT-Adapter-main | detection/configs/_base_/datasets/cityscapes_detection.py |
# Copyright (c) OpenMMLab. All rights reserved.
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=Tr... | ViT-Adapter-main | detection/configs/_base_/datasets/coco_instance.py |
# Copyright (c) OpenMMLab. All rights reserved.
# dataset settings
dataset_type = 'CocoPanopticDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadPanopticAnnotation... | ViT-Adapter-main | detection/configs/_base_/datasets/coco_panoptic.py |
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', img_scale... | ViT-Adapter-main | detection/configs/_base_/datasets/coco_instance_augreg.py |
# dataset settings
dataset_type = 'Objects365V2Dataset'
data_root = 'data/Objects365/Obj365_v2/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resi... | ViT-Adapter-main | detection/configs/_base_/datasets/obj365_detection.py |
# Copyright (c) OpenMMLab. All rights reserved.
# dataset settings
dataset_type = 'VOCDataset'
data_root = 'data/VOCdevkit/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbo... | ViT-Adapter-main | detection/configs/_base_/datasets/voc0712.py |
# Copyright (c) OpenMMLab. All rights reserved.
# dataset settings
dataset_type = 'DeepFashionDataset'
data_root = 'data/DeepFashion/In-shop/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnno... | ViT-Adapter-main | detection/configs/_base_/datasets/deepfashion.py |
# model settings
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
type='FasterRCNN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(2, ),
frozen_stages=1,
norm_cfg=norm_cfg,
... | ViT-Adapter-main | detection/configs/_base_/models/faster_rcnn_r50_caffe_c4.py |
# model settings
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
type='MaskRCNN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(2, ),
frozen_stages=1,
norm_cfg=norm_cfg,
... | ViT-Adapter-main | detection/configs/_base_/models/mask_rcnn_r50_caffe_c4.py |
# model settings
input_size = 300
model = dict(
type='SingleStageDetector',
backbone=dict(
type='SSDVGG',
depth=16,
with_last_pool=False,
ceil_mode=True,
out_indices=(3, 4),
out_feature_indices=(22, 34),
init_cfg=dict(
type='Pretrained', checkp... | ViT-Adapter-main | detection/configs/_base_/models/ssd300.py |
# model settings
model = dict(
type='CascadeRCNN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict... | ViT-Adapter-main | detection/configs/_base_/models/cascade_rcnn_r50_fpn.py |
# model settings
model = dict(
type='FastRCNN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(ty... | ViT-Adapter-main | detection/configs/_base_/models/fast_rcnn_r50_fpn.py |
# model settings
model = dict(
type='RPN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='P... | ViT-Adapter-main | detection/configs/_base_/models/rpn_r50_fpn.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# model settings
model = dict(
type='MaskRCNN',
pretrained=None,
backbone=dict(
type='ConvNeXt',
... | ViT-Adapter-main | detection/configs/_base_/models/mask_rcnn_convnext_fpn.py |
# model settings
model = dict(
type='RetinaNet',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(t... | ViT-Adapter-main | detection/configs/_base_/models/retinanet_r50_fpn.py |
# model settings
model = dict(
type='FasterRCNN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(... | ViT-Adapter-main | detection/configs/_base_/models/faster_rcnn_r50_fpn.py |
# model settings
model = dict(
type='RPN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(2, ),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
... | ViT-Adapter-main | detection/configs/_base_/models/rpn_r50_caffe_c4.py |
# model settings
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
type='FasterRCNN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
strides=(1, 2, 2, 1),
dilations=(1, 1, 1, 2),
out_indices=(3, ),
frozen_stages=1,
norm_cfg=norm_... | ViT-Adapter-main | detection/configs/_base_/models/faster_rcnn_r50_caffe_dc5.py |
# model settings
model = dict(
type='CascadeRCNN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict... | ViT-Adapter-main | detection/configs/_base_/models/cascade_mask_rcnn_r50_fpn.py |
# model settings
model = dict(
type='MaskRCNN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(ty... | ViT-Adapter-main | detection/configs/_base_/models/mask_rcnn_r50_fpn.py |
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[8, 11])
runner = dict(type='EpochBasedRunner', max_epochs=12)
| ViT-Adapter-main | detection/configs/_base_/schedules/schedule_1x.py |
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
| ViT-Adapter-main | detection/configs/_base_/schedules/schedule_2x.py |
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[27, 33])
runner = dict(type='EpochBasedRunner', max_epochs=36)
| ViT-Adapter-main | detection/configs/_base_/schedules/schedule_3x.py |
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=2000,
warmup_ratio=0.001,
step=[62, 68])
runner = dict(type='EpochBasedRunner', max_epochs=72)... | ViT-Adapter-main | detection/configs/_base_/schedules/schedule_6x.py |
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[16, 19])
runner = dict(type='EpochBasedRunner', max_epochs=20)
| ViT-Adapter-main | detection/configs/_base_/schedules/schedule_20e.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/cascade_mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_3x.py',
'../_base_/default_runtime.py'
]
# pretrained = 'https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a... | ViT-Adapter-main | detection/configs/cascade_rcnn/cascade_mask_rcnn_deit_adapter_small_fpn_3x_coco.py |
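The experiment configs from here on compose through MMCV's `_base_` inheritance: values from the listed base files are merged, then overridden by the child file, and `_delete_=True` (as in the LVIS dataset rows above) replaces a base key outright. A minimal sketch of loading such a merged config:

```python
from mmcv import Config

cfg = Config.fromfile(
    'configs/cascade_rcnn/cascade_mask_rcnn_deit_adapter_small_fpn_3x_coco.py')
# Unless the child file overrides them, these come from the base configs:
print(cfg.model.type)         # 'CascadeRCNN' from cascade_mask_rcnn_r50_fpn.py
print(cfg.runner.max_epochs)  # 36 from schedule_3x.py
```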
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/cascade_mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_3x.py',
'../_base_/default_runtime.py'
]
# pretrained = 'https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef... | ViT-Adapter-main | detection/configs/cascade_rcnn/cascade_mask_rcnn_deit_adapter_base_fpn_3x_coco.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/cascade_mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_3x.py',
'../_base_/default_runtime.py'
]
# pretrained = 'https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef... | ViT-Adapter-main | detection/configs/cascade_rcnn/cascade_mask_rcnn_deit_base_fpn_3x_coco.py |
_base_ = [
'../_base_/datasets/coco_panoptic.py', '../_base_/default_runtime.py'
]
num_things_classes = 80
num_stuff_classes = 53
num_classes = num_things_classes + num_stuff_classes
# pretrained = 'https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_large_patch16_224_pt1k_ft21k.pth'
pretr... | ViT-Adapter-main | detection/configs/mask2former/mask2former_beitv2_adapter_large_16x1_3x_coco-panoptic.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py',
'../_base_/default_runtime.py'
]
# pretrained = 'https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth'
... | ViT-Adapter-main | detection/configs/mask_rcnn/mask_rcnn_deit_adapter_tiny_fpn_1x_coco.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_3x.py',
'../_base_/default_runtime.py'
]
# pretrained = 'https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth'
... | ViT-Adapter-main | detection/configs/mask_rcnn/mask_rcnn_deit_base_fpn_3x_coco.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_3x.py',
'../_base_/default_runtime.py'
]
# pretrained = 'https://github.com/czczup/ViT-Adapter/releases/download/v0.3.1/' \
# ... | ViT-Adapter-main | detection/configs/mask_rcnn/mask_rcnn_uniperceiver_adapter_base_fpn_3x_coco.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_3x.py',
'../_base_/default_runtime.py'
]
# pretrained = 'https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth'... | ViT-Adapter-main | detection/configs/mask_rcnn/mask_rcnn_deit_adapter_small_3x_coco.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_3x.py',
'../_base_/default_runtime.py'
]
# pretrained = 'https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth'... | ViT-Adapter-main | detection/configs/mask_rcnn/mask_rcnn_deit_small_fpn_3x_coco.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_3x.py',
'../_base_/default_runtime.py'
]
# pretrained = 'https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth'
... | ViT-Adapter-main | detection/configs/mask_rcnn/mask_rcnn_deit_adapter_base_fpn_3x_coco.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_3x.py',
'../_base_/default_runtime.py'
]
# pretrained = 'https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth'... | ViT-Adapter-main | detection/configs/mask_rcnn/mask_rcnn_deit_adapter_small_fpn_3x_coco.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance_augreg.py',
'../_base_/schedules/schedule_3x.py',
'../_base_/default_runtime.py'
]
# pretrained = 'https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-... | ViT-Adapter-main | detection/configs/mask_rcnn/mask_rcnn_augreg_adapter_large_fpn_3x_coco.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_3x.py',
'../_base_/default_runtime.py'
]
# pretrained = 'https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth'
... | ViT-Adapter-main | detection/configs/mask_rcnn/mask_rcnn_deit_adapter_tiny_fpn_3x_coco.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance_augreg.py',
'../_base_/schedules/schedule_3x.py',
'../_base_/default_runtime.py'
]
# pretrained = 'https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-... | ViT-Adapter-main | detection/configs/mask_rcnn/mask_rcnn_augreg_large_fpn_3x_coco.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_3x.py',
'../_base_/default_runtime.py'
]
# pretrained = 'https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth'
... | ViT-Adapter-main | detection/configs/mask_rcnn/mask_rcnn_deit_tiny_fpn_3x_coco.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../../_base_/models/mask_rcnn_r50_fpn.py',
'../../_base_/datasets/coco_instance.py',
'../../_base_/schedules/schedule_3x.py',
'../../_base_/default_runtime.py'
]
# pretrained = 'https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_v... | ViT-Adapter-main | detection/configs/mask_rcnn/dinov2/mask_rcnn_dinov2_adapter_small_fpn_3x_coco.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../../_base_/models/mask_rcnn_r50_fpn.py',
'../../_base_/datasets/coco_instance.py',
'../../_base_/schedules/schedule_3x.py',
'../../_base_/default_runtime.py'
]
# pretrained = 'https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_v... | ViT-Adapter-main | detection/configs/mask_rcnn/dinov2/mask_rcnn_dinov2_adapter_base_fpn_3x_coco.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../../_base_/models/mask_rcnn_r50_fpn.py',
'../../_base_/datasets/coco_instance.py',
'../../_base_/schedules/schedule_3x.py',
'../../_base_/default_runtime.py'
]
# pretrained = 'https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_v... | ViT-Adapter-main | detection/configs/mask_rcnn/dinov2/mask_rcnn_dinov2_adapter_large_fpn_3x_coco.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_3x.py',
'../_base_/default_runtime.py'
]
# pretrained = 'https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth'
pretrained = 'pretrained/deit_small_patch16... | ViT-Adapter-main | detection/configs/sparse_rcnn/sparse_rcnn_deit_adapter_small_fpn_3x_coco.py |
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py',
'../_base_/default_runtime.py'
]
# pretrained = 'https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_base.pth'
p... | ViT-Adapter-main | detection/configs/upgraded_mask_rcnn/mask_rcnn_mae_adapter_base_lsj_fpn_25ep_coco.py |