
Source code for mmpretrain.models.backbones.alexnet

# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn

from mmpretrain.registry import MODELS
from .base_backbone import BaseBackbone


@MODELS.register_module()
class AlexNet(BaseBackbone):
    """`AlexNet <https://en.wikipedia.org/wiki/AlexNet>`_ backbone.

    The input for AlexNet is a 224x224 RGB image.

    Args:
        num_classes (int): number of classes for classification.
            The default value is -1, which uses the backbone as a
            feature extractor without the top classifier.
    """

    def __init__(self, num_classes=-1):
        super(AlexNet, self).__init__()
        self.num_classes = num_classes
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        if self.num_classes > 0:
            self.classifier = nn.Sequential(
                nn.Dropout(),
                nn.Linear(256 * 6 * 6, 4096),
                nn.ReLU(inplace=True),
                nn.Dropout(),
                nn.Linear(4096, 4096),
                nn.ReLU(inplace=True),
                nn.Linear(4096, num_classes),
            )

    def forward(self, x):

        x = self.features(x)
        if self.num_classes > 0:
            x = x.view(x.size(0), 256 * 6 * 6)
            x = self.classifier(x)

        return (x, )
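
A minimal usage sketch, not part of the module above: since the class is decorated with ``@MODELS.register_module()``, it can be built from a config dict through the MODELS registry. The config keys below other than ``type`` and ``num_classes`` are not used here and are omitted; the expected output shapes follow from the layer definitions in ``features`` (a 256x6x6 feature map for a 224x224 input) and from ``forward`` returning a one-element tuple.

# Hypothetical usage example, assuming mmpretrain is installed.
import torch

from mmpretrain.registry import MODELS

# Feature-extractor mode (num_classes=-1, the default): forward returns
# a 1-tuple holding the 256x6x6 convolutional feature map.
backbone = MODELS.build(dict(type='AlexNet'))
feats = backbone(torch.rand(1, 3, 224, 224))
print(feats[0].shape)  # torch.Size([1, 256, 6, 6])

# With the top classifier (num_classes > 0): forward returns a 1-tuple
# of class scores instead.
backbone = MODELS.build(dict(type='AlexNet', num_classes=10))
scores = backbone(torch.rand(1, 3, 224, 224))
print(scores[0].shape)  # torch.Size([1, 10])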