# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

use strict;
use warnings;
use AI::MXNet::Function::Parameters;

package AI::MXNet::Gluon::ModelZoo::Vision::ResNet::BasicBlockV1;
use AI::MXNet::Gluon::Mouse;
extends 'AI::MXNet::Gluon::HybridBlock';

=head1 NAME

    AI::MXNet::Gluon::ModelZoo::Vision::ResNet::BasicBlockV1 - BasicBlock V1 from "Deep Residual Learning for Image Recognition"
=cut

=head1 DESCRIPTION

    BasicBlock V1 from "Deep Residual Learning for Image Recognition"
    <http://arxiv.org/abs/1512.03385> paper.
    This is used for ResNet V1 for 18, 34 layers.

    Parameters
    ----------
    channels : Int
        Number of output channels.
    stride : Int
        Stride size.
    downsample : Bool, default 0
        Whether to downsample the input.
    in_channels : Int, default 0
        Number of input channels. Default is 0, to infer from the graph.
=cut

has ['channels',
     'stride']    => (is => 'ro', isa => 'Int', required => 1);
has 'downsample'  => (is => 'rw', default => 0);
has 'in_channels' => (is => 'ro', isa => 'Int', default => 0);
method python_constructor_arguments() { [qw/channels stride downsample/] }

# 3x3 convolution with padding 1 and no bias; preserves spatial size at stride 1.
func _conv3x3($channels, $stride, $in_channels)
{
    return nn->Conv2D(
        $channels, kernel_size=>3, strides=>$stride, padding=>1,
        use_bias=>0, in_channels=>$in_channels
    );
}

sub BUILD
{
    my $self = shift;
    # Main branch: conv3x3 -> BN -> ReLU -> conv3x3 -> BN.
    my $body = nn->HybridSequential(prefix=>'');
    $body->add(_conv3x3($self->channels, $self->stride, $self->in_channels));
    $body->add(nn->BatchNorm());
    $body->add(nn->Activation('relu'));
    $body->add(_conv3x3($self->channels, 1, $self->channels));
    $body->add(nn->BatchNorm());
    $self->body($body);
    # 'downsample' arrives as a boolean flag; here it is replaced by the
    # actual shortcut projection sub-network, or undef when the identity
    # shortcut suffices.
    if($self->downsample)
    {
        my $shortcut = nn->HybridSequential(prefix=>'');
        $shortcut->add(
            nn->Conv2D($self->channels, kernel_size=>1, strides=>$self->stride,
                       use_bias=>0, in_channels=>$self->in_channels)
        );
        $shortcut->add(nn->BatchNorm());
        $self->downsample($shortcut);
    }
    else
    {
        $self->downsample(undef);
    }
}

# Forward pass: residual = (possibly projected) input; out = relu(body(x) + residual).
method hybrid_forward(GluonClass $F, GluonInput $x)
{
    my $shortcut = $x;
    $x = $self->body->($x);
    $shortcut = $self->downsample->($shortcut) if defined $self->downsample;
    return $F->Activation($shortcut + $x, act_type=>'relu');
}

package AI::MXNet::Gluon::ModelZoo::Vision::ResNet::BottleneckV1;
use AI::MXNet::Gluon::Mouse;
extends 'AI::MXNet::Gluon::HybridBlock';

=head1 NAME

    AI::MXNet::Gluon::ModelZoo::Vision::ResNet::BottleneckV1 - Bottleneck V1 from "Deep Residual Learning for Image Recognition"
=cut

=head1 DESCRIPTION

    Bottleneck V1 from "Deep Residual Learning for Image Recognition"
    <http://arxiv.org/abs/1512.03385> paper.
    This is used for ResNet V1 for 50, 101, 152 layers.

    Parameters
    ----------
    channels : Int
        Number of output channels.
    stride : Int
        Stride size.
    downsample : Bool, default 0
        Whether to downsample the input.
    in_channels : Int, default 0
        Number of input channels. Default is 0, to infer from the graph.
=cut

has ['channels',
     'stride']    => (is => 'ro', isa => 'Int', required => 1);
has 'downsample'  => (is => 'rw', default => 0);
has 'in_channels' => (is => 'ro', isa => 'Int', default => 0);
method python_constructor_arguments() { [qw/channels stride downsample/] }

# 3x3 convolution with padding 1 and no bias; preserves spatial size at stride 1.
func _conv3x3($channels, $stride, $in_channels)
{
    return nn->Conv2D(
        $channels, kernel_size=>3, strides=>$stride, padding=>1,
        use_bias=>0, in_channels=>$in_channels
    );
}

sub BUILD
{
    my $self = shift;
    # Bottleneck width is a quarter of the output channels.
    my $mid = int($self->channels/4);
    # Main branch: 1x1 reduce -> 3x3 -> 1x1 expand, each followed by BN.
    my $body = nn->HybridSequential(prefix=>'');
    $body->add(nn->Conv2D($mid, kernel_size=>1, strides=>$self->stride));
    $body->add(nn->BatchNorm());
    $body->add(nn->Activation('relu'));
    $body->add(_conv3x3($mid, 1, $mid));
    $body->add(nn->BatchNorm());
    $body->add(nn->Activation('relu'));
    $body->add(nn->Conv2D($self->channels, kernel_size=>1, strides=>1));
    $body->add(nn->BatchNorm());
    $self->body($body);
    # Replace the boolean flag with the actual shortcut projection, or undef.
    if($self->downsample)
    {
        my $shortcut = nn->HybridSequential(prefix=>'');
        $shortcut->add(
            nn->Conv2D($self->channels, kernel_size=>1, strides=>$self->stride,
                       use_bias=>0, in_channels=>$self->in_channels)
        );
        $shortcut->add(nn->BatchNorm());
        $self->downsample($shortcut);
    }
    else
    {
        $self->downsample(undef);
    }
}

# Forward pass: residual = (possibly projected) input; out = relu(body(x) + residual).
method hybrid_forward(GluonClass $F, GluonInput $x)
{
    my $shortcut = $x;
    $x = $self->body->($x);
    $shortcut = $self->downsample->($shortcut) if defined $self->downsample;
    return $F->Activation($shortcut + $x, act_type=>'relu');
}
package AI::MXNet::Gluon::ModelZoo::Vision::ResNet::BasicBlockV2;
use AI::MXNet::Gluon::Mouse;
extends 'AI::MXNet::Gluon::HybridBlock';

=head1 NAME

    AI::MXNet::Gluon::ModelZoo::Vision::ResNet::BasicBlockV2 - BasicBlock V2 from "Identity Mappings in Deep Residual Networks"
=cut

=head1 DESCRIPTION

    BasicBlock V2 from "Identity Mappings in Deep Residual Networks"
    <https://arxiv.org/abs/1603.05027> paper.
    This is used for ResNet V2 for 18, 34 layers.

    Parameters
    ----------
    channels : Int
        Number of output channels.
    stride : Int
        Stride size.
    downsample : Bool, default 0
        Whether to downsample the input.
    in_channels : Int, default 0
        Number of input channels. Default is 0, to infer from the graph.
=cut

has ['channels',
     'stride']    => (is => 'ro', isa => 'Int', required => 1);
has 'downsample'  => (is => 'rw', default => 0);
has 'in_channels' => (is => 'ro', isa => 'Int', default => 0);
method python_constructor_arguments() { [qw/channels stride downsample/] }

# 3x3 convolution with padding 1 and no bias; preserves spatial size at stride 1.
func _conv3x3($channels, $stride, $in_channels)
{
    return nn->Conv2D(
        $channels, kernel_size=>3, strides=>$stride, padding=>1,
        use_bias=>0, in_channels=>$in_channels
    );
}

sub BUILD
{
    my $self = shift;
    # Pre-activation layout: BN and ReLU run before each convolution.
    $self->bn1(nn->BatchNorm());
    $self->conv1(_conv3x3($self->channels, $self->stride, $self->in_channels));
    $self->bn2(nn->BatchNorm());
    $self->conv2(_conv3x3($self->channels, 1, $self->channels));
    # Replace the boolean 'downsample' flag with the actual 1x1 shortcut
    # projection, or undef when the identity shortcut suffices.
    if($self->downsample)
    {
        $self->downsample(
            nn->Conv2D($self->channels, kernel_size=>1, strides=>$self->stride,
                       use_bias=>0, in_channels=>$self->in_channels)
        );
    }
    else
    {
        $self->downsample(undef);
    }
}

# Forward pass (pre-activation): BN -> ReLU -> conv, twice; the shortcut is
# taken from the pre-activated tensor when a projection is needed.
method hybrid_forward(GluonClass $F, GluonInput $x)
{
    my $residual = $x;
    $x = $self->bn1->($x);
    $x = $F->Activation($x, act_type=>'relu');
    if(defined $self->downsample)
    {
        # V2 blocks project the pre-activated input, not the raw input.
        $residual = $self->downsample->($x);
    }
    $x = $self->conv1->($x);

    $x = $self->bn2->($x);
    $x = $F->Activation($x, act_type=>'relu');
    $x = $self->conv2->($x);

    return $x + $residual;
}


package AI::MXNet::Gluon::ModelZoo::Vision::ResNet::BottleneckV2;
use AI::MXNet::Gluon::Mouse;
extends 'AI::MXNet::Gluon::HybridBlock';

=head1 NAME

    AI::MXNet::Gluon::ModelZoo::Vision::ResNet::BottleneckV2 - Bottleneck V2 from "Identity Mappings in Deep Residual Networks"
=cut

=head1 DESCRIPTION

    Bottleneck V2 from "Identity Mappings in Deep Residual Networks"
    <https://arxiv.org/abs/1603.05027> paper.
    This is used for ResNet V2 for 50, 101, 152 layers.

    Parameters
    ----------
    channels : Int
        Number of output channels.
    stride : Int
        Stride size.
    downsample : Bool, default 0
        Whether to downsample the input.
    in_channels : Int, default 0
        Number of input channels. Default is 0, to infer from the graph.
=cut
has ['channels',
     'stride']    => (is => 'ro', isa => 'Int', required => 1);
has 'downsample'  => (is => 'rw', default => 0);
has 'in_channels' => (is => 'ro', isa => 'Int', default => 0);
method python_constructor_arguments() { [qw/channels stride downsample/] }

# 3x3 convolution with padding 1 and no bias; preserves spatial size at stride 1.
func _conv3x3($channels, $stride, $in_channels)
{
    return nn->Conv2D(
        $channels, kernel_size=>3, strides=>$stride, padding=>1,
        use_bias=>0, in_channels=>$in_channels
    );
}

sub BUILD
{
    my $self = shift;
    # Pre-activation bottleneck: BN/ReLU precede each of the three convs
    # (1x1 reduce -> 3x3 -> 1x1 expand); bottleneck width is channels/4.
    $self->bn1(nn->BatchNorm());
    $self->conv1(nn->Conv2D(int($self->channels/4), kernel_size=>1, strides=>1, use_bias=>0));
    $self->bn2(nn->BatchNorm());
    $self->conv2(_conv3x3(int($self->channels/4), $self->stride, int($self->channels/4)));
    $self->bn3(nn->BatchNorm());
    $self->conv3(nn->Conv2D($self->channels, kernel_size=>1, strides=>1, use_bias=>0));
    # Replace the boolean 'downsample' flag with the actual 1x1 shortcut
    # projection, or undef when the identity shortcut suffices.
    if($self->downsample)
    {
        $self->downsample(
            nn->Conv2D($self->channels, kernel_size=>1, strides=>$self->stride,
                       use_bias=>0, in_channels=>$self->in_channels)
        );
    }
    else
    {
        $self->downsample(undef);
    }
}

# Forward pass (pre-activation): BN -> ReLU -> conv, three times; the shortcut
# is taken from the first pre-activated tensor when a projection is needed.
method hybrid_forward(GluonClass $F, GluonInput $x)
{
    my $residual = $x;
    $x = $self->bn1->($x);
    $x = $F->Activation($x, act_type=>'relu');
    if(defined $self->downsample)
    {
        # V2 blocks project the pre-activated input, not the raw input.
        $residual = $self->downsample->($x);
    }
    $x = $self->conv1->($x);

    $x = $self->bn2->($x);
    $x = $F->Activation($x, act_type=>'relu');
    $x = $self->conv2->($x);

    $x = $self->bn3->($x);
    $x = $F->Activation($x, act_type=>'relu');
    $x = $self->conv3->($x);

    return $x + $residual;
}


# Nets
package AI::MXNet::Gluon::ModelZoo::Vision::ResNet::V1;
use AI::MXNet::Gluon::Mouse;
extends 'AI::MXNet::Gluon::HybridBlock';
use AI::MXNet::Base;

=head1 NAME

    AI::MXNet::Gluon::ModelZoo::Vision::ResNet::V1 - ResNet V1 model from "Deep Residual Learning for Image Recognition"
=cut

=head1 DESCRIPTION

    ResNet V1 model from "Deep Residual Learning for Image Recognition"
    <http://arxiv.org/abs/1512.03385> paper.

    Parameters
    ----------
    block : AI::MXNet::Gluon::HybridBlock
        Class for the residual block. Options are AI::MXNet::Gluon::ModelZoo::Vision::ResNet::BasicBlockV1,
        AI::MXNet::Gluon::ModelZoo::Vision::ResNet::BottleneckV1.
    layers : array ref of Int
        Numbers of layers in each block.
    channels : array ref of Int
        Numbers of channels in each block. Length should be one larger than layers list.
    classes : Int, default 1000
        Number of classification classes.
    thumbnail : Bool, default 0
        Enable thumbnail.
=cut

has 'block'     => (is => 'ro', isa => 'Str', required => 1);
has ['layers',
     'channels'] => (is => 'ro', isa => 'ArrayRef[Int]', required => 1);
has 'classes'   => (is => 'ro', isa => 'Int', default => 1000);
has 'thumbnail' => (is => 'ro', isa => 'Bool', default => 0);
method python_constructor_arguments() { [qw/block layers channels classes thumbnail/] }

# 3x3 convolution with padding 1 and no bias; preserves spatial size at stride 1.
func _conv3x3($channels, $stride, $in_channels)
{
    return nn->Conv2D(
        $channels, kernel_size=>3, strides=>$stride, padding=>1,
        use_bias=>0, in_channels=>$in_channels
    );
}

sub BUILD
{
    my $self = shift;
    # channels lists the stem width plus one output width per stage,
    # hence it must be exactly one element longer than layers.
    assert(@{ $self->layers } == (@{ $self->channels } - 1));
    $self->name_scope(sub {
        $self->features(nn->HybridSequential(prefix=>''));
        if($self->thumbnail)
        {
            # Small-image stem (e.g. CIFAR): a single 3x3 conv, no pooling.
            $self->features->add(_conv3x3($self->channels->[0], 1, 0));
        }
        else
        {
            # ImageNet stem: 7x7/2 conv + BN + ReLU + 3x3/2 max-pool.
            $self->features->add(nn->Conv2D($self->channels->[0], 7, 2, 3, use_bias=>0));
            $self->features->add(nn->BatchNorm());
            $self->features->add(nn->Activation('relu'));
            $self->features->add(nn->MaxPool2D(3, 2, 1));
        }
        for(enumerate($self->layers))
        {
            my ($i, $num_layer) = @$_;
            # First stage keeps resolution; later stages halve it.
            my $stride = $i == 0 ? 1 : 2;
            $self->features->add(
                $self->_make_layer(
                    $self->block, $num_layer, $self->channels->[$i+1],
                    $stride, $i+1, in_channels=>$self->channels->[$i]
                )
            );
        }
        $self->features->add(nn->GlobalAvgPool2D());
        $self->output(nn->Dense($self->classes, in_units=>$self->channels->[-1]));
    });
}

# Builds one residual stage: the first block carries the stride and (when the
# channel count changes) a projection shortcut; the remaining blocks are
# stride-1 identity blocks.
method _make_layer($block, $layers, $channels, $stride, $stage_index, :$in_channels=0)
{
    my $layer = nn->HybridSequential(prefix=>"stage${stage_index}_");
    $layer->name_scope(sub {
        $layer->add(
            $block->new(
                $channels, $stride, $channels != $in_channels, in_channels=>$in_channels,
                prefix=>''
            )
        );
        for(1..$layers-1)
        {
            $layer->add($block->new($channels, 1, 0, in_channels=>$channels, prefix=>''));
        }
    });
    return $layer;
}

# Forward pass: feature extractor followed by the classifier head.
method hybrid_forward(GluonClass $F, GluonInput $x)
{
    $x = $self->features->($x);
    $x = $self->output->($x);
    return $x;
}


package AI::MXNet::Gluon::ModelZoo::Vision::ResNet::V2;
use AI::MXNet::Gluon::Mouse;
extends 'AI::MXNet::Gluon::HybridBlock';
use AI::MXNet::Base;

=head1 NAME

    AI::MXNet::Gluon::ModelZoo::Vision::ResNet::V2 - ResNet V2 model from "Identity Mappings in Deep Residual Networks"
=cut

=head1 DESCRIPTION

    ResNet V2 model from "Identity Mappings in Deep Residual Networks"
    <https://arxiv.org/abs/1603.05027> paper.

    Parameters
    ----------
    block : AI::MXNet::Gluon::HybridBlock
        Class for the residual block. Options are AI::MXNet::Gluon::ModelZoo::Vision::ResNet::BasicBlockV2,
        AI::MXNet::Gluon::ModelZoo::Vision::ResNet::BottleneckV2.
    layers : array ref of Int
        Numbers of layers in each block.
    channels : array ref of Int
        Numbers of channels in each block. Length should be one larger than layers list.
    classes : Int, default 1000
        Number of classification classes.
    thumbnail : Bool, default 0
        Enable thumbnail.
=cut
has 'block'     => (is => 'ro', isa => 'Str', required => 1);
has ['layers',
     'channels'] => (is => 'ro', isa => 'ArrayRef[Int]', required => 1);
has 'classes'   => (is => 'ro', isa => 'Int', default => 1000);
has 'thumbnail' => (is => 'ro', isa => 'Bool', default => 0);
method python_constructor_arguments() { [qw/block layers channels classes thumbnail/] }

# 3x3 convolution with padding 1 and no bias; preserves spatial size at stride 1.
func _conv3x3($channels, $stride, $in_channels)
{
    return nn->Conv2D(
        $channels, kernel_size=>3, strides=>$stride, padding=>1,
        use_bias=>0, in_channels=>$in_channels
    );
}

sub BUILD
{
    my $self = shift;
    # channels lists the stem width plus one output width per stage,
    # hence it must be exactly one element longer than layers.
    assert(@{ $self->layers } == (@{ $self->channels } - 1));
    $self->name_scope(sub {
        $self->features(nn->HybridSequential(prefix=>''));
        # Parameter-free BN normalizes the raw input (pre-activation design).
        $self->features->add(nn->BatchNorm(scale=>0, center=>0));
        if($self->thumbnail)
        {
            # Small-image stem (e.g. CIFAR): a single 3x3 conv, no pooling.
            $self->features->add(_conv3x3($self->channels->[0], 1, 0));
        }
        else
        {
            # ImageNet stem: 7x7/2 conv + BN + ReLU + 3x3/2 max-pool.
            $self->features->add(nn->Conv2D($self->channels->[0], 7, 2, 3, use_bias=>0));
            $self->features->add(nn->BatchNorm());
            $self->features->add(nn->Activation('relu'));
            $self->features->add(nn->MaxPool2D(3, 2, 1));
        }
        my $in_channels = $self->channels->[0];
        for(enumerate($self->layers))
        {
            my ($i, $num_layer) = @$_;
            # First stage keeps resolution; later stages halve it.
            my $stride = $i == 0 ? 1 : 2;
            $self->features->add(
                $self->_make_layer(
                    $self->block, $num_layer, $self->channels->[$i+1],
                    $stride, $i+1, in_channels=>$in_channels
                )
            );
            $in_channels = $self->channels->[$i+1];
        }
        # V2 nets need a final BN + ReLU because blocks end without activation.
        $self->features->add(nn->BatchNorm());
        $self->features->add(nn->Activation('relu'));
        $self->features->add(nn->GlobalAvgPool2D());
        $self->features->add(nn->Flatten());
        $self->output(nn->Dense($self->classes, in_units=>$in_channels));
    });
}

# Builds one residual stage: the first block carries the stride and (when the
# channel count changes) a projection shortcut; the remaining blocks are
# stride-1 identity blocks.
method _make_layer($block, $layers, $channels, $stride, $stage_index, :$in_channels=0)
{
    my $layer = nn->HybridSequential(prefix=>"stage${stage_index}_");
    $layer->name_scope(sub {
        $layer->add(
            $block->new(
                $channels, $stride, $channels != $in_channels, in_channels=>$in_channels,
                prefix=>''
            )
        );
        for(1..$layers-1)
        {
            $layer->add($block->new($channels, 1, 0, in_channels=>$channels, prefix=>''));
        }
    });
    return $layer;
}

# Forward pass: feature extractor followed by the classifier head.
method hybrid_forward(GluonClass $F, GluonInput $x)
{
    $x = $self->features->($x);
    $x = $self->output->($x);
    return $x;
}

package AI::MXNet::Gluon::ModelZoo::Vision;

# Specification: depth => [block type, blocks per stage, channels per stage].
my %resnet_spec = (
    18  => ['basic_block', [2, 2, 2, 2],  [64, 64, 128, 256, 512]],
    34  => ['basic_block', [3, 4, 6, 3],  [64, 64, 128, 256, 512]],
    50  => ['bottle_neck', [3, 4, 6, 3],  [64, 256, 512, 1024, 2048]],
    101 => ['bottle_neck', [3, 4, 23, 3], [64, 256, 512, 1024, 2048]],
    152 => ['bottle_neck', [3, 8, 36, 3], [64, 256, 512, 1024, 2048]]
);

my @resnet_net_versions = qw(AI::MXNet::Gluon::ModelZoo::Vision::ResNet::V1 AI::MXNet::Gluon::ModelZoo::Vision::ResNet::V2);
my @resnet_block_versions = (
    {
        basic_block => 'AI::MXNet::Gluon::ModelZoo::Vision::ResNet::BasicBlockV1',
        bottle_neck => 'AI::MXNet::Gluon::ModelZoo::Vision::ResNet::BottleneckV1'
    },
    {
        basic_block => 'AI::MXNet::Gluon::ModelZoo::Vision::ResNet::BasicBlockV2',
        bottle_neck => 'AI::MXNet::Gluon::ModelZoo::Vision::ResNet::BottleneckV2'
    },
);

=head2 get_resnet

    ResNet V1 model from "Deep Residual Learning for Image Recognition"
    <http://arxiv.org/abs/1512.03385> paper.
    ResNet V2 model from "Identity Mappings in Deep Residual Networks"
    <https://arxiv.org/abs/1603.05027> paper.

    Parameters
    ----------
    $version : Int
        Version of ResNet. Options are 1, 2.
    $num_layers : Int
        Numbers of layers. Options are 18, 34, 50, 101, 152.
    :$pretrained : Bool, default 0
        Whether to load the pretrained weights for model.
    :$ctx : AI::MXNet::Context, default CPU
        The context in which to load the pretrained weights.
    :$root : Str, default '~/.mxnet/models'
        Location for keeping the model parameters.
=cut

# Constructor
method get_resnet(
    Int $version, Int $num_layers, Bool :$pretrained=0,
    AI::MXNet::Context :$ctx=AI::MXNet::Context->cpu(),
    Str :$root='~/.mxnet/models',
    Maybe[Int] :$classes=,
    Maybe[Bool] :$thumbnail=
)
{
    # Validate before dereferencing: an unknown depth would otherwise crash
    # with an unhelpful undef-dereference error.
    confess("invalid number of layers [$num_layers], can be 18, 34, 50, 101, 152")
        unless exists $resnet_spec{$num_layers};
    # Check the version explicitly: $version == 0 would silently pick the
    # last element via Perl's negative-index wraparound ($array[-1]).
    confess("invalid resnet version [$version], can be 1, 2")
        unless $version == 1 or $version == 2;
    my ($block_type, $layers, $channels) = @{ $resnet_spec{$num_layers} };
    my $resnet_class = $resnet_net_versions[$version-1];
    my $block_class  = $resnet_block_versions[$version-1]{$block_type};
    my $net = $resnet_class->new(
        $block_class, $layers, $channels,
        (defined($classes)   ? (classes   => $classes)   : ()),
        (defined($thumbnail) ? (thumbnail => $thumbnail) : ())
    );
    if($pretrained)
    {
        $net->load_parameters(
            AI::MXNet::Gluon::ModelZoo::ModelStore->get_model_file(
                "resnet${num_layers}_v$version",
                root=>$root
            ),
            ctx=>$ctx
        );
    }
    return $net;
}

=head2 resnet18_v1

    ResNet-18 V1 model from "Deep Residual Learning for Image Recognition"
    <http://arxiv.org/abs/1512.03385> paper.

    Parameters
    ----------
    :$pretrained : Bool, default 0
        Whether to load the pretrained weights for model.
    :$ctx : AI::MXNet::Context, default CPU
        The context in which to load the pretrained weights.
    :$root : Str, default '~/.mxnet/models'
        Location for keeping the model parameters.
=cut
method resnet18_v1(%kwargs)
{
    return __PACKAGE__->get_resnet(1, 18, %kwargs);
}

=head2 resnet34_v1

    ResNet-34 V1 model from "Deep Residual Learning for Image Recognition"
    <http://arxiv.org/abs/1512.03385> paper.

    Parameters
    ----------
    :$pretrained : Bool, default 0
        Whether to load the pretrained weights for model.
    :$ctx : AI::MXNet::Context, default CPU
        The context in which to load the pretrained weights.
    :$root : Str, default '~/.mxnet/models'
        Location for keeping the model parameters.
=cut

method resnet34_v1(%kwargs)
{
    return __PACKAGE__->get_resnet(1, 34, %kwargs);
}

=head2 resnet50_v1

    ResNet-50 V1 model from "Deep Residual Learning for Image Recognition"
    <http://arxiv.org/abs/1512.03385> paper.

    Parameters
    ----------
    :$pretrained : Bool, default 0
        Whether to load the pretrained weights for model.
    :$ctx : AI::MXNet::Context, default CPU
        The context in which to load the pretrained weights.
    :$root : Str, default '~/.mxnet/models'
        Location for keeping the model parameters.
=cut

method resnet50_v1(%kwargs)
{
    return __PACKAGE__->get_resnet(1, 50, %kwargs);
}

=head2 resnet101_v1

    ResNet-101 V1 model from "Deep Residual Learning for Image Recognition"
    <http://arxiv.org/abs/1512.03385> paper.

    Parameters
    ----------
    :$pretrained : Bool, default 0
        Whether to load the pretrained weights for model.
    :$ctx : AI::MXNet::Context, default CPU
        The context in which to load the pretrained weights.
    :$root : Str, default '~/.mxnet/models'
        Location for keeping the model parameters.
=cut

method resnet101_v1(%kwargs)
{
    return __PACKAGE__->get_resnet(1, 101, %kwargs);
}

=head2 resnet152_v1

    ResNet-152 V1 model from "Deep Residual Learning for Image Recognition"
    <http://arxiv.org/abs/1512.03385> paper.

    Parameters
    ----------
    :$pretrained : Bool, default 0
        Whether to load the pretrained weights for model.
    :$ctx : AI::MXNet::Context, default CPU
        The context in which to load the pretrained weights.
    :$root : Str, default '~/.mxnet/models'
        Location for keeping the model parameters.
=cut

method resnet152_v1(%kwargs)
{
    return __PACKAGE__->get_resnet(1, 152, %kwargs);
}

=head2 resnet18_v2

    ResNet-18 V2 model from "Identity Mappings in Deep Residual Networks"
    <https://arxiv.org/abs/1603.05027> paper.

    Parameters
    ----------
    :$pretrained : Bool, default 0
        Whether to load the pretrained weights for model.
    :$ctx : AI::MXNet::Context, default CPU
        The context in which to load the pretrained weights.
    :$root : Str, default '~/.mxnet/models'
        Location for keeping the model parameters.
=cut

method resnet18_v2(%kwargs)
{
    return __PACKAGE__->get_resnet(2, 18, %kwargs);
}

=head2 resnet34_v2

    ResNet-34 V2 model from "Identity Mappings in Deep Residual Networks"
    <https://arxiv.org/abs/1603.05027> paper.

    Parameters
    ----------
    :$pretrained : Bool, default 0
        Whether to load the pretrained weights for model.
    :$ctx : AI::MXNet::Context, default CPU
        The context in which to load the pretrained weights.
    :$root : Str, default '~/.mxnet/models'
        Location for keeping the model parameters.
=cut

method resnet34_v2(%kwargs)
{
    return __PACKAGE__->get_resnet(2, 34, %kwargs);
}

=head2 resnet50_v2

    ResNet-50 V2 model from "Identity Mappings in Deep Residual Networks"
    <https://arxiv.org/abs/1603.05027> paper.

    Parameters
    ----------
    :$pretrained : Bool, default 0
        Whether to load the pretrained weights for model.
    :$ctx : AI::MXNet::Context, default CPU
        The context in which to load the pretrained weights.
    :$root : Str, default '~/.mxnet/models'
        Location for keeping the model parameters.
=cut

method resnet50_v2(%kwargs)
{
    return __PACKAGE__->get_resnet(2, 50, %kwargs);
}

=head2 resnet101_v2

    ResNet-101 V2 model from "Identity Mappings in Deep Residual Networks"
    <https://arxiv.org/abs/1603.05027> paper.

    Parameters
    ----------
    :$pretrained : Bool, default 0
        Whether to load the pretrained weights for model.
    :$ctx : AI::MXNet::Context, default CPU
        The context in which to load the pretrained weights.
    :$root : Str, default '~/.mxnet/models'
        Location for keeping the model parameters.
=cut

method resnet101_v2(%kwargs)
{
    return __PACKAGE__->get_resnet(2, 101, %kwargs);
}

=head2 resnet152_v2

    ResNet-152 V2 model from "Identity Mappings in Deep Residual Networks"
    <https://arxiv.org/abs/1603.05027> paper.

    Parameters
    ----------
    :$pretrained : Bool, default 0
        Whether to load the pretrained weights for model.
    :$ctx : AI::MXNet::Context, default CPU
        The context in which to load the pretrained weights.
    :$root : Str, default '~/.mxnet/models'
        Location for keeping the model parameters.
=cut

method resnet152_v2(%kwargs)
{
    return __PACKAGE__->get_resnet(2, 152, %kwargs);
}

1;