1package mock 2 3import ( 4 "fmt" 5 "time" 6 7 "github.com/hashicorp/nomad/helper" 8 "github.com/hashicorp/nomad/helper/envoy" 9 "github.com/hashicorp/nomad/helper/uuid" 10 "github.com/hashicorp/nomad/nomad/structs" 11 psstructs "github.com/hashicorp/nomad/plugins/shared/structs" 12) 13 14func Node() *structs.Node { 15 node := &structs.Node{ 16 ID: uuid.Generate(), 17 SecretID: uuid.Generate(), 18 Datacenter: "dc1", 19 Name: "foobar", 20 Drivers: map[string]*structs.DriverInfo{ 21 "exec": { 22 Detected: true, 23 Healthy: true, 24 }, 25 "mock_driver": { 26 Detected: true, 27 Healthy: true, 28 }, 29 }, 30 Attributes: map[string]string{ 31 "kernel.name": "linux", 32 "arch": "x86", 33 "nomad.version": "0.5.0", 34 "driver.exec": "1", 35 "driver.mock_driver": "1", 36 }, 37 38 // TODO Remove once clientv2 gets merged 39 Resources: &structs.Resources{ 40 CPU: 4000, 41 MemoryMB: 8192, 42 DiskMB: 100 * 1024, 43 }, 44 Reserved: &structs.Resources{ 45 CPU: 100, 46 MemoryMB: 256, 47 DiskMB: 4 * 1024, 48 Networks: []*structs.NetworkResource{ 49 { 50 Device: "eth0", 51 IP: "192.168.0.100", 52 ReservedPorts: []structs.Port{{Label: "ssh", Value: 22}}, 53 MBits: 1, 54 }, 55 }, 56 }, 57 58 NodeResources: &structs.NodeResources{ 59 Cpu: structs.NodeCpuResources{ 60 CpuShares: 4000, 61 }, 62 Memory: structs.NodeMemoryResources{ 63 MemoryMB: 8192, 64 }, 65 Disk: structs.NodeDiskResources{ 66 DiskMB: 100 * 1024, 67 }, 68 Networks: []*structs.NetworkResource{ 69 { 70 Mode: "host", 71 Device: "eth0", 72 CIDR: "192.168.0.100/32", 73 MBits: 1000, 74 }, 75 }, 76 NodeNetworks: []*structs.NodeNetworkResource{ 77 { 78 Mode: "host", 79 Device: "eth0", 80 Speed: 1000, 81 Addresses: []structs.NodeNetworkAddress{ 82 { 83 Alias: "default", 84 Address: "192.168.0.100", 85 Family: structs.NodeNetworkAF_IPv4, 86 }, 87 }, 88 }, 89 }, 90 }, 91 ReservedResources: &structs.NodeReservedResources{ 92 Cpu: structs.NodeReservedCpuResources{ 93 CpuShares: 100, 94 }, 95 Memory: 
structs.NodeReservedMemoryResources{ 96 MemoryMB: 256, 97 }, 98 Disk: structs.NodeReservedDiskResources{ 99 DiskMB: 4 * 1024, 100 }, 101 Networks: structs.NodeReservedNetworkResources{ 102 ReservedHostPorts: "22", 103 }, 104 }, 105 Links: map[string]string{ 106 "consul": "foobar.dc1", 107 }, 108 Meta: map[string]string{ 109 "pci-dss": "true", 110 "database": "mysql", 111 "version": "5.6", 112 }, 113 NodeClass: "linux-medium-pci", 114 Status: structs.NodeStatusReady, 115 SchedulingEligibility: structs.NodeSchedulingEligible, 116 } 117 node.ComputeClass() 118 return node 119} 120 121func DrainNode() *structs.Node { 122 node := Node() 123 node.DrainStrategy = &structs.DrainStrategy{ 124 DrainSpec: structs.DrainSpec{}, 125 } 126 node.Canonicalize() 127 return node 128} 129 130// NvidiaNode returns a node with two instances of an Nvidia GPU 131func NvidiaNode() *structs.Node { 132 n := Node() 133 n.NodeResources.Devices = []*structs.NodeDeviceResource{ 134 { 135 Type: "gpu", 136 Vendor: "nvidia", 137 Name: "1080ti", 138 Attributes: map[string]*psstructs.Attribute{ 139 "memory": psstructs.NewIntAttribute(11, psstructs.UnitGiB), 140 "cuda_cores": psstructs.NewIntAttribute(3584, ""), 141 "graphics_clock": psstructs.NewIntAttribute(1480, psstructs.UnitMHz), 142 "memory_bandwidth": psstructs.NewIntAttribute(11, psstructs.UnitGBPerS), 143 }, 144 Instances: []*structs.NodeDevice{ 145 { 146 ID: uuid.Generate(), 147 Healthy: true, 148 }, 149 { 150 ID: uuid.Generate(), 151 Healthy: true, 152 }, 153 }, 154 }, 155 } 156 n.ComputeClass() 157 return n 158} 159 160func HCL() string { 161 return `job "my-job" { 162 datacenters = ["dc1"] 163 type = "service" 164 constraint { 165 attribute = "${attr.kernel.name}" 166 value = "linux" 167 } 168 169 group "web" { 170 count = 10 171 restart { 172 attempts = 3 173 interval = "10m" 174 delay = "1m" 175 mode = "delay" 176 } 177 task "web" { 178 driver = "exec" 179 config { 180 command = "/bin/date" 181 } 182 resources { 183 cpu = 500 184 memory 
= 256 185 } 186 } 187 } 188} 189` 190} 191 192func Job() *structs.Job { 193 job := &structs.Job{ 194 Region: "global", 195 ID: fmt.Sprintf("mock-service-%s", uuid.Generate()), 196 Name: "my-job", 197 Namespace: structs.DefaultNamespace, 198 Type: structs.JobTypeService, 199 Priority: 50, 200 AllAtOnce: false, 201 Datacenters: []string{"dc1"}, 202 Constraints: []*structs.Constraint{ 203 { 204 LTarget: "${attr.kernel.name}", 205 RTarget: "linux", 206 Operand: "=", 207 }, 208 }, 209 TaskGroups: []*structs.TaskGroup{ 210 { 211 Name: "web", 212 Count: 10, 213 EphemeralDisk: &structs.EphemeralDisk{ 214 SizeMB: 150, 215 }, 216 RestartPolicy: &structs.RestartPolicy{ 217 Attempts: 3, 218 Interval: 10 * time.Minute, 219 Delay: 1 * time.Minute, 220 Mode: structs.RestartPolicyModeDelay, 221 }, 222 ReschedulePolicy: &structs.ReschedulePolicy{ 223 Attempts: 2, 224 Interval: 10 * time.Minute, 225 Delay: 5 * time.Second, 226 DelayFunction: "constant", 227 }, 228 Migrate: structs.DefaultMigrateStrategy(), 229 Networks: []*structs.NetworkResource{ 230 { 231 Mode: "host", 232 DynamicPorts: []structs.Port{ 233 {Label: "http"}, 234 {Label: "admin"}, 235 }, 236 }, 237 }, 238 Tasks: []*structs.Task{ 239 { 240 Name: "web", 241 Driver: "exec", 242 Config: map[string]interface{}{ 243 "command": "/bin/date", 244 }, 245 Env: map[string]string{ 246 "FOO": "bar", 247 }, 248 Services: []*structs.Service{ 249 { 250 Name: "${TASK}-frontend", 251 PortLabel: "http", 252 Tags: []string{"pci:${meta.pci-dss}", "datacenter:${node.datacenter}"}, 253 Checks: []*structs.ServiceCheck{ 254 { 255 Name: "check-table", 256 Type: structs.ServiceCheckScript, 257 Command: "/usr/local/check-table-${meta.database}", 258 Args: []string{"${meta.version}"}, 259 Interval: 30 * time.Second, 260 Timeout: 5 * time.Second, 261 }, 262 }, 263 }, 264 { 265 Name: "${TASK}-admin", 266 PortLabel: "admin", 267 }, 268 }, 269 LogConfig: structs.DefaultLogConfig(), 270 Resources: &structs.Resources{ 271 CPU: 500, 272 MemoryMB: 256, 
273 }, 274 Meta: map[string]string{ 275 "foo": "bar", 276 }, 277 }, 278 }, 279 Meta: map[string]string{ 280 "elb_check_type": "http", 281 "elb_check_interval": "30s", 282 "elb_check_min": "3", 283 }, 284 }, 285 }, 286 Meta: map[string]string{ 287 "owner": "armon", 288 }, 289 Status: structs.JobStatusPending, 290 Version: 0, 291 CreateIndex: 42, 292 ModifyIndex: 99, 293 JobModifyIndex: 99, 294 } 295 job.Canonicalize() 296 return job 297} 298 299func LifecycleSideTask(resources structs.Resources, i int) *structs.Task { 300 return &structs.Task{ 301 Name: fmt.Sprintf("side-%d", i), 302 Driver: "exec", 303 Config: map[string]interface{}{ 304 "command": "/bin/date", 305 }, 306 Lifecycle: &structs.TaskLifecycleConfig{ 307 Hook: structs.TaskLifecycleHookPrestart, 308 Sidecar: true, 309 }, 310 LogConfig: structs.DefaultLogConfig(), 311 Resources: &resources, 312 } 313} 314 315func LifecycleInitTask(resources structs.Resources, i int) *structs.Task { 316 return &structs.Task{ 317 Name: fmt.Sprintf("init-%d", i), 318 Driver: "exec", 319 Config: map[string]interface{}{ 320 "command": "/bin/date", 321 }, 322 Lifecycle: &structs.TaskLifecycleConfig{ 323 Hook: structs.TaskLifecycleHookPrestart, 324 Sidecar: false, 325 }, 326 LogConfig: structs.DefaultLogConfig(), 327 Resources: &resources, 328 } 329} 330 331func LifecycleMainTask(resources structs.Resources, i int) *structs.Task { 332 return &structs.Task{ 333 Name: fmt.Sprintf("main-%d", i), 334 Driver: "exec", 335 Config: map[string]interface{}{ 336 "command": "/bin/date", 337 }, 338 LogConfig: structs.DefaultLogConfig(), 339 Resources: &resources, 340 } 341} 342func VariableLifecycleJob(resources structs.Resources, main int, init int, side int) *structs.Job { 343 tasks := []*structs.Task{} 344 for i := 0; i < main; i++ { 345 tasks = append(tasks, LifecycleMainTask(resources, i)) 346 } 347 for i := 0; i < init; i++ { 348 tasks = append(tasks, LifecycleInitTask(resources, i)) 349 } 350 for i := 0; i < side; i++ { 351 tasks = 
append(tasks, LifecycleSideTask(resources, i)) 352 } 353 job := &structs.Job{ 354 Region: "global", 355 ID: fmt.Sprintf("mock-service-%s", uuid.Generate()), 356 Name: "my-job", 357 Namespace: structs.DefaultNamespace, 358 Type: structs.JobTypeService, 359 Priority: 50, 360 AllAtOnce: false, 361 Datacenters: []string{"dc1"}, 362 Constraints: []*structs.Constraint{ 363 { 364 LTarget: "${attr.kernel.name}", 365 RTarget: "linux", 366 Operand: "=", 367 }, 368 }, 369 TaskGroups: []*structs.TaskGroup{ 370 { 371 Name: "web", 372 Count: 1, 373 Tasks: tasks, 374 }, 375 }, 376 Meta: map[string]string{ 377 "owner": "armon", 378 }, 379 Status: structs.JobStatusPending, 380 Version: 0, 381 CreateIndex: 42, 382 ModifyIndex: 99, 383 JobModifyIndex: 99, 384 } 385 job.Canonicalize() 386 return job 387} 388 389func LifecycleJob() *structs.Job { 390 job := &structs.Job{ 391 Region: "global", 392 ID: fmt.Sprintf("mock-service-%s", uuid.Generate()), 393 Name: "my-job", 394 Namespace: structs.DefaultNamespace, 395 Type: structs.JobTypeBatch, 396 Priority: 50, 397 AllAtOnce: false, 398 Datacenters: []string{"dc1"}, 399 Constraints: []*structs.Constraint{ 400 { 401 LTarget: "${attr.kernel.name}", 402 RTarget: "linux", 403 Operand: "=", 404 }, 405 }, 406 TaskGroups: []*structs.TaskGroup{ 407 { 408 Name: "web", 409 Count: 1, 410 RestartPolicy: &structs.RestartPolicy{ 411 Attempts: 0, 412 Interval: 10 * time.Minute, 413 Delay: 1 * time.Minute, 414 Mode: structs.RestartPolicyModeFail, 415 }, 416 Tasks: []*structs.Task{ 417 { 418 Name: "web", 419 Driver: "mock_driver", 420 Config: map[string]interface{}{ 421 "run_for": "1s", 422 }, 423 LogConfig: structs.DefaultLogConfig(), 424 Resources: &structs.Resources{ 425 CPU: 1000, 426 MemoryMB: 256, 427 }, 428 }, 429 { 430 Name: "side", 431 Driver: "mock_driver", 432 Config: map[string]interface{}{ 433 "run_for": "1s", 434 }, 435 Lifecycle: &structs.TaskLifecycleConfig{ 436 Hook: structs.TaskLifecycleHookPrestart, 437 Sidecar: true, 438 }, 439 
LogConfig: structs.DefaultLogConfig(), 440 Resources: &structs.Resources{ 441 CPU: 1000, 442 MemoryMB: 256, 443 }, 444 }, 445 { 446 Name: "init", 447 Driver: "mock_driver", 448 Config: map[string]interface{}{ 449 "run_for": "1s", 450 }, 451 Lifecycle: &structs.TaskLifecycleConfig{ 452 Hook: structs.TaskLifecycleHookPrestart, 453 Sidecar: false, 454 }, 455 LogConfig: structs.DefaultLogConfig(), 456 Resources: &structs.Resources{ 457 CPU: 1000, 458 MemoryMB: 256, 459 }, 460 }, 461 }, 462 }, 463 }, 464 Meta: map[string]string{ 465 "owner": "armon", 466 }, 467 Status: structs.JobStatusPending, 468 Version: 0, 469 CreateIndex: 42, 470 ModifyIndex: 99, 471 JobModifyIndex: 99, 472 } 473 job.Canonicalize() 474 return job 475} 476 477func LifecycleAlloc() *structs.Allocation { 478 alloc := &structs.Allocation{ 479 ID: uuid.Generate(), 480 EvalID: uuid.Generate(), 481 NodeID: "12345678-abcd-efab-cdef-123456789abc", 482 Namespace: structs.DefaultNamespace, 483 TaskGroup: "web", 484 485 // TODO Remove once clientv2 gets merged 486 Resources: &structs.Resources{ 487 CPU: 500, 488 MemoryMB: 256, 489 }, 490 TaskResources: map[string]*structs.Resources{ 491 "web": { 492 CPU: 1000, 493 MemoryMB: 256, 494 }, 495 "init": { 496 CPU: 1000, 497 MemoryMB: 256, 498 }, 499 "side": { 500 CPU: 1000, 501 MemoryMB: 256, 502 }, 503 }, 504 505 AllocatedResources: &structs.AllocatedResources{ 506 Tasks: map[string]*structs.AllocatedTaskResources{ 507 "web": { 508 Cpu: structs.AllocatedCpuResources{ 509 CpuShares: 1000, 510 }, 511 Memory: structs.AllocatedMemoryResources{ 512 MemoryMB: 256, 513 }, 514 }, 515 "init": { 516 Cpu: structs.AllocatedCpuResources{ 517 CpuShares: 1000, 518 }, 519 Memory: structs.AllocatedMemoryResources{ 520 MemoryMB: 256, 521 }, 522 }, 523 "side": { 524 Cpu: structs.AllocatedCpuResources{ 525 CpuShares: 1000, 526 }, 527 Memory: structs.AllocatedMemoryResources{ 528 MemoryMB: 256, 529 }, 530 }, 531 }, 532 }, 533 Job: LifecycleJob(), 534 DesiredStatus: 
structs.AllocDesiredStatusRun, 535 ClientStatus: structs.AllocClientStatusPending, 536 } 537 alloc.JobID = alloc.Job.ID 538 return alloc 539} 540 541func LifecycleJobWithPoststopDeploy() *structs.Job { 542 job := &structs.Job{ 543 Region: "global", 544 ID: fmt.Sprintf("mock-service-%s", uuid.Generate()), 545 Name: "my-job", 546 Namespace: structs.DefaultNamespace, 547 Type: structs.JobTypeBatch, 548 Priority: 50, 549 AllAtOnce: false, 550 Datacenters: []string{"dc1"}, 551 Constraints: []*structs.Constraint{ 552 { 553 LTarget: "${attr.kernel.name}", 554 RTarget: "linux", 555 Operand: "=", 556 }, 557 }, 558 TaskGroups: []*structs.TaskGroup{ 559 { 560 Name: "web", 561 Count: 1, 562 Migrate: structs.DefaultMigrateStrategy(), 563 RestartPolicy: &structs.RestartPolicy{ 564 Attempts: 0, 565 Interval: 10 * time.Minute, 566 Delay: 1 * time.Minute, 567 Mode: structs.RestartPolicyModeFail, 568 }, 569 Tasks: []*structs.Task{ 570 { 571 Name: "web", 572 Driver: "mock_driver", 573 Config: map[string]interface{}{ 574 "run_for": "1s", 575 }, 576 LogConfig: structs.DefaultLogConfig(), 577 Resources: &structs.Resources{ 578 CPU: 1000, 579 MemoryMB: 256, 580 }, 581 }, 582 { 583 Name: "side", 584 Driver: "mock_driver", 585 Config: map[string]interface{}{ 586 "run_for": "1s", 587 }, 588 Lifecycle: &structs.TaskLifecycleConfig{ 589 Hook: structs.TaskLifecycleHookPrestart, 590 Sidecar: true, 591 }, 592 LogConfig: structs.DefaultLogConfig(), 593 Resources: &structs.Resources{ 594 CPU: 1000, 595 MemoryMB: 256, 596 }, 597 }, 598 { 599 Name: "post", 600 Driver: "mock_driver", 601 Config: map[string]interface{}{ 602 "run_for": "1s", 603 }, 604 Lifecycle: &structs.TaskLifecycleConfig{ 605 Hook: structs.TaskLifecycleHookPoststop, 606 }, 607 LogConfig: structs.DefaultLogConfig(), 608 Resources: &structs.Resources{ 609 CPU: 1000, 610 MemoryMB: 256, 611 }, 612 }, 613 { 614 Name: "init", 615 Driver: "mock_driver", 616 Config: map[string]interface{}{ 617 "run_for": "1s", 618 }, 619 Lifecycle: 
&structs.TaskLifecycleConfig{ 620 Hook: structs.TaskLifecycleHookPrestart, 621 Sidecar: false, 622 }, 623 LogConfig: structs.DefaultLogConfig(), 624 Resources: &structs.Resources{ 625 CPU: 1000, 626 MemoryMB: 256, 627 }, 628 }, 629 }, 630 }, 631 }, 632 Meta: map[string]string{ 633 "owner": "armon", 634 }, 635 Status: structs.JobStatusPending, 636 Version: 0, 637 CreateIndex: 42, 638 ModifyIndex: 99, 639 JobModifyIndex: 99, 640 } 641 job.Canonicalize() 642 return job 643} 644 645func LifecycleAllocWithPoststopDeploy() *structs.Allocation { 646 alloc := &structs.Allocation{ 647 ID: uuid.Generate(), 648 EvalID: uuid.Generate(), 649 NodeID: "12345678-abcd-efab-cdef-123456789abc", 650 Namespace: structs.DefaultNamespace, 651 TaskGroup: "web", 652 653 // TODO Remove once clientv2 gets merged 654 Resources: &structs.Resources{ 655 CPU: 500, 656 MemoryMB: 256, 657 }, 658 TaskResources: map[string]*structs.Resources{ 659 "web": { 660 CPU: 1000, 661 MemoryMB: 256, 662 }, 663 "init": { 664 CPU: 1000, 665 MemoryMB: 256, 666 }, 667 "side": { 668 CPU: 1000, 669 MemoryMB: 256, 670 }, 671 "post": { 672 CPU: 1000, 673 MemoryMB: 256, 674 }, 675 }, 676 677 AllocatedResources: &structs.AllocatedResources{ 678 Tasks: map[string]*structs.AllocatedTaskResources{ 679 "web": { 680 Cpu: structs.AllocatedCpuResources{ 681 CpuShares: 1000, 682 }, 683 Memory: structs.AllocatedMemoryResources{ 684 MemoryMB: 256, 685 }, 686 }, 687 "init": { 688 Cpu: structs.AllocatedCpuResources{ 689 CpuShares: 1000, 690 }, 691 Memory: structs.AllocatedMemoryResources{ 692 MemoryMB: 256, 693 }, 694 }, 695 "side": { 696 Cpu: structs.AllocatedCpuResources{ 697 CpuShares: 1000, 698 }, 699 Memory: structs.AllocatedMemoryResources{ 700 MemoryMB: 256, 701 }, 702 }, 703 "post": { 704 Cpu: structs.AllocatedCpuResources{ 705 CpuShares: 1000, 706 }, 707 Memory: structs.AllocatedMemoryResources{ 708 MemoryMB: 256, 709 }, 710 }, 711 }, 712 }, 713 Job: LifecycleJobWithPoststopDeploy(), 714 DesiredStatus: 
structs.AllocDesiredStatusRun, 715 ClientStatus: structs.AllocClientStatusPending, 716 } 717 alloc.JobID = alloc.Job.ID 718 return alloc 719} 720 721func MaxParallelJob() *structs.Job { 722 update := *structs.DefaultUpdateStrategy 723 update.MaxParallel = 0 724 job := &structs.Job{ 725 Region: "global", 726 ID: fmt.Sprintf("mock-service-%s", uuid.Generate()), 727 Name: "my-job", 728 Namespace: structs.DefaultNamespace, 729 Type: structs.JobTypeService, 730 Priority: 50, 731 AllAtOnce: false, 732 Datacenters: []string{"dc1"}, 733 Constraints: []*structs.Constraint{ 734 { 735 LTarget: "${attr.kernel.name}", 736 RTarget: "linux", 737 Operand: "=", 738 }, 739 }, 740 Update: update, 741 TaskGroups: []*structs.TaskGroup{ 742 { 743 Name: "web", 744 Count: 10, 745 EphemeralDisk: &structs.EphemeralDisk{ 746 SizeMB: 150, 747 }, 748 RestartPolicy: &structs.RestartPolicy{ 749 Attempts: 3, 750 Interval: 10 * time.Minute, 751 Delay: 1 * time.Minute, 752 Mode: structs.RestartPolicyModeDelay, 753 }, 754 ReschedulePolicy: &structs.ReschedulePolicy{ 755 Attempts: 2, 756 Interval: 10 * time.Minute, 757 Delay: 5 * time.Second, 758 DelayFunction: "constant", 759 }, 760 Migrate: structs.DefaultMigrateStrategy(), 761 Update: &update, 762 Tasks: []*structs.Task{ 763 { 764 Name: "web", 765 Driver: "exec", 766 Config: map[string]interface{}{ 767 "command": "/bin/date", 768 }, 769 Env: map[string]string{ 770 "FOO": "bar", 771 }, 772 Services: []*structs.Service{ 773 { 774 Name: "${TASK}-frontend", 775 PortLabel: "http", 776 Tags: []string{"pci:${meta.pci-dss}", "datacenter:${node.datacenter}"}, 777 Checks: []*structs.ServiceCheck{ 778 { 779 Name: "check-table", 780 Type: structs.ServiceCheckScript, 781 Command: "/usr/local/check-table-${meta.database}", 782 Args: []string{"${meta.version}"}, 783 Interval: 30 * time.Second, 784 Timeout: 5 * time.Second, 785 }, 786 }, 787 }, 788 { 789 Name: "${TASK}-admin", 790 PortLabel: "admin", 791 }, 792 }, 793 LogConfig: structs.DefaultLogConfig(), 794 
Resources: &structs.Resources{ 795 CPU: 500, 796 MemoryMB: 256, 797 Networks: []*structs.NetworkResource{ 798 { 799 MBits: 50, 800 DynamicPorts: []structs.Port{ 801 {Label: "http"}, 802 {Label: "admin"}, 803 }, 804 }, 805 }, 806 }, 807 Meta: map[string]string{ 808 "foo": "bar", 809 }, 810 }, 811 }, 812 Meta: map[string]string{ 813 "elb_check_type": "http", 814 "elb_check_interval": "30s", 815 "elb_check_min": "3", 816 }, 817 }, 818 }, 819 Meta: map[string]string{ 820 "owner": "armon", 821 }, 822 Status: structs.JobStatusPending, 823 Version: 0, 824 CreateIndex: 42, 825 ModifyIndex: 99, 826 JobModifyIndex: 99, 827 } 828 job.Canonicalize() 829 return job 830} 831 832// ConnectJob adds a Connect proxy sidecar group service to mock.Job. 833// 834// Note this does *not* include the Job.Register mutation that inserts the 835// associated Sidecar Task (nor the hook that configures envoy as the default). 836func ConnectJob() *structs.Job { 837 job := Job() 838 tg := job.TaskGroups[0] 839 tg.Services = []*structs.Service{{ 840 Name: "testconnect", 841 PortLabel: "9999", 842 Connect: &structs.ConsulConnect{ 843 SidecarService: new(structs.ConsulSidecarService), 844 }, 845 }} 846 tg.Networks = structs.Networks{{ 847 Mode: "bridge", // always bridge ... for now? 848 }} 849 return job 850} 851 852func ConnectNativeJob(mode string) *structs.Job { 853 job := Job() 854 tg := job.TaskGroups[0] 855 tg.Networks = []*structs.NetworkResource{{ 856 Mode: mode, 857 }} 858 tg.Services = []*structs.Service{{ 859 Name: "test_connect_native", 860 PortLabel: "9999", 861 Connect: &structs.ConsulConnect{ 862 Native: true, 863 }, 864 }} 865 tg.Tasks = []*structs.Task{{ 866 Name: "native_task", 867 }} 868 return job 869} 870 871// ConnectIngressGatewayJob creates a structs.Job that contains the definition 872// of a Consul Ingress Gateway service. The mode is the name of the network 873// mode assumed by the task group. 
If inject is true, a corresponding Task is 874// set on the group's Tasks (i.e. what the job would look like after job mutation). 875func ConnectIngressGatewayJob(mode string, inject bool) *structs.Job { 876 job := Job() 877 tg := job.TaskGroups[0] 878 tg.Networks = []*structs.NetworkResource{{ 879 Mode: mode, 880 }} 881 tg.Services = []*structs.Service{{ 882 Name: "my-ingress-service", 883 PortLabel: "9999", 884 Connect: &structs.ConsulConnect{ 885 Gateway: &structs.ConsulGateway{ 886 Proxy: &structs.ConsulGatewayProxy{ 887 ConnectTimeout: helper.TimeToPtr(3 * time.Second), 888 EnvoyGatewayBindAddresses: make(map[string]*structs.ConsulGatewayBindAddress), 889 }, 890 Ingress: &structs.ConsulIngressConfigEntry{ 891 Listeners: []*structs.ConsulIngressListener{{ 892 Port: 2000, 893 Protocol: "tcp", 894 Services: []*structs.ConsulIngressService{{ 895 Name: "service1", 896 }}, 897 }}, 898 }, 899 }, 900 }, 901 }} 902 // some tests need to assume the gateway proxy task has already been injected 903 if inject { 904 tg.Tasks = []*structs.Task{{ 905 Name: fmt.Sprintf("%s-%s", structs.ConnectIngressPrefix, "my-ingress-service"), 906 Kind: structs.NewTaskKind(structs.ConnectIngressPrefix, "my-ingress-service"), 907 Driver: "docker", 908 Config: make(map[string]interface{}), 909 ShutdownDelay: 5 * time.Second, 910 LogConfig: &structs.LogConfig{ 911 MaxFiles: 2, 912 MaxFileSizeMB: 2, 913 }, 914 }} 915 } else { 916 // otherwise there are no tasks in the group yet 917 tg.Tasks = nil 918 } 919 return job 920} 921 922func ConnectSidecarTask() *structs.Task { 923 return &structs.Task{ 924 Name: "mysidecar-sidecar-task", 925 Driver: "docker", 926 User: "nobody", 927 Config: map[string]interface{}{ 928 "image": envoy.SidecarConfigVar, 929 }, 930 Env: nil, 931 Resources: &structs.Resources{ 932 CPU: 150, 933 MemoryMB: 350, 934 }, 935 Kind: structs.NewTaskKind(structs.ConnectProxyPrefix, "mysidecar"), 936 } 937} 938 939func BatchJob() *structs.Job { 940 job := &structs.Job{ 941 Region: 
"global", 942 ID: fmt.Sprintf("mock-batch-%s", uuid.Generate()), 943 Name: "batch-job", 944 Namespace: structs.DefaultNamespace, 945 Type: structs.JobTypeBatch, 946 Priority: 50, 947 AllAtOnce: false, 948 Datacenters: []string{"dc1"}, 949 TaskGroups: []*structs.TaskGroup{ 950 { 951 Name: "web", 952 Count: 10, 953 EphemeralDisk: &structs.EphemeralDisk{ 954 SizeMB: 150, 955 }, 956 RestartPolicy: &structs.RestartPolicy{ 957 Attempts: 3, 958 Interval: 10 * time.Minute, 959 Delay: 1 * time.Minute, 960 Mode: structs.RestartPolicyModeDelay, 961 }, 962 ReschedulePolicy: &structs.ReschedulePolicy{ 963 Attempts: 2, 964 Interval: 10 * time.Minute, 965 Delay: 5 * time.Second, 966 DelayFunction: "constant", 967 }, 968 Tasks: []*structs.Task{ 969 { 970 Name: "web", 971 Driver: "mock_driver", 972 Config: map[string]interface{}{ 973 "run_for": "500ms", 974 }, 975 Env: map[string]string{ 976 "FOO": "bar", 977 }, 978 LogConfig: structs.DefaultLogConfig(), 979 Resources: &structs.Resources{ 980 CPU: 100, 981 MemoryMB: 100, 982 Networks: []*structs.NetworkResource{ 983 { 984 MBits: 50, 985 }, 986 }, 987 }, 988 Meta: map[string]string{ 989 "foo": "bar", 990 }, 991 }, 992 }, 993 }, 994 }, 995 Status: structs.JobStatusPending, 996 Version: 0, 997 CreateIndex: 43, 998 ModifyIndex: 99, 999 JobModifyIndex: 99, 1000 } 1001 job.Canonicalize() 1002 return job 1003} 1004 1005func SystemJob() *structs.Job { 1006 job := &structs.Job{ 1007 Region: "global", 1008 Namespace: structs.DefaultNamespace, 1009 ID: fmt.Sprintf("mock-system-%s", uuid.Generate()), 1010 Name: "my-job", 1011 Type: structs.JobTypeSystem, 1012 Priority: 100, 1013 AllAtOnce: false, 1014 Datacenters: []string{"dc1"}, 1015 Constraints: []*structs.Constraint{ 1016 { 1017 LTarget: "${attr.kernel.name}", 1018 RTarget: "linux", 1019 Operand: "=", 1020 }, 1021 }, 1022 TaskGroups: []*structs.TaskGroup{ 1023 { 1024 Name: "web", 1025 Count: 1, 1026 RestartPolicy: &structs.RestartPolicy{ 1027 Attempts: 3, 1028 Interval: 10 * time.Minute, 
1029 Delay: 1 * time.Minute, 1030 Mode: structs.RestartPolicyModeDelay, 1031 }, 1032 EphemeralDisk: structs.DefaultEphemeralDisk(), 1033 Tasks: []*structs.Task{ 1034 { 1035 Name: "web", 1036 Driver: "exec", 1037 Config: map[string]interface{}{ 1038 "command": "/bin/date", 1039 }, 1040 Env: map[string]string{}, 1041 Resources: &structs.Resources{ 1042 CPU: 500, 1043 MemoryMB: 256, 1044 Networks: []*structs.NetworkResource{ 1045 { 1046 MBits: 50, 1047 DynamicPorts: []structs.Port{{Label: "http"}}, 1048 }, 1049 }, 1050 }, 1051 LogConfig: structs.DefaultLogConfig(), 1052 }, 1053 }, 1054 }, 1055 }, 1056 Meta: map[string]string{ 1057 "owner": "armon", 1058 }, 1059 Status: structs.JobStatusPending, 1060 CreateIndex: 42, 1061 ModifyIndex: 99, 1062 } 1063 job.Canonicalize() 1064 return job 1065} 1066 1067func PeriodicJob() *structs.Job { 1068 job := Job() 1069 job.Type = structs.JobTypeBatch 1070 job.Periodic = &structs.PeriodicConfig{ 1071 Enabled: true, 1072 SpecType: structs.PeriodicSpecCron, 1073 Spec: "*/30 * * * *", 1074 } 1075 job.Status = structs.JobStatusRunning 1076 job.TaskGroups[0].Migrate = nil 1077 return job 1078} 1079 1080func Eval() *structs.Evaluation { 1081 now := time.Now().UTC().UnixNano() 1082 eval := &structs.Evaluation{ 1083 ID: uuid.Generate(), 1084 Namespace: structs.DefaultNamespace, 1085 Priority: 50, 1086 Type: structs.JobTypeService, 1087 JobID: uuid.Generate(), 1088 Status: structs.EvalStatusPending, 1089 CreateTime: now, 1090 ModifyTime: now, 1091 } 1092 return eval 1093} 1094 1095func BlockedEval() *structs.Evaluation { 1096 e := Eval() 1097 e.Status = structs.EvalStatusBlocked 1098 e.FailedTGAllocs = map[string]*structs.AllocMetric{ 1099 "cache": { 1100 DimensionExhausted: map[string]int{ 1101 "memory": 1, 1102 }, 1103 ResourcesExhausted: map[string]*structs.Resources{ 1104 "redis": { 1105 CPU: 100, 1106 MemoryMB: 1024, 1107 }, 1108 }, 1109 }, 1110 } 1111 1112 return e 1113} 1114 1115func JobSummary(jobID string) *structs.JobSummary { 1116 
js := &structs.JobSummary{ 1117 JobID: jobID, 1118 Namespace: structs.DefaultNamespace, 1119 Summary: map[string]structs.TaskGroupSummary{ 1120 "web": { 1121 Queued: 0, 1122 Starting: 0, 1123 }, 1124 }, 1125 } 1126 return js 1127} 1128 1129func Alloc() *structs.Allocation { 1130 job := Job() 1131 alloc := &structs.Allocation{ 1132 ID: uuid.Generate(), 1133 EvalID: uuid.Generate(), 1134 NodeID: "12345678-abcd-efab-cdef-123456789abc", 1135 Namespace: structs.DefaultNamespace, 1136 TaskGroup: "web", 1137 1138 // TODO Remove once clientv2 gets merged 1139 Resources: &structs.Resources{ 1140 CPU: 500, 1141 MemoryMB: 256, 1142 DiskMB: 150, 1143 Networks: []*structs.NetworkResource{ 1144 { 1145 Device: "eth0", 1146 IP: "192.168.0.100", 1147 ReservedPorts: []structs.Port{{Label: "admin", Value: 5000}}, 1148 MBits: 50, 1149 DynamicPorts: []structs.Port{{Label: "http"}}, 1150 }, 1151 }, 1152 }, 1153 TaskResources: map[string]*structs.Resources{ 1154 "web": { 1155 CPU: 500, 1156 MemoryMB: 256, 1157 Networks: []*structs.NetworkResource{ 1158 { 1159 Device: "eth0", 1160 IP: "192.168.0.100", 1161 ReservedPorts: []structs.Port{{Label: "admin", Value: 5000}}, 1162 MBits: 50, 1163 DynamicPorts: []structs.Port{{Label: "http", Value: 9876}}, 1164 }, 1165 }, 1166 }, 1167 }, 1168 SharedResources: &structs.Resources{ 1169 DiskMB: 150, 1170 }, 1171 1172 AllocatedResources: &structs.AllocatedResources{ 1173 Tasks: map[string]*structs.AllocatedTaskResources{ 1174 "web": { 1175 Cpu: structs.AllocatedCpuResources{ 1176 CpuShares: 500, 1177 }, 1178 Memory: structs.AllocatedMemoryResources{ 1179 MemoryMB: 256, 1180 }, 1181 Networks: []*structs.NetworkResource{ 1182 { 1183 Device: "eth0", 1184 IP: "192.168.0.100", 1185 ReservedPorts: []structs.Port{{Label: "admin", Value: 5000}}, 1186 MBits: 50, 1187 DynamicPorts: []structs.Port{{Label: "http", Value: 9876}}, 1188 }, 1189 }, 1190 }, 1191 }, 1192 Shared: structs.AllocatedSharedResources{ 1193 DiskMB: 150, 1194 }, 1195 }, 1196 Job: job, 1197 
DesiredStatus: structs.AllocDesiredStatusRun, 1198 ClientStatus: structs.AllocClientStatusPending, 1199 } 1200 alloc.JobID = alloc.Job.ID 1201 return alloc 1202} 1203 1204// ConnectJob adds a Connect proxy sidecar group service to mock.Alloc. 1205func ConnectAlloc() *structs.Allocation { 1206 alloc := Alloc() 1207 alloc.Job = ConnectJob() 1208 alloc.AllocatedResources.Shared.Networks = []*structs.NetworkResource{ 1209 { 1210 Mode: "bridge", 1211 IP: "10.0.0.1", 1212 DynamicPorts: []structs.Port{ 1213 { 1214 Label: "connect-proxy-testconnect", 1215 Value: 9999, 1216 To: 9999, 1217 }, 1218 }, 1219 }, 1220 } 1221 return alloc 1222} 1223 1224// ConnectNativeAlloc creates an alloc with a connect native task. 1225func ConnectNativeAlloc(mode string) *structs.Allocation { 1226 alloc := Alloc() 1227 alloc.Job = ConnectNativeJob(mode) 1228 alloc.AllocatedResources.Shared.Networks = []*structs.NetworkResource{{ 1229 Mode: mode, 1230 IP: "10.0.0.1", 1231 }} 1232 return alloc 1233} 1234 1235func ConnectIngressGatewayAlloc(mode string) *structs.Allocation { 1236 alloc := Alloc() 1237 alloc.Job = ConnectIngressGatewayJob(mode, true) 1238 alloc.AllocatedResources.Shared.Networks = []*structs.NetworkResource{{ 1239 Mode: mode, 1240 IP: "10.0.0.1", 1241 }} 1242 return alloc 1243} 1244 1245func BatchConnectJob() *structs.Job { 1246 job := &structs.Job{ 1247 Region: "global", 1248 ID: fmt.Sprintf("mock-connect-batch-job%s", uuid.Generate()), 1249 Name: "mock-connect-batch-job", 1250 Namespace: structs.DefaultNamespace, 1251 Type: structs.JobTypeBatch, 1252 Priority: 50, 1253 AllAtOnce: false, 1254 Datacenters: []string{"dc1"}, 1255 TaskGroups: []*structs.TaskGroup{{ 1256 Name: "mock-connect-batch-job", 1257 Count: 1, 1258 EphemeralDisk: &structs.EphemeralDisk{SizeMB: 150}, 1259 Networks: []*structs.NetworkResource{{ 1260 Mode: "bridge", 1261 }}, 1262 Tasks: []*structs.Task{{ 1263 Name: "connect-proxy-testconnect", 1264 Kind: "connect-proxy:testconnect", 1265 Driver: "mock_driver", 
1266 Config: map[string]interface{}{ 1267 "run_for": "500ms", 1268 }, 1269 LogConfig: structs.DefaultLogConfig(), 1270 Resources: &structs.Resources{ 1271 CPU: 500, 1272 MemoryMB: 256, 1273 Networks: []*structs.NetworkResource{{ 1274 MBits: 50, 1275 DynamicPorts: []structs.Port{{Label: "port1"}}, 1276 }}, 1277 }, 1278 }}, 1279 Services: []*structs.Service{{ 1280 Name: "testconnect", 1281 }}, 1282 }}, 1283 Meta: map[string]string{"owner": "shoenig"}, 1284 Status: structs.JobStatusPending, 1285 Version: 0, 1286 CreateIndex: 42, 1287 ModifyIndex: 99, 1288 JobModifyIndex: 99, 1289 } 1290 job.Canonicalize() 1291 return job 1292} 1293 1294// BatchConnectAlloc is useful for testing task runner things. 1295func BatchConnectAlloc() *structs.Allocation { 1296 alloc := &structs.Allocation{ 1297 ID: uuid.Generate(), 1298 EvalID: uuid.Generate(), 1299 NodeID: "12345678-abcd-efab-cdef-123456789abc", 1300 Namespace: structs.DefaultNamespace, 1301 TaskGroup: "mock-connect-batch-job", 1302 TaskResources: map[string]*structs.Resources{ 1303 "connect-proxy-testconnect": { 1304 CPU: 500, 1305 MemoryMB: 256, 1306 }, 1307 }, 1308 1309 AllocatedResources: &structs.AllocatedResources{ 1310 Tasks: map[string]*structs.AllocatedTaskResources{ 1311 "connect-proxy-testconnect": { 1312 Cpu: structs.AllocatedCpuResources{CpuShares: 500}, 1313 Memory: structs.AllocatedMemoryResources{MemoryMB: 256}, 1314 }, 1315 }, 1316 Shared: structs.AllocatedSharedResources{ 1317 Networks: []*structs.NetworkResource{{ 1318 Mode: "bridge", 1319 IP: "10.0.0.1", 1320 DynamicPorts: []structs.Port{{ 1321 Label: "connect-proxy-testconnect", 1322 Value: 9999, 1323 To: 9999, 1324 }}, 1325 }}, 1326 DiskMB: 0, 1327 }, 1328 }, 1329 Job: BatchConnectJob(), 1330 DesiredStatus: structs.AllocDesiredStatusRun, 1331 ClientStatus: structs.AllocClientStatusPending, 1332 } 1333 alloc.JobID = alloc.Job.ID 1334 return alloc 1335} 1336 1337func BatchAlloc() *structs.Allocation { 1338 alloc := &structs.Allocation{ 1339 ID: 
uuid.Generate(), 1340 EvalID: uuid.Generate(), 1341 NodeID: "12345678-abcd-efab-cdef-123456789abc", 1342 Namespace: structs.DefaultNamespace, 1343 TaskGroup: "web", 1344 1345 // TODO Remove once clientv2 gets merged 1346 Resources: &structs.Resources{ 1347 CPU: 500, 1348 MemoryMB: 256, 1349 DiskMB: 150, 1350 Networks: []*structs.NetworkResource{ 1351 { 1352 Device: "eth0", 1353 IP: "192.168.0.100", 1354 ReservedPorts: []structs.Port{{Label: "admin", Value: 5000}}, 1355 MBits: 50, 1356 DynamicPorts: []structs.Port{{Label: "http"}}, 1357 }, 1358 }, 1359 }, 1360 TaskResources: map[string]*structs.Resources{ 1361 "web": { 1362 CPU: 500, 1363 MemoryMB: 256, 1364 Networks: []*structs.NetworkResource{ 1365 { 1366 Device: "eth0", 1367 IP: "192.168.0.100", 1368 ReservedPorts: []structs.Port{{Label: "admin", Value: 5000}}, 1369 MBits: 50, 1370 DynamicPorts: []structs.Port{{Label: "http", Value: 9876}}, 1371 }, 1372 }, 1373 }, 1374 }, 1375 SharedResources: &structs.Resources{ 1376 DiskMB: 150, 1377 }, 1378 1379 AllocatedResources: &structs.AllocatedResources{ 1380 Tasks: map[string]*structs.AllocatedTaskResources{ 1381 "web": { 1382 Cpu: structs.AllocatedCpuResources{ 1383 CpuShares: 500, 1384 }, 1385 Memory: structs.AllocatedMemoryResources{ 1386 MemoryMB: 256, 1387 }, 1388 Networks: []*structs.NetworkResource{ 1389 { 1390 Device: "eth0", 1391 IP: "192.168.0.100", 1392 ReservedPorts: []structs.Port{{Label: "admin", Value: 5000}}, 1393 MBits: 50, 1394 DynamicPorts: []structs.Port{{Label: "http", Value: 9876}}, 1395 }, 1396 }, 1397 }, 1398 }, 1399 Shared: structs.AllocatedSharedResources{ 1400 DiskMB: 150, 1401 }, 1402 }, 1403 Job: BatchJob(), 1404 DesiredStatus: structs.AllocDesiredStatusRun, 1405 ClientStatus: structs.AllocClientStatusPending, 1406 } 1407 alloc.JobID = alloc.Job.ID 1408 return alloc 1409} 1410 1411func SystemAlloc() *structs.Allocation { 1412 alloc := &structs.Allocation{ 1413 ID: uuid.Generate(), 1414 EvalID: uuid.Generate(), 1415 NodeID: 
"12345678-abcd-efab-cdef-123456789abc",
		Namespace: structs.DefaultNamespace,
		TaskGroup: "web",

		// TODO Remove once clientv2 gets merged
		Resources: &structs.Resources{
			CPU:      500,
			MemoryMB: 256,
			DiskMB:   150,
			Networks: []*structs.NetworkResource{
				{
					Device:        "eth0",
					IP:            "192.168.0.100",
					ReservedPorts: []structs.Port{{Label: "admin", Value: 5000}},
					MBits:         50,
					DynamicPorts:  []structs.Port{{Label: "http"}},
				},
			},
		},
		TaskResources: map[string]*structs.Resources{
			"web": {
				CPU:      500,
				MemoryMB: 256,
				Networks: []*structs.NetworkResource{
					{
						Device:        "eth0",
						IP:            "192.168.0.100",
						ReservedPorts: []structs.Port{{Label: "admin", Value: 5000}},
						MBits:         50,
						// Unlike the alloc-level field above, the dynamic
						// "http" port here carries an assigned value.
						DynamicPorts: []structs.Port{{Label: "http", Value: 9876}},
					},
				},
			},
		},
		SharedResources: &structs.Resources{
			DiskMB: 150,
		},

		// Current-style resource accounting; mirrors the legacy fields above.
		AllocatedResources: &structs.AllocatedResources{
			Tasks: map[string]*structs.AllocatedTaskResources{
				"web": {
					Cpu: structs.AllocatedCpuResources{
						CpuShares: 500,
					},
					Memory: structs.AllocatedMemoryResources{
						MemoryMB: 256,
					},
					Networks: []*structs.NetworkResource{
						{
							Device:        "eth0",
							IP:            "192.168.0.100",
							ReservedPorts: []structs.Port{{Label: "admin", Value: 5000}},
							MBits:         50,
							DynamicPorts:  []structs.Port{{Label: "http", Value: 9876}},
						},
					},
				},
			},
			Shared: structs.AllocatedSharedResources{
				DiskMB: 150,
			},
		},
		Job:           SystemJob(),
		DesiredStatus: structs.AllocDesiredStatusRun,
		ClientStatus:  structs.AllocClientStatusPending,
	}
	// Keep the alloc's JobID consistent with the embedded mock job.
	alloc.JobID = alloc.Job.ID
	return alloc
}

// VaultAccessor returns a mock Vault token accessor for task "foo" with
// random accessor/node/alloc IDs and an 86400-second (24h) creation TTL.
func VaultAccessor() *structs.VaultAccessor {
	return &structs.VaultAccessor{
		Accessor:    uuid.Generate(),
		NodeID:      uuid.Generate(),
		AllocID:     uuid.Generate(),
		CreationTTL: 86400,
		Task:        "foo",
	}
}

// SITokenAccessor returns a mock service identity token accessor for task
// "foo" with random node, alloc, and accessor IDs.
func SITokenAccessor() *structs.SITokenAccessor {
	return &structs.SITokenAccessor{
		NodeID:     uuid.Generate(),
		AllocID:    uuid.Generate(),
		AccessorID: uuid.Generate(),
		TaskName:   "foo",
	}
}

// Deployment returns a running mock deployment for a random job ID with a
// single "web" task group wanting 10 instances.
func Deployment() *structs.Deployment {
	return &structs.Deployment{
		ID:             uuid.Generate(),
		JobID:          uuid.Generate(),
		Namespace:      structs.DefaultNamespace,
		JobVersion:     2,
		JobModifyIndex: 20,
		JobCreateIndex: 18,
		TaskGroups: map[string]*structs.DeploymentState{
			"web": {
				DesiredTotal: 10,
			},
		},
		Status:            structs.DeploymentStatusRunning,
		StatusDescription: structs.DeploymentStatusDescriptionRunning,
		ModifyIndex:       23,
		CreateIndex:       21,
	}
}

// Plan returns a minimal mock plan with only the priority set.
func Plan() *structs.Plan {
	return &structs.Plan{
		Priority: 50,
	}
}

// PlanResult returns an empty mock plan result.
func PlanResult() *structs.PlanResult {
	return &structs.PlanResult{}
}

// ACLPolicy returns a mock ACL policy (random "policy-" name) granting write
// on the default namespace and read on node and agent. Its hash is
// precomputed via SetHash before returning.
func ACLPolicy() *structs.ACLPolicy {
	ap := &structs.ACLPolicy{
		Name:        fmt.Sprintf("policy-%s", uuid.Generate()),
		Description: "Super cool policy!",
		Rules: `
	namespace "default" {
		policy = "write"
	}
	node {
		policy = "read"
	}
	agent {
		policy = "read"
	}
	`,
		CreateIndex: 10,
		ModifyIndex: 20,
	}
	ap.SetHash()
	return ap
}

// ACLToken returns a non-global client-type ACL token attached to the "foo"
// and "bar" policies. Its hash is precomputed via SetHash before returning.
func ACLToken() *structs.ACLToken {
	tk := &structs.ACLToken{
		AccessorID:  uuid.Generate(),
		SecretID:    uuid.Generate(),
		Name:        "my cool token " + uuid.Generate(),
		Type:        "client",
		Policies:    []string{"foo", "bar"},
		Global:      false,
		CreateTime:  time.Now().UTC(),
		CreateIndex: 10,
		ModifyIndex: 20,
	}
	tk.SetHash()
	return tk
}

// ACLManagementToken returns a global management-type ACL token.
// NOTE(review): unlike ACLToken, the hash is not precomputed here (no
// SetHash call) — confirm callers do not depend on Hash being set.
func ACLManagementToken() *structs.ACLToken {
	return &structs.ACLToken{
		AccessorID: uuid.Generate(),
		SecretID:   uuid.Generate(),
		Name:       "management " + uuid.Generate(),
		Type:       "management",
		Global:
true,
		CreateTime:  time.Now().UTC(),
		CreateIndex: 10,
		ModifyIndex: 20,
	}
}

// ScalingPolicy returns an enabled horizontal scaling policy (min 1, max
// 100) targeting randomly generated job, group, and task names in the
// default namespace, with a placeholder policy document.
func ScalingPolicy() *structs.ScalingPolicy {
	return &structs.ScalingPolicy{
		ID:   uuid.Generate(),
		Min:  1,
		Max:  100,
		Type: structs.ScalingPolicyTypeHorizontal,
		Target: map[string]string{
			structs.ScalingTargetNamespace: structs.DefaultNamespace,
			structs.ScalingTargetJob:       uuid.Generate(),
			structs.ScalingTargetGroup:     uuid.Generate(),
			structs.ScalingTargetTask:      uuid.Generate(),
		},
		Policy: map[string]interface{}{
			"a": "b",
		},
		Enabled: true,
	}
}

// JobWithScalingPolicy returns a mock job whose first task group carries an
// enabled horizontal scaling policy with Min == Max == the group's count,
// targeted at that group. Both the job and the policy are returned.
func JobWithScalingPolicy() (*structs.Job, *structs.ScalingPolicy) {
	job := Job()
	policy := &structs.ScalingPolicy{
		ID:      uuid.Generate(),
		Min:     int64(job.TaskGroups[0].Count),
		Max:     int64(job.TaskGroups[0].Count),
		Type:    structs.ScalingPolicyTypeHorizontal,
		Policy:  map[string]interface{}{},
		Enabled: true,
	}
	policy.TargetTaskGroup(job, job.TaskGroups[0])
	job.TaskGroups[0].Scaling = policy
	return job, policy
}

// MultiregionJob returns a mock job spanning two regions ("west" with two
// datacenters, "east" with one), using a max-parallel-1 "fail_all" strategy.
// The default update strategy is copied onto both the job and its first task
// group.
func MultiregionJob() *structs.Job {
	job := Job()
	// Copy the default strategy so mutations on this job don't leak into the
	// shared package-level default.
	update := *structs.DefaultUpdateStrategy
	job.Update = update
	job.TaskGroups[0].Update = &update
	job.Multiregion = &structs.Multiregion{
		Strategy: &structs.MultiregionStrategy{
			MaxParallel: 1,
			OnFailure:   "fail_all",
		},
		Regions: []*structs.MultiregionRegion{
			{
				Name:        "west",
				Count:       2,
				Datacenters: []string{"west-1", "west-2"},
				Meta:        map[string]string{"region_code": "W"},
			},
			{
				Name:        "east",
				Count:       1,
				Datacenters: []string{"east-1"},
				Meta:        map[string]string{"region_code": "E"},
			},
		},
	}
	return job
}

// CSIPlugin returns a minimal controller-required mock CSI plugin with no
// registered controllers, nodes, or allocations.
func CSIPlugin() *structs.CSIPlugin {
	return &structs.CSIPlugin{
		ID:                 uuid.Generate(),
		Provider:           "com.hashicorp:mock",
		Version:            "0.1",
		ControllerRequired: true,
Controllers:        map[string]*structs.CSIInfo{},
		Nodes:              map[string]*structs.CSIInfo{},
		Allocations:        []*structs.AllocListStub{},
		ControllersHealthy: 0,
		NodesHealthy:       0,
	}
}

// CSIVolume returns a single-node-writer, filesystem-attached mock volume
// bound to the given plugin. The volume's provider, version, and
// controller/node health and expectation fields mirror the plugin's current
// state; all claim and allocation maps start empty.
func CSIVolume(plugin *structs.CSIPlugin) *structs.CSIVolume {
	return &structs.CSIVolume{
		ID:             uuid.Generate(),
		Name:           "test-vol",
		ExternalID:     "vol-01",
		Namespace:      "default",
		Topologies:     []*structs.CSITopology{},
		AccessMode:     structs.CSIVolumeAccessModeSingleNodeWriter,
		AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem,
		MountOptions:   &structs.CSIMountOptions{},
		Secrets:        structs.CSISecrets{},
		Parameters:     map[string]string{},
		Context:        map[string]string{},
		ReadAllocs:     map[string]*structs.Allocation{},
		WriteAllocs:    map[string]*structs.Allocation{},
		ReadClaims:     map[string]*structs.CSIVolumeClaim{},
		WriteClaims:    map[string]*structs.CSIVolumeClaim{},
		PastClaims:     map[string]*structs.CSIVolumeClaim{},

		// Fields copied from the plugin's current state.
		PluginID:            plugin.ID,
		Provider:            plugin.Provider,
		ProviderVersion:     plugin.Version,
		ControllerRequired:  plugin.ControllerRequired,
		ControllersHealthy:  plugin.ControllersHealthy,
		ControllersExpected: len(plugin.Controllers),
		NodesHealthy:        plugin.NodesHealthy,
		NodesExpected:       len(plugin.Nodes),
	}
}

// Events returns a mock event batch at the given index containing one Node
// update event (payload: Node()) and one Eval update event (payload:
// Eval()), each keyed by a fresh UUID.
func Events(index uint64) *structs.Events {
	return &structs.Events{
		Index: index,
		Events: []structs.Event{
			{
				Index:   index,
				Topic:   "Node",
				Type:    "update",
				Key:     uuid.Generate(),
				Payload: Node(),
			},
			{
				Index:   index,
				Topic:   "Eval",
				Type:    "update",
				Key:     uuid.Generate(),
				Payload: Eval(),
			},
		},
	}
}

// AllocNetworkStatus returns a fixed mock network status for interface
// "eth0" at 192.168.0.100 with static DNS settings.
func AllocNetworkStatus() *structs.AllocNetworkStatus {
	return &structs.AllocNetworkStatus{
		InterfaceName: "eth0",
		Address:       "192.168.0.100",
		DNS: &structs.DNSConfig{
			Servers:  []string{"1.1.1.1"},
			Searches:
[]string{"localdomain"},
			Options:  []string{"ndots:5"},
		},
	}
}

// Namespace returns a mock namespace with a random "team-" name. Its hash is
// precomputed via SetHash before returning.
func Namespace() *structs.Namespace {
	ns := &structs.Namespace{
		Name:        fmt.Sprintf("team-%s", uuid.Generate()),
		Description: "test namespace",
		CreateIndex: 100,
		ModifyIndex: 200,
	}
	ns.SetHash()
	return ns
}