package structs

import (
	"fmt"
	"time"

	"github.com/hashicorp/nomad/helper/uuid"
	psstructs "github.com/hashicorp/nomad/plugins/shared/structs"
)

// NodeResourcesToAllocatedResources converts node resources into allocated
// resources. The result uses a single task named "web" and omits networks.
// This is useful when trying to make an allocation fill an entire node.
func NodeResourcesToAllocatedResources(n *NodeResources) *AllocatedResources {
	if n == nil {
		return nil
	}

	return &AllocatedResources{
		Tasks: map[string]*AllocatedTaskResources{
			"web": {
				Cpu: AllocatedCpuResources{
					CpuShares: n.Cpu.CpuShares,
				},
				Memory: AllocatedMemoryResources{
					MemoryMB: n.Memory.MemoryMB,
				},
			},
		},
		Shared: AllocatedSharedResources{
			DiskMB: n.Disk.DiskMB,
		},
	}
}
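
// exampleFullNodeAlloc is an illustrative sketch, not part of the package API:
// it shows how NodeResourcesToAllocatedResources pairs with the mocks below to
// build an allocation that consumes a node's entire CPU, memory, and disk.
// The function name is hypothetical.
func exampleFullNodeAlloc() *Allocation {
	node := MockNode()
	alloc := MockAlloc()
	alloc.NodeID = node.ID
	// Swap the default "web" task resources for the node's full capacity.
	alloc.AllocatedResources = NodeResourcesToAllocatedResources(node.NodeResources)
	return alloc
}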

// MockNode returns a mock client node with typical resources, reserved
// resources, attributes, and metadata for use in tests.
func MockNode() *Node {
	node := &Node{
		ID:         uuid.Generate(),
		SecretID:   uuid.Generate(),
		Datacenter: "dc1",
		Name:       "foobar",
		Attributes: map[string]string{
			"kernel.name":        "linux",
			"arch":               "x86",
			"nomad.version":      "1.0.0",
			"driver.exec":        "1",
			"driver.mock_driver": "1",
		},
		NodeResources: &NodeResources{
			Cpu: NodeCpuResources{
				CpuShares: 4000,
			},
			Memory: NodeMemoryResources{
				MemoryMB: 8192,
			},
			Disk: NodeDiskResources{
				DiskMB: 100 * 1024,
			},
			Networks: []*NetworkResource{
				{
					Device: "eth0",
					CIDR:   "192.168.0.100/32",
					MBits:  1000,
				},
			},
		},
		ReservedResources: &NodeReservedResources{
			Cpu: NodeReservedCpuResources{
				CpuShares: 100,
			},
			Memory: NodeReservedMemoryResources{
				MemoryMB: 256,
			},
			Disk: NodeReservedDiskResources{
				DiskMB: 4 * 1024,
			},
			Networks: NodeReservedNetworkResources{
				ReservedHostPorts: "22",
			},
		},
		Links: map[string]string{
			"consul": "foobar.dc1",
		},
		Meta: map[string]string{
			"pci-dss":  "true",
			"database": "mysql",
			"version":  "5.6",
		},
		NodeClass:             "linux-medium-pci",
		Status:                NodeStatusReady,
		SchedulingEligibility: NodeSchedulingEligible,
	}
	err := node.ComputeClass()
	if err != nil {
		panic(fmt.Sprintf("failed to compute node class: %v", err))
	}
	return node
}
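
// exampleCustomNode is an illustrative sketch (the name is hypothetical, not
// part of the package) of the usual pattern for tests that need a node that
// differs from MockNode: mutate the mock, then recompute its class so the
// computed class stays in step with the changed attributes, mirroring the
// ComputeClass call MockNode itself makes.
func exampleCustomNode() *Node {
	n := MockNode()
	n.Datacenter = "dc2"
	n.Attributes["kernel.name"] = "darwin"
	if err := n.ComputeClass(); err != nil {
		panic(fmt.Sprintf("failed to compute node class: %v", err))
	}
	return n
}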

// MockNvidiaNode returns a mock node with two healthy instances of an Nvidia
// 1080ti GPU.
func MockNvidiaNode() *Node {
	n := MockNode()
	n.NodeResources.Devices = []*NodeDeviceResource{
		{
			Type:   "gpu",
			Vendor: "nvidia",
			Name:   "1080ti",
			Attributes: map[string]*psstructs.Attribute{
				"memory":           psstructs.NewIntAttribute(11, psstructs.UnitGiB),
				"cuda_cores":       psstructs.NewIntAttribute(3584, ""),
				"graphics_clock":   psstructs.NewIntAttribute(1480, psstructs.UnitMHz),
				"memory_bandwidth": psstructs.NewIntAttribute(11, psstructs.UnitGBPerS),
			},
			Instances: []*NodeDevice{
				{
					ID:      uuid.Generate(),
					Healthy: true,
				},
				{
					ID:      uuid.Generate(),
					Healthy: true,
				},
			},
		},
	}
	err := n.ComputeClass()
	if err != nil {
		panic(fmt.Sprintf("failed to compute node class: %v", err))
	}
	return n
}
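
// exampleCountHealthyGPUs is an illustrative sketch (hypothetical name) of how
// a test might walk the device tree MockNvidiaNode wires up: one "gpu" device
// group containing two healthy instances.
func exampleCountHealthyGPUs() int {
	n := MockNvidiaNode()
	healthy := 0
	for _, dev := range n.NodeResources.Devices {
		for _, inst := range dev.Instances {
			if inst.Healthy {
				healthy++
			}
		}
	}
	return healthy // 2 for the mock's pair of 1080ti instances
}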

// MockJob returns a mock service job with a single "web" task group for use
// in tests.
func MockJob() *Job {
	job := &Job{
		Region:      "global",
		ID:          fmt.Sprintf("mock-service-%s", uuid.Generate()),
		Name:        "my-job",
		Namespace:   DefaultNamespace,
		Type:        JobTypeService,
		Priority:    50,
		AllAtOnce:   false,
		Datacenters: []string{"dc1"},
		Constraints: []*Constraint{
			{
				LTarget: "${attr.kernel.name}",
				RTarget: "linux",
				Operand: "=",
			},
		},
		TaskGroups: []*TaskGroup{
			{
				Name:  "web",
				Count: 10,
				EphemeralDisk: &EphemeralDisk{
					SizeMB: 150,
				},
				RestartPolicy: &RestartPolicy{
					Attempts: 3,
					Interval: 10 * time.Minute,
					Delay:    1 * time.Minute,
					Mode:     RestartPolicyModeDelay,
				},
				ReschedulePolicy: &ReschedulePolicy{
					Attempts:      2,
					Interval:      10 * time.Minute,
					Delay:         5 * time.Second,
					DelayFunction: "constant",
				},
				Migrate: DefaultMigrateStrategy(),
				Tasks: []*Task{
					{
						Name:   "web",
						Driver: "exec",
						Config: map[string]interface{}{
							"command": "/bin/date",
						},
						Env: map[string]string{
							"FOO": "bar",
						},
						Services: []*Service{
							{
								Name:      "${TASK}-frontend",
								PortLabel: "http",
								Tags:      []string{"pci:${meta.pci-dss}", "datacenter:${node.datacenter}"},
								Checks: []*ServiceCheck{
									{
										Name:     "check-table",
										Type:     ServiceCheckScript,
										Command:  "/usr/local/check-table-${meta.database}",
										Args:     []string{"${meta.version}"},
										Interval: 30 * time.Second,
										Timeout:  5 * time.Second,
									},
								},
							},
							{
								Name:      "${TASK}-admin",
								PortLabel: "admin",
							},
						},
						LogConfig: DefaultLogConfig(),
						Resources: &Resources{
							CPU:      500,
							MemoryMB: 256,
							Networks: []*NetworkResource{
								{
									MBits: 50,
									DynamicPorts: []Port{
										{Label: "http"},
										{Label: "admin"},
									},
								},
							},
						},
						Meta: map[string]string{
							"foo": "bar",
						},
					},
				},
				Meta: map[string]string{
					"elb_check_type":     "http",
					"elb_check_interval": "30s",
					"elb_check_min":      "3",
				},
			},
		},
		Meta: map[string]string{
			"owner": "armon",
		},
		Status:         JobStatusPending,
		Version:        0,
		CreateIndex:    42,
		ModifyIndex:    99,
		JobModifyIndex: 99,
	}
	job.Canonicalize()
	return job
}
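
// exampleSmallJob is an illustrative sketch (hypothetical name) of the common
// test pattern around MockJob: start from the mock, override the fields under
// test, and canonicalize again, as MockJob itself does, so defaulted fields
// stay consistent.
func exampleSmallJob() *Job {
	job := MockJob()
	job.Priority = 75
	job.TaskGroups[0].Count = 1
	job.Canonicalize()
	return job
}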

// MockAlloc returns a mock allocation of MockJob's "web" task group for use
// in tests.
func MockAlloc() *Allocation {
	alloc := &Allocation{
		ID:        uuid.Generate(),
		EvalID:    uuid.Generate(),
		NodeID:    "12345678-abcd-efab-cdef-123456789abc",
		Namespace: DefaultNamespace,
		TaskGroup: "web",
		AllocatedResources: &AllocatedResources{
			Tasks: map[string]*AllocatedTaskResources{
				"web": {
					Cpu: AllocatedCpuResources{
						CpuShares: 500,
					},
					Memory: AllocatedMemoryResources{
						MemoryMB: 256,
					},
					Networks: []*NetworkResource{
						{
							Device:        "eth0",
							IP:            "192.168.0.100",
							ReservedPorts: []Port{{Label: "admin", Value: 5000}},
							MBits:         50,
							DynamicPorts:  []Port{{Label: "http", Value: 9876}},
						},
					},
				},
			},
			Shared: AllocatedSharedResources{
				DiskMB: 150,
			},
		},
		Job:           MockJob(),
		DesiredStatus: AllocDesiredStatusRun,
		ClientStatus:  AllocClientStatusPending,
	}
	alloc.JobID = alloc.Job.ID
	return alloc
}
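
// exampleAllocSummary is an illustrative sketch (hypothetical name) showing
// how a test might read back the resources MockAlloc assigns to its single
// "web" task.
func exampleAllocSummary() {
	alloc := MockAlloc()
	web := alloc.AllocatedResources.Tasks["web"]
	fmt.Printf("mock alloc: %d CPU shares, %d MB memory, %d MB shared disk\n",
		web.Cpu.CpuShares, web.Memory.MemoryMB, alloc.AllocatedResources.Shared.DiskMB)
}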