// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
#include <linux/kernel_stat.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/sched/task_stack.h>

#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ptrace.h>
#include <linux/screen_info.h>
#include <linux/kdebug.h>
#include <linux/efi.h>
#include <linux/random.h>
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
#include <clocksource/hyperv_timer.h>
#include "hyperv_vmbus.h"

struct vmbus_dynid {
	struct list_head node;
	struct hv_vmbus_device_id id;
};

static struct acpi_device *hv_acpi_dev;

static struct completion probe_event;

static int hyperv_cpuhp_online;

static void *hv_panic_page;

static long __percpu *vmbus_evt;

/* Values parsed from ACPI DSDT */
int vmbus_irq;
int vmbus_interrupt;

/*
 * Boolean to control whether to report panic messages over Hyper-V.
 *
 * It can be set via /proc/sys/kernel/hyperv_record_panic_msg
 */
static int sysctl_record_panic_msg = 1;
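
/*
 * Illustrative shell usage: kmsg-over-Hyper-V reporting can be disabled at
 * runtime with
 *	echo 0 > /proc/sys/kernel/hyperv_record_panic_msg
 * and re-enabled by writing 1 (the default).
 */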

static int hyperv_report_reg(void)
{
	return !sysctl_record_panic_msg || !hv_panic_page;
}

static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
			      void *args)
{
	struct pt_regs *regs;

	vmbus_initiate_unload(true);

	/*
	 * Hyper-V should be notified only once about a panic. If we will be
	 * doing hyperv_report_panic_msg() later with kmsg data, don't do
	 * the notification here.
	 */
	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE
	    && hyperv_report_reg()) {
		regs = current_pt_regs();
		hyperv_report_panic(regs, val, false);
	}
	return NOTIFY_DONE;
}

static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
			    void *args)
{
	struct die_args *die = args;
	struct pt_regs *regs = die->regs;

	/* Don't notify Hyper-V if the die event is other than oops */
	if (val != DIE_OOPS)
		return NOTIFY_DONE;

	/*
	 * Hyper-V should be notified only once about a panic. If we will be
	 * doing hyperv_report_panic_msg() later with kmsg data, don't do
	 * the notification here.
	 */
	if (hyperv_report_reg())
		hyperv_report_panic(regs, val, true);
	return NOTIFY_DONE;
}

static struct notifier_block hyperv_die_block = {
	.notifier_call = hyperv_die_event,
};
static struct notifier_block hyperv_panic_block = {
	.notifier_call = hyperv_panic_event,
};

static const char *fb_mmio_name = "fb_range";
static struct resource *fb_mmio;
static struct resource *hyperv_mmio;
static DEFINE_MUTEX(hyperv_mmio_lock);

static int vmbus_exists(void)
{
	if (hv_acpi_dev == NULL)
		return -ENODEV;

	return 0;
}

static u8 channel_monitor_group(const struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid / 32;
}

static u8 channel_monitor_offset(const struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid % 32;
}

static u32 channel_pending(const struct vmbus_channel *channel,
			   const struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);

	return monitor_page->trigger_group[monitor_group].pending;
}

static u32 channel_latency(const struct vmbus_channel *channel,
			   const struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);

	return monitor_page->latency[monitor_group][monitor_offset];
}

static u32 channel_conn_id(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);

	return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
}

static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
		       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
}
static DEVICE_ATTR_RO(id);

static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
			  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->state);
}
static DEVICE_ATTR_RO(state);

static ssize_t monitor_id_show(struct device *dev,
			       struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
}
static DEVICE_ATTR_RO(monitor_id);

static ssize_t class_id_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       &hv_dev->channel->offermsg.offer.if_type);
}
static DEVICE_ATTR_RO(class_id);

static ssize_t device_id_show(struct device *dev,
			      struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       &hv_dev->channel->offermsg.offer.if_instance);
}
static DEVICE_ATTR_RO(device_id);

static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	return sprintf(buf, "vmbus:%*phN\n", UUID_SIZE, &hv_dev->dev_type);
}
static DEVICE_ATTR_RO(modalias);

#ifdef CONFIG_NUMA
static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;

	return sprintf(buf, "%d\n", cpu_to_node(hv_dev->channel->target_cpu));
}
static DEVICE_ATTR_RO(numa_node);
#endif

static ssize_t server_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_pending);

static ssize_t client_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_pending);

static ssize_t server_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_latency);

static ssize_t client_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_latency);

static ssize_t server_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_conn_id);

static ssize_t client_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_conn_id);

static ssize_t out_intr_mask_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(out_intr_mask);

static ssize_t out_read_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);

static ssize_t out_write_index_show(struct device *dev,
				    struct device_attribute *dev_attr,
				    char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);

static ssize_t out_read_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(out_read_bytes_avail);

static ssize_t out_write_bytes_avail_show(struct device *dev,
					  struct device_attribute *dev_attr,
					  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(out_write_bytes_avail);

static ssize_t in_intr_mask_show(struct device *dev,
				 struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(in_intr_mask);

static ssize_t in_read_index_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.current_read_index);
}
static DEVICE_ATTR_RO(in_read_index);

static ssize_t in_write_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.current_write_index);
}
static DEVICE_ATTR_RO(in_write_index);

static ssize_t in_read_bytes_avail_show(struct device *dev,
					struct device_attribute *dev_attr,
					char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(in_read_bytes_avail);

static ssize_t in_write_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(in_write_bytes_avail);

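/*
 * channel_vp_mapping emits one "child_relid:target_cpu" pair per line, the
 * primary channel first and then its sub-channels, e.g. (illustrative
 * values only):
 *	14:0
 *	15:3
 */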
static ssize_t channel_vp_mapping_show(struct device *dev,
				       struct device_attribute *dev_attr,
				       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
	int buf_size = PAGE_SIZE, n_written, tot_written;
	struct list_head *cur;

	if (!channel)
		return -ENODEV;

	mutex_lock(&vmbus_connection.channel_mutex);

	tot_written = snprintf(buf, buf_size, "%u:%u\n",
			       channel->offermsg.child_relid,
			       channel->target_cpu);

	list_for_each(cur, &channel->sc_list) {
		if (tot_written >= buf_size - 1)
			break;

		cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
		n_written = scnprintf(buf + tot_written,
				      buf_size - tot_written,
				      "%u:%u\n",
				      cur_sc->offermsg.child_relid,
				      cur_sc->target_cpu);
		tot_written += n_written;
	}

	mutex_unlock(&vmbus_connection.channel_mutex);

	return tot_written;
}
static DEVICE_ATTR_RO(channel_vp_mapping);

static ssize_t vendor_show(struct device *dev,
			   struct device_attribute *dev_attr,
			   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	return sprintf(buf, "0x%x\n", hv_dev->vendor_id);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t device_show(struct device *dev,
			   struct device_attribute *dev_attr,
			   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	return sprintf(buf, "0x%x\n", hv_dev->device_id);
}
static DEVICE_ATTR_RO(device);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	char *driver_override, *old, *cp;

	/* We need to keep extra room for a newline */
	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, count, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	cp = strchr(driver_override, '\n');
	if (cp)
		*cp = '\0';

	device_lock(dev);
	old = hv_dev->driver_override;
	if (strlen(driver_override)) {
		hv_dev->driver_override = driver_override;
	} else {
		kfree(driver_override);
		hv_dev->driver_override = NULL;
	}
	device_unlock(dev);

	kfree(old);

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", hv_dev->driver_override);
	device_unlock(dev);

	return len;
}
static DEVICE_ATTR_RW(driver_override);
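
/*
 * driver_override follows the usual sysfs convention; as an illustrative
 * example (paths are hypothetical for a given system):
 *	echo uio_hv_generic > /sys/bus/vmbus/devices/<device>/driver_override
 * restricts the device to binding only to that driver, while writing an
 * empty string clears the override.
 */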

/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
static struct attribute *vmbus_dev_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_state.attr,
	&dev_attr_monitor_id.attr,
	&dev_attr_class_id.attr,
	&dev_attr_device_id.attr,
	&dev_attr_modalias.attr,
#ifdef CONFIG_NUMA
	&dev_attr_numa_node.attr,
#endif
	&dev_attr_server_monitor_pending.attr,
	&dev_attr_client_monitor_pending.attr,
	&dev_attr_server_monitor_latency.attr,
	&dev_attr_client_monitor_latency.attr,
	&dev_attr_server_monitor_conn_id.attr,
	&dev_attr_client_monitor_conn_id.attr,
	&dev_attr_out_intr_mask.attr,
	&dev_attr_out_read_index.attr,
	&dev_attr_out_write_index.attr,
	&dev_attr_out_read_bytes_avail.attr,
	&dev_attr_out_write_bytes_avail.attr,
	&dev_attr_in_intr_mask.attr,
	&dev_attr_in_read_index.attr,
	&dev_attr_in_write_index.attr,
	&dev_attr_in_read_bytes_avail.attr,
	&dev_attr_in_write_bytes_avail.attr,
	&dev_attr_channel_vp_mapping.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

/*
 * Device-level attribute_group callback function. Returns the permission for
 * each attribute, and returns 0 if an attribute is not visible.
 */
static umode_t vmbus_dev_attr_is_visible(struct kobject *kobj,
					 struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	const struct hv_device *hv_dev = device_to_hv_device(dev);

	/* Hide the monitor attributes if the monitor mechanism is not used. */
	if (!hv_dev->channel->offermsg.monitor_allocated &&
	    (attr == &dev_attr_monitor_id.attr ||
	     attr == &dev_attr_server_monitor_pending.attr ||
	     attr == &dev_attr_client_monitor_pending.attr ||
	     attr == &dev_attr_server_monitor_latency.attr ||
	     attr == &dev_attr_client_monitor_latency.attr ||
	     attr == &dev_attr_server_monitor_conn_id.attr ||
	     attr == &dev_attr_client_monitor_conn_id.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group vmbus_dev_group = {
	.attrs = vmbus_dev_attrs,
	.is_visible = vmbus_dev_attr_is_visible
};
__ATTRIBUTE_GROUPS(vmbus_dev);

/* Set up the attribute for /sys/bus/vmbus/hibernation */
static ssize_t hibernation_show(struct bus_type *bus, char *buf)
{
	return sprintf(buf, "%d\n", !!hv_is_hibernation_supported());
}

static BUS_ATTR_RO(hibernation);
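
/*
 * Illustrative usage: userspace can query hibernation support with
 *	cat /sys/bus/vmbus/hibernation
 * which prints 1 when hv_is_hibernation_supported() is true, else 0.
 */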

static struct attribute *vmbus_bus_attrs[] = {
	&bus_attr_hibernation.attr,
	NULL,
};
static const struct attribute_group vmbus_bus_group = {
	.attrs = vmbus_bus_attrs,
};
__ATTRIBUTE_GROUPS(vmbus_bus);

/*
 * vmbus_uevent - add uevent for our device
 *
 * This routine is invoked when a device is added or removed on the vmbus to
 * generate a uevent to udev in the userspace. The udev will then look at its
 * rules and the uevent generated here to load the appropriate driver.
 *
 * The alias string will be of the form vmbus:guid where guid is the string
 * representation of the device guid (each byte of the guid will be
 * represented with two hex characters).
 */
static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
{
	struct hv_device *dev = device_to_hv_device(device);
	const char *format = "MODALIAS=vmbus:%*phN";

	return add_uevent_var(env, format, UUID_SIZE, &dev->dev_type);
}
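
/*
 * The resulting uevent variable is therefore of the form (illustrative):
 *	MODALIAS=vmbus:<32 hex characters of the class GUID>
 * which matches the "vmbus:..." alias lines generated from drivers'
 * MODULE_DEVICE_TABLE(vmbus, ...) entries.
 */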

static const struct hv_vmbus_device_id *
hv_vmbus_dev_match(const struct hv_vmbus_device_id *id, const guid_t *guid)
{
	if (id == NULL)
		return NULL; /* empty device table */

	for (; !guid_is_null(&id->guid); id++)
		if (guid_equal(&id->guid, guid))
			return id;

	return NULL;
}

static const struct hv_vmbus_device_id *
hv_vmbus_dynid_match(struct hv_driver *drv, const guid_t *guid)
{
	const struct hv_vmbus_device_id *id = NULL;
	struct vmbus_dynid *dynid;

	spin_lock(&drv->dynids.lock);
	list_for_each_entry(dynid, &drv->dynids.list, node) {
		if (guid_equal(&dynid->id.guid, guid)) {
			id = &dynid->id;
			break;
		}
	}
	spin_unlock(&drv->dynids.lock);

	return id;
}

static const struct hv_vmbus_device_id vmbus_device_null;

/*
 * Return a matching hv_vmbus_device_id pointer.
 * If there is no match, return NULL.
 */
static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
							struct hv_device *dev)
{
	const guid_t *guid = &dev->dev_type;
	const struct hv_vmbus_device_id *id;

	/* When driver_override is set, only bind to the matching driver */
	if (dev->driver_override && strcmp(dev->driver_override, drv->name))
		return NULL;

	/* Look at the dynamic ids first, before the static ones */
	id = hv_vmbus_dynid_match(drv, guid);
	if (!id)
		id = hv_vmbus_dev_match(drv->id_table, guid);

	/* driver_override will always match, send a dummy id */
	if (!id && dev->driver_override)
		id = &vmbus_device_null;

	return id;
}

/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */
static int vmbus_add_dynid(struct hv_driver *drv, guid_t *guid)
{
	struct vmbus_dynid *dynid;

	dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
	if (!dynid)
		return -ENOMEM;

	dynid->id.guid = *guid;

	spin_lock(&drv->dynids.lock);
	list_add_tail(&dynid->node, &drv->dynids.list);
	spin_unlock(&drv->dynids.lock);

	return driver_attach(&drv->driver);
}

static void vmbus_free_dynids(struct hv_driver *drv)
{
	struct vmbus_dynid *dynid, *n;

	spin_lock(&drv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
		list_del(&dynid->node);
		kfree(dynid);
	}
	spin_unlock(&drv->dynids.lock);
}

/*
 * new_id_store - sysfs frontend to vmbus_add_dynid()
 *
 * Allow GUIDs to be added to an existing driver via sysfs.
 */
static ssize_t new_id_store(struct device_driver *driver, const char *buf,
			    size_t count)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	guid_t guid;
	ssize_t retval;

	retval = guid_parse(buf, &guid);
	if (retval)
		return retval;

	if (hv_vmbus_dynid_match(drv, &guid))
		return -EEXIST;

	retval = vmbus_add_dynid(drv, &guid);
	if (retval)
		return retval;
	return count;
}
static DRIVER_ATTR_WO(new_id);
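
/*
 * Illustrative usage: a GUID can be added to an already-loaded driver with
 *	echo "<device GUID>" > /sys/bus/vmbus/drivers/<driver>/new_id
 * after which matching devices are re-probed via driver_attach().
 */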

/*
 * remove_id_store - remove a vmbus device ID from this driver
 *
 * Removes a dynamic vmbus device ID from this driver.
 */
static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
			       size_t count)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	struct vmbus_dynid *dynid, *n;
	guid_t guid;
	ssize_t retval;

	retval = guid_parse(buf, &guid);
	if (retval)
		return retval;

	retval = -ENODEV;
	spin_lock(&drv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
		struct hv_vmbus_device_id *id = &dynid->id;

		if (guid_equal(&id->guid, &guid)) {
			list_del(&dynid->node);
			kfree(dynid);
			retval = count;
			break;
		}
	}
	spin_unlock(&drv->dynids.lock);

	return retval;
}
static DRIVER_ATTR_WO(remove_id);
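
/*
 * Illustrative usage, the inverse of new_id:
 *	echo "<device GUID>" > /sys/bus/vmbus/drivers/<driver>/remove_id
 * drops a previously added dynamic ID; devices that are already bound to
 * the driver stay bound.
 */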

static struct attribute *vmbus_drv_attrs[] = {
	&driver_attr_new_id.attr,
	&driver_attr_remove_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vmbus_drv);


/*
 * vmbus_match - Attempt to match the specified device to the specified driver
 */
static int vmbus_match(struct device *device, struct device_driver *driver)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	struct hv_device *hv_dev = device_to_hv_device(device);

	/* The hv_sock driver handles all hv_sock offers. */
	if (is_hvsock_channel(hv_dev->channel))
		return drv->hvsock;

	if (hv_vmbus_get_id(drv, hv_dev))
		return 1;

	return 0;
}

/*
 * vmbus_probe - Add the new vmbus's child device
 */
static int vmbus_probe(struct device *child_device)
{
	int ret = 0;
	struct hv_driver *drv =
			drv_to_hv_drv(child_device->driver);
	struct hv_device *dev = device_to_hv_device(child_device);
	const struct hv_vmbus_device_id *dev_id;

	dev_id = hv_vmbus_get_id(drv, dev);
	if (drv->probe) {
		ret = drv->probe(dev, dev_id);
		if (ret != 0)
			pr_err("probe failed for device %s (%d)\n",
			       dev_name(child_device), ret);

	} else {
		pr_err("probe not set for driver %s\n",
		       dev_name(child_device));
		ret = -ENODEV;
	}
	return ret;
}

/*
 * vmbus_remove - Remove a vmbus device
 */
static int vmbus_remove(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	if (child_device->driver) {
		drv = drv_to_hv_drv(child_device->driver);
		if (drv->remove)
			drv->remove(dev);
	}

	return 0;
}


/*
 * vmbus_shutdown - Shutdown a vmbus device
 */
static void vmbus_shutdown(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);


	/* The device may not be attached yet */
	if (!child_device->driver)
		return;

	drv = drv_to_hv_drv(child_device->driver);

	if (drv->shutdown)
		drv->shutdown(dev);
}

#ifdef CONFIG_PM_SLEEP
/*
 * vmbus_suspend - Suspend a vmbus device
 */
static int vmbus_suspend(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	/* The device may not be attached yet */
	if (!child_device->driver)
		return 0;

	drv = drv_to_hv_drv(child_device->driver);
	if (!drv->suspend)
		return -EOPNOTSUPP;

	return drv->suspend(dev);
}

/*
 * vmbus_resume - Resume a vmbus device
 */
static int vmbus_resume(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	/* The device may not be attached yet */
	if (!child_device->driver)
		return 0;

	drv = drv_to_hv_drv(child_device->driver);
	if (!drv->resume)
		return -EOPNOTSUPP;

	return drv->resume(dev);
}
#else
#define vmbus_suspend NULL
#define vmbus_resume NULL
#endif /* CONFIG_PM_SLEEP */

/*
 * vmbus_device_release - Final callback release of the vmbus child device
 */
static void vmbus_device_release(struct device *device)
{
	struct hv_device *hv_dev = device_to_hv_device(device);
	struct vmbus_channel *channel = hv_dev->channel;

	hv_debug_rm_dev_dir(hv_dev);

	mutex_lock(&vmbus_connection.channel_mutex);
	hv_process_channel_removal(channel);
	mutex_unlock(&vmbus_connection.channel_mutex);
	kfree(hv_dev);
}

/*
 * Note: we must use the "noirq" ops: see the comment before vmbus_bus_pm.
 *
 * suspend_noirq/resume_noirq are set to NULL to support Suspend-to-Idle: we
 * shouldn't suspend the vmbus devices upon Suspend-to-Idle, otherwise there
 * is no way to wake up a Generation-2 VM.
 *
 * The other 4 ops are for hibernation.
 */

static const struct dev_pm_ops vmbus_pm = {
	.suspend_noirq = NULL,
	.resume_noirq = NULL,
	.freeze_noirq = vmbus_suspend,
	.thaw_noirq = vmbus_resume,
	.poweroff_noirq = vmbus_suspend,
	.restore_noirq = vmbus_resume,
};

/* The one and only one */
static struct bus_type hv_bus = {
	.name = "vmbus",
	.match = vmbus_match,
	.shutdown = vmbus_shutdown,
	.remove = vmbus_remove,
	.probe = vmbus_probe,
	.uevent = vmbus_uevent,
	.dev_groups = vmbus_dev_groups,
	.drv_groups = vmbus_drv_groups,
	.bus_groups = vmbus_bus_groups,
	.pm = &vmbus_pm,
};
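
/* Registered in vmbus_bus_init(); appears in sysfs as /sys/bus/vmbus. */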

struct onmessage_work_context {
	struct work_struct work;
	struct {
		struct hv_message_header header;
		u8 payload[];
	} msg;
};

static void vmbus_onmessage_work(struct work_struct *work)
{
	struct onmessage_work_context *ctx;

	/* Do not process messages if we're in DISCONNECTED state */
	if (vmbus_connection.conn_state == DISCONNECTED)
		return;

	ctx = container_of(work, struct onmessage_work_context,
			   work);
	vmbus_onmessage((struct vmbus_channel_message_header *)
			&ctx->msg.payload);
	kfree(ctx);
}

void vmbus_on_msg_dpc(unsigned long data)
{
	struct hv_per_cpu_context *hv_cpu = (void *)data;
	void *page_addr = hv_cpu->synic_message_page;
	struct hv_message msg_copy, *msg = (struct hv_message *)page_addr +
					   VMBUS_MESSAGE_SINT;
	struct vmbus_channel_message_header *hdr;
	enum vmbus_channel_message_type msgtype;
	const struct vmbus_channel_message_table_entry *entry;
	struct onmessage_work_context *ctx;
	__u8 payload_size;
	u32 message_type;

	/*
	 * 'enum vmbus_channel_message_type' is supposed to always be 'u32' as
	 * it is being used in 'struct vmbus_channel_message_header' definition
	 * which is supposed to match hypervisor ABI.
	 */
	BUILD_BUG_ON(sizeof(enum vmbus_channel_message_type) != sizeof(u32));

	/*
	 * Since the message is in memory shared with the host, an erroneous or
	 * malicious Hyper-V could modify the message while vmbus_on_msg_dpc()
	 * or individual message handlers are executing; to prevent this, copy
	 * the message into private memory.
	 */
	memcpy(&msg_copy, msg, sizeof(struct hv_message));

	message_type = msg_copy.header.message_type;
	if (message_type == HVMSG_NONE)
		/* no msg */
		return;

	hdr = (struct vmbus_channel_message_header *)msg_copy.u.payload;
	msgtype = hdr->msgtype;

	trace_vmbus_on_msg_dpc(hdr);

	if (msgtype >= CHANNELMSG_COUNT) {
		WARN_ONCE(1, "unknown msgtype=%d\n", msgtype);
		goto msg_handled;
	}

	payload_size = msg_copy.header.payload_size;
	if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT) {
		WARN_ONCE(1, "payload size is too large (%d)\n", payload_size);
		goto msg_handled;
	}

	entry = &channel_message_table[msgtype];

	if (!entry->message_handler)
		goto msg_handled;

	if (payload_size < entry->min_payload_len) {
		WARN_ONCE(1, "message too short: msgtype=%d len=%d\n", msgtype, payload_size);
		goto msg_handled;
	}

	if (entry->handler_type == VMHT_BLOCKING) {
		ctx = kmalloc(sizeof(*ctx) + payload_size, GFP_ATOMIC);
		if (ctx == NULL)
			return;

		INIT_WORK(&ctx->work, vmbus_onmessage_work);
		memcpy(&ctx->msg, &msg_copy, sizeof(msg->header) + payload_size);

		/*
		 * The host can generate a rescind message while we
		 * may still be handling the original offer. We deal with
		 * this condition by relying on the synchronization provided
		 * by offer_in_progress and by channel_mutex. See also the
		 * inline comments in vmbus_onoffer_rescind().
		 */
		switch (msgtype) {
		case CHANNELMSG_RESCIND_CHANNELOFFER:
			/*
			 * If we are handling the rescind message,
			 * schedule the work on the global work queue.
			 *
			 * The OFFER message and the RESCIND message should
			 * not be handled by the same serialized work queue,
			 * because the OFFER handler may call vmbus_open(),
			 * which tries to open the channel by sending an
			 * OPEN_CHANNEL message to the host and waits for
			 * the host's response; however, if the host has
			 * rescinded the channel before it receives the
			 * OPEN_CHANNEL message, the host just silently
			 * ignores the OPEN_CHANNEL message; as a result,
			 * the guest's OFFER handler hangs forever if we
			 * handle the RESCIND message in the same serialized
			 * work queue: the RESCIND handler can not start to
			 * run before the OFFER handler finishes.
			 */
			schedule_work(&ctx->work);
			break;

		case CHANNELMSG_OFFERCHANNEL:
			/*
			 * The host sends the offer message of a given channel
			 * before sending the rescind message of the same
			 * channel. These messages are sent to the guest's
			 * connect CPU; the guest then starts processing them
			 * in the tasklet handler on this CPU:
			 *
			 * VMBUS_CONNECT_CPU
			 *
			 * [vmbus_on_msg_dpc()]
			 * atomic_inc()  // CHANNELMSG_OFFERCHANNEL
			 * queue_work()
			 * ...
			 * [vmbus_on_msg_dpc()]
			 * schedule_work()  // CHANNELMSG_RESCIND_CHANNELOFFER
			 *
			 * We rely on the memory-ordering properties of the
			 * queue_work() and schedule_work() primitives, which
			 * guarantee that the atomic increment will be visible
			 * to the CPUs which will execute the offer & rescind
			 * works by the time these works will start execution.
			 */
			atomic_inc(&vmbus_connection.offer_in_progress);
			fallthrough;

		default:
			queue_work(vmbus_connection.work_queue, &ctx->work);
		}
	} else
		entry->message_handler(hdr);

msg_handled:
	vmbus_signal_eom(msg, message_type);
}

#ifdef CONFIG_PM_SLEEP
/*
 * Fake RESCIND_CHANNEL messages to clean up hv_sock channels by force for
 * hibernation, because hv_sock connections can not persist across hibernation.
 */
static void vmbus_force_channel_rescinded(struct vmbus_channel *channel)
{
	struct onmessage_work_context *ctx;
	struct vmbus_channel_rescind_offer *rescind;

	WARN_ON(!is_hvsock_channel(channel));

	/*
	 * Allocation size is small and the allocation should really not fail,
	 * otherwise the state of the hv_sock connections ends up in limbo.
	 */
	ctx = kzalloc(sizeof(*ctx) + sizeof(*rescind),
		      GFP_KERNEL | __GFP_NOFAIL);

	/*
	 * So far, these are not really used by Linux. Just set them to the
	 * reasonable values conforming to the definitions of the fields.
	 */
	ctx->msg.header.message_type = 1;
	ctx->msg.header.payload_size = sizeof(*rescind);

	/* These values are actually used by Linux. */
	rescind = (struct vmbus_channel_rescind_offer *)ctx->msg.payload;
	rescind->header.msgtype = CHANNELMSG_RESCIND_CHANNELOFFER;
	rescind->child_relid = channel->offermsg.child_relid;

	INIT_WORK(&ctx->work, vmbus_onmessage_work);

	queue_work(vmbus_connection.work_queue, &ctx->work);
}
#endif /* CONFIG_PM_SLEEP */

/*
 * Schedule all channels with events pending
 */
static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
{
	unsigned long *recv_int_page;
	u32 maxbits, relid;

	if (vmbus_proto_version < VERSION_WIN8) {
		maxbits = MAX_NUM_CHANNELS_SUPPORTED;
		recv_int_page = vmbus_connection.recv_int_page;
	} else {
		/*
		 * When the host is win8 and beyond, the event page
		 * can be directly checked to get the id of the channel
		 * that has the interrupt pending.
		 */
		void *page_addr = hv_cpu->synic_event_page;
		union hv_synic_event_flags *event
			= (union hv_synic_event_flags *)page_addr +
						VMBUS_MESSAGE_SINT;

		maxbits = HV_EVENT_FLAGS_COUNT;
		recv_int_page = event->flags;
	}

	if (unlikely(!recv_int_page))
		return;

	for_each_set_bit(relid, recv_int_page, maxbits) {
		void (*callback_fn)(void *context);
		struct vmbus_channel *channel;

		if (!sync_test_and_clear_bit(relid, recv_int_page))
			continue;

		/* Special case - vmbus channel protocol msg */
		if (relid == 0)
			continue;

		/*
		 * Pairs with the kfree_rcu() in vmbus_chan_release().
		 * Guarantees that the channel data structure doesn't
		 * get freed while the channel pointer below is being
		 * dereferenced.
		 */
		rcu_read_lock();

		/* Find channel based on relid */
		channel = relid2channel(relid);
		if (channel == NULL)
			goto sched_unlock_rcu;

		if (channel->rescind)
			goto sched_unlock_rcu;

		/*
		 * Make sure that the ring buffer data structure doesn't get
		 * freed while we dereference the ring buffer pointer. Test
		 * for the channel's onchannel_callback being NULL within a
		 * sched_lock critical section. See also the inline comments
		 * in vmbus_reset_channel_cb().
		 */
		spin_lock(&channel->sched_lock);

		callback_fn = channel->onchannel_callback;
		if (unlikely(callback_fn == NULL))
			goto sched_unlock;

		trace_vmbus_chan_sched(channel);

		++channel->interrupts;

		switch (channel->callback_mode) {
		case HV_CALL_ISR:
			(*callback_fn)(channel->channel_callback_context);
			break;

		case HV_CALL_BATCHED:
			hv_begin_read(&channel->inbound);
			fallthrough;
		case HV_CALL_DIRECT:
			tasklet_schedule(&channel->callback_event);
		}

sched_unlock:
		spin_unlock(&channel->sched_lock);
sched_unlock_rcu:
		rcu_read_unlock();
	}
}

static void vmbus_isr(void)
{
	struct hv_per_cpu_context *hv_cpu
		= this_cpu_ptr(hv_context.cpu_context);
	void *page_addr = hv_cpu->synic_event_page;
	struct hv_message *msg;
	union hv_synic_event_flags *event;
	bool handled = false;

	if (unlikely(page_addr == NULL))
		return;

	event = (union hv_synic_event_flags *)page_addr +
					 VMBUS_MESSAGE_SINT;
	/*
	 * Check for events before checking for messages. This is the order
	 * in which events and messages are checked in Windows guests on
	 * Hyper-V, and the Windows team suggested we do the same.
	 */

	if ((vmbus_proto_version == VERSION_WS2008) ||
	    (vmbus_proto_version == VERSION_WIN7)) {

		/* Since we are a child, we only need to check bit 0 */
		if (sync_test_and_clear_bit(0, event->flags))
			handled = true;
	} else {
		/*
		 * Our host is win8 or above. The signaling mechanism
		 * has changed and we can directly look at the event page.
		 * If bit n is set then we have an interrupt on the channel
		 * whose id is n.
		 */
		handled = true;
	}

	if (handled)
		vmbus_chan_sched(hv_cpu);

	page_addr = hv_cpu->synic_message_page;
	msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;

	/* Check if there are actual msgs to be processed */
	if (msg->header.message_type != HVMSG_NONE) {
		if (msg->header.message_type == HVMSG_TIMER_EXPIRED) {
			hv_stimer0_isr();
			vmbus_signal_eom(msg, HVMSG_TIMER_EXPIRED);
		} else
			tasklet_schedule(&hv_cpu->msg_dpc);
	}

	add_interrupt_randomness(vmbus_interrupt, 0);
}

static irqreturn_t vmbus_percpu_isr(int irq, void *dev_id)
{
	vmbus_isr();
	return IRQ_HANDLED;
}

/*
 * Callback from kmsg_dump. Grab as much as possible from the end of the kmsg
 * buffer and call into Hyper-V to transfer the data.
 */
static void hv_kmsg_dump(struct kmsg_dumper *dumper,
			 enum kmsg_dump_reason reason)
{
	struct kmsg_dump_iter iter;
	size_t bytes_written;

	/* We are only interested in panics. */
	if ((reason != KMSG_DUMP_PANIC) || (!sysctl_record_panic_msg))
		return;

	/*
	 * Write dump contents to the page. No need to synchronize; panic should
	 * be single-threaded.
	 */
	kmsg_dump_rewind(&iter);
	kmsg_dump_get_buffer(&iter, false, hv_panic_page, HV_HYP_PAGE_SIZE,
			     &bytes_written);
	if (!bytes_written)
		return;
	/*
	 * P3 carries the physical address of the panic page and P4 the size
	 * of the panic data in that page. The rest of the registers are
	 * no-ops when the NOTIFY_MSG flag is set.
	 */
	hv_set_register(HV_REGISTER_CRASH_P0, 0);
	hv_set_register(HV_REGISTER_CRASH_P1, 0);
	hv_set_register(HV_REGISTER_CRASH_P2, 0);
	hv_set_register(HV_REGISTER_CRASH_P3, virt_to_phys(hv_panic_page));
	hv_set_register(HV_REGISTER_CRASH_P4, bytes_written);

	/*
	 * Let Hyper-V know there is crash data available along with
	 * the panic message.
	 */
	hv_set_register(HV_REGISTER_CRASH_CTL,
			(HV_CRASH_CTL_CRASH_NOTIFY | HV_CRASH_CTL_CRASH_NOTIFY_MSG));
}

static struct kmsg_dumper hv_kmsg_dumper = {
	.dump = hv_kmsg_dump,
};

static void hv_kmsg_dump_register(void)
{
	int ret;

	hv_panic_page = hv_alloc_hyperv_zeroed_page();
	if (!hv_panic_page) {
		pr_err("Hyper-V: panic message page memory allocation failed\n");
		return;
	}

	ret = kmsg_dump_register(&hv_kmsg_dumper);
	if (ret) {
		pr_err("Hyper-V: kmsg dump register error 0x%x\n", ret);
		hv_free_hyperv_page((unsigned long)hv_panic_page);
		hv_panic_page = NULL;
	}
}

static struct ctl_table_header *hv_ctl_table_hdr;

/*
 * sysctl option to allow the user to control whether kmsg data should be
 * reported to Hyper-V on panic.
 */
static struct ctl_table hv_ctl_table[] = {
	{
		.procname = "hyperv_record_panic_msg",
		.data = &sysctl_record_panic_msg,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE
	},
	{}
};

static struct ctl_table hv_root_table[] = {
	{
		.procname = "kernel",
		.mode = 0555,
		.child = hv_ctl_table
	},
	{}
};

/*
 * vmbus_bus_init - Main vmbus driver initialization routine.
 *
 * Here, we
 *	- initialize the vmbus driver context
 *	- invoke the vmbus hv main init routine
 *	- retrieve the channel offers
 */
static int vmbus_bus_init(void)
{
	int ret;

	ret = hv_init();
	if (ret != 0) {
		pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
		return ret;
	}

	ret = bus_register(&hv_bus);
	if (ret)
		return ret;

	/*
	 * VMbus interrupts are best modeled as per-cpu interrupts. If
	 * on an architecture with support for per-cpu IRQs (e.g. ARM64),
	 * allocate a per-cpu IRQ using standard Linux kernel functionality.
	 * If not on such an architecture (e.g., x86/x64), then rely on
	 * code in the arch-specific portion of the code tree to connect
	 * the VMbus interrupt handler.
	 */

	if (vmbus_irq == -1) {
		hv_setup_vmbus_handler(vmbus_isr);
	} else {
		vmbus_evt = alloc_percpu(long);
		ret = request_percpu_irq(vmbus_irq, vmbus_percpu_isr,
					 "Hyper-V VMbus", vmbus_evt);
		if (ret) {
			pr_err("Can't request Hyper-V VMbus IRQ %d, Err %d",
			       vmbus_irq, ret);
			free_percpu(vmbus_evt);
			goto err_setup;
		}
	}

	ret = hv_synic_alloc();
	if (ret)
		goto err_alloc;

	/*
	 * Initialize the per-cpu interrupt state and stimer state.
	 * Then connect to the host.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
				hv_synic_init, hv_synic_cleanup);
	if (ret < 0)
		goto err_cpuhp;
	hyperv_cpuhp_online = ret;

	ret = vmbus_connect();
	if (ret)
		goto err_connect;

	/*
	 * Only register if the crash MSRs are available
	 */
	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		u64 hyperv_crash_ctl;
		/*
		 * Sysctl registration is not fatal, since by default
		 * reporting is enabled.
		 */
		hv_ctl_table_hdr = register_sysctl_table(hv_root_table);
		if (!hv_ctl_table_hdr)
			pr_err("Hyper-V: sysctl table register error");

		/*
		 * Register for panic kmsg callback only if the right
		 * capability is supported by the hypervisor.
		 */
		hyperv_crash_ctl = hv_get_register(HV_REGISTER_CRASH_CTL);
		if (hyperv_crash_ctl & HV_CRASH_CTL_CRASH_NOTIFY_MSG)
			hv_kmsg_dump_register();

		register_die_notifier(&hyperv_die_block);
	}

	/*
	 * Always register the panic notifier because we need to unload
	 * the VMbus channel connection to prevent any VMbus
	 * activity after the VM panics.
	 */
	atomic_notifier_chain_register(&panic_notifier_list,
				       &hyperv_panic_block);

	vmbus_request_offers();

	return 0;

err_connect:
	cpuhp_remove_state(hyperv_cpuhp_online);
err_cpuhp:
	hv_synic_free();
err_alloc:
	if (vmbus_irq == -1) {
		hv_remove_vmbus_handler();
	} else {
		free_percpu_irq(vmbus_irq, vmbus_evt);
		free_percpu(vmbus_evt);
	}
err_setup:
	bus_unregister(&hv_bus);
	unregister_sysctl_table(hv_ctl_table_hdr);
	hv_ctl_table_hdr = NULL;
	return ret;
}

/**
 * __vmbus_driver_register() - Register a vmbus driver
 * @hv_driver: Pointer to driver structure you want to register
 * @owner: owner module of the drv
 * @mod_name: module name string
 *
 * Registers the given driver with Linux through the 'driver_register()' call
 * and sets up the hyper-v vmbus handling for this driver.
 * It will return the state of the 'driver_register()' call.
 *
 */
int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name)
{
	int ret;

	pr_info("registering driver %s\n", hv_driver->name);

	ret = vmbus_exists();
	if (ret < 0)
		return ret;

	hv_driver->driver.name = hv_driver->name;
	hv_driver->driver.owner = owner;
	hv_driver->driver.mod_name = mod_name;
	hv_driver->driver.bus = &hv_bus;

	spin_lock_init(&hv_driver->dynids.lock);
	INIT_LIST_HEAD(&hv_driver->dynids.list);

	ret = driver_register(&hv_driver->driver);

	return ret;
}
EXPORT_SYMBOL_GPL(__vmbus_driver_register);
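
/*
 * A minimal sketch of how a child driver typically uses this; the names
 * and the all-zero GUID below are placeholders, not a real device class:
 *
 *	static const struct hv_vmbus_device_id my_id_table[] = {
 *		{ .guid = GUID_INIT(0x00000000, 0x0000, 0x0000, 0x00, 0x00,
 *				    0x00, 0x00, 0x00, 0x00, 0x00, 0x00) },
 *		{ },
 *	};
 *	static struct hv_driver my_drv = {
 *		.name     = "my_drv",
 *		.id_table = my_id_table,
 *		.probe    = my_probe,
 *		.remove   = my_remove,
 *	};
 *	module_driver(my_drv, vmbus_driver_register, vmbus_driver_unregister);
 *
 * vmbus_driver_register() is the wrapper macro that supplies THIS_MODULE
 * and KBUILD_MODNAME to __vmbus_driver_register().
 */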

/**
 * vmbus_driver_unregister() - Unregister a vmbus driver
 * @hv_driver: Pointer to driver structure you want to
 *		un-register
 *
 * Un-register the given driver that was previously registered with a call to
 * vmbus_driver_register()
 */
void vmbus_driver_unregister(struct hv_driver *hv_driver)
{
	pr_info("unregistering driver %s\n", hv_driver->name);

	if (!vmbus_exists()) {
		driver_unregister(&hv_driver->driver);
		vmbus_free_dynids(hv_driver);
	}
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
1657
1658
1659 /*
1660 * Called when last reference to channel is gone.
1661 */
vmbus_chan_release(struct kobject * kobj)1662 static void vmbus_chan_release(struct kobject *kobj)
1663 {
1664 struct vmbus_channel *channel
1665 = container_of(kobj, struct vmbus_channel, kobj);
1666
1667 kfree_rcu(channel, rcu);
1668 }
1669
1670 struct vmbus_chan_attribute {
1671 struct attribute attr;
1672 ssize_t (*show)(struct vmbus_channel *chan, char *buf);
1673 ssize_t (*store)(struct vmbus_channel *chan,
1674 const char *buf, size_t count);
1675 };
1676 #define VMBUS_CHAN_ATTR(_name, _mode, _show, _store) \
1677 struct vmbus_chan_attribute chan_attr_##_name \
1678 = __ATTR(_name, _mode, _show, _store)
1679 #define VMBUS_CHAN_ATTR_RW(_name) \
1680 struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RW(_name)
1681 #define VMBUS_CHAN_ATTR_RO(_name) \
1682 struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RO(_name)
1683 #define VMBUS_CHAN_ATTR_WO(_name) \
1684 struct vmbus_chan_attribute chan_attr_##_name = __ATTR_WO(_name)
1685
vmbus_chan_attr_show(struct kobject * kobj,struct attribute * attr,char * buf)1686 static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
1687 struct attribute *attr, char *buf)
1688 {
1689 const struct vmbus_chan_attribute *attribute
1690 = container_of(attr, struct vmbus_chan_attribute, attr);
1691 struct vmbus_channel *chan
1692 = container_of(kobj, struct vmbus_channel, kobj);
1693
1694 if (!attribute->show)
1695 return -EIO;
1696
1697 return attribute->show(chan, buf);
1698 }
1699
vmbus_chan_attr_store(struct kobject * kobj,struct attribute * attr,const char * buf,size_t count)1700 static ssize_t vmbus_chan_attr_store(struct kobject *kobj,
1701 struct attribute *attr, const char *buf,
1702 size_t count)
1703 {
1704 const struct vmbus_chan_attribute *attribute
1705 = container_of(attr, struct vmbus_chan_attribute, attr);
1706 struct vmbus_channel *chan
1707 = container_of(kobj, struct vmbus_channel, kobj);
1708
1709 if (!attribute->store)
1710 return -EIO;
1711
1712 return attribute->store(chan, buf, count);
1713 }
1714
1715 static const struct sysfs_ops vmbus_chan_sysfs_ops = {
1716 .show = vmbus_chan_attr_show,
1717 .store = vmbus_chan_attr_store,
1718 };

static ssize_t out_mask_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(out_mask);

static ssize_t in_mask_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(in_mask);

static ssize_t read_avail_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", hv_get_bytes_to_read(rbi));
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(read_avail);

static ssize_t write_avail_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", hv_get_bytes_to_write(rbi));
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(write_avail);

static ssize_t target_cpu_show(struct vmbus_channel *channel, char *buf)
{
	return sprintf(buf, "%u\n", channel->target_cpu);
}
static ssize_t target_cpu_store(struct vmbus_channel *channel,
				const char *buf, size_t count)
{
	u32 target_cpu, origin_cpu;
	ssize_t ret = count;

	if (vmbus_proto_version < VERSION_WIN10_V4_1)
		return -EIO;
	if (sscanf(buf, "%u", &target_cpu) != 1)
		return -EIO;

	/* Validate target_cpu for the cpumask_test_cpu() operation below. */
	if (target_cpu >= nr_cpumask_bits)
		return -EINVAL;

	/* No CPUs should come up or down during this. */
	cpus_read_lock();

	if (!cpu_online(target_cpu)) {
		cpus_read_unlock();
		return -EINVAL;
	}

	/*
	 * Synchronizes target_cpu_store() and channel closure:
	 *
	 * { Initially: state = CHANNEL_OPENED }
	 *
	 * CPU1				CPU2
	 *
	 * [target_cpu_store()]		[vmbus_disconnect_ring()]
	 *
	 * LOCK channel_mutex		LOCK channel_mutex
	 * LOAD r1 = state		LOAD r2 = state
	 * IF (r1 == CHANNEL_OPENED)	IF (r2 == CHANNEL_OPENED)
	 *   SEND MODIFYCHANNEL		  STORE state = CHANNEL_OPEN
	 *   [...]			  SEND CLOSECHANNEL
	 * UNLOCK channel_mutex		UNLOCK channel_mutex
	 *
	 * Forbids: r1 == r2 == CHANNEL_OPENED (i.e., CPU1's LOCK precedes
	 * CPU2's LOCK) && CPU2's SEND precedes CPU1's SEND
	 *
	 * Note.  The host processes the channel messages "sequentially", in
	 * the order in which they are received on a per-partition basis.
	 */
	mutex_lock(&vmbus_connection.channel_mutex);

	/*
	 * Hyper-V will ignore MODIFYCHANNEL messages for "non-open" channels;
	 * avoid sending the message and fail here for such channels.
	 */
	if (channel->state != CHANNEL_OPENED_STATE) {
		ret = -EIO;
		goto cpu_store_unlock;
	}

	origin_cpu = channel->target_cpu;
	if (target_cpu == origin_cpu)
		goto cpu_store_unlock;

	if (vmbus_send_modifychannel(channel,
				     hv_cpu_number_to_vp_number(target_cpu))) {
		ret = -EIO;
		goto cpu_store_unlock;
	}

	/*
	 * For versions before VERSION_WIN10_V5_3, the following warning holds:
	 *
	 * Warning.  At this point, there is *no* guarantee that the host will
	 * have successfully processed the vmbus_send_modifychannel() request.
	 * See the header comment of vmbus_send_modifychannel() for more info.
	 *
	 * Lags in the processing of the above vmbus_send_modifychannel() can
	 * result in missed interrupts if the "old" target CPU is taken offline
	 * before Hyper-V starts sending interrupts to the "new" target CPU.
	 * But apart from this offlining scenario, the code tolerates such
	 * lags.  It will function correctly even if a channel interrupt comes
	 * in on a CPU that is different from the channel target_cpu value.
	 */

	channel->target_cpu = target_cpu;

	/* See init_vp_index(). */
	if (hv_is_perf_channel(channel))
		hv_update_alloced_cpus(origin_cpu, target_cpu);

	/* Currently set only for storvsc channels. */
	if (channel->change_target_cpu_callback) {
		(*channel->change_target_cpu_callback)(channel,
				origin_cpu, target_cpu);
	}

cpu_store_unlock:
	mutex_unlock(&vmbus_connection.channel_mutex);
	cpus_read_unlock();
	return ret;
}
static VMBUS_CHAN_ATTR(cpu, 0644, target_cpu_show, target_cpu_store);
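
/*
 * Illustrative usage (paths are a sketch; <guid> is the device instance
 * GUID and 15 a hypothetical relid): writing a CPU number to the "cpu"
 * file invokes target_cpu_store() above, e.g. from a shell:
 *
 *	echo 3 > /sys/bus/vmbus/devices/<guid>/channels/15/cpu
 *
 * The write fails with -EIO if the host lacks MODIFYCHANNEL support
 * (pre-VERSION_WIN10_V4_1) or the channel is not currently open, and
 * with -EINVAL if the requested CPU is not online.
 */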

static ssize_t channel_pending_show(struct vmbus_channel *channel,
				    char *buf)
{
	return sprintf(buf, "%d\n",
		       channel_pending(channel,
				       vmbus_connection.monitor_pages[1]));
}
static VMBUS_CHAN_ATTR(pending, 0444, channel_pending_show, NULL);

static ssize_t channel_latency_show(struct vmbus_channel *channel,
				    char *buf)
{
	return sprintf(buf, "%d\n",
		       channel_latency(channel,
				       vmbus_connection.monitor_pages[1]));
}
static VMBUS_CHAN_ATTR(latency, 0444, channel_latency_show, NULL);

static ssize_t channel_interrupts_show(struct vmbus_channel *channel, char *buf)
{
	return sprintf(buf, "%llu\n", channel->interrupts);
}
static VMBUS_CHAN_ATTR(interrupts, 0444, channel_interrupts_show, NULL);

static ssize_t channel_events_show(struct vmbus_channel *channel, char *buf)
{
	return sprintf(buf, "%llu\n", channel->sig_events);
}
static VMBUS_CHAN_ATTR(events, 0444, channel_events_show, NULL);

static ssize_t channel_intr_in_full_show(struct vmbus_channel *channel,
					 char *buf)
{
	return sprintf(buf, "%llu\n",
		       (unsigned long long)channel->intr_in_full);
}
static VMBUS_CHAN_ATTR(intr_in_full, 0444, channel_intr_in_full_show, NULL);

static ssize_t channel_intr_out_empty_show(struct vmbus_channel *channel,
					   char *buf)
{
	return sprintf(buf, "%llu\n",
		       (unsigned long long)channel->intr_out_empty);
}
static VMBUS_CHAN_ATTR(intr_out_empty, 0444, channel_intr_out_empty_show, NULL);

static ssize_t channel_out_full_first_show(struct vmbus_channel *channel,
					   char *buf)
{
	return sprintf(buf, "%llu\n",
		       (unsigned long long)channel->out_full_first);
}
static VMBUS_CHAN_ATTR(out_full_first, 0444, channel_out_full_first_show, NULL);

static ssize_t channel_out_full_total_show(struct vmbus_channel *channel,
					   char *buf)
{
	return sprintf(buf, "%llu\n",
		       (unsigned long long)channel->out_full_total);
}
static VMBUS_CHAN_ATTR(out_full_total, 0444, channel_out_full_total_show, NULL);

static ssize_t subchannel_monitor_id_show(struct vmbus_channel *channel,
					  char *buf)
{
	return sprintf(buf, "%u\n", channel->offermsg.monitorid);
}
static VMBUS_CHAN_ATTR(monitor_id, 0444, subchannel_monitor_id_show, NULL);

static ssize_t subchannel_id_show(struct vmbus_channel *channel,
				  char *buf)
{
	return sprintf(buf, "%u\n",
		       channel->offermsg.offer.sub_channel_index);
}
static VMBUS_CHAN_ATTR_RO(subchannel_id);

static struct attribute *vmbus_chan_attrs[] = {
	&chan_attr_out_mask.attr,
	&chan_attr_in_mask.attr,
	&chan_attr_read_avail.attr,
	&chan_attr_write_avail.attr,
	&chan_attr_cpu.attr,
	&chan_attr_pending.attr,
	&chan_attr_latency.attr,
	&chan_attr_interrupts.attr,
	&chan_attr_events.attr,
	&chan_attr_intr_in_full.attr,
	&chan_attr_intr_out_empty.attr,
	&chan_attr_out_full_first.attr,
	&chan_attr_out_full_total.attr,
	&chan_attr_monitor_id.attr,
	&chan_attr_subchannel_id.attr,
	NULL
};

/*
 * Channel-level attribute_group callback function.  Returns the permission
 * for each attribute, and returns 0 if an attribute is not visible.
 */
static umode_t vmbus_chan_attr_is_visible(struct kobject *kobj,
					  struct attribute *attr, int idx)
{
	const struct vmbus_channel *channel =
		container_of(kobj, struct vmbus_channel, kobj);

	/* Hide the monitor attributes if the monitor mechanism is not used. */
	if (!channel->offermsg.monitor_allocated &&
	    (attr == &chan_attr_pending.attr ||
	     attr == &chan_attr_latency.attr ||
	     attr == &chan_attr_monitor_id.attr))
		return 0;

	return attr->mode;
}

static struct attribute_group vmbus_chan_group = {
	.attrs = vmbus_chan_attrs,
	.is_visible = vmbus_chan_attr_is_visible
};

static struct kobj_type vmbus_chan_ktype = {
	.sysfs_ops = &vmbus_chan_sysfs_ops,
	.release = vmbus_chan_release,
};

/*
 * vmbus_add_channel_kobj - setup a sub-directory under device/channels
 */
int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
{
	const struct device *device = &dev->device;
	struct kobject *kobj = &channel->kobj;
	u32 relid = channel->offermsg.child_relid;
	int ret;

	kobj->kset = dev->channels_kset;
	ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
				   "%u", relid);
	if (ret)
		return ret;

	ret = sysfs_create_group(kobj, &vmbus_chan_group);

	if (ret) {
		/*
		 * The calling functions' error handling paths will cleanup the
		 * empty channel directory.
		 */
		dev_err(device, "Unable to set up channel sysfs files\n");
		return ret;
	}

	kobject_uevent(kobj, KOBJ_ADD);

	return 0;
}
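
/*
 * Resulting sysfs layout for a channel with relid R (path sketch,
 * combining this function with vmbus_device_register() below, which
 * names the device after its instance GUID and creates the "channels"
 * kset):
 *
 *	/sys/bus/vmbus/devices/<instance-guid>/channels/R/
 *		cpu  events  in_mask  interrupts  out_mask  read_avail
 *		subchannel_id  write_avail  ...
 *
 * The monitor-specific files (pending, latency, monitor_id) appear only
 * when vmbus_chan_attr_is_visible() reports them visible.
 */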

/*
 * vmbus_remove_channel_attr_group - remove the channel's attribute group
 */
void vmbus_remove_channel_attr_group(struct vmbus_channel *channel)
{
	sysfs_remove_group(&channel->kobj, &vmbus_chan_group);
}

/*
 * vmbus_device_create - Create a new child device object for the vmbus;
 * the caller registers it separately via vmbus_device_register().
 */
struct hv_device *vmbus_device_create(const guid_t *type,
				      const guid_t *instance,
				      struct vmbus_channel *channel)
{
	struct hv_device *child_device_obj;

	child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
	if (!child_device_obj) {
		pr_err("Unable to allocate device object for child device\n");
		return NULL;
	}

	child_device_obj->channel = channel;
	guid_copy(&child_device_obj->dev_type, type);
	guid_copy(&child_device_obj->dev_instance, instance);
	child_device_obj->vendor_id = 0x1414; /* MSFT vendor ID */

	return child_device_obj;
}

/*
 * vmbus_device_register - Register the child device
 */
int vmbus_device_register(struct hv_device *child_device_obj)
{
	struct kobject *kobj = &child_device_obj->device.kobj;
	int ret;

	dev_set_name(&child_device_obj->device, "%pUl",
		     &child_device_obj->channel->offermsg.offer.if_instance);

	child_device_obj->device.bus = &hv_bus;
	child_device_obj->device.parent = &hv_acpi_dev->dev;
	child_device_obj->device.release = vmbus_device_release;

	/*
	 * Register with the LDM.  This kicks off the driver/device binding,
	 * which eventually calls vmbus_match() and vmbus_probe().
	 */
	ret = device_register(&child_device_obj->device);
	if (ret) {
		pr_err("Unable to register child device\n");
		return ret;
	}

	child_device_obj->channels_kset = kset_create_and_add("channels",
							      NULL, kobj);
	if (!child_device_obj->channels_kset) {
		ret = -ENOMEM;
		goto err_dev_unregister;
	}

	ret = vmbus_add_channel_kobj(child_device_obj,
				     child_device_obj->channel);
	if (ret) {
		pr_err("Unable to register primary channel\n");
		goto err_kset_unregister;
	}
	hv_debug_add_dev_dir(child_device_obj);

	return 0;

err_kset_unregister:
	kset_unregister(child_device_obj->channels_kset);

err_dev_unregister:
	device_unregister(&child_device_obj->device);
	return ret;
}

/*
 * vmbus_device_unregister - Remove the specified child device
 * from the vmbus.
 */
void vmbus_device_unregister(struct hv_device *device_obj)
{
	pr_debug("child device %s unregistered\n",
		 dev_name(&device_obj->device));

	kset_unregister(device_obj->channels_kset);

	/*
	 * Kick off the process of unregistering the device.
	 * This will call vmbus_remove() and eventually vmbus_device_release().
	 */
	device_unregister(&device_obj->device);
}

/*
 * VMBus is an ACPI-enumerated device.  Get the information we
 * need from the DSDT.
 */
#define VTPM_BASE_ADDRESS 0xfed40000
static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
{
	resource_size_t start = 0;
	resource_size_t end = 0;
	struct resource *new_res;
	struct resource **old_res = &hyperv_mmio;
	struct resource **prev_res = NULL;
	struct resource r;

	switch (res->type) {

	/*
	 * "Address" descriptors are for bus windows.  Ignore
	 * "memory" descriptors, which are for registers on
	 * devices.
	 */
	case ACPI_RESOURCE_TYPE_ADDRESS32:
		start = res->data.address32.address.minimum;
		end = res->data.address32.address.maximum;
		break;

	case ACPI_RESOURCE_TYPE_ADDRESS64:
		start = res->data.address64.address.minimum;
		end = res->data.address64.address.maximum;
		break;

	/*
	 * The IRQ information is needed only on ARM64, where Hyper-V
	 * sets it up in the extended format.  IRQ information is present
	 * on x86/x64 in the non-extended format but it is not used by
	 * Linux, so don't bother checking for the non-extended format.
	 */
	case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
		if (!acpi_dev_resource_interrupt(res, 0, &r)) {
			pr_err("Unable to parse Hyper-V ACPI interrupt\n");
			return AE_ERROR;
		}
		/* ARM64 INTID for VMbus */
		vmbus_interrupt = res->data.extended_irq.interrupts[0];
		/* Linux IRQ number */
		vmbus_irq = r.start;
		return AE_OK;

	default:
		/* Unused resource type */
		return AE_OK;

	}
	/*
	 * Ignore ranges that are below 1MB, as they're not
	 * necessary or useful here.
	 */
	if (end < 0x100000)
		return AE_OK;

	new_res = kzalloc(sizeof(*new_res), GFP_ATOMIC);
	if (!new_res)
		return AE_NO_MEMORY;

	/* If this range overlaps the virtual TPM, truncate it. */
	if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS)
		end = VTPM_BASE_ADDRESS;

	new_res->name = "hyperv mmio";
	new_res->flags = IORESOURCE_MEM;
	new_res->start = start;
	new_res->end = end;

	/*
	 * If two ranges are adjacent, merge them.
	 */
	do {
		if (!*old_res) {
			*old_res = new_res;
			break;
		}

		if (((*old_res)->end + 1) == new_res->start) {
			(*old_res)->end = new_res->end;
			kfree(new_res);
			break;
		}

		if ((*old_res)->start == new_res->end + 1) {
			(*old_res)->start = new_res->start;
			kfree(new_res);
			break;
		}

		if ((*old_res)->start > new_res->end) {
			new_res->sibling = *old_res;
			if (prev_res)
				(*prev_res)->sibling = new_res;
			*old_res = new_res;
			break;
		}

		prev_res = old_res;
		old_res = &(*old_res)->sibling;

	} while (1);

	return AE_OK;
}
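
/*
 * Worked example of the merge loop above (hypothetical _CRS values):
 * if the windows [0xf8000000, 0xf8ffffff] and [0xf9000000, 0xfbffffff]
 * arrive as separate descriptors, the second pass finds
 * (*old_res)->end + 1 == new_res->start and coalesces them into a
 * single "hyperv mmio" resource spanning [0xf8000000, 0xfbffffff];
 * non-adjacent windows are instead kept sorted on the sibling list.
 */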

static int vmbus_acpi_remove(struct acpi_device *device)
{
	struct resource *cur_res;
	struct resource *next_res;

	if (hyperv_mmio) {
		if (fb_mmio) {
			__release_region(hyperv_mmio, fb_mmio->start,
					 resource_size(fb_mmio));
			fb_mmio = NULL;
		}

		for (cur_res = hyperv_mmio; cur_res; cur_res = next_res) {
			next_res = cur_res->sibling;
			kfree(cur_res);
		}
	}

	return 0;
}

static void vmbus_reserve_fb(void)
{
	int size;
	/*
	 * Make a claim for the frame buffer in the resource tree under the
	 * first node, which will be the one below 4GB.  The length seems to
	 * be underreported, particularly in a Generation 1 VM.  So start out
	 * reserving a larger area and make it smaller until it succeeds.
	 */

	if (screen_info.lfb_base) {
		if (efi_enabled(EFI_BOOT))
			size = max_t(__u32, screen_info.lfb_size, 0x800000);
		else
			size = max_t(__u32, screen_info.lfb_size, 0x4000000);

		for (; !fb_mmio && (size >= 0x100000); size >>= 1) {
			fb_mmio = __request_region(hyperv_mmio,
						   screen_info.lfb_base, size,
						   fb_mmio_name, 0);
		}
	}
}
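
/*
 * Example of the shrinking retry above (hypothetical numbers): on an EFI
 * boot with lfb_size = 0x300000, the first attempt reserves 0x800000
 * bytes at lfb_base; if the containing "hyperv mmio" window is too small,
 * the loop retries with 0x400000, 0x200000, and finally 0x100000 before
 * giving up and leaving fb_mmio NULL.
 */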

/**
 * vmbus_allocate_mmio() - Pick a memory-mapped I/O range.
 * @new:		If successful, supplies a pointer to the
 *			allocated MMIO space.
 * @device_obj:		Identifies the caller
 * @min:		Minimum guest physical address of the
 *			allocation
 * @max:		Maximum guest physical address
 * @size:		Size of the range to be allocated
 * @align:		Alignment of the range to be allocated
 * @fb_overlap_ok:	Whether this allocation can be allowed
 *			to overlap the video frame buffer.
 *
 * This function walks the resources granted to VMBus by the
 * _CRS object in the ACPI namespace underneath the parent
 * "bridge", whether that's a root PCI bus in the Generation 1
 * case or a Module Device in the Generation 2 case.  It then
 * attempts to allocate from the global MMIO pool in a way that
 * matches the constraints supplied in these parameters and by
 * that _CRS.
 *
 * Return: 0 on success, -errno on failure
 */
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
			resource_size_t min, resource_size_t max,
			resource_size_t size, resource_size_t align,
			bool fb_overlap_ok)
{
	struct resource *iter, *shadow;
	resource_size_t range_min, range_max, start;
	const char *dev_n = dev_name(&device_obj->device);
	int retval;

	retval = -ENXIO;
	mutex_lock(&hyperv_mmio_lock);

	/*
	 * If overlaps with frame buffers are allowed, then first attempt to
	 * make the allocation from within the reserved region.  Because it
	 * is already reserved, no shadow allocation is necessary.
	 */
	if (fb_overlap_ok && fb_mmio && !(min > fb_mmio->end) &&
	    !(max < fb_mmio->start)) {

		range_min = fb_mmio->start;
		range_max = fb_mmio->end;
		start = (range_min + align - 1) & ~(align - 1);
		for (; start + size - 1 <= range_max; start += align) {
			*new = request_mem_region_exclusive(start, size, dev_n);
			if (*new) {
				retval = 0;
				goto exit;
			}
		}
	}

	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
		if ((iter->start >= max) || (iter->end <= min))
			continue;

		range_min = iter->start;
		range_max = iter->end;
		start = (range_min + align - 1) & ~(align - 1);
		for (; start + size - 1 <= range_max; start += align) {
			shadow = __request_region(iter, start, size, NULL,
						  IORESOURCE_BUSY);
			if (!shadow)
				continue;

			*new = request_mem_region_exclusive(start, size, dev_n);
			if (*new) {
				shadow->name = (char *)*new;
				retval = 0;
				goto exit;
			}

			__release_region(iter, start, size);
		}
	}

exit:
	mutex_unlock(&hyperv_mmio_lock);
	return retval;
}
EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);
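
/*
 * Minimal caller sketch (illustrative only, not used by this driver; the
 * function name is hypothetical).  A client requests 1 MiB of MMIO
 * anywhere in guest physical space, page-aligned and not overlapping the
 * frame buffer, then releases it with vmbus_free_mmio():
 */
static int __maybe_unused vmbus_mmio_usage_example(struct hv_device *dev)
{
	struct resource *res;
	int ret;

	/* min = 0 and max = -1 place no constraint on the address. */
	ret = vmbus_allocate_mmio(&res, dev, 0, -1, 0x100000,
				  PAGE_SIZE, false);
	if (ret)
		return ret;

	/* ... ioremap() and use [res->start, res->end] here ... */

	vmbus_free_mmio(res->start, resource_size(res));
	return 0;
}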

/**
 * vmbus_free_mmio() - Free a memory-mapped I/O range.
 * @start:		Base address of region to release.
 * @size:		Size of the range to be released.
 *
 * This function releases anything requested by
 * vmbus_allocate_mmio().
 */
void vmbus_free_mmio(resource_size_t start, resource_size_t size)
{
	struct resource *iter;

	mutex_lock(&hyperv_mmio_lock);
	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
		if ((iter->start >= start + size) || (iter->end <= start))
			continue;

		__release_region(iter, start, size);
	}
	release_mem_region(start, size);
	mutex_unlock(&hyperv_mmio_lock);
}
EXPORT_SYMBOL_GPL(vmbus_free_mmio);

static int vmbus_acpi_add(struct acpi_device *device)
{
	acpi_status result;
	int ret_val = -ENODEV;
	struct acpi_device *ancestor;

	hv_acpi_dev = device;

	result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
				     vmbus_walk_resources, NULL);

	if (ACPI_FAILURE(result))
		goto acpi_walk_err;
	/*
	 * Some ancestor of the vmbus ACPI device (Gen1 or Gen2
	 * firmware) is the VMOD that has the MMIO ranges.  Get that.
	 */
	for (ancestor = device->parent; ancestor; ancestor = ancestor->parent) {
		result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
					     vmbus_walk_resources, NULL);

		if (ACPI_FAILURE(result))
			continue;
		if (hyperv_mmio) {
			vmbus_reserve_fb();
			break;
		}
	}
	ret_val = 0;

acpi_walk_err:
	complete(&probe_event);
	if (ret_val)
		vmbus_acpi_remove(device);
	return ret_val;
}

#ifdef CONFIG_PM_SLEEP
static int vmbus_bus_suspend(struct device *dev)
{
	struct vmbus_channel *channel, *sc;

	while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {
		/*
		 * We wait here until the completion of any channel
		 * offers that are currently in progress.
		 */
		usleep_range(1000, 2000);
	}

	mutex_lock(&vmbus_connection.channel_mutex);
	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (!is_hvsock_channel(channel))
			continue;

		vmbus_force_channel_rescinded(channel);
	}
	mutex_unlock(&vmbus_connection.channel_mutex);

	/*
	 * Wait until all the sub-channels and hv_sock channels have been
	 * cleaned up.  Sub-channels must be destroyed upon suspend, otherwise
	 * they would conflict with the new sub-channels that will be created
	 * in the resume path.  hv_sock channels should also be destroyed, but
	 * an hv_sock channel of an established hv_sock connection cannot
	 * really be destroyed since it may still be referenced by the
	 * userspace application, so we just force the hv_sock channel to be
	 * rescinded by vmbus_force_channel_rescinded(), and the userspace
	 * application will thoroughly destroy the channel after hibernation.
	 *
	 * Note: the counter nr_chan_close_on_suspend may never go above 0 if
	 * the VM has no sub-channel and hv_sock channel, e.g. a 1-vCPU VM.
	 */
	if (atomic_read(&vmbus_connection.nr_chan_close_on_suspend) > 0)
		wait_for_completion(&vmbus_connection.ready_for_suspend_event);

	if (atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0) {
		pr_err("Cannot suspend due to a previously failed resume\n");
		return -EBUSY;
	}

	mutex_lock(&vmbus_connection.channel_mutex);

	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		/*
		 * Remove the channel from the array of channels and invalidate
		 * the channel's relid.  Upon resume, vmbus_onoffer() will fix
		 * up the relid (and other fields, if necessary) and add the
		 * channel back to the array.
		 */
		vmbus_channel_unmap_relid(channel);
		channel->offermsg.child_relid = INVALID_RELID;

		if (is_hvsock_channel(channel)) {
			if (!channel->rescind) {
				pr_err("hv_sock channel not rescinded!\n");
				WARN_ON_ONCE(1);
			}
			continue;
		}

		list_for_each_entry(sc, &channel->sc_list, sc_list) {
			pr_err("Sub-channel not deleted!\n");
			WARN_ON_ONCE(1);
		}

		atomic_inc(&vmbus_connection.nr_chan_fixup_on_resume);
	}

	mutex_unlock(&vmbus_connection.channel_mutex);

	vmbus_initiate_unload(false);

	/* Reset the event for the next resume. */
	reinit_completion(&vmbus_connection.ready_for_resume_event);

	return 0;
}

static int vmbus_bus_resume(struct device *dev)
{
	struct vmbus_channel_msginfo *msginfo;
	size_t msgsize;
	int ret;

	/*
	 * We only use the 'vmbus_proto_version' that was in use before
	 * hibernation to re-negotiate with the host.
	 */
	if (!vmbus_proto_version) {
		pr_err("Invalid proto version = 0x%x\n", vmbus_proto_version);
		return -EINVAL;
	}

	msgsize = sizeof(*msginfo) +
		  sizeof(struct vmbus_channel_initiate_contact);

	msginfo = kzalloc(msgsize, GFP_KERNEL);

	if (msginfo == NULL)
		return -ENOMEM;

	ret = vmbus_negotiate_version(msginfo, vmbus_proto_version);

	kfree(msginfo);

	if (ret != 0)
		return ret;

	WARN_ON(atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) == 0);

	vmbus_request_offers();

	if (wait_for_completion_timeout(
		&vmbus_connection.ready_for_resume_event, 10 * HZ) == 0)
		pr_err("Some vmbus device is missing after suspend\n");

	/* Reset the event for the next suspend. */
	reinit_completion(&vmbus_connection.ready_for_suspend_event);

	return 0;
}
#else
#define vmbus_bus_suspend NULL
#define vmbus_bus_resume NULL
#endif /* CONFIG_PM_SLEEP */

static const struct acpi_device_id vmbus_acpi_device_ids[] = {
	{"VMBUS", 0},
	{"VMBus", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);

/*
 * Note: we must use the "noirq" ops, otherwise hibernation cannot work with
 * PCI device assignment, because "pci_dev_pm_ops" uses the "noirq" ops: in
 * the resume path, the pci "noirq" restore op runs before the "non-noirq"
 * ops (see resume_target_kernel() -> dpm_resume_start(), and
 * hibernation_restore() -> dpm_resume_end()).  This means vmbus_bus_resume()
 * and the pci-hyperv resume callback must also run via the "noirq" ops.
 *
 * Set suspend_noirq/resume_noirq to NULL for Suspend-to-Idle: see the comment
 * earlier in this file before vmbus_pm.
 */

static const struct dev_pm_ops vmbus_bus_pm = {
	.suspend_noirq	= NULL,
	.resume_noirq	= NULL,
	.freeze_noirq	= vmbus_bus_suspend,
	.thaw_noirq	= vmbus_bus_resume,
	.poweroff_noirq	= vmbus_bus_suspend,
	.restore_noirq	= vmbus_bus_resume
};

static struct acpi_driver vmbus_acpi_driver = {
	.name = "vmbus",
	.ids = vmbus_acpi_device_ids,
	.ops = {
		.add = vmbus_acpi_add,
		.remove = vmbus_acpi_remove,
	},
	.drv.pm = &vmbus_bus_pm,
};

static void hv_kexec_handler(void)
{
	hv_stimer_global_cleanup();
	vmbus_initiate_unload(false);
	/* Make sure conn_state is set as hv_synic_cleanup checks for it */
	mb();
	cpuhp_remove_state(hyperv_cpuhp_online);
}

static void hv_crash_handler(struct pt_regs *regs)
{
	int cpu;

	vmbus_initiate_unload(true);
	/*
	 * In the crash handler we can't schedule synic cleanup for all CPUs,
	 * so do the cleanup for the current CPU only.  This should be
	 * sufficient for kdump.
	 */
	cpu = smp_processor_id();
	hv_stimer_cleanup(cpu);
	hv_synic_disable_regs(cpu);
}

static int hv_synic_suspend(void)
{
	/*
	 * When we reach here, all the non-boot CPUs have been offlined.
	 * If we're in a legacy configuration where stimer Direct Mode is
	 * not enabled, the stimers on the non-boot CPUs have been unbound
	 * in hv_synic_cleanup() -> hv_stimer_legacy_cleanup() ->
	 * hv_stimer_cleanup() -> clockevents_unbind_device().
	 *
	 * hv_synic_suspend() only runs on CPU0 with interrupts disabled.
	 * Here we do not call hv_stimer_legacy_cleanup() on CPU0 because:
	 * 1) it's unnecessary, as interrupts remain disabled between
	 * syscore_suspend() and syscore_resume(): see create_image() and
	 * resume_target_kernel();
	 * 2) the stimer on CPU0 is automatically disabled later by
	 * syscore_suspend() -> timekeeping_suspend() -> tick_suspend() -> ...
	 * -> clockevents_shutdown() -> ... -> hv_ce_shutdown();
	 * 3) a warning would be triggered if we called
	 * clockevents_unbind_device(), which may sleep, in an
	 * interrupts-disabled context.
	 */

	hv_synic_disable_regs(0);

	return 0;
}

static void hv_synic_resume(void)
{
	hv_synic_enable_regs(0);

	/*
	 * Note: we don't need to call hv_stimer_init(0), because the timer
	 * on CPU0 is not unbound in hv_synic_suspend(), and the timer is
	 * automatically re-enabled in timekeeping_resume().
	 */
}

/* The callbacks run only on CPU0, with irqs_disabled. */
static struct syscore_ops hv_synic_syscore_ops = {
	.suspend = hv_synic_suspend,
	.resume = hv_synic_resume,
};

static int __init hv_acpi_init(void)
{
	int ret, t;

	if (!hv_is_hyperv_initialized())
		return -ENODEV;

	if (hv_root_partition)
		return 0;

	init_completion(&probe_event);

	/*
	 * Get ACPI resources first.
	 */
	ret = acpi_bus_register_driver(&vmbus_acpi_driver);

	if (ret)
		return ret;

	t = wait_for_completion_timeout(&probe_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	/*
	 * If we're on an architecture with a hardcoded hypervisor
	 * vector (i.e. x86/x64), override the VMbus interrupt found
	 * in the ACPI tables.  Ensure vmbus_irq is not set since the
	 * normal Linux IRQ mechanism is not used in this case.
	 */
#ifdef HYPERVISOR_CALLBACK_VECTOR
	vmbus_interrupt = HYPERVISOR_CALLBACK_VECTOR;
	vmbus_irq = -1;
#endif

	hv_debug_init();

	ret = vmbus_bus_init();
	if (ret)
		goto cleanup;

	hv_setup_kexec_handler(hv_kexec_handler);
	hv_setup_crash_handler(hv_crash_handler);

	register_syscore_ops(&hv_synic_syscore_ops);

	return 0;

cleanup:
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
	hv_acpi_dev = NULL;
	return ret;
}

static void __exit vmbus_exit(void)
{
	int cpu;

	unregister_syscore_ops(&hv_synic_syscore_ops);

	hv_remove_kexec_handler();
	hv_remove_crash_handler();
	vmbus_connection.conn_state = DISCONNECTED;
	hv_stimer_global_cleanup();
	vmbus_disconnect();
	if (vmbus_irq == -1) {
		hv_remove_vmbus_handler();
	} else {
		free_percpu_irq(vmbus_irq, vmbus_evt);
		free_percpu(vmbus_evt);
	}
	for_each_online_cpu(cpu) {
		struct hv_per_cpu_context *hv_cpu
			= per_cpu_ptr(hv_context.cpu_context, cpu);

		tasklet_kill(&hv_cpu->msg_dpc);
	}
	hv_debug_rm_all_dir();

	vmbus_free_channels();
	kfree(vmbus_connection.channels);

	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		kmsg_dump_unregister(&hv_kmsg_dumper);
		unregister_die_notifier(&hyperv_die_block);
		atomic_notifier_chain_unregister(&panic_notifier_list,
						 &hyperv_panic_block);
	}

	free_page((unsigned long)hv_panic_page);
	unregister_sysctl_table(hv_ctl_table_hdr);
	hv_ctl_table_hdr = NULL;
	bus_unregister(&hv_bus);

	cpuhp_remove_state(hyperv_cpuhp_online);
	hv_synic_free();
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V VMBus Driver");

subsys_initcall(hv_acpi_init);
module_exit(vmbus_exit);