// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

#include <linux/sched/signal.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "client.h"

/**
 * mei_me_cl_init - initialize me client
 *
 * @me_cl: me client
 */
void mei_me_cl_init(struct mei_me_client *me_cl)
{
	INIT_LIST_HEAD(&me_cl->list);
	kref_init(&me_cl->refcnt);
}

/**
 * mei_me_cl_get - increases me client refcount
 *
 * @me_cl: me client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: me client or NULL
 */
struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl)
{
	if (me_cl && kref_get_unless_zero(&me_cl->refcnt))
		return me_cl;

	return NULL;
}

/**
 * mei_me_cl_release - free me client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @ref: me_client refcount
 */
static void mei_me_cl_release(struct kref *ref)
{
	struct mei_me_client *me_cl =
		container_of(ref, struct mei_me_client, refcnt);

	kfree(me_cl);
}

/**
 * mei_me_cl_put - decrease me client refcount and free client if necessary
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @me_cl: me client
 */
void mei_me_cl_put(struct mei_me_client *me_cl)
{
	if (me_cl)
		kref_put(&me_cl->refcnt, mei_me_cl_release);
}

/**
 * __mei_me_cl_del - delete me client from the list and decrease
 *	reference counter
 *
 * @dev: mei device
 * @me_cl: me client
 *
 * Locking: dev->me_clients_rwsem
 */
static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{
	if (!me_cl)
		return;

	list_del_init(&me_cl->list);
	mei_me_cl_put(me_cl);
}

/**
 * mei_me_cl_del - delete me client from the list and decrease
 *	reference counter
 *
 * @dev: mei device
 * @me_cl: me client
 */
void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{
	down_write(&dev->me_clients_rwsem);
	__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_add - add me client to the list
 *
 * @dev: mei device
 * @me_cl: me client
 */
void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl)
{
	down_write(&dev->me_clients_rwsem);
	list_add(&me_cl->list, &dev->me_clients);
	up_write(&dev->me_clients_rwsem);
}

/**
 * __mei_me_cl_by_uuid - locate me client by uuid
 *	increases ref count
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
static struct mei_me_client *__mei_me_cl_by_uuid(struct mei_device *dev,
						 const uuid_le *uuid)
{
	struct mei_me_client *me_cl;
	const uuid_le *pn;

	WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));

	list_for_each_entry(me_cl, &dev->me_clients, list) {
		pn = &me_cl->props.protocol_name;
		if (uuid_le_cmp(*uuid, *pn) == 0)
			return mei_me_cl_get(me_cl);
	}

	return NULL;
}

/**
 * mei_me_cl_by_uuid - locate me client by uuid
 *	increases ref count
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev,
					const uuid_le *uuid)
{
	struct mei_me_client *me_cl;

	down_read(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid(dev, uuid);
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}

/**
 * mei_me_cl_by_id - locate me client by client id
 *	increases ref count
 *
 * @dev: the device structure
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
{

	struct mei_me_client *__me_cl, *me_cl = NULL;

	down_read(&dev->me_clients_rwsem);
	list_for_each_entry(__me_cl, &dev->me_clients, list) {
		if (__me_cl->client_id == client_id) {
			me_cl = mei_me_cl_get(__me_cl);
			break;
		}
	}
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}

/**
 * __mei_me_cl_by_uuid_id - locate me client by client id and uuid
 *	increases ref count
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
static struct mei_me_client *__mei_me_cl_by_uuid_id(struct mei_device *dev,
						    const uuid_le *uuid,
						    u8 client_id)
{
	struct mei_me_client *me_cl;
	const uuid_le *pn;

	WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));

	list_for_each_entry(me_cl, &dev->me_clients, list) {
		pn = &me_cl->props.protocol_name;
		if (uuid_le_cmp(*uuid, *pn) == 0 &&
		    me_cl->client_id == client_id)
			return mei_me_cl_get(me_cl);
	}

	return NULL;
}


/**
 * mei_me_cl_by_uuid_id - locate me client by client id and uuid
 *	increases ref count
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 */
struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
					   const uuid_le *uuid, u8 client_id)
{
	struct mei_me_client *me_cl;

	down_read(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, client_id);
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}

/**
 * mei_me_cl_rm_by_uuid - remove all me clients matching uuid
 *
 * @dev: the device structure
 * @uuid: me client uuid
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
{
	struct mei_me_client *me_cl;

	dev_dbg(dev->dev, "remove %pUl\n", uuid);

	down_write(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid(dev, uuid);
	__mei_me_cl_del(dev, me_cl);
	mei_me_cl_put(me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_rm_by_uuid_id - remove all me clients matching client id
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @id: me client id
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
{
	struct mei_me_client *me_cl;

	dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id);

	down_write(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
	__mei_me_cl_del(dev, me_cl);
	mei_me_cl_put(me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_rm_all - remove all me clients
 *
 * @dev: the device structure
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_all(struct mei_device *dev)
{
	struct mei_me_client *me_cl, *next;

	down_write(&dev->me_clients_rwsem);
	list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
		__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_io_cb_free - free mei_cb_private related memory
 *
 * @cb: mei callback struct
 */
void mei_io_cb_free(struct mei_cl_cb *cb)
{
	if (cb == NULL)
		return;

	list_del(&cb->list);
	kfree(cb->buf.data);
	kfree(cb);
}

/**
 * mei_tx_cb_enqueue - queue tx callback
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @cb: mei callback struct
 * @head: an instance of list to queue on
 */
static inline void mei_tx_cb_enqueue(struct mei_cl_cb *cb,
				     struct list_head *head)
{
	list_add_tail(&cb->list, head);
	cb->cl->tx_cb_queued++;
}

/**
 * mei_tx_cb_dequeue - dequeue tx callback
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @cb: mei callback struct to dequeue and free
 */
static inline void mei_tx_cb_dequeue(struct mei_cl_cb *cb)
{
	if (!WARN_ON(cb->cl->tx_cb_queued == 0))
		cb->cl->tx_cb_queued--;

	mei_io_cb_free(cb);
}

/**
 * mei_cl_set_read_by_fp - set pending_read flag to vtag struct for given fp
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @cl: mei client
 * @fp: pointer to file structure
 */
static void mei_cl_set_read_by_fp(const struct mei_cl *cl,
				  const struct file *fp)
{
	struct mei_cl_vtag *cl_vtag;

	list_for_each_entry(cl_vtag, &cl->vtag_map, list) {
		if (cl_vtag->fp == fp) {
			cl_vtag->pending_read = true;
			return;
		}
	}
}

/**
 * mei_io_cb_init - allocate and initialize io callback
 *
 * @cl: mei client
 * @type: operation type
 * @fp: pointer to file structure
 *
 * Return: mei_cl_cb pointer or NULL;
 */
static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl,
					enum mei_cb_file_ops type,
					const struct file *fp)
{
	struct mei_cl_cb *cb;

	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	INIT_LIST_HEAD(&cb->list);
	cb->fp = fp;
	cb->cl = cl;
	cb->buf_idx = 0;
	cb->fop_type = type;
	cb->vtag = 0;

	return cb;
}

/**
 * mei_io_list_flush_cl - removes cbs belonging to the cl.
 *
 * @head: an instance of our list structure
 * @cl: host client
 */
static void mei_io_list_flush_cl(struct list_head *head,
				 const struct mei_cl *cl)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, head, list) {
		if (cl == cb->cl) {
			list_del_init(&cb->list);
			if (cb->fop_type == MEI_FOP_READ)
				mei_io_cb_free(cb);
		}
	}
}

/**
 * mei_io_tx_list_free_cl - removes cb belonging to the cl and free them
 *
 * @head: An instance of our list structure
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 */
static void mei_io_tx_list_free_cl(struct list_head *head,
				   const struct mei_cl *cl,
				   const struct file *fp)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, head, list) {
		if (cl == cb->cl && (!fp || fp == cb->fp))
			mei_tx_cb_dequeue(cb);
	}
}

/**
 * mei_io_list_free_fp - free cb from a list that matches file pointer
 *
 * @head: io list
 * @fp: file pointer (matching cb file object), may be NULL
 */
static void mei_io_list_free_fp(struct list_head *head, const struct file *fp)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, head, list)
		if (!fp || fp == cb->fp)
			mei_io_cb_free(cb);
}

/**
 * mei_cl_free_pending - free pending cb
 *
 * @cl: host client
 */
static void mei_cl_free_pending(struct mei_cl *cl)
{
	struct mei_cl_cb *cb;

	cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
	mei_io_cb_free(cb);
}

/**
 * mei_cl_alloc_cb - a convenient wrapper for allocating read cb
 *
 * @cl: host client
 * @length: size of the buffer
 * @fop_type: operation type
 * @fp: associated file pointer (might be NULL)
 *
 * Return: cb on success and NULL on failure
 */
struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
				  enum mei_cb_file_ops fop_type,
				  const struct file *fp)
{
	struct mei_cl_cb *cb;

	cb = mei_io_cb_init(cl, fop_type, fp);
	if (!cb)
		return NULL;

	if (length == 0)
		return cb;

	cb->buf.data = kmalloc(roundup(length, MEI_SLOT_SIZE), GFP_KERNEL);
	if (!cb->buf.data) {
		mei_io_cb_free(cb);
		return NULL;
	}
	cb->buf.size = length;

	return cb;
}

/**
 * mei_cl_enqueue_ctrl_wr_cb - a convenient wrapper for allocating
 *	and enqueuing of the control commands cb
 *
 * @cl: host client
 * @length: size of the buffer
 * @fop_type: operation type
 * @fp: associated file pointer (might be NULL)
 *
 * Return: cb on success and NULL on failure
 * Locking: called under "dev->device_lock" lock
 */
struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
					    enum mei_cb_file_ops fop_type,
					    const struct file *fp)
{
	struct mei_cl_cb *cb;

	/* for RX always allocate at least client's mtu */
	if (length)
		length = max_t(size_t, length, mei_cl_mtu(cl));

	cb = mei_cl_alloc_cb(cl, length, fop_type, fp);
	if (!cb)
		return NULL;

	list_add_tail(&cb->list, &cl->dev->ctrl_wr_list);
	return cb;
}

/**
 * mei_cl_read_cb - find this cl's callback in the read list
 *	for a specific file
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 *
 * Return: cb on success, NULL if cb is not found
 */
struct mei_cl_cb *mei_cl_read_cb(struct mei_cl *cl, const struct file *fp)
{
	struct mei_cl_cb *cb;
	struct mei_cl_cb *ret_cb = NULL;

	spin_lock(&cl->rd_completed_lock);
	list_for_each_entry(cb, &cl->rd_completed, list)
		if (!fp || fp == cb->fp) {
			ret_cb = cb;
			break;
		}
	spin_unlock(&cl->rd_completed_lock);
	return ret_cb;
}

/**
 * mei_cl_flush_queues - flushes queue lists belonging to cl.
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 *
 * Return: 0 on success, -EINVAL if cl or cl->dev is NULL.
 */
int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
{
	struct mei_device *dev;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	cl_dbg(dev, cl, "remove list entry belonging to cl\n");
	mei_io_tx_list_free_cl(&cl->dev->write_list, cl, fp);
	mei_io_tx_list_free_cl(&cl->dev->write_waiting_list, cl, fp);
	/* free pending and control cb only in final flush */
	if (!fp) {
		mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl);
		mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl);
		mei_cl_free_pending(cl);
	}
	spin_lock(&cl->rd_completed_lock);
	mei_io_list_free_fp(&cl->rd_completed, fp);
	spin_unlock(&cl->rd_completed_lock);

	return 0;
}

/**
 * mei_cl_init - initializes cl.
 *
 * @cl: host client to be initialized
 * @dev: mei device
 */
static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
{
	memset(cl, 0, sizeof(*cl));
	init_waitqueue_head(&cl->wait);
	init_waitqueue_head(&cl->rx_wait);
	init_waitqueue_head(&cl->tx_wait);
	init_waitqueue_head(&cl->ev_wait);
	INIT_LIST_HEAD(&cl->vtag_map);
	spin_lock_init(&cl->rd_completed_lock);
	INIT_LIST_HEAD(&cl->rd_completed);
	INIT_LIST_HEAD(&cl->rd_pending);
	INIT_LIST_HEAD(&cl->link);
	cl->writing_state = MEI_IDLE;
	cl->state = MEI_FILE_UNINITIALIZED;
	cl->dev = dev;
}

/**
 * mei_cl_allocate - allocates cl structure and sets it up.
 *
 * @dev: mei device
 * Return: The allocated host client structure or NULL on failure
 */
struct mei_cl *mei_cl_allocate(struct mei_device *dev)
{
	struct mei_cl *cl;

	cl = kmalloc(sizeof(*cl), GFP_KERNEL);
	if (!cl)
		return NULL;

	mei_cl_init(cl, dev);

	return cl;
}

/**
 * mei_cl_link - allocate host id in the host map
 *
 * @cl: host client
 *
 * Return: 0 on success
 *	-EINVAL on incorrect values
 *	-EMFILE if open count exceeded.
 */
int mei_cl_link(struct mei_cl *cl)
{
	struct mei_device *dev;
	int id;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
	if (id >= MEI_CLIENTS_MAX) {
		dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
		return -EMFILE;
	}

	if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(dev->dev, "open_handle_count exceeded %d",
			MEI_MAX_OPEN_HANDLE_COUNT);
		return -EMFILE;
	}

	dev->open_handle_count++;

	cl->host_client_id = id;
	list_add_tail(&cl->link, &dev->file_list);

	set_bit(id, dev->host_clients_map);

	cl->state = MEI_FILE_INITIALIZING;

	cl_dbg(dev, cl, "link cl\n");
	return 0;
}

/**
 * mei_cl_unlink - remove host client from the list
 *
 * @cl: host client
 *
 * Return: always 0
 */
int mei_cl_unlink(struct mei_cl *cl)
{
	struct mei_device *dev;

	/* don't shout on error exit path */
	if (!cl)
		return 0;

	if (WARN_ON(!cl->dev))
		return 0;

	dev = cl->dev;

	cl_dbg(dev, cl, "unlink client");

	if (dev->open_handle_count > 0)
		dev->open_handle_count--;

	/* never clear the 0 bit */
	if (cl->host_client_id)
		clear_bit(cl->host_client_id, dev->host_clients_map);

	list_del_init(&cl->link);

	cl->state = MEI_FILE_UNINITIALIZED;
	cl->writing_state = MEI_IDLE;

	WARN_ON(!list_empty(&cl->rd_completed) ||
		!list_empty(&cl->rd_pending) ||
		!list_empty(&cl->link));

	return 0;
}

/**
 * mei_host_client_init - mark the device enabled, schedule a bus rescan
 *	and request runtime pm autosuspend
 *
 * @dev: the device structure
 */
void mei_host_client_init(struct mei_device *dev)
{
	mei_set_devstate(dev, MEI_DEV_ENABLED);
	dev->reset_count = 0;

	schedule_work(&dev->bus_rescan_work);

	pm_runtime_mark_last_busy(dev->dev);
	dev_dbg(dev->dev, "rpm: autosuspend\n");
	pm_request_autosuspend(dev->dev);
}

/**
 * mei_hbuf_acquire - try to acquire host buffer
 *
 * @dev: the device structure
 * Return: true if host buffer was acquired
 */
bool mei_hbuf_acquire(struct mei_device *dev)
{
	if (mei_pg_state(dev) == MEI_PG_ON ||
	    mei_pg_in_transition(dev)) {
		dev_dbg(dev->dev, "device is in pg\n");
		return false;
	}

	if (!dev->hbuf_is_ready) {
		dev_dbg(dev->dev, "hbuf is not ready\n");
		return false;
	}

	dev->hbuf_is_ready = false;

	return true;
}

/**
 * mei_cl_wake_all - wake up readers, writers and event waiters so
 *	they can be interrupted
 *
 * @cl: host client
 */
static void mei_cl_wake_all(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	/* synchronized under device mutex */
	if (waitqueue_active(&cl->rx_wait)) {
		cl_dbg(dev, cl, "Waking up reading client!\n");
		wake_up_interruptible(&cl->rx_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->tx_wait)) {
		cl_dbg(dev, cl, "Waking up writing client!\n");
		wake_up_interruptible(&cl->tx_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->ev_wait)) {
		cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
		wake_up_interruptible(&cl->ev_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->wait)) {
		cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
		wake_up(&cl->wait);
	}
}

/**
 * mei_cl_set_disconnected - set disconnected state and clear
 *	associated states and resources
 *
 * @cl: host client
 */
static void mei_cl_set_disconnected(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	if (cl->state == MEI_FILE_DISCONNECTED ||
	    cl->state <= MEI_FILE_INITIALIZING)
		return;

	cl->state = MEI_FILE_DISCONNECTED;
	mei_io_tx_list_free_cl(&dev->write_list, cl, NULL);
	mei_io_tx_list_free_cl(&dev->write_waiting_list, cl, NULL);
	mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
	mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
	mei_cl_wake_all(cl);
	cl->rx_flow_ctrl_creds = 0;
	cl->tx_flow_ctrl_creds = 0;
	cl->timer_count = 0;

	if (!cl->me_cl)
		return;

	if (!WARN_ON(cl->me_cl->connect_count == 0))
		cl->me_cl->connect_count--;

	if (cl->me_cl->connect_count == 0)
		cl->me_cl->tx_flow_ctrl_creds = 0;

	mei_me_cl_put(cl->me_cl);
	cl->me_cl = NULL;
}

/**
 * mei_cl_set_connecting - bind host client to me client and mark it connecting
 *
 * @cl: host client
 * @me_cl: me client
 *
 * Return: 0 on success, -ENOENT if the me client is not active,
 *	-EBUSY if a fixed address client is already connected
 */
static int mei_cl_set_connecting(struct mei_cl *cl, struct mei_me_client *me_cl)
{
	if (!mei_me_cl_get(me_cl))
		return -ENOENT;

	/* only one connection is allowed for fixed address clients */
	if (me_cl->props.fixed_address) {
		if (me_cl->connect_count) {
			mei_me_cl_put(me_cl);
			return -EBUSY;
		}
	}

	cl->me_cl = me_cl;
	cl->state = MEI_FILE_CONNECTING;
	cl->me_cl->connect_count++;

	return 0;
}

/*
 * mei_cl_send_disconnect - send disconnect request
 *
 * @cl: host client
 * @cb: callback block
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	int ret;

	dev = cl->dev;

	ret = mei_hbm_cl_disconnect_req(dev, cl);
	cl->status = ret;
	if (ret) {
		cl->state = MEI_FILE_DISCONNECT_REPLY;
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;
	mei_schedule_stall_timer(dev);

	return 0;
}

/**
 * mei_cl_irq_disconnect - processes close related operation from
 *	interrupt thread context - send disconnect request
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise, error.
 */
int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
			  struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_cl_send_disconnect(cl, cb);
	if (ret)
		list_move_tail(&cb->list, cmpl_list);

	return ret;
}

/**
 * __mei_cl_disconnect - disconnect host client from the me one;
 *	internal function, runtime pm has to be acquired by the caller
 *
 * @cl: host client
 *
 * Return: 0 on success, <0 on failure.
 */
static int __mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	dev = cl->dev;

	cl->state = MEI_FILE_DISCONNECTING;

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	if (mei_hbuf_acquire(dev)) {
		rets = mei_cl_send_disconnect(cl, cb);
		if (rets) {
			cl_err(dev, cl, "failed to disconnect.\n");
			goto out;
		}
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->state == MEI_FILE_DISCONNECT_REPLY ||
			   cl->state == MEI_FILE_DISCONNECTED,
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	rets = cl->status;
	if (cl->state != MEI_FILE_DISCONNECT_REPLY &&
	    cl->state != MEI_FILE_DISCONNECTED) {
		cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
		rets = -ETIME;
	}

out:
	/* we disconnect also on error */
	mei_cl_set_disconnected(cl);
	if (!rets)
		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");

	mei_io_cb_free(cb);
	return rets;
}

/**
 * mei_cl_disconnect - disconnect host client from the me one
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	cl_dbg(dev, cl, "disconnecting");

	if (!mei_cl_is_connected(cl))
		return 0;

	if (mei_cl_is_fixed_address(cl)) {
		mei_cl_set_disconnected(cl);
		return 0;
	}

	if (dev->dev_state == MEI_DEV_POWERING_DOWN ||
	    dev->dev_state == MEI_DEV_POWER_DOWN) {
		cl_dbg(dev, cl, "Device is powering down, don't bother with disconnection\n");
		mei_cl_set_disconnected(cl);
		return 0;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	rets = __mei_cl_disconnect(cl);

	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return rets;
}


/**
 * mei_cl_is_other_connecting - checks if another
 *	client with the same me client id is connecting
 *
 * @cl: private data of the file object
 *
 * Return: true if another client is connecting, false otherwise.
 */
static bool mei_cl_is_other_connecting(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;

	dev = cl->dev;

	list_for_each_entry(cb, &dev->ctrl_rd_list, list) {
		if (cb->fop_type == MEI_FOP_CONNECT &&
		    mei_cl_me_id(cl) == mei_cl_me_id(cb->cl))
			return true;
	}

	return false;
}

/**
 * mei_cl_send_connect - send connect request
 *
 * @cl: host client
 * @cb: callback block
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	int ret;

	dev = cl->dev;

	ret = mei_hbm_cl_connect_req(dev, cl);
	cl->status = ret;
	if (ret) {
		cl->state = MEI_FILE_DISCONNECT_REPLY;
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;
	mei_schedule_stall_timer(dev);
	return 0;
}

/**
 * mei_cl_irq_connect - send connect request in irq_thread context
 *
 * @cl: host client
 * @cb: callback block
 * @cmpl_list: complete list
 *
 * Return: 0, OK; otherwise, error.
 */
int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
		       struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int rets;

	if (mei_cl_is_other_connecting(cl))
		return 0;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	rets = mei_cl_send_connect(cl, cb);
	if (rets)
		list_move_tail(&cb->list, cmpl_list);

	return rets;
}

/**
 * mei_cl_connect - connect host client to the me one
 *
 * @cl: host client
 * @me_cl: me client
 * @fp: pointer to file structure
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
		   const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev || !me_cl))
		return -ENODEV;

	dev = cl->dev;

	rets = mei_cl_set_connecting(cl, me_cl);
	if (rets)
		goto nortpm;

	if (mei_cl_is_fixed_address(cl)) {
		cl->state = MEI_FILE_CONNECTED;
		rets = 0;
		goto nortpm;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto nortpm;
	}

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_CONNECT, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	/* run hbuf acquire last so we don't have to undo */
	if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
		rets = mei_cl_send_connect(cl, cb);
		if (rets)
			goto out;
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   (cl->state == MEI_FILE_CONNECTED ||
			    cl->state == MEI_FILE_DISCONNECTED ||
			    cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
			    cl->state == MEI_FILE_DISCONNECT_REPLY),
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (!mei_cl_is_connected(cl)) {
		if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
			mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
			mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
			/* ignore disconnect return value;
			 * in case of failure reset will be invoked
			 */
			__mei_cl_disconnect(cl);
			rets = -EFAULT;
			goto out;
		}

		/* timeout or something went really wrong */
		if (!cl->status)
			cl->status = -EFAULT;
	}

	rets = cl->status;
out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);

nortpm:
	if (!mei_cl_is_connected(cl))
		mei_cl_set_disconnected(cl);

	return rets;
}

/**
 * mei_cl_alloc_linked - allocate and link host client
 *
 * @dev: the device structure
 *
 * Return: cl on success ERR_PTR on failure
 */
struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev)
{
	struct mei_cl *cl;
	int ret;

	cl = mei_cl_allocate(dev);
	if (!cl) {
		ret = -ENOMEM;
		goto err;
	}

	ret = mei_cl_link(cl);
	if (ret)
		goto err;

	return cl;
err:
	kfree(cl);
	return ERR_PTR(ret);
}

/**
 * mei_cl_tx_flow_ctrl_creds - checks flow_control credits for cl.
 *
 * @cl: host client
 *
 * Return: 1 if tx_flow_ctrl_creds >0, 0 - otherwise.
 */
static int mei_cl_tx_flow_ctrl_creds(struct mei_cl *cl)
{
	if (WARN_ON(!cl || !cl->me_cl))
		return -EINVAL;

	if (cl->tx_flow_ctrl_creds > 0)
		return 1;

	if (mei_cl_is_fixed_address(cl))
		return 1;

	if (mei_cl_is_single_recv_buf(cl)) {
		if (cl->me_cl->tx_flow_ctrl_creds > 0)
			return 1;
	}
	return 0;
}

/**
 * mei_cl_tx_flow_ctrl_creds_reduce - reduces transmit flow control credits
 *	for a client
 *
 * @cl: host client
 *
 * Return:
 *	0 on success
 *	-EINVAL when ctrl credits are <= 0
 */
static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl)
{
	if (WARN_ON(!cl || !cl->me_cl))
		return -EINVAL;

	if (mei_cl_is_fixed_address(cl))
		return 0;

	if (mei_cl_is_single_recv_buf(cl)) {
		if (WARN_ON(cl->me_cl->tx_flow_ctrl_creds <= 0))
			return -EINVAL;
		cl->me_cl->tx_flow_ctrl_creds--;
	} else {
		if (WARN_ON(cl->tx_flow_ctrl_creds <= 0))
			return -EINVAL;
		cl->tx_flow_ctrl_creds--;
	}
	return 0;
}

/**
 * mei_cl_vtag_alloc - allocate and fill the vtag structure
 *
 * @fp: pointer to file structure
 * @vtag: vm tag
 *
 * Return:
 * * Pointer to allocated struct - on success
 * * ERR_PTR(-ENOMEM) on memory allocation failure
 */
struct mei_cl_vtag *mei_cl_vtag_alloc(struct file *fp, u8 vtag)
{
	struct mei_cl_vtag *cl_vtag;

	cl_vtag = kzalloc(sizeof(*cl_vtag), GFP_KERNEL);
	if (!cl_vtag)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&cl_vtag->list);
	cl_vtag->vtag = vtag;
	cl_vtag->fp = fp;

	return cl_vtag;
}

/**
 * mei_cl_fp_by_vtag - obtain the file pointer by vtag
 *
 * @cl: host client
 * @vtag: virtual tag
 *
 * Return:
 * * A file pointer - on success
 * * ERR_PTR(-ENOENT) if vtag is not found in the client vtag list
 */
const struct file *mei_cl_fp_by_vtag(const struct mei_cl *cl, u8 vtag)
{
	struct mei_cl_vtag *vtag_l;

	list_for_each_entry(vtag_l, &cl->vtag_map, list)
		/* The client on bus has one fixed fp */
		if ((cl->cldev && mei_cldev_enabled(cl->cldev)) ||
		    vtag_l->vtag == vtag)
			return vtag_l->fp;

	return ERR_PTR(-ENOENT);
}

/**
 * mei_cl_reset_read_by_vtag - reset pending_read flag by given vtag
 *
 * @cl: host client
 * @vtag: vm tag
 */
static void mei_cl_reset_read_by_vtag(const struct mei_cl *cl, u8 vtag)
{
	struct mei_cl_vtag *vtag_l;

	list_for_each_entry(vtag_l, &cl->vtag_map, list) {
		if (vtag_l->vtag == vtag) {
			vtag_l->pending_read = false;
			break;
		}
	}
}

/**
 * mei_cl_read_vtag_add_fc - add flow control for next pending reader
 *	in the vtag list
 *
 * @cl: host client
 */
static void mei_cl_read_vtag_add_fc(struct mei_cl *cl)
{
	struct mei_cl_vtag *cl_vtag;

	list_for_each_entry(cl_vtag, &cl->vtag_map, list) {
		if (cl_vtag->pending_read) {
			if (mei_cl_enqueue_ctrl_wr_cb(cl,
						      mei_cl_mtu(cl),
						      MEI_FOP_READ,
						      cl_vtag->fp))
				cl->rx_flow_ctrl_creds++;
			break;
		}
	}
}

/**
 * mei_cl_vt_support_check - check if client supports vtags
 *
 * @cl: host client
 *
 * Return:
 * * 0 - supported, or not connected at all
 * * -EOPNOTSUPP - vtags are not supported by client
 */
int mei_cl_vt_support_check(const struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	if (!dev->hbm_f_vt_supported)
		return -EOPNOTSUPP;

	if (!cl->me_cl)
		return 0;

	return cl->me_cl->props.vt_supported ? 0 : -EOPNOTSUPP;
}

/**
 * mei_cl_add_rd_completed - add read completed callback to list with lock
 *	and vtag check
 *
 * @cl: host client
 * @cb: callback block
 *
 */
void mei_cl_add_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	const struct file *fp;

	if (!mei_cl_vt_support_check(cl)) {
		fp = mei_cl_fp_by_vtag(cl, cb->vtag);
		if (IS_ERR(fp)) {
			/* client already disconnected, discarding */
			mei_io_cb_free(cb);
			return;
		}
		cb->fp = fp;
		mei_cl_reset_read_by_vtag(cl, cb->vtag);
		mei_cl_read_vtag_add_fc(cl);
	}

	spin_lock(&cl->rd_completed_lock);
	list_add_tail(&cb->list, &cl->rd_completed);
	spin_unlock(&cl->rd_completed_lock);
}

/**
 * mei_cl_del_rd_completed - free read completed callback with lock
 *
 * @cl: host client
 * @cb: callback block
 *
 */
void mei_cl_del_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	spin_lock(&cl->rd_completed_lock);
	mei_io_cb_free(cb);
	spin_unlock(&cl->rd_completed_lock);
}

/**
 * mei_cl_notify_fop2req - convert fop to proper request
 *
 * @fop: client notification start response command
 *
 * Return: MEI_HBM_NOTIFICATION_START/STOP
 */
u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop)
{
	if (fop == MEI_FOP_NOTIFY_START)
		return MEI_HBM_NOTIFICATION_START;
	else
		return MEI_HBM_NOTIFICATION_STOP;
}

/**
 * mei_cl_notify_req2fop - convert notification request to file operation type
 *
 * @req: hbm notification request type
 *
 * Return: MEI_FOP_NOTIFY_START/STOP
 */
enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req)
{
	if (req == MEI_HBM_NOTIFICATION_START)
		return MEI_FOP_NOTIFY_START;
	else
		return MEI_FOP_NOTIFY_STOP;
}

/**
 * mei_cl_irq_notify - send notification request in irq_thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
		      struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;
	bool request;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	request = mei_cl_notify_fop2req(cb->fop_type);
	ret = mei_hbm_cl_notify_req(dev, cl, request);
	if (ret) {
		cl->status = ret;
		list_move_tail(&cb->list, cmpl_list);
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	return 0;
}

/**
 * mei_cl_notify_request - send notification stop/start request
 *
 * @cl: host client
 * @fp: associate request with file
 * @request: 1 for start or 0 for stop
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_notify_request(struct mei_cl *cl,
			  const struct file *fp, u8 request)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	enum mei_cb_file_ops fop_type;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_ev_supported) {
		cl_dbg(dev, cl, "notifications not supported\n");
		return -EOPNOTSUPP;
	}

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	fop_type = mei_cl_notify_req2fop(request);
	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, fop_type, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	if (mei_hbuf_acquire(dev)) {
		if (mei_hbm_cl_notify_req(dev, cl, request)) {
			rets = -ENODEV;
			goto out;
		}
		list_move_tail(&cb->list, &dev->ctrl_rd_list);
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->notify_en == request ||
			   cl->status ||
			   !mei_cl_is_connected(cl),
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (cl->notify_en != request && !cl->status)
		cl->status = -EFAULT;

	rets = cl->status;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);
	return rets;
}

/**
 * mei_cl_notify - raise notification
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_cl_notify(struct mei_cl *cl)
{
	struct mei_device *dev;

	if (!cl || !cl->dev)
		return;

	dev = cl->dev;

	if (!cl->notify_en)
		return;

	cl_dbg(dev, cl, "notify event");
	cl->notify_ev = true;
	if (!mei_cl_bus_notify_event(cl))
		wake_up_interruptible(&cl->ev_wait);

	if (cl->ev_async)
		kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);

}

/**
 * mei_cl_notify_get - get or wait for notification event
 *
 * @cl: host client
 * @block: this request is blocking
 * @notify_ev: true if notification event was received
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
{
	struct mei_device *dev;
	int rets;

	*notify_ev = false;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_ev_supported) {
		cl_dbg(dev, cl, "notifications not supported\n");
		return -EOPNOTSUPP;
	}

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	if (cl->notify_ev)
		goto out;

	if (!block)
		return -EAGAIN;

	mutex_unlock(&dev->device_lock);
	rets = wait_event_interruptible(cl->ev_wait, cl->notify_ev);
	mutex_lock(&dev->device_lock);

	if (rets < 0)
		return rets;

out:
	*notify_ev = cl->notify_ev;
	cl->notify_ev = false;
	return 0;
}

/**
 * mei_cl_read_start - the start read client message function.
 *
 * @cl: host client
 * @length: number of bytes to read
 * @fp: pointer to file structure
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	if (!mei_me_cl_is_active(cl->me_cl)) {
		cl_err(dev, cl, "no such me client\n");
		return -ENOTTY;
	}

	if (mei_cl_is_fixed_address(cl))
		return 0;

	/* HW currently supports only one pending read */
	if (cl->rx_flow_ctrl_creds) {
		mei_cl_set_read_by_fp(cl, fp);
		return -EBUSY;
	}

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp);
	if (!cb)
		return -ENOMEM;

	mei_cl_set_read_by_fp(cl, fp);

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto nortpm;
	}

	rets = 0;
	if (mei_hbuf_acquire(dev)) {
		rets = mei_hbm_cl_flow_control_req(dev, cl);
		if (rets < 0)
			goto out;

		list_move_tail(&cb->list, &cl->rd_pending);
	}
	cl->rx_flow_ctrl_creds++;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
nortpm:
	if (rets)
		mei_io_cb_free(cb);

	return rets;
}

static inline u8 mei_ext_hdr_set_vtag(void *ext, u8 vtag)
{
	struct mei_ext_hdr_vtag *vtag_hdr = ext;

	vtag_hdr->hdr.type = MEI_EXT_HDR_VTAG;
	vtag_hdr->hdr.length = mei_data2slots(sizeof(*vtag_hdr));
	vtag_hdr->vtag = vtag;
	vtag_hdr->reserved = 0;
	return vtag_hdr->hdr.length;
}

/**
 * mei_msg_hdr_init - allocate and initialize mei message header
 *
 * @cb: message callback structure
 *
 * Return: a pointer to initialized header or ERR_PTR on failure
 */
static struct mei_msg_hdr *mei_msg_hdr_init(const struct mei_cl_cb *cb)
{
	size_t hdr_len;
	struct mei_ext_meta_hdr *meta;
	struct mei_msg_hdr *mei_hdr;
	bool is_ext, is_vtag;

	if (!cb)
		return ERR_PTR(-EINVAL);

	/* Extended header for vtag is attached only on the first fragment */
	is_vtag = (cb->vtag && cb->buf_idx == 0);
	is_ext = is_vtag;

	/* Compute extended header size */
	hdr_len = sizeof(*mei_hdr);

	if (!is_ext)
		goto setup_hdr;

	hdr_len += sizeof(*meta);
	if (is_vtag)
		hdr_len += sizeof(struct mei_ext_hdr_vtag);

setup_hdr:
	mei_hdr = kzalloc(hdr_len, GFP_KERNEL);
	if (!mei_hdr)
		return ERR_PTR(-ENOMEM);

	mei_hdr->host_addr = mei_cl_host_addr(cb->cl);
	mei_hdr->me_addr = mei_cl_me_id(cb->cl);
	mei_hdr->internal = cb->internal;
	mei_hdr->extended = is_ext;

	if (!is_ext)
		goto out;

	meta = (struct mei_ext_meta_hdr *)mei_hdr->extension;
	if (is_vtag) {
		meta->count++;
		meta->size += mei_ext_hdr_set_vtag(meta->hdrs, cb->vtag);
	}
out:
	mei_hdr->length = hdr_len - sizeof(*mei_hdr);
	return mei_hdr;
}

/**
 * mei_cl_irq_write - write a message to device
 *	from the interrupt thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise error.
 */
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
		     struct list_head *cmpl_list)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr *mei_hdr = NULL;
	size_t hdr_len;
	size_t hbuf_len, dr_len;
	size_t buf_len;
	size_t data_len;
	int hbuf_slots;
	u32 dr_slots;
	u32 dma_len;
	int rets;
	bool first_chunk;
	const void *data;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	buf = &cb->buf;

	first_chunk = cb->buf_idx == 0;

	rets = first_chunk ? mei_cl_tx_flow_ctrl_creds(cl) : 1;
	if (rets < 0)
		goto err;

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		return 0;
	}

	buf_len = buf->size - cb->buf_idx;
	data = buf->data + cb->buf_idx;
	hbuf_slots = mei_hbuf_empty_slots(dev);
	if (hbuf_slots < 0) {
		rets = -EOVERFLOW;
		goto err;
	}

	hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
	dr_slots = mei_dma_ring_empty_slots(dev);
	dr_len = mei_slots2data(dr_slots);

	mei_hdr = mei_msg_hdr_init(cb);
	if (IS_ERR(mei_hdr)) {
		rets = PTR_ERR(mei_hdr);
		mei_hdr = NULL;
		goto err;
	}

	cl_dbg(dev, cl, "Extended Header %d vtag = %d\n",
	       mei_hdr->extended, cb->vtag);

	hdr_len = sizeof(*mei_hdr) + mei_hdr->length;

	/**
	 * Split the message only if we can write the whole host buffer
	 * otherwise wait for next time the host buffer is empty.
	 */
	if (hdr_len + buf_len <= hbuf_len) {
		data_len = buf_len;
		mei_hdr->msg_complete = 1;
	} else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
		mei_hdr->dma_ring = 1;
		if (buf_len > dr_len)
			buf_len = dr_len;
		else
			mei_hdr->msg_complete = 1;

		data_len = sizeof(dma_len);
		dma_len = buf_len;
		data = &dma_len;
	} else if ((u32)hbuf_slots == mei_hbuf_depth(dev)) {
		buf_len = hbuf_len - hdr_len;
		data_len = buf_len;
	} else {
		kfree(mei_hdr);
		return 0;
	}
	mei_hdr->length += data_len;

	if (mei_hdr->dma_ring)
		mei_dma_ring_write(dev, buf->data + cb->buf_idx, buf_len);
	rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);

	if (rets)
		goto err;

	cl->status = 0;
	cl->writing_state = MEI_WRITING;
	cb->buf_idx += buf_len;

	if (first_chunk) {
		if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) {
			rets = -EIO;
			goto err;
		}
	}

	if (mei_hdr->msg_complete)
		list_move_tail(&cb->list, &dev->write_waiting_list);

	kfree(mei_hdr);
	return 0;

err:
	kfree(mei_hdr);
	cl->status = rets;
	list_move_tail(&cb->list, cmpl_list);
	return rets;
}

/**
 * mei_cl_write - submit a write cb to mei device
 *	assumes device_lock is locked
 *
 * @cl: host client
 * @cb: write callback with filled data
 *
 * Return: number of bytes sent on success, <0 on failure.
 */
ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr *mei_hdr = NULL;
	size_t hdr_len;
	size_t hbuf_len, dr_len;
	size_t buf_len;
	size_t data_len;
	int hbuf_slots;
	u32 dr_slots;
	u32 dma_len;
	ssize_t rets;
	bool blocking;
	const void *data;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	if (WARN_ON(!cb))
		return -EINVAL;

	dev = cl->dev;

	buf = &cb->buf;
	buf_len = buf->size;

	cl_dbg(dev, cl, "buf_len=%zd\n", buf_len);

	blocking = cb->blocking;
	data = buf->data;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %zd\n", rets);
		goto free;
	}

	cb->buf_idx = 0;
	cl->writing_state = MEI_IDLE;


	rets = mei_cl_tx_flow_ctrl_creds(cl);
	if (rets < 0)
		goto err;

	mei_hdr = mei_msg_hdr_init(cb);
	if (IS_ERR(mei_hdr)) {
		/* propagate the (negative) errno directly */
		rets = PTR_ERR(mei_hdr);
		mei_hdr = NULL;
		goto err;
	}

	cl_dbg(dev, cl, "Extended Header %d vtag = %d\n",
	       mei_hdr->extended, cb->vtag);

	hdr_len = sizeof(*mei_hdr) + mei_hdr->length;

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		rets = buf_len;
		goto out;
	}

	if (!mei_hbuf_acquire(dev)) {
		cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
		rets = buf_len;
		goto out;
	}

	hbuf_slots = mei_hbuf_empty_slots(dev);
	if (hbuf_slots < 0) {
		rets = -EOVERFLOW;
		goto out;
	}

	hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
	dr_slots = mei_dma_ring_empty_slots(dev);
	dr_len = mei_slots2data(dr_slots);

	if (hdr_len + buf_len <= hbuf_len) {
		data_len = buf_len;
		mei_hdr->msg_complete = 1;
	} else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
		mei_hdr->dma_ring = 1;
		if (buf_len > dr_len)
			buf_len = dr_len;
		else
			mei_hdr->msg_complete = 1;

		data_len = sizeof(dma_len);
		dma_len = buf_len;
		data = &dma_len;
	} else {
		buf_len = hbuf_len - hdr_len;
		data_len = buf_len;
	}

	mei_hdr->length += data_len;

	if (mei_hdr->dma_ring)
		mei_dma_ring_write(dev, buf->data, buf_len);
	rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);

	if (rets)
		goto err;

	rets = mei_cl_tx_flow_ctrl_creds_reduce(cl);
	if (rets)
		goto err;

	cl->writing_state = MEI_WRITING;
	cb->buf_idx = buf_len;
	/* restore return value */
	buf_len = buf->size;

out:
	if (mei_hdr->msg_complete)
		mei_tx_cb_enqueue(cb, &dev->write_waiting_list);
	else
		mei_tx_cb_enqueue(cb, &dev->write_list);

	cb = NULL;
	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {

		mutex_unlock(&dev->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
						cl->writing_state == MEI_WRITE_COMPLETE ||
						(!mei_cl_is_connected(cl)));
		mutex_lock(&dev->device_lock);
		/* wait_event_interruptible returns -ERESTARTSYS */
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto err;
		}
		if (cl->writing_state != MEI_WRITE_COMPLETE) {
			rets = -EFAULT;
			goto err;
		}
	}

	rets = buf_len;
err:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
free:
	mei_io_cb_free(cb);

	kfree(mei_hdr);

	return rets;
}

/**
 * mei_cl_complete - processes completed operation for a client
 *
 * @cl: private data of the file object.
 * @cb: callback block.
 */
void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev = cl->dev;

	switch (cb->fop_type) {
	case MEI_FOP_WRITE:
		mei_tx_cb_dequeue(cb);
		cl->writing_state = MEI_WRITE_COMPLETE;
		if (waitqueue_active(&cl->tx_wait)) {
			wake_up_interruptible(&cl->tx_wait);
		} else {
			pm_runtime_mark_last_busy(dev->dev);
			pm_request_autosuspend(dev->dev);
		}
		break;

	case MEI_FOP_READ:
		mei_cl_add_rd_completed(cl, cb);
		if (!mei_cl_is_fixed_address(cl) &&
		    !WARN_ON(!cl->rx_flow_ctrl_creds))
			cl->rx_flow_ctrl_creds--;
		if (!mei_cl_bus_rx_event(cl))
			wake_up_interruptible(&cl->rx_wait);
		break;

	case MEI_FOP_CONNECT:
	case MEI_FOP_DISCONNECT:
	case MEI_FOP_NOTIFY_STOP:
	case MEI_FOP_NOTIFY_START:
	case MEI_FOP_DMA_MAP:
	case MEI_FOP_DMA_UNMAP:
		if (waitqueue_active(&cl->wait))
			wake_up(&cl->wait);

		break;
	case MEI_FOP_DISCONNECT_RSP:
		mei_io_cb_free(cb);
		mei_cl_set_disconnected(cl);
		break;
	default:
		BUG_ON(0);
	}
}


/**
 * mei_cl_all_disconnect - disconnect forcefully all connected clients
 *
 * @dev: mei device
 */
void mei_cl_all_disconnect(struct mei_device *dev)
{
	struct mei_cl *cl;

	list_for_each_entry(cl, &dev->file_list, link)
		mei_cl_set_disconnected(cl);
}

/**
 * mei_cl_dma_map_find - find the client that holds the given dma buffer id
 *
 * @dev: mei device
 * @buffer_id: id of the mapped buffer
 *
 * Return: the host client or NULL if the buffer id is not in use
 */
static struct mei_cl *mei_cl_dma_map_find(struct mei_device *dev, u8 buffer_id)
{
	struct mei_cl *cl;

	list_for_each_entry(cl, &dev->file_list, link)
		if (cl->dma.buffer_id == buffer_id)
			return cl;
	return NULL;
}

/**
 * mei_cl_irq_dma_map - send client dma map request in irq_thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_irq_dma_map(struct mei_cl *cl, struct mei_cl_cb *cb,
		       struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_dma_map_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_hbm_cl_dma_map_req(dev, cl);
	if (ret) {
		cl->status = ret;
		list_move_tail(&cb->list, cmpl_list);
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	return 0;
}

/**
 * mei_cl_irq_dma_unmap - send client dma unmap request in irq_thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_irq_dma_unmap(struct mei_cl *cl, struct mei_cl_cb *cb,
			 struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_dma_unmap_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_hbm_cl_dma_unmap_req(dev, cl);
	if (ret) {
		cl->status = ret;
		list_move_tail(&cb->list, cmpl_list);
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	return 0;
}

static int mei_cl_dma_alloc(struct mei_cl *cl, u8 buf_id, size_t size)
{
	cl->dma.vaddr = dmam_alloc_coherent(cl->dev->dev, size,
					    &cl->dma.daddr, GFP_KERNEL);
	if (!cl->dma.vaddr)
		return -ENOMEM;

	cl->dma.buffer_id = buf_id;
	cl->dma.size = size;

	return 0;
}

static void mei_cl_dma_free(struct mei_cl *cl)
{
	cl->dma.buffer_id = 0;
	dmam_free_coherent(cl->dev->dev,
			   cl->dma.size, cl->dma.vaddr, cl->dma.daddr);
	cl->dma.size = 0;
	cl->dma.vaddr = NULL;
	cl->dma.daddr = 0;
}

/**
 * mei_cl_dma_alloc_and_map - send client dma map request
 *
 * @cl: host client
 * @fp: pointer to file structure
 * @buffer_id: id of the mapped buffer
 * @size: size of the buffer
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return:
 * * -ENODEV
 * * -EINVAL
 * * -EOPNOTSUPP
 * * -EPROTO
 * * -ENOMEM;
 */
int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp,
			     u8 buffer_id, size_t size)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_cd_supported) {
		cl_dbg(dev, cl, "client dma is not supported\n");
		return -EOPNOTSUPP;
	}

	if (buffer_id == 0)
		return -EINVAL;

	if (mei_cl_is_connected(cl))
		return -EPROTO;

	if (cl->dma_mapped)
		return -EPROTO;

	if (mei_cl_dma_map_find(dev, buffer_id)) {
		cl_dbg(dev, cl, "client dma with id %d is already allocated\n",
		       cl->dma.buffer_id);
		return -EPROTO;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	rets = mei_cl_dma_alloc(cl, buffer_id, size);
	if (rets) {
		pm_runtime_put_noidle(dev->dev);
		return rets;
	}

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DMA_MAP, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	if (mei_hbuf_acquire(dev)) {
		if (mei_hbm_cl_dma_map_req(dev, cl)) {
			rets = -ENODEV;
			goto out;
		}
		list_move_tail(&cb->list, &dev->ctrl_rd_list);
	}

	cl->status = 0;

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->dma_mapped || cl->status,
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (!cl->dma_mapped && !cl->status)
		cl->status = -EFAULT;

	rets = cl->status;

out:
	if (rets)
		mei_cl_dma_free(cl);

	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);
	return rets;
}

/**
 * mei_cl_dma_unmap - send client dma unmap request
 *
 * @cl: host client
 * @fp: pointer to file structure
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_cd_supported) {
		cl_dbg(dev, cl, "client dma is not supported\n");
		return -EOPNOTSUPP;
	}

	/* do not allow unmap for connected client */
	if (mei_cl_is_connected(cl))
		return -EPROTO;

	if (!cl->dma_mapped)
		return -EPROTO;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DMA_UNMAP, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	if (mei_hbuf_acquire(dev)) {
		if (mei_hbm_cl_dma_unmap_req(dev, cl)) {
			rets = -ENODEV;
			goto out;
		}
		list_move_tail(&cb->list, &dev->ctrl_rd_list);
	}

	cl->status = 0;

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   !cl->dma_mapped || cl->status,
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (cl->dma_mapped && !cl->status)
		cl->status = -EFAULT;

	rets = cl->status;

	if (!rets)
		mei_cl_dma_free(cl);
out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);
	return rets;
}