/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "client.h"

/**
 * mei_me_cl_init - initialize me client
 *
 * @me_cl: me client
 */
void mei_me_cl_init(struct mei_me_client *me_cl)
{
	INIT_LIST_HEAD(&me_cl->list);
	kref_init(&me_cl->refcnt);
}

/**
 * mei_me_cl_get - increases me client refcount
 *
 * @me_cl: me client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: me client or NULL
 */
struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl)
{
	if (me_cl && kref_get_unless_zero(&me_cl->refcnt))
		return me_cl;

	return NULL;
}

/**
 * mei_me_cl_release - free me client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @ref: me_client refcount
 */
static void mei_me_cl_release(struct kref *ref)
{
	struct mei_me_client *me_cl =
		container_of(ref, struct mei_me_client, refcnt);

	kfree(me_cl);
}

/**
 * mei_me_cl_put - decrease me client refcount and free client if necessary
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @me_cl: me client
 */
void mei_me_cl_put(struct mei_me_client *me_cl)
{
	if (me_cl)
		kref_put(&me_cl->refcnt, mei_me_cl_release);
}

/**
 * __mei_me_cl_del - delete me client from the list and decrease
 *	reference counter
 *
 * @dev: mei device
 * @me_cl: me client
 *
 * Locking: dev->me_clients_rwsem
 */
static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{
	if (!me_cl)
		return;

	list_del_init(&me_cl->list);
	mei_me_cl_put(me_cl);
}

/**
 * mei_me_cl_del - delete me client from the list and decrease
 *	reference counter
 *
 * @dev: mei device
 * @me_cl: me client
 */
void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{
	down_write(&dev->me_clients_rwsem);
	__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_add - add me client to the list
 *
 * @dev: mei device
 * @me_cl: me client
 */
void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl)
{
	down_write(&dev->me_clients_rwsem);
	list_add(&me_cl->list, &dev->me_clients);
	up_write(&dev->me_clients_rwsem);
}

/**
 * __mei_me_cl_by_uuid - locate me client by uuid
 *	increases ref count
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
static struct mei_me_client *__mei_me_cl_by_uuid(struct mei_device *dev,
					const uuid_le *uuid)
{
	struct mei_me_client *me_cl;
	const uuid_le *pn;

	WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));

	list_for_each_entry(me_cl, &dev->me_clients, list) {
		pn = &me_cl->props.protocol_name;
		if (uuid_le_cmp(*uuid, *pn) == 0)
			return mei_me_cl_get(me_cl);
	}

	return NULL;
}
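
/*
 * Usage note: a successful lookup returns with the refcount raised, so
 * every lookup must be paired with mei_me_cl_put(). A minimal sketch
 * (illustrative only, assuming a uuid_le "uuid" in scope), mirroring
 * what the unlocked wrappers below do:
 *
 *	struct mei_me_client *me_cl;
 *
 *	down_read(&dev->me_clients_rwsem);
 *	me_cl = __mei_me_cl_by_uuid(dev, &uuid);
 *	up_read(&dev->me_clients_rwsem);
 *	if (me_cl) {
 *		... use me_cl->props ...
 *		mei_me_cl_put(me_cl);
 *	}
 */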

/**
 * mei_me_cl_by_uuid - locate me client by uuid
 *	increases ref count
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev,
					const uuid_le *uuid)
{
	struct mei_me_client *me_cl;

	down_read(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid(dev, uuid);
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}

/**
 * mei_me_cl_by_id - locate me client by client id
 *	increases ref count
 *
 * @dev: the device structure
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
{

	struct mei_me_client *__me_cl, *me_cl = NULL;

	down_read(&dev->me_clients_rwsem);
	list_for_each_entry(__me_cl, &dev->me_clients, list) {
		if (__me_cl->client_id == client_id) {
			me_cl = mei_me_cl_get(__me_cl);
			break;
		}
	}
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}

/**
 * __mei_me_cl_by_uuid_id - locate me client by client id and uuid
 *	increases ref count
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
static struct mei_me_client *__mei_me_cl_by_uuid_id(struct mei_device *dev,
					const uuid_le *uuid, u8 client_id)
{
	struct mei_me_client *me_cl;
	const uuid_le *pn;

	WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));

	list_for_each_entry(me_cl, &dev->me_clients, list) {
		pn = &me_cl->props.protocol_name;
		if (uuid_le_cmp(*uuid, *pn) == 0 &&
		    me_cl->client_id == client_id)
			return mei_me_cl_get(me_cl);
	}

	return NULL;
}


/**
 * mei_me_cl_by_uuid_id - locate me client by client id and uuid
 *	increases ref count
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 */
struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
					const uuid_le *uuid, u8 client_id)
{
	struct mei_me_client *me_cl;

	down_read(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, client_id);
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}

/**
 * mei_me_cl_rm_by_uuid - remove the me client matching uuid
 *
 * @dev: the device structure
 * @uuid: me client uuid
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
{
	struct mei_me_client *me_cl;

	dev_dbg(dev->dev, "remove %pUl\n", uuid);

	down_write(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid(dev, uuid);
	__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_rm_by_uuid_id - remove the me client matching uuid and client id
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @id: me client id
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
{
	struct mei_me_client *me_cl;

	dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id);

	down_write(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
	__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_rm_all - remove all me clients
 *
 * @dev: the device structure
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_all(struct mei_device *dev)
{
	struct mei_me_client *me_cl, *next;

	down_write(&dev->me_clients_rwsem);
	list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
		__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_cl_cmp_id - tells if the clients are the same
 *
 * @cl1: host client 1
 * @cl2: host client 2
 *
 * Return: true - if the clients have the same host and me ids
 *	false - otherwise
 */
static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
				const struct mei_cl *cl2)
{
	return cl1 && cl2 &&
		(cl1->host_client_id == cl2->host_client_id) &&
		(mei_cl_me_id(cl1) == mei_cl_me_id(cl2));
}

/**
 * mei_io_cb_free - free mei_cl_cb related memory
 *
 * @cb: mei callback struct
 */
void mei_io_cb_free(struct mei_cl_cb *cb)
{
	if (cb == NULL)
		return;

	list_del(&cb->list);
	kfree(cb->buf.data);
	kfree(cb);
}

/**
 * mei_io_cb_init - allocate and initialize io callback
 *
 * @cl: mei client
 * @type: operation type
 * @fp: pointer to file structure
 *
 * Return: mei_cl_cb pointer or NULL
 */
struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, enum mei_cb_file_ops type,
				const struct file *fp)
{
	struct mei_cl_cb *cb;

	cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	INIT_LIST_HEAD(&cb->list);
	cb->fp = fp;
	cb->cl = cl;
	cb->buf_idx = 0;
	cb->fop_type = type;
	return cb;
}

/**
 * __mei_io_list_flush - removes and frees cbs belonging to cl.
 *
 * @list: an instance of our list structure
 * @cl: host client, can be NULL for flushing the whole list
 * @free: whether to free the cbs
 */
static void __mei_io_list_flush(struct mei_cl_cb *list,
				struct mei_cl *cl, bool free)
{
	struct mei_cl_cb *cb, *next;

	/* enable removing everything if no cl is specified */
	list_for_each_entry_safe(cb, next, &list->list, list) {
		if (!cl || mei_cl_cmp_id(cl, cb->cl)) {
			list_del_init(&cb->list);
			if (free)
				mei_io_cb_free(cb);
		}
	}
}

/**
 * mei_io_list_flush - removes list entries belonging to cl.
 *
 * @list: an instance of our list structure
 * @cl: host client
 */
void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
{
	__mei_io_list_flush(list, cl, false);
}

/**
 * mei_io_list_free - removes cbs belonging to cl and frees them
 *
 * @list: an instance of our list structure
 * @cl: host client
 */
static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl)
{
	__mei_io_list_flush(list, cl, true);
}

/**
 * mei_io_cb_alloc_buf - allocate callback buffer
 *
 * @cb: io callback structure
 * @length: size of the buffer
 *
 * Return: 0 on success
 *	-EINVAL if cb is NULL
 *	-ENOMEM if allocation failed
 */
int mei_io_cb_alloc_buf(struct mei_cl_cb *cb, size_t length)
{
	if (!cb)
		return -EINVAL;

	if (length == 0)
		return 0;

	cb->buf.data = kmalloc(length, GFP_KERNEL);
	if (!cb->buf.data)
		return -ENOMEM;
	cb->buf.size = length;
	return 0;
}

/**
 * mei_cl_alloc_cb - a convenient wrapper for allocating an io callback
 *
 * @cl: host client
 * @length: size of the buffer
 * @type: operation type
 * @fp: associated file pointer (might be NULL)
 *
 * Return: cb on success and NULL on failure
 */
struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
				enum mei_cb_file_ops type,
				const struct file *fp)
{
	struct mei_cl_cb *cb;

	cb = mei_io_cb_init(cl, type, fp);
	if (!cb)
		return NULL;

	if (mei_io_cb_alloc_buf(cb, length)) {
		mei_io_cb_free(cb);
		return NULL;
	}

	return cb;
}

/**
 * mei_cl_read_cb - find this cl's callback in the read list
 *	for a specific file
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 *
 * Return: cb on success, NULL if cb is not found
 */
struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, const struct file *fp)
{
	struct mei_cl_cb *cb;

	list_for_each_entry(cb, &cl->rd_completed, list)
		if (!fp || fp == cb->fp)
			return cb;

	return NULL;
}

/**
 * mei_cl_read_cb_flush - free client's read pending and completed cbs
 *	for a specific file
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 */
void mei_cl_read_cb_flush(const struct mei_cl *cl, const struct file *fp)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, &cl->rd_completed, list)
		if (!fp || fp == cb->fp)
			mei_io_cb_free(cb);


	list_for_each_entry_safe(cb, next, &cl->rd_pending, list)
		if (!fp || fp == cb->fp)
			mei_io_cb_free(cb);
}
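
/*
 * Usage note: a minimal sketch (illustrative only) of the cb life
 * cycle built from the helpers above; a cb that is not handed over to
 * a queue for completion is released with mei_io_cb_free(), which also
 * unlinks it from whatever list it is on:
 *
 *	struct mei_cl_cb *cb;
 *
 *	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_READ, fp);
 *	if (!cb)
 *		return -ENOMEM;
 *	...
 *	mei_io_cb_free(cb);
 */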

/**
 * mei_cl_flush_queues - flushes queue lists belonging to cl.
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 *
 * Return: 0 on success, -EINVAL if cl or cl->dev is NULL.
 */
int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
{
	struct mei_device *dev;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	cl_dbg(dev, cl, "remove list entry belonging to cl\n");
	mei_io_list_free(&cl->dev->write_list, cl);
	mei_io_list_free(&cl->dev->write_waiting_list, cl);
	mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
	mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
	mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);

	mei_cl_read_cb_flush(cl, fp);

	return 0;
}


/**
 * mei_cl_init - initializes cl.
 *
 * @cl: host client to be initialized
 * @dev: mei device
 */
void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
{
	memset(cl, 0, sizeof(struct mei_cl));
	init_waitqueue_head(&cl->wait);
	init_waitqueue_head(&cl->rx_wait);
	init_waitqueue_head(&cl->tx_wait);
	init_waitqueue_head(&cl->ev_wait);
	INIT_LIST_HEAD(&cl->rd_completed);
	INIT_LIST_HEAD(&cl->rd_pending);
	INIT_LIST_HEAD(&cl->link);
	cl->writing_state = MEI_IDLE;
	cl->state = MEI_FILE_INITIALIZING;
	cl->dev = dev;
}

/**
 * mei_cl_allocate - allocates cl structure and sets it up.
 *
 * @dev: mei device
 * Return: the allocated host client structure or NULL on failure
 */
struct mei_cl *mei_cl_allocate(struct mei_device *dev)
{
	struct mei_cl *cl;

	cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
	if (!cl)
		return NULL;

	mei_cl_init(cl, dev);

	return cl;
}

/**
 * mei_cl_link - allocate host id in the host map
 *
 * @cl: host client
 *
 * Return: 0 on success
 *	-EINVAL on incorrect values
 *	-EMFILE if open count exceeded.
 */
int mei_cl_link(struct mei_cl *cl)
{
	struct mei_device *dev;
	long open_handle_count;
	int id;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
	if (id >= MEI_CLIENTS_MAX) {
		dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
		return -EMFILE;
	}

	open_handle_count = dev->open_handle_count + dev->iamthif_open_count;
	if (open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(dev->dev, "open_handle_count exceeded %d",
			MEI_MAX_OPEN_HANDLE_COUNT);
		return -EMFILE;
	}

	dev->open_handle_count++;

	cl->host_client_id = id;
	list_add_tail(&cl->link, &dev->file_list);

	set_bit(id, dev->host_clients_map);

	cl->state = MEI_FILE_INITIALIZING;

	cl_dbg(dev, cl, "link cl\n");
	return 0;
}

/**
 * mei_cl_unlink - remove host client from the list
 *
 * @cl: host client
 *
 * Return: always 0
 */
int mei_cl_unlink(struct mei_cl *cl)
{
	struct mei_device *dev;

	/* don't shout on error exit path */
	if (!cl)
		return 0;

	/* amthif might not be initialized */
	if (!cl->dev)
		return 0;

	dev = cl->dev;

	cl_dbg(dev, cl, "unlink client");

	if (dev->open_handle_count > 0)
		dev->open_handle_count--;

	/* never clear the 0 bit */
	if (cl->host_client_id)
		clear_bit(cl->host_client_id, dev->host_clients_map);

	list_del_init(&cl->link);

	cl->state = MEI_FILE_INITIALIZING;

	return 0;
}
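
/*
 * Usage note: a minimal sketch (illustrative only) of the host client
 * life cycle, under dev->device_lock; this is exactly what
 * mei_cl_alloc_linked() below wraps:
 *
 *	cl = mei_cl_allocate(dev);
 *	if (!cl)
 *		return -ENOMEM;
 *	ret = mei_cl_link(cl);
 *	if (ret) {
 *		kfree(cl);
 *		return ret;
 *	}
 *	...
 *	mei_cl_unlink(cl);
 *	kfree(cl);
 */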

/**
 * mei_host_client_init - mark the device enabled and schedule a bus rescan
 *
 * @dev: the device structure
 */
void mei_host_client_init(struct mei_device *dev)
{
	dev->dev_state = MEI_DEV_ENABLED;
	dev->reset_count = 0;

	schedule_work(&dev->bus_rescan_work);

	pm_runtime_mark_last_busy(dev->dev);
	dev_dbg(dev->dev, "rpm: autosuspend\n");
	pm_runtime_autosuspend(dev->dev);
}

/**
 * mei_hbuf_acquire - try to acquire host buffer
 *
 * @dev: the device structure
 * Return: true if host buffer was acquired
 */
bool mei_hbuf_acquire(struct mei_device *dev)
{
	if (mei_pg_state(dev) == MEI_PG_ON ||
	    mei_pg_in_transition(dev)) {
		dev_dbg(dev->dev, "device is in pg\n");
		return false;
	}

	if (!dev->hbuf_is_ready) {
		dev_dbg(dev->dev, "hbuf is not ready\n");
		return false;
	}

	dev->hbuf_is_ready = false;

	return true;
}

/**
 * mei_cl_wake_all - wake up readers, writers and event waiters so
 *	they can be interrupted
 *
 * @cl: host client
 */
static void mei_cl_wake_all(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	/* synchronized under device mutex */
	if (waitqueue_active(&cl->rx_wait)) {
		cl_dbg(dev, cl, "Waking up reading client!\n");
		wake_up_interruptible(&cl->rx_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->tx_wait)) {
		cl_dbg(dev, cl, "Waking up writing client!\n");
		wake_up_interruptible(&cl->tx_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->ev_wait)) {
		cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
		wake_up_interruptible(&cl->ev_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->wait)) {
		cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
		wake_up(&cl->wait);
	}
}

/**
 * mei_cl_set_disconnected - set disconnected state and clear
 *	associated states and resources
 *
 * @cl: host client
 */
void mei_cl_set_disconnected(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	if (cl->state == MEI_FILE_DISCONNECTED ||
	    cl->state == MEI_FILE_INITIALIZING)
		return;

	cl->state = MEI_FILE_DISCONNECTED;
	mei_io_list_free(&dev->write_list, cl);
	mei_io_list_free(&dev->write_waiting_list, cl);
	mei_io_list_flush(&dev->ctrl_rd_list, cl);
	mei_io_list_flush(&dev->ctrl_wr_list, cl);
	mei_cl_wake_all(cl);
	cl->mei_flow_ctrl_creds = 0;
	cl->timer_count = 0;

	if (!cl->me_cl)
		return;

	if (!WARN_ON(cl->me_cl->connect_count == 0))
		cl->me_cl->connect_count--;

	if (cl->me_cl->connect_count == 0)
		cl->me_cl->mei_flow_ctrl_creds = 0;

	mei_me_cl_put(cl->me_cl);
	cl->me_cl = NULL;
}

/**
 * mei_cl_set_connecting - take a reference on the me client and mark
 *	the host client as connecting
 *
 * @cl: host client
 * @me_cl: me client
 *
 * Return: 0 on success, -ENOENT if the me client is no longer active,
 *	-EBUSY if a fixed address client is already connected
 */
static int mei_cl_set_connecting(struct mei_cl *cl, struct mei_me_client *me_cl)
{
	if (!mei_me_cl_get(me_cl))
		return -ENOENT;

	/* only one connection is allowed for fixed address clients */
	if (me_cl->props.fixed_address) {
		if (me_cl->connect_count) {
			mei_me_cl_put(me_cl);
			return -EBUSY;
		}
	}

	cl->me_cl = me_cl;
	cl->state = MEI_FILE_CONNECTING;
	cl->me_cl->connect_count++;

	return 0;
}
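
/*
 * Usage note: mei_hbuf_acquire() gates every HBM request in this file;
 * a minimal sketch (illustrative only) of the pattern the connect,
 * disconnect and notify paths all follow - queue the cb first, send
 * immediately only if the host buffer is free, otherwise leave the cb
 * for the interrupt thread to pick up:
 *
 *	list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
 *	if (mei_hbuf_acquire(dev)) {
 *		rets = mei_cl_send_disconnect(cl, cb);
 *		if (rets)
 *			goto out;
 *	}
 */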

/**
 * mei_cl_send_disconnect - send disconnect request
 *
 * @cl: host client
 * @cb: callback block
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	int ret;

	dev = cl->dev;

	ret = mei_hbm_cl_disconnect_req(dev, cl);
	cl->status = ret;
	if (ret) {
		cl->state = MEI_FILE_DISCONNECT_REPLY;
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;

	return 0;
}

/**
 * mei_cl_irq_disconnect - processes close related operation from
 *	interrupt thread context - send disconnect request
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise, error.
 */
int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
			struct mei_cl_cb *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);

	if (slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_cl_send_disconnect(cl, cb);
	if (ret)
		list_move_tail(&cb->list, &cmpl_list->list);

	return ret;
}

/**
 * __mei_cl_disconnect - disconnect host client from the me one;
 *	internal function, runtime pm has to be already acquired
 *
 * @cl: host client
 *
 * Return: 0 on success, <0 on failure.
 */
static int __mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	dev = cl->dev;

	cl->state = MEI_FILE_DISCONNECTING;

	cb = mei_io_cb_init(cl, MEI_FOP_DISCONNECT, NULL);
	rets = cb ? 0 : -ENOMEM;
	if (rets)
		goto out;

	cl_dbg(dev, cl, "add disconnect cb to control write list\n");
	list_add_tail(&cb->list, &dev->ctrl_wr_list.list);

	if (mei_hbuf_acquire(dev)) {
		rets = mei_cl_send_disconnect(cl, cb);
		if (rets) {
			cl_err(dev, cl, "failed to disconnect.\n");
			goto out;
		}
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			cl->state == MEI_FILE_DISCONNECT_REPLY ||
			cl->state == MEI_FILE_DISCONNECTED,
			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	rets = cl->status;
	if (cl->state != MEI_FILE_DISCONNECT_REPLY &&
	    cl->state != MEI_FILE_DISCONNECTED) {
		cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
		rets = -ETIME;
	}

out:
	/* we disconnect also on error */
	mei_cl_set_disconnected(cl);
	if (!rets)
		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");

	mei_io_cb_free(cb);
	return rets;
}

/**
 * mei_cl_disconnect - disconnect host client from the me one
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	cl_dbg(dev, cl, "disconnecting");

	if (!mei_cl_is_connected(cl))
		return 0;

	if (mei_cl_is_fixed_address(cl)) {
		mei_cl_set_disconnected(cl);
		return 0;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	rets = __mei_cl_disconnect(cl);

	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return rets;
}


/**
 * mei_cl_is_other_connecting - checks if another
 *	client with the same me client id is connecting
 *
 * @cl: private data of the file object
 *
 * Return: true if another client is connecting, false otherwise.
 */
static bool mei_cl_is_other_connecting(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;

	dev = cl->dev;

	list_for_each_entry(cb, &dev->ctrl_rd_list.list, list) {
		if (cb->fop_type == MEI_FOP_CONNECT &&
		    mei_cl_me_id(cl) == mei_cl_me_id(cb->cl))
			return true;
	}

	return false;
}

/**
 * mei_cl_send_connect - send connect request
 *
 * @cl: host client
 * @cb: callback block
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	int ret;

	dev = cl->dev;

	ret = mei_hbm_cl_connect_req(dev, cl);
	cl->status = ret;
	if (ret) {
		cl->state = MEI_FILE_DISCONNECT_REPLY;
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;
	return 0;
}

/**
 * mei_cl_irq_connect - send connect request in irq_thread context
 *
 * @cl: host client
 * @cb: callback block
 * @cmpl_list: complete list
 *
 * Return: 0, OK; otherwise, error.
 */
int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
		struct mei_cl_cb *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int rets;

	msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);

	if (mei_cl_is_other_connecting(cl))
		return 0;

	if (slots < msg_slots)
		return -EMSGSIZE;

	rets = mei_cl_send_connect(cl, cb);
	if (rets)
		list_move_tail(&cb->list, &cmpl_list->list);

	return rets;
}
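
/*
 * Note: every path that talks to the firmware brackets the transaction
 * with runtime PM, as mei_cl_disconnect() above does; a minimal sketch
 * (illustrative only):
 *
 *	rets = pm_runtime_get(dev->dev);
 *	if (rets < 0 && rets != -EINPROGRESS) {
 *		pm_runtime_put_noidle(dev->dev);
 *		return rets;
 *	}
 *	... perform the I/O ...
 *	pm_runtime_mark_last_busy(dev->dev);
 *	pm_runtime_put_autosuspend(dev->dev);
 */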

/**
 * mei_cl_connect - connect host client to the me one
 *
 * @cl: host client
 * @me_cl: me client
 * @file: pointer to file structure
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
		const struct file *file)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev || !me_cl))
		return -ENODEV;

	dev = cl->dev;

	rets = mei_cl_set_connecting(cl, me_cl);
	if (rets)
		return rets;

	if (mei_cl_is_fixed_address(cl)) {
		cl->state = MEI_FILE_CONNECTED;
		return 0;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto nortpm;
	}

	cb = mei_io_cb_init(cl, MEI_FOP_CONNECT, file);
	rets = cb ? 0 : -ENOMEM;
	if (rets)
		goto out;

	list_add_tail(&cb->list, &dev->ctrl_wr_list.list);

	/* run hbuf acquire last so we don't have to undo */
	if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
		rets = mei_cl_send_connect(cl, cb);
		if (rets)
			goto out;
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			(cl->state == MEI_FILE_CONNECTED ||
			 cl->state == MEI_FILE_DISCONNECTED ||
			 cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
			 cl->state == MEI_FILE_DISCONNECT_REPLY),
			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (!mei_cl_is_connected(cl)) {
		if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
			mei_io_list_flush(&dev->ctrl_rd_list, cl);
			mei_io_list_flush(&dev->ctrl_wr_list, cl);
			/* ignore disconnect return value;
			 * in case of failure reset will be invoked
			 */
			__mei_cl_disconnect(cl);
			rets = -EFAULT;
			goto out;
		}

		/* timeout or something went really wrong */
		if (!cl->status)
			cl->status = -EFAULT;
	}

	rets = cl->status;
out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);

nortpm:
	if (!mei_cl_is_connected(cl))
		mei_cl_set_disconnected(cl);

	return rets;
}

/**
 * mei_cl_alloc_linked - allocate and link host client
 *
 * @dev: the device structure
 *
 * Return: cl on success, ERR_PTR on failure
 */
struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev)
{
	struct mei_cl *cl;
	int ret;

	cl = mei_cl_allocate(dev);
	if (!cl) {
		ret = -ENOMEM;
		goto err;
	}

	ret = mei_cl_link(cl);
	if (ret)
		goto err;

	return cl;
err:
	kfree(cl);
	return ERR_PTR(ret);
}
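
/*
 * Usage note: mei_cl_alloc_linked() reports failure with ERR_PTR, not
 * NULL, so callers check it with the error-pointer helpers; a minimal
 * sketch (illustrative only):
 *
 *	cl = mei_cl_alloc_linked(dev);
 *	if (IS_ERR(cl))
 *		return PTR_ERR(cl);
 */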

/**
 * mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
 *
 * @cl: host client
 * @fp: the file pointer associated with the host client
 *
 * Return: 1 if the client has flow control credits, 0 if it does not,
 *	<0 on error.
 */
static int mei_cl_flow_ctrl_creds(struct mei_cl *cl, const struct file *fp)
{
	int rets;

	if (WARN_ON(!cl || !cl->me_cl))
		return -EINVAL;

	if (cl->mei_flow_ctrl_creds > 0)
		return 1;

	if (mei_cl_is_fixed_address(cl)) {
		rets = mei_cl_read_start(cl, mei_cl_mtu(cl), fp);
		if (rets && rets != -EBUSY)
			return rets;
		return 1;
	}

	if (mei_cl_is_single_recv_buf(cl)) {
		if (cl->me_cl->mei_flow_ctrl_creds > 0)
			return 1;
	}
	return 0;
}

/**
 * mei_cl_flow_ctrl_reduce - reduces flow_control.
 *
 * @cl: private data of the file object
 *
 * Return:
 *	0 on success
 *	-EINVAL when ctrl credits are <= 0
 */
static int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
{
	if (WARN_ON(!cl || !cl->me_cl))
		return -EINVAL;

	if (mei_cl_is_fixed_address(cl))
		return 0;

	if (mei_cl_is_single_recv_buf(cl)) {
		if (WARN_ON(cl->me_cl->mei_flow_ctrl_creds <= 0))
			return -EINVAL;
		cl->me_cl->mei_flow_ctrl_creds--;
	} else {
		if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
			return -EINVAL;
		cl->mei_flow_ctrl_creds--;
	}
	return 0;
}

/**
 * mei_cl_notify_fop2req - convert fop to proper request
 *
 * @fop: client notification start response command
 *
 * Return: MEI_HBM_NOTIFICATION_START/STOP
 */
u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop)
{
	if (fop == MEI_FOP_NOTIFY_START)
		return MEI_HBM_NOTIFICATION_START;
	else
		return MEI_HBM_NOTIFICATION_STOP;
}

/**
 * mei_cl_notify_req2fop - convert notification request to file operation type
 *
 * @req: hbm notification request type
 *
 * Return: MEI_FOP_NOTIFY_START/STOP
 */
enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req)
{
	if (req == MEI_HBM_NOTIFICATION_START)
		return MEI_FOP_NOTIFY_START;
	else
		return MEI_FOP_NOTIFY_STOP;
}

/**
 * mei_cl_irq_notify - send notification request in irq_thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
		struct mei_cl_cb *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;
	bool request;

	msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);

	if (slots < msg_slots)
		return -EMSGSIZE;

	request = mei_cl_notify_fop2req(cb->fop_type);
	ret = mei_hbm_cl_notify_req(dev, cl, request);
	if (ret) {
		cl->status = ret;
		list_move_tail(&cb->list, &cmpl_list->list);
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
	return 0;
}

/**
 * mei_cl_notify_request - send notification stop/start request
 *
 * @cl: host client
 * @file: associate request with file
 * @request: 1 for start or 0 for stop
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_notify_request(struct mei_cl *cl,
		const struct file *file, u8 request)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	enum mei_cb_file_ops fop_type;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_ev_supported) {
		cl_dbg(dev, cl, "notifications not supported\n");
		return -EOPNOTSUPP;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	fop_type = mei_cl_notify_req2fop(request);
	cb = mei_io_cb_init(cl, fop_type, file);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	if (mei_hbuf_acquire(dev)) {
		if (mei_hbm_cl_notify_req(dev, cl, request)) {
			rets = -ENODEV;
			goto out;
		}
		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
	} else {
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			cl->notify_en == request || !mei_cl_is_connected(cl),
			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (cl->notify_en != request && !cl->status)
		cl->status = -EFAULT;

	rets = cl->status;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);
	return rets;
}

/**
 * mei_cl_notify - raise notification
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_cl_notify(struct mei_cl *cl)
{
	struct mei_device *dev;

	if (!cl || !cl->dev)
		return;

	dev = cl->dev;

	if (!cl->notify_en)
		return;

	cl_dbg(dev, cl, "notify event");
	cl->notify_ev = true;
	if (!mei_cl_bus_notify_event(cl))
		wake_up_interruptible(&cl->ev_wait);

	if (cl->ev_async)
		kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);
}

/**
 * mei_cl_notify_get - get or wait for notification event
 *
 * @cl: host client
 * @block: this request is blocking
 * @notify_ev: true if notification event was received
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
{
	struct mei_device *dev;
	int rets;

	*notify_ev = false;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	if (cl->notify_ev)
		goto out;

	if (!block)
		return -EAGAIN;

	mutex_unlock(&dev->device_lock);
	rets = wait_event_interruptible(cl->ev_wait, cl->notify_ev);
	mutex_lock(&dev->device_lock);

	if (rets < 0)
		return rets;

out:
	*notify_ev = cl->notify_ev;
	cl->notify_ev = false;
	return 0;
}
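
/*
 * Usage note: a minimal sketch (illustrative only) of enabling and
 * then consuming a notification with the two calls above:
 *
 *	ret = mei_cl_notify_request(cl, fp, MEI_HBM_NOTIFICATION_START);
 *	if (ret)
 *		return ret;
 *	ret = mei_cl_notify_get(cl, true, &notify_ev);
 */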

/**
 * mei_cl_is_read_fc_cb - check if read cb is waiting for flow control
 *	for given host client
 *
 * @cl: host client
 *
 * Return: true if at least one such cb was found.
 */
static bool mei_cl_is_read_fc_cb(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;
	struct mei_cl_cb *cb;

	list_for_each_entry(cb, &dev->ctrl_wr_list.list, list)
		if (cb->fop_type == MEI_FOP_READ && cb->cl == cl)
			return true;
	return false;
}

/**
 * mei_cl_read_start - the start read client message function.
 *
 * @cl: host client
 * @length: number of bytes to read
 * @fp: pointer to file structure
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	/* HW currently supports only one pending read */
	if (!list_empty(&cl->rd_pending) || mei_cl_is_read_fc_cb(cl))
		return -EBUSY;

	if (!mei_me_cl_is_active(cl->me_cl)) {
		cl_err(dev, cl, "no such me client\n");
		return -ENOTTY;
	}

	/* always allocate at least client max message */
	length = max_t(size_t, length, mei_cl_mtu(cl));
	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_READ, fp);
	if (!cb)
		return -ENOMEM;

	if (mei_cl_is_fixed_address(cl)) {
		list_add_tail(&cb->list, &cl->rd_pending);
		return 0;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto nortpm;
	}

	if (mei_hbuf_acquire(dev)) {
		rets = mei_hbm_cl_flow_control_req(dev, cl);
		if (rets < 0)
			goto out;

		list_add_tail(&cb->list, &cl->rd_pending);
	} else {
		rets = 0;
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
nortpm:
	if (rets)
		mei_io_cb_free(cb);

	return rets;
}
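
/*
 * Usage note: a minimal sketch (illustrative only) of arming a read
 * and picking up the result; data lands on cl->rd_completed and is
 * located with mei_cl_read_cb():
 *
 *	rets = mei_cl_read_start(cl, mei_cl_mtu(cl), fp);
 *	if (rets && rets != -EBUSY)
 *		return rets;
 *	... wait on cl->rx_wait, then cb = mei_cl_read_cb(cl, fp) ...
 */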

/**
 * mei_cl_irq_write - write a message to device
 *	from the interrupt thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise error.
 */
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
		struct mei_cl_cb *cmpl_list)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	size_t len;
	u32 msg_slots;
	int slots;
	int rets;
	bool first_chunk;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	buf = &cb->buf;

	first_chunk = cb->buf_idx == 0;

	rets = first_chunk ? mei_cl_flow_ctrl_creds(cl, cb->fp) : 1;
	if (rets < 0)
		return rets;

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		return 0;
	}

	slots = mei_hbuf_empty_slots(dev);
	len = buf->size - cb->buf_idx;
	msg_slots = mei_data2slots(len);

	mei_hdr.host_addr = mei_cl_host_addr(cl);
	mei_hdr.me_addr = mei_cl_me_id(cl);
	mei_hdr.reserved = 0;
	mei_hdr.internal = cb->internal;

	if (slots >= msg_slots) {
		mei_hdr.length = len;
		mei_hdr.msg_complete = 1;
	/* Split the message only if we can write the whole host buffer */
	} else if (slots == dev->hbuf_depth) {
		msg_slots = slots;
		len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
		mei_hdr.length = len;
		mei_hdr.msg_complete = 0;
	} else {
		/* wait for next time the host buffer is empty */
		return 0;
	}

	cl_dbg(dev, cl, "buf: size = %zu idx = %zu\n",
			cb->buf.size, cb->buf_idx);

	rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
	if (rets) {
		cl->status = rets;
		list_move_tail(&cb->list, &cmpl_list->list);
		return rets;
	}

	cl->status = 0;
	cl->writing_state = MEI_WRITING;
	cb->buf_idx += mei_hdr.length;
	cb->completed = mei_hdr.msg_complete == 1;

	if (first_chunk) {
		if (mei_cl_flow_ctrl_reduce(cl))
			return -EIO;
	}

	if (mei_hdr.msg_complete)
		list_move_tail(&cb->list, &dev->write_waiting_list.list);

	return 0;
}
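
/*
 * Note on the splitting arithmetic above: a large message is split
 * only when the entire host buffer is free (slots == dev->hbuf_depth),
 * and each full-buffer chunk carries slots * sizeof(u32) minus the
 * message header. Assuming, for illustration, a 32-slot buffer and a
 * 4-byte struct mei_msg_hdr, one chunk moves 32 * 4 - 4 = 124 bytes;
 * the flow control credit is consumed once per message, on the first
 * chunk.
 */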

/**
 * mei_cl_write - submit a write cb to mei device;
 *	assumes device_lock is locked
 *
 * @cl: host client
 * @cb: write callback with filled data
 * @blocking: block until completed
 *
 * Return: number of bytes sent on success, <0 on failure.
 */
int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	int size;
	int rets;


	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	if (WARN_ON(!cb))
		return -EINVAL;

	dev = cl->dev;

	buf = &cb->buf;
	size = buf->size;

	cl_dbg(dev, cl, "size=%d\n", size);

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto free;
	}

	cb->buf_idx = 0;
	cl->writing_state = MEI_IDLE;

	mei_hdr.host_addr = mei_cl_host_addr(cl);
	mei_hdr.me_addr = mei_cl_me_id(cl);
	mei_hdr.reserved = 0;
	mei_hdr.msg_complete = 0;
	mei_hdr.internal = cb->internal;

	rets = mei_cl_flow_ctrl_creds(cl, cb->fp);
	if (rets < 0)
		goto err;

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		rets = size;
		goto out;
	}
	if (!mei_hbuf_acquire(dev)) {
		cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
		rets = size;
		goto out;
	}

	/* Check for a maximum length */
	if (size > mei_hbuf_max_len(dev)) {
		mei_hdr.length = mei_hbuf_max_len(dev);
		mei_hdr.msg_complete = 0;
	} else {
		mei_hdr.length = size;
		mei_hdr.msg_complete = 1;
	}

	rets = mei_write_message(dev, &mei_hdr, buf->data);
	if (rets)
		goto err;

	rets = mei_cl_flow_ctrl_reduce(cl);
	if (rets)
		goto err;

	cl->writing_state = MEI_WRITING;
	cb->buf_idx = mei_hdr.length;
	cb->completed = mei_hdr.msg_complete == 1;

out:
	if (mei_hdr.msg_complete)
		list_add_tail(&cb->list, &dev->write_waiting_list.list);
	else
		list_add_tail(&cb->list, &dev->write_list.list);

	cb = NULL;
	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {

		mutex_unlock(&dev->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
				cl->writing_state == MEI_WRITE_COMPLETE ||
				(!mei_cl_is_connected(cl)));
		mutex_lock(&dev->device_lock);
		/* wait_event_interruptible returns -ERESTARTSYS */
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto err;
		}
		if (cl->writing_state != MEI_WRITE_COMPLETE) {
			rets = -EFAULT;
			goto err;
		}
	}

	rets = size;
err:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
free:
	mei_io_cb_free(cb);

	return rets;
}
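
/*
 * Usage note: a minimal sketch (illustrative only) of a complete write
 * through the helpers above; mei_cl_write() consumes the cb on every
 * path, so the caller must not free it afterwards:
 *
 *	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, fp);
 *	if (!cb)
 *		return -ENOMEM;
 *	memcpy(cb->buf.data, data, length);
 *	rets = mei_cl_write(cl, cb, blocking);
 */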

/**
 * mei_cl_complete - processes completed operation for a client
 *
 * @cl: private data of the file object.
 * @cb: callback block.
 */
void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev = cl->dev;

	switch (cb->fop_type) {
	case MEI_FOP_WRITE:
		mei_io_cb_free(cb);
		cl->writing_state = MEI_WRITE_COMPLETE;
		if (waitqueue_active(&cl->tx_wait)) {
			wake_up_interruptible(&cl->tx_wait);
		} else {
			pm_runtime_mark_last_busy(dev->dev);
			pm_request_autosuspend(dev->dev);
		}
		break;

	case MEI_FOP_READ:
		list_add_tail(&cb->list, &cl->rd_completed);
		if (!mei_cl_bus_rx_event(cl))
			wake_up_interruptible(&cl->rx_wait);
		break;

	case MEI_FOP_CONNECT:
	case MEI_FOP_DISCONNECT:
	case MEI_FOP_NOTIFY_STOP:
	case MEI_FOP_NOTIFY_START:
		if (waitqueue_active(&cl->wait))
			wake_up(&cl->wait);

		break;
	case MEI_FOP_DISCONNECT_RSP:
		mei_io_cb_free(cb);
		mei_cl_set_disconnected(cl);
		break;
	default:
		BUG();
	}
}


/**
 * mei_cl_all_disconnect - disconnect forcefully all connected clients
 *
 * @dev: mei device
 */
void mei_cl_all_disconnect(struct mei_device *dev)
{
	struct mei_cl *cl;

	list_for_each_entry(cl, &dev->file_list, link)
		mei_cl_set_disconnected(cl);
}