// SPDX-License-Identifier: GPL-2.0
/*
 * Intel(R) Trace Hub Memory Storage Unit
 *
 * Copyright (C) 2014-2015 Intel Corporation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/workqueue.h>
#include <linux/dma-mapping.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <linux/intel_th.h>
#include "intel_th.h"
#include "msu.h"

#define msc_dev(x) (&(x)->thdev->dev)

/*
 * Lockout state transitions:
 *   READY -> INUSE -+-> LOCKED -+-> READY -> etc.
 *                   \-----------/
 * WIN_READY:	window can be used by HW
 * WIN_INUSE:	window is in use
 * WIN_LOCKED:	window is filled up and is being processed by the buffer
 *		handling code
 *
 * All state transitions happen automatically, except for LOCKED->READY,
 * which needs to be signalled by the buffer code by calling
 * intel_th_msc_window_unlock().
 *
 * When the interrupt handler has to switch to the next window, it checks
 * whether it's READY, and if it is, it performs the switch and tracing
 * continues. If it's LOCKED, it stops the trace.
 */
enum lockout_state {
	WIN_READY = 0,
	WIN_INUSE,
	WIN_LOCKED
};

/**
 * struct msc_window - multiblock mode window descriptor
 * @entry:	window list linkage (msc::win_list)
 * @pgoff:	page offset into the buffer that this window starts at
 * @lockout:	lockout state, see the comment above enum lockout_state
 * @lo_lock:	lockout state serialization
 * @nr_blocks:	number of blocks (pages) in this window
 * @nr_segs:	number of segments in this window (<= @nr_blocks)
 * @msc:	back pointer to the MSC device that owns this window
 * @_sgt:	in-line SG table backing store for @sgt
 * @sgt:	SG table of block descriptors; points to @_sgt unless an
 *		external MSU buffer supplies its own table
 */
struct msc_window {
	struct list_head	entry;
	unsigned long		pgoff;
	enum lockout_state	lockout;
	spinlock_t		lo_lock;
	unsigned int		nr_blocks;
	unsigned int		nr_segs;
	struct msc		*msc;
	struct sg_table		_sgt;
	struct sg_table		*sgt;
};

/**
 * struct msc_iter - iterator for msc buffer
 * @entry:		msc::iter_list linkage
 * @msc:		pointer to the MSC device
 * @start_win:		oldest window
 * @win:		current window
 * @offset:		current logical offset into the buffer
 * @start_block:	oldest block in the window
 * @block:		current block in the window
 * @block_off:		offset into the current block
 * @wrap_count:		remaining visits to the block where the data wrapped
 * @eof:		end of buffer reached
 */
struct msc_iter {
	struct list_head	entry;
	struct msc		*msc;
	struct msc_window	*start_win;
	struct msc_window	*win;
	unsigned long		offset;
	struct scatterlist	*start_block;
	struct scatterlist	*block;
	unsigned int		block_off;
	unsigned int		wrap_count;
	unsigned int		eof;
};

/**
 * struct msc - MSC device representation
 * @reg_base:		register window base address
 * @msu_base:		MSU global register window base address
 * @thdev:		intel_th_device pointer
 * @mbuf:		MSU buffer, if assigned
 * @mbuf_priv:		MSU buffer's private data, if @mbuf
 * @work:		work for stopping the trace from non-interrupt context
 * @win_list:		list of windows in multiblock mode
 * @single_sgt:		single mode buffer
 * @cur_win:		current window
 * @nr_pages:		total number of pages allocated for this buffer
 * @single_sz:		amount of data in single mode
 * @single_wrap:	single mode wrap occurred
 * @base:		buffer's base pointer
 * @base_addr:		buffer's base address
 * @user_count:		number of users of the buffer
 * @mmap_count:		number of mappings
 * @buf_mutex:		mutex to serialize access to buffer-related bits
 * @iter_list:		list of open file descriptor iterators
 * @enabled:		MSC is enabled
 * @wrap:		wrapping is enabled
 * @do_irq:		use interrupt-driven window switching
 * @mode:		MSC operating mode
 * @burst_len:		write burst length
 * @index:		number of this MSC in the MSU
 */
struct msc {
	void __iomem		*reg_base;
	void __iomem		*msu_base;
	struct intel_th_device	*thdev;

	const struct msu_buffer	*mbuf;
	void			*mbuf_priv;

	struct work_struct	work;
	struct list_head	win_list;
	struct sg_table		single_sgt;
	struct msc_window	*cur_win;
	unsigned long		nr_pages;
	unsigned long		single_sz;
	unsigned int		single_wrap : 1;
	void			*base;
	dma_addr_t		base_addr;

	/* <0: no buffer, 0: no users, >0: active users */
	atomic_t		user_count;

	atomic_t		mmap_count;
	struct mutex		buf_mutex;

	struct list_head	iter_list;

	/* config */
	unsigned int		enabled : 1,
				wrap	: 1,
				do_irq	: 1;
	unsigned int		mode;
	unsigned int		burst_len;
	unsigned int		index;
};

static LIST_HEAD(msu_buffer_list);
static DEFINE_MUTEX(msu_buffer_mutex);

/**
 * struct msu_buffer_entry - internal MSU buffer bookkeeping
 * @entry:	link to msu_buffer_list
 * @mbuf:	MSU buffer object
 * @owner:	module that provides this MSU buffer
 */
struct msu_buffer_entry {
	struct list_head	entry;
	const struct msu_buffer	*mbuf;
	struct module		*owner;
};

static struct msu_buffer_entry *__msu_buffer_entry_find(const char *name)
{
	struct msu_buffer_entry *mbe;

	lockdep_assert_held(&msu_buffer_mutex);

	list_for_each_entry(mbe, &msu_buffer_list, entry) {
		if (!strcmp(mbe->mbuf->name, name))
			return mbe;
	}

	return NULL;
}

static const struct msu_buffer *
msu_buffer_get(const char *name)
{
	struct msu_buffer_entry *mbe;

	mutex_lock(&msu_buffer_mutex);
	mbe = __msu_buffer_entry_find(name);
	if (mbe && !try_module_get(mbe->owner))
		mbe = NULL;
	mutex_unlock(&msu_buffer_mutex);

	return mbe ?
mbe->mbuf : NULL; 205 } 206 207 static void msu_buffer_put(const struct msu_buffer *mbuf) 208 { 209 struct msu_buffer_entry *mbe; 210 211 mutex_lock(&msu_buffer_mutex); 212 mbe = __msu_buffer_entry_find(mbuf->name); 213 if (mbe) 214 module_put(mbe->owner); 215 mutex_unlock(&msu_buffer_mutex); 216 } 217 218 int intel_th_msu_buffer_register(const struct msu_buffer *mbuf, 219 struct module *owner) 220 { 221 struct msu_buffer_entry *mbe; 222 int ret = 0; 223 224 mbe = kzalloc(sizeof(*mbe), GFP_KERNEL); 225 if (!mbe) 226 return -ENOMEM; 227 228 mutex_lock(&msu_buffer_mutex); 229 if (__msu_buffer_entry_find(mbuf->name)) { 230 ret = -EEXIST; 231 kfree(mbe); 232 goto unlock; 233 } 234 235 mbe->mbuf = mbuf; 236 mbe->owner = owner; 237 list_add_tail(&mbe->entry, &msu_buffer_list); 238 unlock: 239 mutex_unlock(&msu_buffer_mutex); 240 241 return ret; 242 } 243 EXPORT_SYMBOL_GPL(intel_th_msu_buffer_register); 244 245 void intel_th_msu_buffer_unregister(const struct msu_buffer *mbuf) 246 { 247 struct msu_buffer_entry *mbe; 248 249 mutex_lock(&msu_buffer_mutex); 250 mbe = __msu_buffer_entry_find(mbuf->name); 251 if (mbe) { 252 list_del(&mbe->entry); 253 kfree(mbe); 254 } 255 mutex_unlock(&msu_buffer_mutex); 256 } 257 EXPORT_SYMBOL_GPL(intel_th_msu_buffer_unregister); 258 259 static inline bool msc_block_is_empty(struct msc_block_desc *bdesc) 260 { 261 /* header hasn't been written */ 262 if (!bdesc->valid_dw) 263 return true; 264 265 /* valid_dw includes the header */ 266 if (!msc_data_sz(bdesc)) 267 return true; 268 269 return false; 270 } 271 272 static inline struct scatterlist *msc_win_base_sg(struct msc_window *win) 273 { 274 return win->sgt->sgl; 275 } 276 277 static inline struct msc_block_desc *msc_win_base(struct msc_window *win) 278 { 279 return sg_virt(msc_win_base_sg(win)); 280 } 281 282 static inline dma_addr_t msc_win_base_dma(struct msc_window *win) 283 { 284 return sg_dma_address(msc_win_base_sg(win)); 285 } 286 287 static inline unsigned long 288 msc_win_base_pfn(struct msc_window *win) 289 { 290 return PFN_DOWN(msc_win_base_dma(win)); 291 } 292 293 /** 294 * msc_is_last_win() - check if a window is the last one for a given MSC 295 * @win: window 296 * Return: true if @win is the last window in MSC's multiblock buffer 297 */ 298 static inline bool msc_is_last_win(struct msc_window *win) 299 { 300 return win->entry.next == &win->msc->win_list; 301 } 302 303 /** 304 * msc_next_window() - return next window in the multiblock buffer 305 * @win: current window 306 * 307 * Return: window following the current one 308 */ 309 static struct msc_window *msc_next_window(struct msc_window *win) 310 { 311 if (msc_is_last_win(win)) 312 return list_first_entry(&win->msc->win_list, struct msc_window, 313 entry); 314 315 return list_next_entry(win, entry); 316 } 317 318 static size_t msc_win_total_sz(struct msc_window *win) 319 { 320 struct scatterlist *sg; 321 unsigned int blk; 322 size_t size = 0; 323 324 for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) { 325 struct msc_block_desc *bdesc = sg_virt(sg); 326 327 if (msc_block_wrapped(bdesc)) 328 return win->nr_blocks << PAGE_SHIFT; 329 330 size += msc_total_sz(bdesc); 331 if (msc_block_last_written(bdesc)) 332 break; 333 } 334 335 return size; 336 } 337 338 /** 339 * msc_find_window() - find a window matching a given sg_table 340 * @msc: MSC device 341 * @sgt: SG table of the window 342 * @nonempty: skip over empty windows 343 * 344 * Return: MSC window structure pointer or NULL if the window 345 * could not be found. 
346 */ 347 static struct msc_window * 348 msc_find_window(struct msc *msc, struct sg_table *sgt, bool nonempty) 349 { 350 struct msc_window *win; 351 unsigned int found = 0; 352 353 if (list_empty(&msc->win_list)) 354 return NULL; 355 356 /* 357 * we might need a radix tree for this, depending on how 358 * many windows a typical user would allocate; ideally it's 359 * something like 2, in which case we're good 360 */ 361 list_for_each_entry(win, &msc->win_list, entry) { 362 if (win->sgt == sgt) 363 found++; 364 365 /* skip the empty ones */ 366 if (nonempty && msc_block_is_empty(msc_win_base(win))) 367 continue; 368 369 if (found) 370 return win; 371 } 372 373 return NULL; 374 } 375 376 /** 377 * msc_oldest_window() - locate the window with oldest data 378 * @msc: MSC device 379 * 380 * This should only be used in multiblock mode. Caller should hold the 381 * msc::user_count reference. 382 * 383 * Return: the oldest window with valid data 384 */ 385 static struct msc_window *msc_oldest_window(struct msc *msc) 386 { 387 struct msc_window *win; 388 389 if (list_empty(&msc->win_list)) 390 return NULL; 391 392 win = msc_find_window(msc, msc_next_window(msc->cur_win)->sgt, true); 393 if (win) 394 return win; 395 396 return list_first_entry(&msc->win_list, struct msc_window, entry); 397 } 398 399 /** 400 * msc_win_oldest_sg() - locate the oldest block in a given window 401 * @win: window to look at 402 * 403 * Return: index of the block with the oldest data 404 */ 405 static struct scatterlist *msc_win_oldest_sg(struct msc_window *win) 406 { 407 unsigned int blk; 408 struct scatterlist *sg; 409 struct msc_block_desc *bdesc = msc_win_base(win); 410 411 /* without wrapping, first block is the oldest */ 412 if (!msc_block_wrapped(bdesc)) 413 return msc_win_base_sg(win); 414 415 /* 416 * with wrapping, last written block contains both the newest and the 417 * oldest data for this window. 418 */ 419 for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) { 420 struct msc_block_desc *bdesc = sg_virt(sg); 421 422 if (msc_block_last_written(bdesc)) 423 return sg; 424 } 425 426 return msc_win_base_sg(win); 427 } 428 429 static struct msc_block_desc *msc_iter_bdesc(struct msc_iter *iter) 430 { 431 return sg_virt(iter->block); 432 } 433 434 static struct msc_iter *msc_iter_install(struct msc *msc) 435 { 436 struct msc_iter *iter; 437 438 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 439 if (!iter) 440 return ERR_PTR(-ENOMEM); 441 442 mutex_lock(&msc->buf_mutex); 443 444 /* 445 * Reading and tracing are mutually exclusive; if msc is 446 * enabled, open() will fail; otherwise existing readers 447 * will prevent enabling the msc and the rest of fops don't 448 * need to worry about it. 
449 */ 450 if (msc->enabled) { 451 kfree(iter); 452 iter = ERR_PTR(-EBUSY); 453 goto unlock; 454 } 455 456 iter->msc = msc; 457 458 list_add_tail(&iter->entry, &msc->iter_list); 459 unlock: 460 mutex_unlock(&msc->buf_mutex); 461 462 return iter; 463 } 464 465 static void msc_iter_remove(struct msc_iter *iter, struct msc *msc) 466 { 467 mutex_lock(&msc->buf_mutex); 468 list_del(&iter->entry); 469 mutex_unlock(&msc->buf_mutex); 470 471 kfree(iter); 472 } 473 474 static void msc_iter_block_start(struct msc_iter *iter) 475 { 476 if (iter->start_block) 477 return; 478 479 iter->start_block = msc_win_oldest_sg(iter->win); 480 iter->block = iter->start_block; 481 iter->wrap_count = 0; 482 483 /* 484 * start with the block with oldest data; if data has wrapped 485 * in this window, it should be in this block 486 */ 487 if (msc_block_wrapped(msc_iter_bdesc(iter))) 488 iter->wrap_count = 2; 489 490 } 491 492 static int msc_iter_win_start(struct msc_iter *iter, struct msc *msc) 493 { 494 /* already started, nothing to do */ 495 if (iter->start_win) 496 return 0; 497 498 iter->start_win = msc_oldest_window(msc); 499 if (!iter->start_win) 500 return -EINVAL; 501 502 iter->win = iter->start_win; 503 iter->start_block = NULL; 504 505 msc_iter_block_start(iter); 506 507 return 0; 508 } 509 510 static int msc_iter_win_advance(struct msc_iter *iter) 511 { 512 iter->win = msc_next_window(iter->win); 513 iter->start_block = NULL; 514 515 if (iter->win == iter->start_win) { 516 iter->eof++; 517 return 1; 518 } 519 520 msc_iter_block_start(iter); 521 522 return 0; 523 } 524 525 static int msc_iter_block_advance(struct msc_iter *iter) 526 { 527 iter->block_off = 0; 528 529 /* wrapping */ 530 if (iter->wrap_count && iter->block == iter->start_block) { 531 iter->wrap_count--; 532 if (!iter->wrap_count) 533 /* copied newest data from the wrapped block */ 534 return msc_iter_win_advance(iter); 535 } 536 537 /* no wrapping, check for last written block */ 538 if (!iter->wrap_count && msc_block_last_written(msc_iter_bdesc(iter))) 539 /* copied newest data for the window */ 540 return msc_iter_win_advance(iter); 541 542 /* block advance */ 543 if (sg_is_last(iter->block)) 544 iter->block = msc_win_base_sg(iter->win); 545 else 546 iter->block = sg_next(iter->block); 547 548 /* no wrapping, sanity check in case there is no last written block */ 549 if (!iter->wrap_count && iter->block == iter->start_block) 550 return msc_iter_win_advance(iter); 551 552 return 0; 553 } 554 555 /** 556 * msc_buffer_iterate() - go through multiblock buffer's data 557 * @iter: iterator structure 558 * @size: amount of data to scan 559 * @data: callback's private data 560 * @fn: iterator callback 561 * 562 * This will start at the window which will be written to next (containing 563 * the oldest data) and work its way to the current window, calling @fn 564 * for each chunk of data as it goes. 565 * 566 * Caller should have msc::user_count reference to make sure the buffer 567 * doesn't disappear from under us. 568 * 569 * Return: amount of data actually scanned. 
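 *
 * The callback @fn follows copy_to_user() semantics: it returns the number
 * of bytes it could not consume, and a non-zero return value stops the
 * iteration at the current offset (see msc_win_to_user()).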
570 */ 571 static ssize_t 572 msc_buffer_iterate(struct msc_iter *iter, size_t size, void *data, 573 unsigned long (*fn)(void *, void *, size_t)) 574 { 575 struct msc *msc = iter->msc; 576 size_t len = size; 577 unsigned int advance; 578 579 if (iter->eof) 580 return 0; 581 582 /* start with the oldest window */ 583 if (msc_iter_win_start(iter, msc)) 584 return 0; 585 586 do { 587 unsigned long data_bytes = msc_data_sz(msc_iter_bdesc(iter)); 588 void *src = (void *)msc_iter_bdesc(iter) + MSC_BDESC; 589 size_t tocopy = data_bytes, copied = 0; 590 size_t remaining = 0; 591 592 advance = 1; 593 594 /* 595 * If block wrapping happened, we need to visit the last block 596 * twice, because it contains both the oldest and the newest 597 * data in this window. 598 * 599 * First time (wrap_count==2), in the very beginning, to collect 600 * the oldest data, which is in the range 601 * (data_bytes..DATA_IN_PAGE). 602 * 603 * Second time (wrap_count==1), it's just like any other block, 604 * containing data in the range of [MSC_BDESC..data_bytes]. 605 */ 606 if (iter->block == iter->start_block && iter->wrap_count == 2) { 607 tocopy = DATA_IN_PAGE - data_bytes; 608 src += data_bytes; 609 } 610 611 if (!tocopy) 612 goto next_block; 613 614 tocopy -= iter->block_off; 615 src += iter->block_off; 616 617 if (len < tocopy) { 618 tocopy = len; 619 advance = 0; 620 } 621 622 remaining = fn(data, src, tocopy); 623 624 if (remaining) 625 advance = 0; 626 627 copied = tocopy - remaining; 628 len -= copied; 629 iter->block_off += copied; 630 iter->offset += copied; 631 632 if (!advance) 633 break; 634 635 next_block: 636 if (msc_iter_block_advance(iter)) 637 break; 638 639 } while (len); 640 641 return size - len; 642 } 643 644 /** 645 * msc_buffer_clear_hw_header() - clear hw header for multiblock 646 * @msc: MSC device 647 */ 648 static void msc_buffer_clear_hw_header(struct msc *msc) 649 { 650 struct msc_window *win; 651 struct scatterlist *sg; 652 653 list_for_each_entry(win, &msc->win_list, entry) { 654 unsigned int blk; 655 size_t hw_sz = sizeof(struct msc_block_desc) - 656 offsetof(struct msc_block_desc, hw_tag); 657 658 for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) { 659 struct msc_block_desc *bdesc = sg_virt(sg); 660 661 memset(&bdesc->hw_tag, 0, hw_sz); 662 } 663 } 664 } 665 666 static int intel_th_msu_init(struct msc *msc) 667 { 668 u32 mintctl, msusts; 669 670 if (!msc->do_irq) 671 return 0; 672 673 if (!msc->mbuf) 674 return 0; 675 676 mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL); 677 mintctl |= msc->index ? M1BLIE : M0BLIE; 678 iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL); 679 if (mintctl != ioread32(msc->msu_base + REG_MSU_MINTCTL)) { 680 dev_info(msc_dev(msc), "MINTCTL ignores writes: no usable interrupts\n"); 681 msc->do_irq = 0; 682 return 0; 683 } 684 685 msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS); 686 iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS); 687 688 return 0; 689 } 690 691 static void intel_th_msu_deinit(struct msc *msc) 692 { 693 u32 mintctl; 694 695 if (!msc->do_irq) 696 return; 697 698 mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL); 699 mintctl &= msc->index ? 
~M1BLIE : ~M0BLIE; 700 iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL); 701 } 702 703 static int msc_win_set_lockout(struct msc_window *win, 704 enum lockout_state expect, 705 enum lockout_state new) 706 { 707 enum lockout_state old; 708 unsigned long flags; 709 int ret = 0; 710 711 if (!win->msc->mbuf) 712 return 0; 713 714 spin_lock_irqsave(&win->lo_lock, flags); 715 old = win->lockout; 716 717 if (old != expect) { 718 ret = -EINVAL; 719 dev_warn_ratelimited(msc_dev(win->msc), 720 "expected lockout state %d, got %d\n", 721 expect, old); 722 goto unlock; 723 } 724 725 win->lockout = new; 726 727 if (old == expect && new == WIN_LOCKED) 728 atomic_inc(&win->msc->user_count); 729 else if (old == expect && old == WIN_LOCKED) 730 atomic_dec(&win->msc->user_count); 731 732 unlock: 733 spin_unlock_irqrestore(&win->lo_lock, flags); 734 735 if (ret) { 736 if (expect == WIN_READY && old == WIN_LOCKED) 737 return -EBUSY; 738 739 /* from intel_th_msc_window_unlock(), don't warn if not locked */ 740 if (expect == WIN_LOCKED && old == new) 741 return 0; 742 } 743 744 return ret; 745 } 746 /** 747 * msc_configure() - set up MSC hardware 748 * @msc: the MSC device to configure 749 * 750 * Program storage mode, wrapping, burst length and trace buffer address 751 * into a given MSC. Then, enable tracing and set msc::enabled. 752 * The latter is serialized on msc::buf_mutex, so make sure to hold it. 753 */ 754 static int msc_configure(struct msc *msc) 755 { 756 u32 reg; 757 758 lockdep_assert_held(&msc->buf_mutex); 759 760 if (msc->mode > MSC_MODE_MULTI) 761 return -ENOTSUPP; 762 763 if (msc->mode == MSC_MODE_MULTI) { 764 if (msc_win_set_lockout(msc->cur_win, WIN_READY, WIN_INUSE)) 765 return -EBUSY; 766 767 msc_buffer_clear_hw_header(msc); 768 } 769 770 reg = msc->base_addr >> PAGE_SHIFT; 771 iowrite32(reg, msc->reg_base + REG_MSU_MSC0BAR); 772 773 if (msc->mode == MSC_MODE_SINGLE) { 774 reg = msc->nr_pages; 775 iowrite32(reg, msc->reg_base + REG_MSU_MSC0SIZE); 776 } 777 778 reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL); 779 reg &= ~(MSC_MODE | MSC_WRAPEN | MSC_EN | MSC_RD_HDR_OVRD); 780 781 reg |= MSC_EN; 782 reg |= msc->mode << __ffs(MSC_MODE); 783 reg |= msc->burst_len << __ffs(MSC_LEN); 784 785 if (msc->wrap) 786 reg |= MSC_WRAPEN; 787 788 iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL); 789 790 intel_th_msu_init(msc); 791 792 msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI; 793 intel_th_trace_enable(msc->thdev); 794 msc->enabled = 1; 795 796 if (msc->mbuf && msc->mbuf->activate) 797 msc->mbuf->activate(msc->mbuf_priv); 798 799 return 0; 800 } 801 802 /** 803 * msc_disable() - disable MSC hardware 804 * @msc: MSC device to disable 805 * 806 * If @msc is enabled, disable tracing on the switch and then disable MSC 807 * storage. Caller must hold msc::buf_mutex. 
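 *
 * In single mode this also latches the amount of captured data
 * (msc::single_sz) and the wrap status (msc::single_wrap) from the MSCnMWP
 * and MSCnSTS registers, which msc_single_to_user() later uses to put the
 * data back in order.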
808 */ 809 static void msc_disable(struct msc *msc) 810 { 811 struct msc_window *win = msc->cur_win; 812 u32 reg; 813 814 lockdep_assert_held(&msc->buf_mutex); 815 816 if (msc->mode == MSC_MODE_MULTI) 817 msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED); 818 819 if (msc->mbuf && msc->mbuf->deactivate) 820 msc->mbuf->deactivate(msc->mbuf_priv); 821 intel_th_msu_deinit(msc); 822 intel_th_trace_disable(msc->thdev); 823 824 if (msc->mode == MSC_MODE_SINGLE) { 825 reg = ioread32(msc->reg_base + REG_MSU_MSC0STS); 826 msc->single_wrap = !!(reg & MSCSTS_WRAPSTAT); 827 828 reg = ioread32(msc->reg_base + REG_MSU_MSC0MWP); 829 msc->single_sz = reg & ((msc->nr_pages << PAGE_SHIFT) - 1); 830 dev_dbg(msc_dev(msc), "MSCnMWP: %08x/%08lx, wrap: %d\n", 831 reg, msc->single_sz, msc->single_wrap); 832 } 833 834 reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL); 835 reg &= ~MSC_EN; 836 iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL); 837 838 if (msc->mbuf && msc->mbuf->ready) 839 msc->mbuf->ready(msc->mbuf_priv, win->sgt, 840 msc_win_total_sz(win)); 841 842 msc->enabled = 0; 843 844 iowrite32(0, msc->reg_base + REG_MSU_MSC0BAR); 845 iowrite32(0, msc->reg_base + REG_MSU_MSC0SIZE); 846 847 dev_dbg(msc_dev(msc), "MSCnNWSA: %08x\n", 848 ioread32(msc->reg_base + REG_MSU_MSC0NWSA)); 849 850 reg = ioread32(msc->reg_base + REG_MSU_MSC0STS); 851 dev_dbg(msc_dev(msc), "MSCnSTS: %08x\n", reg); 852 853 reg = ioread32(msc->reg_base + REG_MSU_MSUSTS); 854 reg &= msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST; 855 iowrite32(reg, msc->reg_base + REG_MSU_MSUSTS); 856 } 857 858 static int intel_th_msc_activate(struct intel_th_device *thdev) 859 { 860 struct msc *msc = dev_get_drvdata(&thdev->dev); 861 int ret = -EBUSY; 862 863 if (!atomic_inc_unless_negative(&msc->user_count)) 864 return -ENODEV; 865 866 mutex_lock(&msc->buf_mutex); 867 868 /* if there are readers, refuse */ 869 if (list_empty(&msc->iter_list)) 870 ret = msc_configure(msc); 871 872 mutex_unlock(&msc->buf_mutex); 873 874 if (ret) 875 atomic_dec(&msc->user_count); 876 877 return ret; 878 } 879 880 static void intel_th_msc_deactivate(struct intel_th_device *thdev) 881 { 882 struct msc *msc = dev_get_drvdata(&thdev->dev); 883 884 mutex_lock(&msc->buf_mutex); 885 if (msc->enabled) { 886 msc_disable(msc); 887 atomic_dec(&msc->user_count); 888 } 889 mutex_unlock(&msc->buf_mutex); 890 } 891 892 /** 893 * msc_buffer_contig_alloc() - allocate a contiguous buffer for SINGLE mode 894 * @msc: MSC device 895 * @size: allocation size in bytes 896 * 897 * This modifies msc::base, which requires msc::buf_mutex to serialize, so the 898 * caller is expected to hold it. 899 * 900 * Return: 0 on success, -errno otherwise. 
901 */ 902 static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size) 903 { 904 unsigned long nr_pages = size >> PAGE_SHIFT; 905 unsigned int order = get_order(size); 906 struct page *page; 907 int ret; 908 909 if (!size) 910 return 0; 911 912 ret = sg_alloc_table(&msc->single_sgt, 1, GFP_KERNEL); 913 if (ret) 914 goto err_out; 915 916 ret = -ENOMEM; 917 page = alloc_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32, order); 918 if (!page) 919 goto err_free_sgt; 920 921 split_page(page, order); 922 sg_set_buf(msc->single_sgt.sgl, page_address(page), size); 923 924 ret = dma_map_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl, 1, 925 DMA_FROM_DEVICE); 926 if (ret < 0) 927 goto err_free_pages; 928 929 msc->nr_pages = nr_pages; 930 msc->base = page_address(page); 931 msc->base_addr = sg_dma_address(msc->single_sgt.sgl); 932 933 return 0; 934 935 err_free_pages: 936 __free_pages(page, order); 937 938 err_free_sgt: 939 sg_free_table(&msc->single_sgt); 940 941 err_out: 942 return ret; 943 } 944 945 /** 946 * msc_buffer_contig_free() - free a contiguous buffer 947 * @msc: MSC configured in SINGLE mode 948 */ 949 static void msc_buffer_contig_free(struct msc *msc) 950 { 951 unsigned long off; 952 953 dma_unmap_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl, 954 1, DMA_FROM_DEVICE); 955 sg_free_table(&msc->single_sgt); 956 957 for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) { 958 struct page *page = virt_to_page(msc->base + off); 959 960 page->mapping = NULL; 961 __free_page(page); 962 } 963 964 msc->nr_pages = 0; 965 } 966 967 /** 968 * msc_buffer_contig_get_page() - find a page at a given offset 969 * @msc: MSC configured in SINGLE mode 970 * @pgoff: page offset 971 * 972 * Return: page, if @pgoff is within the range, NULL otherwise. 
973 */ 974 static struct page *msc_buffer_contig_get_page(struct msc *msc, 975 unsigned long pgoff) 976 { 977 if (pgoff >= msc->nr_pages) 978 return NULL; 979 980 return virt_to_page(msc->base + (pgoff << PAGE_SHIFT)); 981 } 982 983 static int __msc_buffer_win_alloc(struct msc_window *win, 984 unsigned int nr_segs) 985 { 986 struct scatterlist *sg_ptr; 987 void *block; 988 int i, ret; 989 990 ret = sg_alloc_table(win->sgt, nr_segs, GFP_KERNEL); 991 if (ret) 992 return -ENOMEM; 993 994 for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) { 995 block = dma_alloc_coherent(msc_dev(win->msc)->parent->parent, 996 PAGE_SIZE, &sg_dma_address(sg_ptr), 997 GFP_KERNEL); 998 if (!block) 999 goto err_nomem; 1000 1001 sg_set_buf(sg_ptr, block, PAGE_SIZE); 1002 } 1003 1004 return nr_segs; 1005 1006 err_nomem: 1007 for_each_sg(win->sgt->sgl, sg_ptr, i, ret) 1008 dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE, 1009 sg_virt(sg_ptr), sg_dma_address(sg_ptr)); 1010 1011 sg_free_table(win->sgt); 1012 1013 return -ENOMEM; 1014 } 1015 1016 #ifdef CONFIG_X86 1017 static void msc_buffer_set_uc(struct msc_window *win, unsigned int nr_segs) 1018 { 1019 struct scatterlist *sg_ptr; 1020 int i; 1021 1022 for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) { 1023 /* Set the page as uncached */ 1024 set_memory_uc((unsigned long)sg_virt(sg_ptr), 1025 PFN_DOWN(sg_ptr->length)); 1026 } 1027 } 1028 1029 static void msc_buffer_set_wb(struct msc_window *win) 1030 { 1031 struct scatterlist *sg_ptr; 1032 int i; 1033 1034 for_each_sg(win->sgt->sgl, sg_ptr, win->nr_segs, i) { 1035 /* Reset the page to write-back */ 1036 set_memory_wb((unsigned long)sg_virt(sg_ptr), 1037 PFN_DOWN(sg_ptr->length)); 1038 } 1039 } 1040 #else /* !X86 */ 1041 static inline void 1042 msc_buffer_set_uc(struct msc_window *win, unsigned int nr_segs) {} 1043 static inline void msc_buffer_set_wb(struct msc_window *win) {} 1044 #endif /* CONFIG_X86 */ 1045 1046 /** 1047 * msc_buffer_win_alloc() - alloc a window for a multiblock mode 1048 * @msc: MSC device 1049 * @nr_blocks: number of pages in this window 1050 * 1051 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex 1052 * to serialize, so the caller is expected to hold it. 1053 * 1054 * Return: 0 on success, -errno otherwise. 
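 *
 * If an external MSU buffer (msc::mbuf) is assigned, window memory comes
 * from its ->alloc_window() callback, which returns the number of SG
 * segments it set up; otherwise __msc_buffer_win_alloc() backs the window
 * with one DMA-coherent PAGE_SIZE block per segment.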
1055 */ 1056 static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks) 1057 { 1058 struct msc_window *win; 1059 int ret = -ENOMEM; 1060 1061 if (!nr_blocks) 1062 return 0; 1063 1064 win = kzalloc(sizeof(*win), GFP_KERNEL); 1065 if (!win) 1066 return -ENOMEM; 1067 1068 win->msc = msc; 1069 win->sgt = &win->_sgt; 1070 win->lockout = WIN_READY; 1071 spin_lock_init(&win->lo_lock); 1072 1073 if (!list_empty(&msc->win_list)) { 1074 struct msc_window *prev = list_last_entry(&msc->win_list, 1075 struct msc_window, 1076 entry); 1077 1078 win->pgoff = prev->pgoff + prev->nr_blocks; 1079 } 1080 1081 if (msc->mbuf && msc->mbuf->alloc_window) 1082 ret = msc->mbuf->alloc_window(msc->mbuf_priv, &win->sgt, 1083 nr_blocks << PAGE_SHIFT); 1084 else 1085 ret = __msc_buffer_win_alloc(win, nr_blocks); 1086 1087 if (ret <= 0) 1088 goto err_nomem; 1089 1090 msc_buffer_set_uc(win, ret); 1091 1092 win->nr_segs = ret; 1093 win->nr_blocks = nr_blocks; 1094 1095 if (list_empty(&msc->win_list)) { 1096 msc->base = msc_win_base(win); 1097 msc->base_addr = msc_win_base_dma(win); 1098 msc->cur_win = win; 1099 } 1100 1101 list_add_tail(&win->entry, &msc->win_list); 1102 msc->nr_pages += nr_blocks; 1103 1104 return 0; 1105 1106 err_nomem: 1107 kfree(win); 1108 1109 return ret; 1110 } 1111 1112 static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win) 1113 { 1114 struct scatterlist *sg; 1115 int i; 1116 1117 for_each_sg(win->sgt->sgl, sg, win->nr_segs, i) { 1118 struct page *page = sg_page(sg); 1119 1120 page->mapping = NULL; 1121 dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE, 1122 sg_virt(sg), sg_dma_address(sg)); 1123 } 1124 sg_free_table(win->sgt); 1125 } 1126 1127 /** 1128 * msc_buffer_win_free() - free a window from MSC's window list 1129 * @msc: MSC device 1130 * @win: window to free 1131 * 1132 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex 1133 * to serialize, so the caller is expected to hold it. 1134 */ 1135 static void msc_buffer_win_free(struct msc *msc, struct msc_window *win) 1136 { 1137 msc->nr_pages -= win->nr_blocks; 1138 1139 list_del(&win->entry); 1140 if (list_empty(&msc->win_list)) { 1141 msc->base = NULL; 1142 msc->base_addr = 0; 1143 } 1144 1145 msc_buffer_set_wb(win); 1146 1147 if (msc->mbuf && msc->mbuf->free_window) 1148 msc->mbuf->free_window(msc->mbuf_priv, win->sgt); 1149 else 1150 __msc_buffer_win_free(msc, win); 1151 1152 kfree(win); 1153 } 1154 1155 /** 1156 * msc_buffer_relink() - set up block descriptors for multiblock mode 1157 * @msc: MSC device 1158 * 1159 * This traverses msc::win_list, which requires msc::buf_mutex to serialize, 1160 * so the caller is expected to hold it. 1161 */ 1162 static void msc_buffer_relink(struct msc *msc) 1163 { 1164 struct msc_window *win, *next_win; 1165 1166 /* call with msc::mutex locked */ 1167 list_for_each_entry(win, &msc->win_list, entry) { 1168 struct scatterlist *sg; 1169 unsigned int blk; 1170 u32 sw_tag = 0; 1171 1172 /* 1173 * Last window's next_win should point to the first window 1174 * and MSC_SW_TAG_LASTWIN should be set. 
		 */
		if (msc_is_last_win(win)) {
			sw_tag |= MSC_SW_TAG_LASTWIN;
			next_win = list_first_entry(&msc->win_list,
						    struct msc_window, entry);
		} else {
			next_win = list_next_entry(win, entry);
		}

		for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
			struct msc_block_desc *bdesc = sg_virt(sg);

			memset(bdesc, 0, sizeof(*bdesc));

			bdesc->next_win = msc_win_base_pfn(next_win);

			/*
			 * Similarly to last window, last block should point
			 * to the first one.
			 */
			if (blk == win->nr_segs - 1) {
				sw_tag |= MSC_SW_TAG_LASTBLK;
				bdesc->next_blk = msc_win_base_pfn(win);
			} else {
				dma_addr_t addr = sg_dma_address(sg_next(sg));

				bdesc->next_blk = PFN_DOWN(addr);
			}

			bdesc->sw_tag = sw_tag;
			bdesc->block_sz = sg->length / 64;
		}
	}

	/*
	 * Make the above writes globally visible before tracing is
	 * enabled to make sure hardware sees them coherently.
	 */
	wmb();
}

static void msc_buffer_multi_free(struct msc *msc)
{
	struct msc_window *win, *iter;

	list_for_each_entry_safe(win, iter, &msc->win_list, entry)
		msc_buffer_win_free(msc, win);
}

static int msc_buffer_multi_alloc(struct msc *msc, unsigned long *nr_pages,
				  unsigned int nr_wins)
{
	int ret, i;

	for (i = 0; i < nr_wins; i++) {
		ret = msc_buffer_win_alloc(msc, nr_pages[i]);
		if (ret) {
			msc_buffer_multi_free(msc);
			return ret;
		}
	}

	msc_buffer_relink(msc);

	return 0;
}

/**
 * msc_buffer_free() - free buffers for MSC
 * @msc: MSC device
 *
 * Free MSC's storage buffers.
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex to
 * serialize, so the caller is expected to hold it.
 */
static void msc_buffer_free(struct msc *msc)
{
	if (msc->mode == MSC_MODE_SINGLE)
		msc_buffer_contig_free(msc);
	else if (msc->mode == MSC_MODE_MULTI)
		msc_buffer_multi_free(msc);
}

/**
 * msc_buffer_alloc() - allocate a buffer for MSC
 * @msc:	MSC device
 * @nr_pages:	array of window sizes, in pages
 * @nr_wins:	number of windows, i.e. the number of elements in @nr_pages
 *
 * Allocate a storage buffer for MSC. Depending on msc::mode, this is done
 * either via msc_buffer_contig_alloc() for SINGLE operation mode or via
 * msc_buffer_multi_alloc() for multiblock operation, which allocates one
 * window per element of @nr_pages, so a single call sets up all of the
 * MSC's windows.
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 *
 * Return: 0 on success, -errno otherwise.
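 *
 * For example, with msc::mode == MSC_MODE_MULTI, nr_pages = { 32, 32 } and
 * nr_wins == 2 set up two windows of 32 pages (128kB with 4kB pages) each.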
1274 */ 1275 static int msc_buffer_alloc(struct msc *msc, unsigned long *nr_pages, 1276 unsigned int nr_wins) 1277 { 1278 int ret; 1279 1280 /* -1: buffer not allocated */ 1281 if (atomic_read(&msc->user_count) != -1) 1282 return -EBUSY; 1283 1284 if (msc->mode == MSC_MODE_SINGLE) { 1285 if (nr_wins != 1) 1286 return -EINVAL; 1287 1288 ret = msc_buffer_contig_alloc(msc, nr_pages[0] << PAGE_SHIFT); 1289 } else if (msc->mode == MSC_MODE_MULTI) { 1290 ret = msc_buffer_multi_alloc(msc, nr_pages, nr_wins); 1291 } else { 1292 ret = -ENOTSUPP; 1293 } 1294 1295 if (!ret) { 1296 /* allocation should be visible before the counter goes to 0 */ 1297 smp_mb__before_atomic(); 1298 1299 if (WARN_ON_ONCE(atomic_cmpxchg(&msc->user_count, -1, 0) != -1)) 1300 return -EINVAL; 1301 } 1302 1303 return ret; 1304 } 1305 1306 /** 1307 * msc_buffer_unlocked_free_unless_used() - free a buffer unless it's in use 1308 * @msc: MSC device 1309 * 1310 * This will free MSC buffer unless it is in use or there is no allocated 1311 * buffer. 1312 * Caller needs to hold msc::buf_mutex. 1313 * 1314 * Return: 0 on successful deallocation or if there was no buffer to 1315 * deallocate, -EBUSY if there are active users. 1316 */ 1317 static int msc_buffer_unlocked_free_unless_used(struct msc *msc) 1318 { 1319 int count, ret = 0; 1320 1321 count = atomic_cmpxchg(&msc->user_count, 0, -1); 1322 1323 /* > 0: buffer is allocated and has users */ 1324 if (count > 0) 1325 ret = -EBUSY; 1326 /* 0: buffer is allocated, no users */ 1327 else if (!count) 1328 msc_buffer_free(msc); 1329 /* < 0: no buffer, nothing to do */ 1330 1331 return ret; 1332 } 1333 1334 /** 1335 * msc_buffer_free_unless_used() - free a buffer unless it's in use 1336 * @msc: MSC device 1337 * 1338 * This is a locked version of msc_buffer_unlocked_free_unless_used(). 1339 */ 1340 static int msc_buffer_free_unless_used(struct msc *msc) 1341 { 1342 int ret; 1343 1344 mutex_lock(&msc->buf_mutex); 1345 ret = msc_buffer_unlocked_free_unless_used(msc); 1346 mutex_unlock(&msc->buf_mutex); 1347 1348 return ret; 1349 } 1350 1351 /** 1352 * msc_buffer_get_page() - get MSC buffer page at a given offset 1353 * @msc: MSC device 1354 * @pgoff: page offset into the storage buffer 1355 * 1356 * This traverses msc::win_list, so holding msc::buf_mutex is expected from 1357 * the caller. 1358 * 1359 * Return: page if @pgoff corresponds to a valid buffer page or NULL. 
1360 */ 1361 static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff) 1362 { 1363 struct msc_window *win; 1364 struct scatterlist *sg; 1365 unsigned int blk; 1366 1367 if (msc->mode == MSC_MODE_SINGLE) 1368 return msc_buffer_contig_get_page(msc, pgoff); 1369 1370 list_for_each_entry(win, &msc->win_list, entry) 1371 if (pgoff >= win->pgoff && pgoff < win->pgoff + win->nr_blocks) 1372 goto found; 1373 1374 return NULL; 1375 1376 found: 1377 pgoff -= win->pgoff; 1378 1379 for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) { 1380 struct page *page = sg_page(sg); 1381 size_t pgsz = PFN_DOWN(sg->length); 1382 1383 if (pgoff < pgsz) 1384 return page + pgoff; 1385 1386 pgoff -= pgsz; 1387 } 1388 1389 return NULL; 1390 } 1391 1392 /** 1393 * struct msc_win_to_user_struct - data for copy_to_user() callback 1394 * @buf: userspace buffer to copy data to 1395 * @offset: running offset 1396 */ 1397 struct msc_win_to_user_struct { 1398 char __user *buf; 1399 unsigned long offset; 1400 }; 1401 1402 /** 1403 * msc_win_to_user() - iterator for msc_buffer_iterate() to copy data to user 1404 * @data: callback's private data 1405 * @src: source buffer 1406 * @len: amount of data to copy from the source buffer 1407 */ 1408 static unsigned long msc_win_to_user(void *data, void *src, size_t len) 1409 { 1410 struct msc_win_to_user_struct *u = data; 1411 unsigned long ret; 1412 1413 ret = copy_to_user(u->buf + u->offset, src, len); 1414 u->offset += len - ret; 1415 1416 return ret; 1417 } 1418 1419 1420 /* 1421 * file operations' callbacks 1422 */ 1423 1424 static int intel_th_msc_open(struct inode *inode, struct file *file) 1425 { 1426 struct intel_th_device *thdev = file->private_data; 1427 struct msc *msc = dev_get_drvdata(&thdev->dev); 1428 struct msc_iter *iter; 1429 1430 if (!capable(CAP_SYS_RAWIO)) 1431 return -EPERM; 1432 1433 iter = msc_iter_install(msc); 1434 if (IS_ERR(iter)) 1435 return PTR_ERR(iter); 1436 1437 file->private_data = iter; 1438 1439 return nonseekable_open(inode, file); 1440 } 1441 1442 static int intel_th_msc_release(struct inode *inode, struct file *file) 1443 { 1444 struct msc_iter *iter = file->private_data; 1445 struct msc *msc = iter->msc; 1446 1447 msc_iter_remove(iter, msc); 1448 1449 return 0; 1450 } 1451 1452 static ssize_t 1453 msc_single_to_user(struct msc *msc, char __user *buf, loff_t off, size_t len) 1454 { 1455 unsigned long size = msc->nr_pages << PAGE_SHIFT, rem = len; 1456 unsigned long start = off, tocopy = 0; 1457 1458 if (msc->single_wrap) { 1459 start += msc->single_sz; 1460 if (start < size) { 1461 tocopy = min(rem, size - start); 1462 if (copy_to_user(buf, msc->base + start, tocopy)) 1463 return -EFAULT; 1464 1465 buf += tocopy; 1466 rem -= tocopy; 1467 start += tocopy; 1468 } 1469 1470 start &= size - 1; 1471 if (rem) { 1472 tocopy = min(rem, msc->single_sz - start); 1473 if (copy_to_user(buf, msc->base + start, tocopy)) 1474 return -EFAULT; 1475 1476 rem -= tocopy; 1477 } 1478 1479 return len - rem; 1480 } 1481 1482 if (copy_to_user(buf, msc->base + start, rem)) 1483 return -EFAULT; 1484 1485 return len; 1486 } 1487 1488 static ssize_t intel_th_msc_read(struct file *file, char __user *buf, 1489 size_t len, loff_t *ppos) 1490 { 1491 struct msc_iter *iter = file->private_data; 1492 struct msc *msc = iter->msc; 1493 size_t size; 1494 loff_t off = *ppos; 1495 ssize_t ret = 0; 1496 1497 if (!atomic_inc_unless_negative(&msc->user_count)) 1498 return 0; 1499 1500 if (msc->mode == MSC_MODE_SINGLE && !msc->single_wrap) 1501 size = msc->single_sz; 1502 
else 1503 size = msc->nr_pages << PAGE_SHIFT; 1504 1505 if (!size) 1506 goto put_count; 1507 1508 if (off >= size) 1509 goto put_count; 1510 1511 if (off + len >= size) 1512 len = size - off; 1513 1514 if (msc->mode == MSC_MODE_SINGLE) { 1515 ret = msc_single_to_user(msc, buf, off, len); 1516 if (ret >= 0) 1517 *ppos += ret; 1518 } else if (msc->mode == MSC_MODE_MULTI) { 1519 struct msc_win_to_user_struct u = { 1520 .buf = buf, 1521 .offset = 0, 1522 }; 1523 1524 ret = msc_buffer_iterate(iter, len, &u, msc_win_to_user); 1525 if (ret >= 0) 1526 *ppos = iter->offset; 1527 } else { 1528 ret = -ENOTSUPP; 1529 } 1530 1531 put_count: 1532 atomic_dec(&msc->user_count); 1533 1534 return ret; 1535 } 1536 1537 /* 1538 * vm operations callbacks (vm_ops) 1539 */ 1540 1541 static void msc_mmap_open(struct vm_area_struct *vma) 1542 { 1543 struct msc_iter *iter = vma->vm_file->private_data; 1544 struct msc *msc = iter->msc; 1545 1546 atomic_inc(&msc->mmap_count); 1547 } 1548 1549 static void msc_mmap_close(struct vm_area_struct *vma) 1550 { 1551 struct msc_iter *iter = vma->vm_file->private_data; 1552 struct msc *msc = iter->msc; 1553 unsigned long pg; 1554 1555 if (!atomic_dec_and_mutex_lock(&msc->mmap_count, &msc->buf_mutex)) 1556 return; 1557 1558 /* drop page _refcounts */ 1559 for (pg = 0; pg < msc->nr_pages; pg++) { 1560 struct page *page = msc_buffer_get_page(msc, pg); 1561 1562 if (WARN_ON_ONCE(!page)) 1563 continue; 1564 1565 if (page->mapping) 1566 page->mapping = NULL; 1567 } 1568 1569 /* last mapping -- drop user_count */ 1570 atomic_dec(&msc->user_count); 1571 mutex_unlock(&msc->buf_mutex); 1572 } 1573 1574 static vm_fault_t msc_mmap_fault(struct vm_fault *vmf) 1575 { 1576 struct msc_iter *iter = vmf->vma->vm_file->private_data; 1577 struct msc *msc = iter->msc; 1578 1579 vmf->page = msc_buffer_get_page(msc, vmf->pgoff); 1580 if (!vmf->page) 1581 return VM_FAULT_SIGBUS; 1582 1583 get_page(vmf->page); 1584 vmf->page->mapping = vmf->vma->vm_file->f_mapping; 1585 vmf->page->index = vmf->pgoff; 1586 1587 return 0; 1588 } 1589 1590 static const struct vm_operations_struct msc_mmap_ops = { 1591 .open = msc_mmap_open, 1592 .close = msc_mmap_close, 1593 .fault = msc_mmap_fault, 1594 }; 1595 1596 static int intel_th_msc_mmap(struct file *file, struct vm_area_struct *vma) 1597 { 1598 unsigned long size = vma->vm_end - vma->vm_start; 1599 struct msc_iter *iter = vma->vm_file->private_data; 1600 struct msc *msc = iter->msc; 1601 int ret = -EINVAL; 1602 1603 if (!size || offset_in_page(size)) 1604 return -EINVAL; 1605 1606 if (vma->vm_pgoff) 1607 return -EINVAL; 1608 1609 /* grab user_count once per mmap; drop in msc_mmap_close() */ 1610 if (!atomic_inc_unless_negative(&msc->user_count)) 1611 return -EINVAL; 1612 1613 if (msc->mode != MSC_MODE_SINGLE && 1614 msc->mode != MSC_MODE_MULTI) 1615 goto out; 1616 1617 if (size >> PAGE_SHIFT != msc->nr_pages) 1618 goto out; 1619 1620 atomic_set(&msc->mmap_count, 1); 1621 ret = 0; 1622 1623 out: 1624 if (ret) 1625 atomic_dec(&msc->user_count); 1626 1627 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 1628 vma->vm_flags |= VM_DONTEXPAND | VM_DONTCOPY; 1629 vma->vm_ops = &msc_mmap_ops; 1630 return ret; 1631 } 1632 1633 static const struct file_operations intel_th_msc_fops = { 1634 .open = intel_th_msc_open, 1635 .release = intel_th_msc_release, 1636 .read = intel_th_msc_read, 1637 .mmap = intel_th_msc_mmap, 1638 .llseek = no_llseek, 1639 .owner = THIS_MODULE, 1640 }; 1641 1642 static void intel_th_msc_wait_empty(struct intel_th_device *thdev) 1643 { 1644 
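	/*
	 * Poll MSCnSTS for the pipeline empty (PLE) bit, giving up after
	 * MSC_PLE_WAITLOOP_DEPTH iterations.
	 */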
struct msc *msc = dev_get_drvdata(&thdev->dev); 1645 unsigned long count; 1646 u32 reg; 1647 1648 for (reg = 0, count = MSC_PLE_WAITLOOP_DEPTH; 1649 count && !(reg & MSCSTS_PLE); count--) { 1650 reg = __raw_readl(msc->reg_base + REG_MSU_MSC0STS); 1651 cpu_relax(); 1652 } 1653 1654 if (!count) 1655 dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n"); 1656 } 1657 1658 static int intel_th_msc_init(struct msc *msc) 1659 { 1660 atomic_set(&msc->user_count, -1); 1661 1662 msc->mode = MSC_MODE_MULTI; 1663 mutex_init(&msc->buf_mutex); 1664 INIT_LIST_HEAD(&msc->win_list); 1665 INIT_LIST_HEAD(&msc->iter_list); 1666 1667 msc->burst_len = 1668 (ioread32(msc->reg_base + REG_MSU_MSC0CTL) & MSC_LEN) >> 1669 __ffs(MSC_LEN); 1670 1671 return 0; 1672 } 1673 1674 static void msc_win_switch(struct msc *msc) 1675 { 1676 struct msc_window *first; 1677 1678 first = list_first_entry(&msc->win_list, struct msc_window, entry); 1679 1680 if (msc_is_last_win(msc->cur_win)) 1681 msc->cur_win = first; 1682 else 1683 msc->cur_win = list_next_entry(msc->cur_win, entry); 1684 1685 msc->base = msc_win_base(msc->cur_win); 1686 msc->base_addr = msc_win_base_dma(msc->cur_win); 1687 1688 intel_th_trace_switch(msc->thdev); 1689 } 1690 1691 /** 1692 * intel_th_msc_window_unlock - put the window back in rotation 1693 * @dev: MSC device to which this relates 1694 * @sgt: buffer's sg_table for the window, does nothing if NULL 1695 */ 1696 void intel_th_msc_window_unlock(struct device *dev, struct sg_table *sgt) 1697 { 1698 struct msc *msc = dev_get_drvdata(dev); 1699 struct msc_window *win; 1700 1701 if (!sgt) 1702 return; 1703 1704 win = msc_find_window(msc, sgt, false); 1705 if (!win) 1706 return; 1707 1708 msc_win_set_lockout(win, WIN_LOCKED, WIN_READY); 1709 } 1710 EXPORT_SYMBOL_GPL(intel_th_msc_window_unlock); 1711 1712 static void msc_work(struct work_struct *work) 1713 { 1714 struct msc *msc = container_of(work, struct msc, work); 1715 1716 intel_th_msc_deactivate(msc->thdev); 1717 } 1718 1719 static irqreturn_t intel_th_msc_interrupt(struct intel_th_device *thdev) 1720 { 1721 struct msc *msc = dev_get_drvdata(&thdev->dev); 1722 u32 msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS); 1723 u32 mask = msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST; 1724 struct msc_window *win, *next_win; 1725 1726 if (!msc->do_irq || !msc->mbuf) 1727 return IRQ_NONE; 1728 1729 msusts &= mask; 1730 1731 if (!msusts) 1732 return msc->enabled ? 
IRQ_HANDLED : IRQ_NONE; 1733 1734 iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS); 1735 1736 if (!msc->enabled) 1737 return IRQ_NONE; 1738 1739 /* grab the window before we do the switch */ 1740 win = msc->cur_win; 1741 if (!win) 1742 return IRQ_HANDLED; 1743 next_win = msc_next_window(win); 1744 if (!next_win) 1745 return IRQ_HANDLED; 1746 1747 /* next window: if READY, proceed, if LOCKED, stop the trace */ 1748 if (msc_win_set_lockout(next_win, WIN_READY, WIN_INUSE)) { 1749 schedule_work(&msc->work); 1750 return IRQ_HANDLED; 1751 } 1752 1753 /* current window: INUSE -> LOCKED */ 1754 msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED); 1755 1756 msc_win_switch(msc); 1757 1758 if (msc->mbuf && msc->mbuf->ready) 1759 msc->mbuf->ready(msc->mbuf_priv, win->sgt, 1760 msc_win_total_sz(win)); 1761 1762 return IRQ_HANDLED; 1763 } 1764 1765 static const char * const msc_mode[] = { 1766 [MSC_MODE_SINGLE] = "single", 1767 [MSC_MODE_MULTI] = "multi", 1768 [MSC_MODE_EXI] = "ExI", 1769 [MSC_MODE_DEBUG] = "debug", 1770 }; 1771 1772 static ssize_t 1773 wrap_show(struct device *dev, struct device_attribute *attr, char *buf) 1774 { 1775 struct msc *msc = dev_get_drvdata(dev); 1776 1777 return scnprintf(buf, PAGE_SIZE, "%d\n", msc->wrap); 1778 } 1779 1780 static ssize_t 1781 wrap_store(struct device *dev, struct device_attribute *attr, const char *buf, 1782 size_t size) 1783 { 1784 struct msc *msc = dev_get_drvdata(dev); 1785 unsigned long val; 1786 int ret; 1787 1788 ret = kstrtoul(buf, 10, &val); 1789 if (ret) 1790 return ret; 1791 1792 msc->wrap = !!val; 1793 1794 return size; 1795 } 1796 1797 static DEVICE_ATTR_RW(wrap); 1798 1799 static void msc_buffer_unassign(struct msc *msc) 1800 { 1801 lockdep_assert_held(&msc->buf_mutex); 1802 1803 if (!msc->mbuf) 1804 return; 1805 1806 msc->mbuf->unassign(msc->mbuf_priv); 1807 msu_buffer_put(msc->mbuf); 1808 msc->mbuf_priv = NULL; 1809 msc->mbuf = NULL; 1810 } 1811 1812 static ssize_t 1813 mode_show(struct device *dev, struct device_attribute *attr, char *buf) 1814 { 1815 struct msc *msc = dev_get_drvdata(dev); 1816 const char *mode = msc_mode[msc->mode]; 1817 ssize_t ret; 1818 1819 mutex_lock(&msc->buf_mutex); 1820 if (msc->mbuf) 1821 mode = msc->mbuf->name; 1822 ret = scnprintf(buf, PAGE_SIZE, "%s\n", mode); 1823 mutex_unlock(&msc->buf_mutex); 1824 1825 return ret; 1826 } 1827 1828 static ssize_t 1829 mode_store(struct device *dev, struct device_attribute *attr, const char *buf, 1830 size_t size) 1831 { 1832 const struct msu_buffer *mbuf = NULL; 1833 struct msc *msc = dev_get_drvdata(dev); 1834 size_t len = size; 1835 char *cp, *mode; 1836 int i, ret; 1837 1838 if (!capable(CAP_SYS_RAWIO)) 1839 return -EPERM; 1840 1841 cp = memchr(buf, '\n', len); 1842 if (cp) 1843 len = cp - buf; 1844 1845 mode = kstrndup(buf, len, GFP_KERNEL); 1846 i = match_string(msc_mode, ARRAY_SIZE(msc_mode), mode); 1847 if (i >= 0) 1848 goto found; 1849 1850 /* Buffer sinks only work with a usable IRQ */ 1851 if (!msc->do_irq) { 1852 kfree(mode); 1853 return -EINVAL; 1854 } 1855 1856 mbuf = msu_buffer_get(mode); 1857 kfree(mode); 1858 if (mbuf) 1859 goto found; 1860 1861 return -EINVAL; 1862 1863 found: 1864 mutex_lock(&msc->buf_mutex); 1865 ret = 0; 1866 1867 /* Same buffer: do nothing */ 1868 if (mbuf && mbuf == msc->mbuf) { 1869 /* put the extra reference we just got */ 1870 msu_buffer_put(mbuf); 1871 goto unlock; 1872 } 1873 1874 ret = msc_buffer_unlocked_free_unless_used(msc); 1875 if (ret) 1876 goto unlock; 1877 1878 if (mbuf) { 1879 void *mbuf_priv = mbuf->assign(dev, &i); 1880 
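		/*
		 * ->assign() above hands back the sink's private context and
		 * is expected to update @i with the mode index that is
		 * stored in msc->mode below.
		 */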
1881 if (!mbuf_priv) { 1882 ret = -ENOMEM; 1883 goto unlock; 1884 } 1885 1886 msc_buffer_unassign(msc); 1887 msc->mbuf_priv = mbuf_priv; 1888 msc->mbuf = mbuf; 1889 } else { 1890 msc_buffer_unassign(msc); 1891 } 1892 1893 msc->mode = i; 1894 1895 unlock: 1896 if (ret && mbuf) 1897 msu_buffer_put(mbuf); 1898 mutex_unlock(&msc->buf_mutex); 1899 1900 return ret ? ret : size; 1901 } 1902 1903 static DEVICE_ATTR_RW(mode); 1904 1905 static ssize_t 1906 nr_pages_show(struct device *dev, struct device_attribute *attr, char *buf) 1907 { 1908 struct msc *msc = dev_get_drvdata(dev); 1909 struct msc_window *win; 1910 size_t count = 0; 1911 1912 mutex_lock(&msc->buf_mutex); 1913 1914 if (msc->mode == MSC_MODE_SINGLE) 1915 count = scnprintf(buf, PAGE_SIZE, "%ld\n", msc->nr_pages); 1916 else if (msc->mode == MSC_MODE_MULTI) { 1917 list_for_each_entry(win, &msc->win_list, entry) { 1918 count += scnprintf(buf + count, PAGE_SIZE - count, 1919 "%d%c", win->nr_blocks, 1920 msc_is_last_win(win) ? '\n' : ','); 1921 } 1922 } else { 1923 count = scnprintf(buf, PAGE_SIZE, "unsupported\n"); 1924 } 1925 1926 mutex_unlock(&msc->buf_mutex); 1927 1928 return count; 1929 } 1930 1931 static ssize_t 1932 nr_pages_store(struct device *dev, struct device_attribute *attr, 1933 const char *buf, size_t size) 1934 { 1935 struct msc *msc = dev_get_drvdata(dev); 1936 unsigned long val, *win = NULL, *rewin; 1937 size_t len = size; 1938 const char *p = buf; 1939 char *end, *s; 1940 int ret, nr_wins = 0; 1941 1942 if (!capable(CAP_SYS_RAWIO)) 1943 return -EPERM; 1944 1945 ret = msc_buffer_free_unless_used(msc); 1946 if (ret) 1947 return ret; 1948 1949 /* scan the comma-separated list of allocation sizes */ 1950 end = memchr(buf, '\n', len); 1951 if (end) 1952 len = end - buf; 1953 1954 do { 1955 end = memchr(p, ',', len); 1956 s = kstrndup(p, end ? end - p : len, GFP_KERNEL); 1957 if (!s) { 1958 ret = -ENOMEM; 1959 goto free_win; 1960 } 1961 1962 ret = kstrtoul(s, 10, &val); 1963 kfree(s); 1964 1965 if (ret || !val) 1966 goto free_win; 1967 1968 if (nr_wins && msc->mode == MSC_MODE_SINGLE) { 1969 ret = -EINVAL; 1970 goto free_win; 1971 } 1972 1973 nr_wins++; 1974 rewin = krealloc(win, sizeof(*win) * nr_wins, GFP_KERNEL); 1975 if (!rewin) { 1976 kfree(win); 1977 return -ENOMEM; 1978 } 1979 1980 win = rewin; 1981 win[nr_wins - 1] = val; 1982 1983 if (!end) 1984 break; 1985 1986 /* consume the number and the following comma, hence +1 */ 1987 len -= end - p + 1; 1988 p = end + 1; 1989 } while (len); 1990 1991 mutex_lock(&msc->buf_mutex); 1992 ret = msc_buffer_alloc(msc, win, nr_wins); 1993 mutex_unlock(&msc->buf_mutex); 1994 1995 free_win: 1996 kfree(win); 1997 1998 return ret ? ret : size; 1999 } 2000 2001 static DEVICE_ATTR_RW(nr_pages); 2002 2003 static ssize_t 2004 win_switch_store(struct device *dev, struct device_attribute *attr, 2005 const char *buf, size_t size) 2006 { 2007 struct msc *msc = dev_get_drvdata(dev); 2008 unsigned long val; 2009 int ret; 2010 2011 ret = kstrtoul(buf, 10, &val); 2012 if (ret) 2013 return ret; 2014 2015 if (val != 1) 2016 return -EINVAL; 2017 2018 mutex_lock(&msc->buf_mutex); 2019 /* 2020 * Window switch can only happen in the "multi" mode. 2021 * If a external buffer is engaged, they have the full 2022 * control over window switching. 2023 */ 2024 if (msc->mode != MSC_MODE_MULTI || msc->mbuf) 2025 ret = -ENOTSUPP; 2026 else 2027 msc_win_switch(msc); 2028 mutex_unlock(&msc->buf_mutex); 2029 2030 return ret ? 
ret : size; 2031 } 2032 2033 static DEVICE_ATTR_WO(win_switch); 2034 2035 static struct attribute *msc_output_attrs[] = { 2036 &dev_attr_wrap.attr, 2037 &dev_attr_mode.attr, 2038 &dev_attr_nr_pages.attr, 2039 &dev_attr_win_switch.attr, 2040 NULL, 2041 }; 2042 2043 static struct attribute_group msc_output_group = { 2044 .attrs = msc_output_attrs, 2045 }; 2046 2047 static int intel_th_msc_probe(struct intel_th_device *thdev) 2048 { 2049 struct device *dev = &thdev->dev; 2050 struct resource *res; 2051 struct msc *msc; 2052 void __iomem *base; 2053 int err; 2054 2055 res = intel_th_device_get_resource(thdev, IORESOURCE_MEM, 0); 2056 if (!res) 2057 return -ENODEV; 2058 2059 base = devm_ioremap(dev, res->start, resource_size(res)); 2060 if (!base) 2061 return -ENOMEM; 2062 2063 msc = devm_kzalloc(dev, sizeof(*msc), GFP_KERNEL); 2064 if (!msc) 2065 return -ENOMEM; 2066 2067 res = intel_th_device_get_resource(thdev, IORESOURCE_IRQ, 1); 2068 if (!res) 2069 msc->do_irq = 1; 2070 2071 msc->index = thdev->id; 2072 2073 msc->thdev = thdev; 2074 msc->reg_base = base + msc->index * 0x100; 2075 msc->msu_base = base; 2076 2077 INIT_WORK(&msc->work, msc_work); 2078 err = intel_th_msc_init(msc); 2079 if (err) 2080 return err; 2081 2082 dev_set_drvdata(dev, msc); 2083 2084 return 0; 2085 } 2086 2087 static void intel_th_msc_remove(struct intel_th_device *thdev) 2088 { 2089 struct msc *msc = dev_get_drvdata(&thdev->dev); 2090 int ret; 2091 2092 intel_th_msc_deactivate(thdev); 2093 2094 /* 2095 * Buffers should not be used at this point except if the 2096 * output character device is still open and the parent 2097 * device gets detached from its bus, which is a FIXME. 2098 */ 2099 ret = msc_buffer_free_unless_used(msc); 2100 WARN_ON_ONCE(ret); 2101 } 2102 2103 static struct intel_th_driver intel_th_msc_driver = { 2104 .probe = intel_th_msc_probe, 2105 .remove = intel_th_msc_remove, 2106 .irq = intel_th_msc_interrupt, 2107 .wait_empty = intel_th_msc_wait_empty, 2108 .activate = intel_th_msc_activate, 2109 .deactivate = intel_th_msc_deactivate, 2110 .fops = &intel_th_msc_fops, 2111 .attr_group = &msc_output_group, 2112 .driver = { 2113 .name = "msc", 2114 .owner = THIS_MODULE, 2115 }, 2116 }; 2117 2118 module_driver(intel_th_msc_driver, 2119 intel_th_driver_register, 2120 intel_th_driver_unregister); 2121 2122 MODULE_LICENSE("GPL v2"); 2123 MODULE_DESCRIPTION("Intel(R) Trace Hub Memory Storage Unit driver"); 2124 MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>"); 2125
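
/*
 * Example: a minimal MSU buffer "sink" module built on the interface this
 * file exports (intel_th_msu_buffer_register(), intel_th_msu_buffer_unregister()
 * and intel_th_msc_window_unlock()). This is only a sketch: the msu_buffer
 * callback prototypes are inferred from the call sites above (mode_store(),
 * msc_buffer_win_alloc(), msc_disable() and the interrupt handler), and all
 * "example_*" names are made up for illustration.
 *
 *	#include <linux/module.h>
 *	#include <linux/device.h>
 *	#include <linux/slab.h>
 *	#include <linux/scatterlist.h>
 *	#include <linux/intel_th.h>
 *	#include "msu.h"
 *
 *	struct example_priv {
 *		struct device	*dev;
 *	};
 *
 *	static void *example_assign(struct device *dev, int *mode)
 *	{
 *		struct example_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 *
 *		if (!priv)
 *			return NULL;
 *
 *		priv->dev = dev;
 *		*mode = MSC_MODE_MULTI;	// run the MSC in multiblock mode
 *
 *		return priv;
 *	}
 *
 *	static void example_unassign(void *data)
 *	{
 *		kfree(data);
 *	}
 *
 *	// Window memory is still allocated by msu.c in this sketch, so no
 *	// ->alloc_window()/->free_window() callbacks are provided.
 *
 *	static int example_ready(void *data, struct sg_table *sgt, size_t bytes)
 *	{
 *		struct example_priv *priv = data;
 *
 *		// Consume @bytes of trace data described by @sgt here, then
 *		// put the window back into rotation (LOCKED -> READY) so the
 *		// hardware can reuse it.
 *		intel_th_msc_window_unlock(priv->dev, sgt);
 *
 *		return 0;
 *	}
 *
 *	static const struct msu_buffer example_mbuf = {
 *		.name		= "example",
 *		.assign		= example_assign,
 *		.unassign	= example_unassign,
 *		.ready		= example_ready,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return intel_th_msu_buffer_register(&example_mbuf, THIS_MODULE);
 *	}
 *	module_init(example_init);
 *
 *	static void __exit example_exit(void)
 *	{
 *		intel_th_msu_buffer_unregister(&example_mbuf);
 *	}
 *	module_exit(example_exit);
 *
 *	MODULE_LICENSE("GPL v2");
 *
 * Once such a sink is loaded, it is selected from user space by writing its
 * name to the MSC's "mode" attribute and sizing the windows via "nr_pages",
 * e.g. (sysfs paths are illustrative):
 *
 *	echo example > /sys/bus/intel_th/devices/0-msc0/mode
 *	echo 64,64 > /sys/bus/intel_th/devices/0-msc0/nr_pages
 */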