// SPDX-License-Identifier: GPL-2.0
/*
 * Intel(R) Trace Hub Memory Storage Unit
 *
 * Copyright (C) 2014-2015 Intel Corporation.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include "intel_th.h"
#include "msu.h"

#define msc_dev(x) (&(x)->thdev->dev)

/**
 * struct msc_window - multiblock mode window descriptor
 * @entry:	window list linkage (msc::win_list)
 * @pgoff:	page offset into the buffer that this window starts at
 * @nr_blocks:	number of blocks (pages) in this window
 * @nr_segs:	number of segments in this window (<= @nr_blocks)
 * @msc:	back pointer to the MSC device that owns this window
 * @_sgt:	storage for the block descriptor table
 * @sgt:	array of block descriptors (points to @_sgt)
 */
struct msc_window {
	struct list_head	entry;
	unsigned long		pgoff;
	unsigned int		nr_blocks;
	unsigned int		nr_segs;
	struct msc		*msc;
	struct sg_table		_sgt;
	struct sg_table		*sgt;
};

/**
 * struct msc_iter - iterator for msc buffer
 * @entry:		msc::iter_list linkage
 * @msc:		pointer to the MSC device
 * @start_win:		oldest window
 * @win:		current window
 * @offset:		current logical offset into the buffer
 * @start_block:	oldest block in the window
 * @block:		block number in the window
 * @block_off:		offset into current block
 * @wrap_count:		block wrapping handling
 * @eof:		end of buffer reached
 */
struct msc_iter {
	struct list_head	entry;
	struct msc		*msc;
	struct msc_window	*start_win;
	struct msc_window	*win;
	unsigned long		offset;
	int			start_block;
	int			block;
	unsigned int		block_off;
	unsigned int		wrap_count;
	unsigned int		eof;
};

/**
 * struct msc - MSC device representation
 * @reg_base:		register window base address
 * @msu_base:		MSU configuration window base address
 * @thdev:		intel_th_device pointer
 * @win_list:		list of windows in multiblock mode
 * @single_sgt:		single mode buffer
 * @cur_win:		current window
 * @nr_pages:		total number of pages allocated for this buffer
 * @single_sz:		amount of data in single mode
 * @single_wrap:	single mode wrap occurred
 * @base:		buffer's base pointer
 * @base_addr:		buffer's base address
 * @user_count:		number of users of the buffer
 * @mmap_count:		number of mappings
 * @buf_mutex:		mutex to serialize access to buffer-related bits
 * @iter_list:		list of open character device readers (msc_iter)
 * @enabled:		MSC is enabled
 * @wrap:		wrapping is enabled
 * @do_irq:		IRQ resource is available, handle interrupts
 * @mode:		MSC operating mode
 * @burst_len:		write burst length
 * @index:		number of this MSC in the MSU
 */
struct msc {
	void __iomem		*reg_base;
	void __iomem		*msu_base;
	struct intel_th_device	*thdev;

	struct list_head	win_list;
	struct sg_table		single_sgt;
	struct msc_window	*cur_win;
	unsigned long		nr_pages;
	unsigned long		single_sz;
	unsigned int		single_wrap : 1;
	void			*base;
	dma_addr_t		base_addr;

	/* <0: no buffer, 0: no users, >0: active users */
	atomic_t		user_count;

	atomic_t		mmap_count;
	struct mutex		buf_mutex;

	struct list_head	iter_list;

	/* config */
	unsigned int		enabled : 1,
				wrap	: 1,
				do_irq	: 1;
	unsigned int		mode;
	unsigned int		burst_len;
	unsigned int		index;
};

static inline bool msc_block_is_empty(struct msc_block_desc *bdesc)
{
	/* header hasn't been written */
	if (!bdesc->valid_dw)
		return true;

	/* valid_dw includes the header */
	if (!msc_data_sz(bdesc))
		return true;

	return false;
}

static inline struct msc_block_desc *
msc_win_block(struct msc_window *win, unsigned int block)
{
	return sg_virt(&win->sgt->sgl[block]);
}

static inline size_t
msc_win_actual_bsz(struct msc_window *win, unsigned int block)
{
	return win->sgt->sgl[block].length;
}

static inline dma_addr_t
msc_win_baddr(struct msc_window *win, unsigned int block)
{
	return sg_dma_address(&win->sgt->sgl[block]);
}

static inline unsigned long
msc_win_bpfn(struct msc_window *win, unsigned int block)
{
	return msc_win_baddr(win, block) >> PAGE_SHIFT;
}

/**
 * msc_is_last_win() - check if a window is the last one for a given MSC
 * @win:	window
 * Return:	true if @win is the last window in MSC's multiblock buffer
 */
static inline bool msc_is_last_win(struct msc_window *win)
{
	return win->entry.next == &win->msc->win_list;
}

/**
 * msc_next_window() - return next window in the multiblock buffer
 * @win:	current window
 *
 * Return:	window following the current one
 */
static struct msc_window *msc_next_window(struct msc_window *win)
{
	if (msc_is_last_win(win))
		return list_first_entry(&win->msc->win_list, struct msc_window,
					entry);

	return list_next_entry(win, entry);
}

/**
 * msc_find_window() - find a window matching a given sg_table
 * @msc:	MSC device
 * @sgt:	SG table of the window
 * @nonempty:	skip over empty windows
 *
 * Return:	MSC window structure pointer or NULL if the window
 *		could not be found.
 */
static struct msc_window *
msc_find_window(struct msc *msc, struct sg_table *sgt, bool nonempty)
{
	struct msc_window *win;
	unsigned int found = 0;

	if (list_empty(&msc->win_list))
		return NULL;

	/*
	 * we might need a radix tree for this, depending on how
	 * many windows a typical user would allocate; ideally it's
	 * something like 2, in which case we're good
	 */
	list_for_each_entry(win, &msc->win_list, entry) {
		if (win->sgt == sgt)
			found++;

		/* skip the empty ones */
		if (nonempty && msc_block_is_empty(msc_win_block(win, 0)))
			continue;

		if (found)
			return win;
	}

	return NULL;
}

/**
 * msc_oldest_window() - locate the window with oldest data
 * @msc:	MSC device
 *
 * This should only be used in multiblock mode. Caller should hold the
 * msc::user_count reference.
 *
 * Return:	the oldest window with valid data
 */
static struct msc_window *msc_oldest_window(struct msc *msc)
{
	struct msc_window *win;

	if (list_empty(&msc->win_list))
		return NULL;

	win = msc_find_window(msc, msc_next_window(msc->cur_win)->sgt, true);
	if (win)
		return win;

	return list_first_entry(&msc->win_list, struct msc_window, entry);
}

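/*
 * Editor's illustration (not in the original source): with three windows
 * A -> B -> C and msc::cur_win == B, hardware is writing into B and will
 * overwrite C next, so C holds the oldest surviving data and is where
 * the search above starts:
 *
 *	A -> B -> C -> (wraps to A)
 *	     ^cur_win
 *	candidates, in order: C, A, B
 *
 * If no nonempty window is found, the first window is returned as a
 * fallback.
 */
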
/**
 * msc_win_oldest_block() - locate the oldest block in a given window
 * @win:	window to look at
 *
 * Return:	index of the block with the oldest data
 */
static unsigned int msc_win_oldest_block(struct msc_window *win)
{
	unsigned int blk;
	struct msc_block_desc *bdesc = msc_win_block(win, 0);

	/* without wrapping, first block is the oldest */
	if (!msc_block_wrapped(bdesc))
		return 0;

	/*
	 * with wrapping, last written block contains both the newest and the
	 * oldest data for this window.
	 */
	for (blk = 0; blk < win->nr_segs; blk++) {
		bdesc = msc_win_block(win, blk);

		if (msc_block_last_written(bdesc))
			return blk;
	}

	return 0;
}

static struct msc_block_desc *msc_iter_bdesc(struct msc_iter *iter)
{
	return msc_win_block(iter->win, iter->block);
}

static void msc_iter_init(struct msc_iter *iter)
{
	memset(iter, 0, sizeof(*iter));
	iter->start_block = -1;
	iter->block = -1;
}

static struct msc_iter *msc_iter_install(struct msc *msc)
{
	struct msc_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&msc->buf_mutex);

	/*
	 * Reading and tracing are mutually exclusive; if msc is
	 * enabled, open() will fail; otherwise existing readers
	 * will prevent enabling the msc and the rest of fops don't
	 * need to worry about it.
	 */
	if (msc->enabled) {
		kfree(iter);
		iter = ERR_PTR(-EBUSY);
		goto unlock;
	}

	msc_iter_init(iter);
	iter->msc = msc;

	list_add_tail(&iter->entry, &msc->iter_list);
unlock:
	mutex_unlock(&msc->buf_mutex);

	return iter;
}

static void msc_iter_remove(struct msc_iter *iter, struct msc *msc)
{
	mutex_lock(&msc->buf_mutex);
	list_del(&iter->entry);
	mutex_unlock(&msc->buf_mutex);

	kfree(iter);
}

static void msc_iter_block_start(struct msc_iter *iter)
{
	if (iter->start_block != -1)
		return;

	iter->start_block = msc_win_oldest_block(iter->win);
	iter->block = iter->start_block;
	iter->wrap_count = 0;

	/*
	 * start with the block with oldest data; if data has wrapped
	 * in this window, it should be in this block
	 */
	if (msc_block_wrapped(msc_iter_bdesc(iter)))
		iter->wrap_count = 2;
}

static int msc_iter_win_start(struct msc_iter *iter, struct msc *msc)
{
	/* already started, nothing to do */
	if (iter->start_win)
		return 0;

	iter->start_win = msc_oldest_window(msc);
	if (!iter->start_win)
		return -EINVAL;

	iter->win = iter->start_win;
	iter->start_block = -1;

	msc_iter_block_start(iter);

	return 0;
}

static int msc_iter_win_advance(struct msc_iter *iter)
{
	iter->win = msc_next_window(iter->win);
	iter->start_block = -1;

	if (iter->win == iter->start_win) {
		iter->eof++;
		return 1;
	}

	msc_iter_block_start(iter);

	return 0;
}

static int msc_iter_block_advance(struct msc_iter *iter)
{
	iter->block_off = 0;

	/* wrapping */
	if (iter->wrap_count && iter->block == iter->start_block) {
		iter->wrap_count--;
		if (!iter->wrap_count)
			/* copied newest data from the wrapped block */
			return msc_iter_win_advance(iter);
	}

	/* no wrapping, check for last written block */
	if (!iter->wrap_count && msc_block_last_written(msc_iter_bdesc(iter)))
		/* copied newest data for the window */
		return msc_iter_win_advance(iter);

	/* block advance */
	if (++iter->block == iter->win->nr_segs)
		iter->block = 0;

	/* no wrapping, sanity check in case there is no last written block */
	if (!iter->wrap_count && iter->block == iter->start_block)
		return msc_iter_win_advance(iter);

	return 0;
}

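/*
 * Editor's illustration (not in the original source): for a wrapped
 * window of blocks 0..2 whose last written block is 1, the iterator
 * visits block 1 twice, which is what wrap_count tracks:
 *
 *	blk 1 (wrap_count == 2): oldest tail of the wrapped block
 *	blk 2, blk 0 (wrap_count == 1): the blocks in between
 *	blk 1 (wrap_count 1 -> 0): newest data, then advance to the
 *	next window
 */
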
/**
 * msc_buffer_iterate() - go through multiblock buffer's data
 * @iter:	iterator structure
 * @size:	amount of data to scan
 * @data:	callback's private data
 * @fn:		iterator callback
 *
 * This will start at the window which will be written to next (containing
 * the oldest data) and work its way to the current window, calling @fn
 * for each chunk of data as it goes.
 *
 * Caller should have msc::user_count reference to make sure the buffer
 * doesn't disappear from under us.
 *
 * Return:	amount of data actually scanned.
 */
static ssize_t
msc_buffer_iterate(struct msc_iter *iter, size_t size, void *data,
		   unsigned long (*fn)(void *, void *, size_t))
{
	struct msc *msc = iter->msc;
	size_t len = size;
	unsigned int advance;

	if (iter->eof)
		return 0;

	/* start with the oldest window */
	if (msc_iter_win_start(iter, msc))
		return 0;

	do {
		unsigned long data_bytes = msc_data_sz(msc_iter_bdesc(iter));
		void *src = (void *)msc_iter_bdesc(iter) + MSC_BDESC;
		size_t tocopy = data_bytes, copied = 0;
		size_t remaining = 0;

		advance = 1;

		/*
		 * If block wrapping happened, we need to visit the last block
		 * twice, because it contains both the oldest and the newest
		 * data in this window.
		 *
		 * First time (wrap_count==2), in the very beginning, to collect
		 * the oldest data, which is in the range
		 * (data_bytes..DATA_IN_PAGE).
		 *
		 * Second time (wrap_count==1), it's just like any other block,
		 * containing data in the range of [MSC_BDESC..data_bytes].
		 */
		if (iter->block == iter->start_block && iter->wrap_count == 2) {
			tocopy = DATA_IN_PAGE - data_bytes;
			src += data_bytes;
		}

		if (!tocopy)
			goto next_block;

		tocopy -= iter->block_off;
		src += iter->block_off;

		if (len < tocopy) {
			tocopy = len;
			advance = 0;
		}

		remaining = fn(data, src, tocopy);

		if (remaining)
			advance = 0;

		copied = tocopy - remaining;
		len -= copied;
		iter->block_off += copied;
		iter->offset += copied;

		if (!advance)
			break;

next_block:
		if (msc_iter_block_advance(iter))
			break;

	} while (len);

	return size - len;
}

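/*
 * Editor's sketch (not in the original source): a minimal callback for
 * msc_buffer_iterate() follows copy_to_user() conventions, returning the
 * number of bytes it could NOT consume; a non-zero return stops the
 * walk. For example, to measure how much trace data is available:
 *
 *	static unsigned long msc_count_bytes(void *data, void *src,
 *					     size_t len)
 *	{
 *		*(size_t *)data += len;
 *		return 0;
 *	}
 *
 *	size_t total = 0;
 *	msc_buffer_iterate(iter, SIZE_MAX, &total, msc_count_bytes);
 *
 * msc_win_to_user() further down is the real callback used by read().
 */
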
/**
 * msc_buffer_clear_hw_header() - clear hw header for multiblock
 * @msc:	MSC device
 */
static void msc_buffer_clear_hw_header(struct msc *msc)
{
	struct msc_window *win;

	list_for_each_entry(win, &msc->win_list, entry) {
		unsigned int blk;
		size_t hw_sz = sizeof(struct msc_block_desc) -
			offsetof(struct msc_block_desc, hw_tag);

		for (blk = 0; blk < win->nr_segs; blk++) {
			struct msc_block_desc *bdesc = msc_win_block(win, blk);

			memset(&bdesc->hw_tag, 0, hw_sz);
		}
	}
}

static int intel_th_msu_init(struct msc *msc)
{
	u32 mintctl, msusts;

	if (!msc->do_irq)
		return 0;

	mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
	mintctl |= msc->index ? M1BLIE : M0BLIE;
	iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
	if (mintctl != ioread32(msc->msu_base + REG_MSU_MINTCTL)) {
		dev_info(msc_dev(msc), "MINTCTL ignores writes: no usable interrupts\n");
		msc->do_irq = 0;
		return 0;
	}

	msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
	iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS);

	return 0;
}

static void intel_th_msu_deinit(struct msc *msc)
{
	u32 mintctl;

	if (!msc->do_irq)
		return;

	mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
	mintctl &= msc->index ? ~M1BLIE : ~M0BLIE;
	iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
}

/**
 * msc_configure() - set up MSC hardware
 * @msc:	the MSC device to configure
 *
 * Program storage mode, wrapping, burst length and trace buffer address
 * into a given MSC. Then, enable tracing and set msc::enabled.
 * The latter is serialized on msc::buf_mutex, so make sure to hold it.
 */
static int msc_configure(struct msc *msc)
{
	u32 reg;

	lockdep_assert_held(&msc->buf_mutex);

	if (msc->mode > MSC_MODE_MULTI)
		return -ENOTSUPP;

	if (msc->mode == MSC_MODE_MULTI)
		msc_buffer_clear_hw_header(msc);

	reg = msc->base_addr >> PAGE_SHIFT;
	iowrite32(reg, msc->reg_base + REG_MSU_MSC0BAR);

	if (msc->mode == MSC_MODE_SINGLE) {
		reg = msc->nr_pages;
		iowrite32(reg, msc->reg_base + REG_MSU_MSC0SIZE);
	}

	reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
	reg &= ~(MSC_MODE | MSC_WRAPEN | MSC_EN | MSC_RD_HDR_OVRD);

	reg |= MSC_EN;
	reg |= msc->mode << __ffs(MSC_MODE);
	reg |= msc->burst_len << __ffs(MSC_LEN);

	if (msc->wrap)
		reg |= MSC_WRAPEN;

	iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);

	msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI;
	intel_th_trace_enable(msc->thdev);
	msc->enabled = 1;

	return 0;
}

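/*
 * Editor's note on the field packing above (not in the original
 * source): MSC_MODE and MSC_LEN are multi-bit masks from msu.h, and
 * __ffs(mask) yields the bit position of the field's lowest set bit,
 * so
 *
 *	reg |= msc->mode << __ffs(MSC_MODE);
 *
 * shifts the mode value into its register field. For instance, with a
 * mask like 0x30 (bits 5:4), __ffs() returns 4 and a mode value of 1
 * lands at bit 4 of MSC0CTL.
 */
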
/**
 * msc_disable() - disable MSC hardware
 * @msc:	MSC device to disable
 *
 * If @msc is enabled, disable tracing on the switch and then disable MSC
 * storage. Caller must hold msc::buf_mutex.
 */
static void msc_disable(struct msc *msc)
{
	u32 reg;

	lockdep_assert_held(&msc->buf_mutex);

	intel_th_trace_disable(msc->thdev);

	if (msc->mode == MSC_MODE_SINGLE) {
		reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
		msc->single_wrap = !!(reg & MSCSTS_WRAPSTAT);

		reg = ioread32(msc->reg_base + REG_MSU_MSC0MWP);
		msc->single_sz = reg & ((msc->nr_pages << PAGE_SHIFT) - 1);
		dev_dbg(msc_dev(msc), "MSCnMWP: %08x/%08lx, wrap: %d\n",
			reg, msc->single_sz, msc->single_wrap);
	}

	reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
	reg &= ~MSC_EN;
	iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);
	msc->enabled = 0;

	iowrite32(0, msc->reg_base + REG_MSU_MSC0BAR);
	iowrite32(0, msc->reg_base + REG_MSU_MSC0SIZE);

	dev_dbg(msc_dev(msc), "MSCnNWSA: %08x\n",
		ioread32(msc->reg_base + REG_MSU_MSC0NWSA));

	reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
	dev_dbg(msc_dev(msc), "MSCnSTS: %08x\n", reg);
}

static int intel_th_msc_activate(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	int ret = -EBUSY;

	if (!atomic_inc_unless_negative(&msc->user_count))
		return -ENODEV;

	mutex_lock(&msc->buf_mutex);

	/* if there are readers, refuse */
	if (list_empty(&msc->iter_list))
		ret = msc_configure(msc);

	mutex_unlock(&msc->buf_mutex);

	if (ret)
		atomic_dec(&msc->user_count);

	return ret;
}

static void intel_th_msc_deactivate(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);

	mutex_lock(&msc->buf_mutex);
	if (msc->enabled) {
		msc_disable(msc);
		atomic_dec(&msc->user_count);
	}
	mutex_unlock(&msc->buf_mutex);
}

/**
 * msc_buffer_contig_alloc() - allocate a contiguous buffer for SINGLE mode
 * @msc:	MSC device
 * @size:	allocation size in bytes
 *
 * This modifies msc::base, which requires msc::buf_mutex to serialize, so the
 * caller is expected to hold it.
 *
 * Return:	0 on success, -errno otherwise.
 */
static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
{
	unsigned long nr_pages = size >> PAGE_SHIFT;
	unsigned int order = get_order(size);
	struct page *page;
	int ret;

	if (!size)
		return 0;

	ret = sg_alloc_table(&msc->single_sgt, 1, GFP_KERNEL);
	if (ret)
		goto err_out;

	ret = -ENOMEM;
	page = alloc_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32, order);
	if (!page)
		goto err_free_sgt;

	split_page(page, order);
	sg_set_buf(msc->single_sgt.sgl, page_address(page), size);

	/* dma_map_sg() returns the number of mapped entries, 0 on failure */
	if (dma_map_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl, 1,
		       DMA_FROM_DEVICE) < 1)
		goto err_free_pages;

	msc->nr_pages = nr_pages;
	msc->base = page_address(page);
	msc->base_addr = sg_dma_address(msc->single_sgt.sgl);

	return 0;

err_free_pages:
	__free_pages(page, order);

err_free_sgt:
	sg_free_table(&msc->single_sgt);

err_out:
	return ret;
}

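/*
 * Editor's note on the allocation above (not in the original source):
 * the buffer is grabbed as a single high-order allocation so that it is
 * physically contiguous for the hardware; split_page() then converts it
 * into individual order-0 pages so that each page carries its own
 * refcount and can be handed out to mmap() (see msc_mmap_fault()) and
 * released one page at a time in msc_buffer_contig_free().
 */
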
/**
 * msc_buffer_contig_free() - free a contiguous buffer
 * @msc:	MSC configured in SINGLE mode
 */
static void msc_buffer_contig_free(struct msc *msc)
{
	unsigned long off;

	dma_unmap_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl,
		     1, DMA_FROM_DEVICE);
	sg_free_table(&msc->single_sgt);

	for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) {
		struct page *page = virt_to_page(msc->base + off);

		page->mapping = NULL;
		__free_page(page);
	}

	msc->nr_pages = 0;
}

/**
 * msc_buffer_contig_get_page() - find a page at a given offset
 * @msc:	MSC configured in SINGLE mode
 * @pgoff:	page offset
 *
 * Return:	page, if @pgoff is within the range, NULL otherwise.
 */
static struct page *msc_buffer_contig_get_page(struct msc *msc,
					       unsigned long pgoff)
{
	if (pgoff >= msc->nr_pages)
		return NULL;

	return virt_to_page(msc->base + (pgoff << PAGE_SHIFT));
}

static int __msc_buffer_win_alloc(struct msc_window *win,
				  unsigned int nr_segs)
{
	struct scatterlist *sg_ptr;
	void *block;
	int i, ret;

	ret = sg_alloc_table(win->sgt, nr_segs, GFP_KERNEL);
	if (ret)
		return -ENOMEM;

	for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) {
		block = dma_alloc_coherent(msc_dev(win->msc)->parent->parent,
					   PAGE_SIZE, &sg_dma_address(sg_ptr),
					   GFP_KERNEL);
		if (!block)
			goto err_nomem;

		sg_set_buf(sg_ptr, block, PAGE_SIZE);
	}

	return nr_segs;

err_nomem:
	for (i--; i >= 0; i--)
		dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
				  msc_win_block(win, i),
				  msc_win_baddr(win, i));

	sg_free_table(win->sgt);

	return -ENOMEM;
}

#ifdef CONFIG_X86
static void msc_buffer_set_uc(struct msc_window *win, unsigned int nr_segs)
{
	int i;

	for (i = 0; i < nr_segs; i++)
		/* Set the page as uncached */
		set_memory_uc((unsigned long)msc_win_block(win, i), 1);
}

static void msc_buffer_set_wb(struct msc_window *win)
{
	int i;

	for (i = 0; i < win->nr_segs; i++)
		/* Reset the page to write-back */
		set_memory_wb((unsigned long)msc_win_block(win, i), 1);
}
#else /* !X86 */
static inline void
msc_buffer_set_uc(struct msc_window *win, unsigned int nr_segs) {}
static inline void msc_buffer_set_wb(struct msc_window *win) {}
#endif /* CONFIG_X86 */

/**
 * msc_buffer_win_alloc() - allocate a window for multiblock mode
 * @msc:	MSC device
 * @nr_blocks:	number of pages in this window
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 *
 * Return:	0 on success, -errno otherwise.
 */
static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
{
	struct msc_window *win;
	int ret = -ENOMEM;

	if (!nr_blocks)
		return 0;

	/*
	 * This limitation holds as long as we need random access to the
	 * block. When that changes, this can go away.
	 */
	if (nr_blocks > SG_MAX_SINGLE_ALLOC)
		return -EINVAL;

	win = kzalloc(sizeof(*win), GFP_KERNEL);
	if (!win)
		return -ENOMEM;

	win->msc = msc;
	win->sgt = &win->_sgt;

	if (!list_empty(&msc->win_list)) {
		struct msc_window *prev = list_last_entry(&msc->win_list,
							  struct msc_window,
							  entry);

		win->pgoff = prev->pgoff + prev->nr_blocks;
	}

	ret = __msc_buffer_win_alloc(win, nr_blocks);
	if (ret < 0)
		goto err_nomem;

	msc_buffer_set_uc(win, ret);

	win->nr_segs = ret;
	win->nr_blocks = nr_blocks;

	if (list_empty(&msc->win_list)) {
		msc->base = msc_win_block(win, 0);
		msc->base_addr = msc_win_baddr(win, 0);
		msc->cur_win = win;
	}

	list_add_tail(&win->entry, &msc->win_list);
	msc->nr_pages += nr_blocks;

	return 0;

err_nomem:
	kfree(win);

	return ret;
}

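/*
 * Editor's illustration (not in the original source): windows are laid
 * out back to back in the file/mmap page space via msc_window::pgoff.
 * Allocating windows of 32, 64 and 32 pages gives:
 *
 *	window 0: pgoff   0, nr_blocks 32
 *	window 1: pgoff  32, nr_blocks 64
 *	window 2: pgoff  96, nr_blocks 32
 *
 * with msc::nr_pages ending up at 128; msc_buffer_get_page() relies on
 * these ranges to translate a page offset back to a window.
 */
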
static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win)
{
	int i;

	for (i = 0; i < win->nr_segs; i++) {
		struct page *page = sg_page(&win->sgt->sgl[i]);

		page->mapping = NULL;
		dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
				  msc_win_block(win, i), msc_win_baddr(win, i));
	}
	sg_free_table(win->sgt);
}

/**
 * msc_buffer_win_free() - free a window from MSC's window list
 * @msc:	MSC device
 * @win:	window to free
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 */
static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
{
	msc->nr_pages -= win->nr_blocks;

	list_del(&win->entry);
	if (list_empty(&msc->win_list)) {
		msc->base = NULL;
		msc->base_addr = 0;
	}

	msc_buffer_set_wb(win);

	__msc_buffer_win_free(msc, win);

	kfree(win);
}

/**
 * msc_buffer_relink() - set up block descriptors for multiblock mode
 * @msc:	MSC device
 *
 * This traverses msc::win_list, which requires msc::buf_mutex to serialize,
 * so the caller is expected to hold it.
 */
static void msc_buffer_relink(struct msc *msc)
{
	struct msc_window *win, *next_win;

	/* call with msc::buf_mutex locked */
	list_for_each_entry(win, &msc->win_list, entry) {
		unsigned int blk;
		u32 sw_tag = 0;

		/*
		 * Last window's next_win should point to the first window
		 * and MSC_SW_TAG_LASTWIN should be set.
		 */
		if (msc_is_last_win(win)) {
			sw_tag |= MSC_SW_TAG_LASTWIN;
			next_win = list_first_entry(&msc->win_list,
						    struct msc_window, entry);
		} else {
			next_win = list_next_entry(win, entry);
		}

		for (blk = 0; blk < win->nr_segs; blk++) {
			struct msc_block_desc *bdesc = msc_win_block(win, blk);

			memset(bdesc, 0, sizeof(*bdesc));

			bdesc->next_win = msc_win_bpfn(next_win, 0);

			/*
			 * As with the last window, the last block should
			 * point to the first one.
			 */
			if (blk == win->nr_segs - 1) {
				sw_tag |= MSC_SW_TAG_LASTBLK;
				bdesc->next_blk = msc_win_bpfn(win, 0);
			} else {
				bdesc->next_blk = msc_win_bpfn(win, blk + 1);
			}

			bdesc->sw_tag = sw_tag;
			bdesc->block_sz = msc_win_actual_bsz(win, blk) / 64;
		}
	}

	/*
	 * Make the above writes globally visible before tracing is
	 * enabled to make sure hardware sees them coherently.
	 */
	wmb();
}

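/*
 * Editor's illustration of the result (not in the original source): for
 * two windows of two blocks each, the descriptors form per-window
 * circular block chains, cross-linked by next_win (all links are PFNs
 * of the blocks' DMA addresses):
 *
 *	win0/blk0 --next_blk--> win0/blk1 (LASTBLK) --next_blk--> win0/blk0
 *	win1/blk0 --next_blk--> win1/blk1 (LASTBLK) --next_blk--> win1/blk0
 *	every win0 block --next_win--> win1/blk0
 *	every win1 block (LASTWIN) --next_win--> win0/blk0
 *
 * block_sz is expressed in 64-byte chunks, hence the division by 64.
 */
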
static void msc_buffer_multi_free(struct msc *msc)
{
	struct msc_window *win, *iter;

	list_for_each_entry_safe(win, iter, &msc->win_list, entry)
		msc_buffer_win_free(msc, win);
}

static int msc_buffer_multi_alloc(struct msc *msc, unsigned long *nr_pages,
				  unsigned int nr_wins)
{
	int ret, i;

	for (i = 0; i < nr_wins; i++) {
		ret = msc_buffer_win_alloc(msc, nr_pages[i]);
		if (ret) {
			msc_buffer_multi_free(msc);
			return ret;
		}
	}

	msc_buffer_relink(msc);

	return 0;
}

/**
 * msc_buffer_free() - free buffers for MSC
 * @msc:	MSC device
 *
 * Free MSC's storage buffers.
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex to
 * serialize, so the caller is expected to hold it.
 */
static void msc_buffer_free(struct msc *msc)
{
	if (msc->mode == MSC_MODE_SINGLE)
		msc_buffer_contig_free(msc);
	else if (msc->mode == MSC_MODE_MULTI)
		msc_buffer_multi_free(msc);
}

/**
 * msc_buffer_alloc() - allocate a buffer for MSC
 * @msc:	MSC device
 * @nr_pages:	array of window sizes, in pages
 * @nr_wins:	number of elements in @nr_pages
 *
 * Allocate a storage buffer for MSC, depending on the msc::mode: via
 * msc_buffer_contig_alloc() for SINGLE operation mode (where @nr_wins
 * must be 1) or via msc_buffer_multi_alloc() for multiblock operation,
 * which allocates one window per element of @nr_pages.
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 *
 * Return:	0 on success, -errno otherwise.
 */
static int msc_buffer_alloc(struct msc *msc, unsigned long *nr_pages,
			    unsigned int nr_wins)
{
	int ret;

	/* -1: buffer not allocated */
	if (atomic_read(&msc->user_count) != -1)
		return -EBUSY;

	if (msc->mode == MSC_MODE_SINGLE) {
		if (nr_wins != 1)
			return -EINVAL;

		ret = msc_buffer_contig_alloc(msc, nr_pages[0] << PAGE_SHIFT);
	} else if (msc->mode == MSC_MODE_MULTI) {
		ret = msc_buffer_multi_alloc(msc, nr_pages, nr_wins);
	} else {
		ret = -ENOTSUPP;
	}

	if (!ret) {
		/* allocation should be visible before the counter goes to 0 */
		smp_mb__before_atomic();

		if (WARN_ON_ONCE(atomic_cmpxchg(&msc->user_count, -1, 0) != -1))
			return -EINVAL;
	}

	return ret;
}

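/*
 * Editor's note (not in the original source): msc::user_count doubles as
 * an allocation flag and a reference count, which is why the transitions
 * above and below are done with cmpxchg:
 *
 *	-1 --buffer allocated--> 0 --reader/mmap/trace--> 1, 2, ...
 *	 0 --buffer freed------> -1
 *
 * atomic_inc_unless_negative() is what takes a reference: it succeeds
 * only while a buffer exists (count >= 0).
 */
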
/**
 * msc_buffer_unlocked_free_unless_used() - free a buffer unless it's in use
 * @msc:	MSC device
 *
 * This will free MSC buffer unless it is in use or there is no allocated
 * buffer.
 * Caller needs to hold msc::buf_mutex.
 *
 * Return:	0 on successful deallocation or if there was no buffer to
 *		deallocate, -EBUSY if there are active users.
 */
static int msc_buffer_unlocked_free_unless_used(struct msc *msc)
{
	int count, ret = 0;

	count = atomic_cmpxchg(&msc->user_count, 0, -1);

	/* > 0: buffer is allocated and has users */
	if (count > 0)
		ret = -EBUSY;
	/* 0: buffer is allocated, no users */
	else if (!count)
		msc_buffer_free(msc);
	/* < 0: no buffer, nothing to do */

	return ret;
}

/**
 * msc_buffer_free_unless_used() - free a buffer unless it's in use
 * @msc:	MSC device
 *
 * This is a locked version of msc_buffer_unlocked_free_unless_used().
 */
static int msc_buffer_free_unless_used(struct msc *msc)
{
	int ret;

	mutex_lock(&msc->buf_mutex);
	ret = msc_buffer_unlocked_free_unless_used(msc);
	mutex_unlock(&msc->buf_mutex);

	return ret;
}

/**
 * msc_buffer_get_page() - get MSC buffer page at a given offset
 * @msc:	MSC device
 * @pgoff:	page offset into the storage buffer
 *
 * This traverses msc::win_list, so holding msc::buf_mutex is expected from
 * the caller.
 *
 * Return:	page if @pgoff corresponds to a valid buffer page or NULL.
 */
static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff)
{
	struct msc_window *win;
	unsigned int blk;

	if (msc->mode == MSC_MODE_SINGLE)
		return msc_buffer_contig_get_page(msc, pgoff);

	list_for_each_entry(win, &msc->win_list, entry)
		if (pgoff >= win->pgoff && pgoff < win->pgoff + win->nr_blocks)
			goto found;

	return NULL;

found:
	pgoff -= win->pgoff;

	for (blk = 0; blk < win->nr_segs; blk++) {
		struct page *page = sg_page(&win->sgt->sgl[blk]);
		size_t pgsz = PFN_DOWN(msc_win_actual_bsz(win, blk));

		if (pgoff < pgsz)
			return page + pgoff;

		pgoff -= pgsz;
	}

	return NULL;
}

/**
 * struct msc_win_to_user_struct - data for copy_to_user() callback
 * @buf:	userspace buffer to copy data to
 * @offset:	running offset
 */
struct msc_win_to_user_struct {
	char __user	*buf;
	unsigned long	offset;
};

/**
 * msc_win_to_user() - iterator for msc_buffer_iterate() to copy data to user
 * @data:	callback's private data
 * @src:	source buffer
 * @len:	amount of data to copy from the source buffer
 *
 * Return:	number of bytes that could not be copied, as per
 *		copy_to_user().
 */
static unsigned long msc_win_to_user(void *data, void *src, size_t len)
{
	struct msc_win_to_user_struct *u = data;
	unsigned long ret;

	ret = copy_to_user(u->buf + u->offset, src, len);
	u->offset += len - ret;

	return ret;
}

/*
 * file operations' callbacks
 */

static int intel_th_msc_open(struct inode *inode, struct file *file)
{
	struct intel_th_device *thdev = file->private_data;
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	struct msc_iter *iter;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	iter = msc_iter_install(msc);
	if (IS_ERR(iter))
		return PTR_ERR(iter);

	file->private_data = iter;

	return nonseekable_open(inode, file);
}

static int intel_th_msc_release(struct inode *inode, struct file *file)
{
	struct msc_iter *iter = file->private_data;
	struct msc *msc = iter->msc;

	msc_iter_remove(iter, msc);

	return 0;
}

static ssize_t
msc_single_to_user(struct msc *msc, char __user *buf, loff_t off, size_t len)
{
	unsigned long size = msc->nr_pages << PAGE_SHIFT, rem = len;
	unsigned long start = off, tocopy = 0;

	if (msc->single_wrap) {
		start += msc->single_sz;
		if (start < size) {
			tocopy = min(rem, size - start);
			if (copy_to_user(buf, msc->base + start, tocopy))
				return -EFAULT;

			buf += tocopy;
			rem -= tocopy;
			start += tocopy;
		}

		start &= size - 1;
		if (rem) {
			tocopy = min(rem, msc->single_sz - start);
			if (copy_to_user(buf, msc->base + start, tocopy))
				return -EFAULT;

			rem -= tocopy;
		}

		return len - rem;
	}

	if (copy_to_user(buf, msc->base + start, rem))
		return -EFAULT;

	return len;
}

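/*
 * Editor's worked example (not in the original source): with a 16-page
 * (64kB) buffer, single_wrap set and single_sz == 0x1000, the oldest
 * data starts at offset 0x1000 in the buffer. A full read at off == 0
 * is stitched together from two copies:
 *
 *	copy 1: msc->base + 0x1000, length 0xf000 (older data up to the end)
 *	copy 2: msc->base + 0x0,    length 0x1000 (newest data at the start)
 *
 * The "start &= size - 1" wrap-around assumes the total buffer size is
 * a power of two.
 */
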
static ssize_t intel_th_msc_read(struct file *file, char __user *buf,
				 size_t len, loff_t *ppos)
{
	struct msc_iter *iter = file->private_data;
	struct msc *msc = iter->msc;
	size_t size;
	loff_t off = *ppos;
	ssize_t ret = 0;

	if (!atomic_inc_unless_negative(&msc->user_count))
		return 0;

	if (msc->mode == MSC_MODE_SINGLE && !msc->single_wrap)
		size = msc->single_sz;
	else
		size = msc->nr_pages << PAGE_SHIFT;

	if (!size)
		goto put_count;

	if (off >= size)
		goto put_count;

	if (off + len >= size)
		len = size - off;

	if (msc->mode == MSC_MODE_SINGLE) {
		ret = msc_single_to_user(msc, buf, off, len);
		if (ret >= 0)
			*ppos += ret;
	} else if (msc->mode == MSC_MODE_MULTI) {
		struct msc_win_to_user_struct u = {
			.buf	= buf,
			.offset	= 0,
		};

		ret = msc_buffer_iterate(iter, len, &u, msc_win_to_user);
		if (ret >= 0)
			*ppos = iter->offset;
	} else {
		ret = -ENOTSUPP;
	}

put_count:
	atomic_dec(&msc->user_count);

	return ret;
}

/*
 * vm operations callbacks (vm_ops)
 */

static void msc_mmap_open(struct vm_area_struct *vma)
{
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;

	atomic_inc(&msc->mmap_count);
}

static void msc_mmap_close(struct vm_area_struct *vma)
{
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;
	unsigned long pg;

	if (!atomic_dec_and_mutex_lock(&msc->mmap_count, &msc->buf_mutex))
		return;

	/* drop page _refcounts */
	for (pg = 0; pg < msc->nr_pages; pg++) {
		struct page *page = msc_buffer_get_page(msc, pg);

		if (WARN_ON_ONCE(!page))
			continue;

		if (page->mapping)
			page->mapping = NULL;
	}

	/* last mapping -- drop user_count */
	atomic_dec(&msc->user_count);
	mutex_unlock(&msc->buf_mutex);
}

static vm_fault_t msc_mmap_fault(struct vm_fault *vmf)
{
	struct msc_iter *iter = vmf->vma->vm_file->private_data;
	struct msc *msc = iter->msc;

	vmf->page = msc_buffer_get_page(msc, vmf->pgoff);
	if (!vmf->page)
		return VM_FAULT_SIGBUS;

	get_page(vmf->page);
	vmf->page->mapping = vmf->vma->vm_file->f_mapping;
	vmf->page->index = vmf->pgoff;

	return 0;
}

static const struct vm_operations_struct msc_mmap_ops = {
	.open	= msc_mmap_open,
	.close	= msc_mmap_close,
	.fault	= msc_mmap_fault,
};

static int intel_th_msc_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;
	int ret = -EINVAL;

	if (!size || offset_in_page(size))
		return -EINVAL;

	if (vma->vm_pgoff)
		return -EINVAL;

	/* grab user_count once per mmap; drop in msc_mmap_close() */
	if (!atomic_inc_unless_negative(&msc->user_count))
		return -EINVAL;

	if (msc->mode != MSC_MODE_SINGLE &&
	    msc->mode != MSC_MODE_MULTI)
		goto out;

	if (size >> PAGE_SHIFT != msc->nr_pages)
		goto out;

	atomic_set(&msc->mmap_count, 1);
	ret = 0;

out:
	if (ret)
		atomic_dec(&msc->user_count);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTCOPY;
	vma->vm_ops = &msc_mmap_ops;
	return ret;
}

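/*
 * Editor's sketch of the userspace side (not in the original source;
 * the device node name depends on the TH instance and is assumed here).
 * The whole buffer must be mapped at offset 0:
 *
 *	int fd = open("/dev/intel_th0-msc0", O_RDONLY);
 *	void *buf = mmap(NULL, nr_pages * 4096, PROT_READ,
 *			 MAP_SHARED, fd, 0);
 *
 * Alternatively, plain read() on the same node serializes the trace
 * data through msc_single_to_user()/msc_buffer_iterate().
 */
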
static const struct file_operations intel_th_msc_fops = {
	.open		= intel_th_msc_open,
	.release	= intel_th_msc_release,
	.read		= intel_th_msc_read,
	.mmap		= intel_th_msc_mmap,
	.llseek		= no_llseek,
	.owner		= THIS_MODULE,
};

static void intel_th_msc_wait_empty(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	unsigned long count;
	u32 reg;

	for (reg = 0, count = MSC_PLE_WAITLOOP_DEPTH;
	     count && !(reg & MSCSTS_PLE); count--) {
		reg = __raw_readl(msc->reg_base + REG_MSU_MSC0STS);
		cpu_relax();
	}

	if (!count)
		dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n");
}

static int intel_th_msc_init(struct msc *msc)
{
	atomic_set(&msc->user_count, -1);

	msc->mode = MSC_MODE_MULTI;
	mutex_init(&msc->buf_mutex);
	INIT_LIST_HEAD(&msc->win_list);
	INIT_LIST_HEAD(&msc->iter_list);

	msc->burst_len =
		(ioread32(msc->reg_base + REG_MSU_MSC0CTL) & MSC_LEN) >>
		__ffs(MSC_LEN);

	return 0;
}

static void msc_win_switch(struct msc *msc)
{
	struct msc_window *first;

	first = list_first_entry(&msc->win_list, struct msc_window, entry);

	if (msc_is_last_win(msc->cur_win))
		msc->cur_win = first;
	else
		msc->cur_win = list_next_entry(msc->cur_win, entry);

	msc->base = msc_win_block(msc->cur_win, 0);
	msc->base_addr = msc_win_baddr(msc->cur_win, 0);

	intel_th_trace_switch(msc->thdev);
}

static irqreturn_t intel_th_msc_interrupt(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	u32 msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
	u32 mask = msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;

	if (!(msusts & mask)) {
		if (msc->enabled)
			return IRQ_HANDLED;
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}

static const char * const msc_mode[] = {
	[MSC_MODE_SINGLE]	= "single",
	[MSC_MODE_MULTI]	= "multi",
	[MSC_MODE_EXI]		= "ExI",
	[MSC_MODE_DEBUG]	= "debug",
};

static ssize_t
wrap_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n", msc->wrap);
}

static ssize_t
wrap_store(struct device *dev, struct device_attribute *attr, const char *buf,
	   size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;

	msc->wrap = !!val;

	return size;
}

static DEVICE_ATTR_RW(wrap);

static ssize_t
mode_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n", msc_mode[msc->mode]);
}

static ssize_t
mode_store(struct device *dev, struct device_attribute *attr, const char *buf,
	   size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	size_t len = size;
	char *cp;
	int i, ret;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	cp = memchr(buf, '\n', len);
	if (cp)
		len = cp - buf;

	/* require an exact match, so that "s" doesn't select "single" */
	for (i = 0; i < ARRAY_SIZE(msc_mode); i++)
		if (len == strlen(msc_mode[i]) &&
		    !strncmp(msc_mode[i], buf, len))
			goto found;

	return -EINVAL;

found:
	mutex_lock(&msc->buf_mutex);
	ret = msc_buffer_unlocked_free_unless_used(msc);
	if (!ret)
		msc->mode = i;
	mutex_unlock(&msc->buf_mutex);

	return ret ? ret : size;
}

static DEVICE_ATTR_RW(mode);

static ssize_t
nr_pages_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);
	struct msc_window *win;
	size_t count = 0;

	mutex_lock(&msc->buf_mutex);

	if (msc->mode == MSC_MODE_SINGLE)
		count = scnprintf(buf, PAGE_SIZE, "%lu\n", msc->nr_pages);
	else if (msc->mode == MSC_MODE_MULTI) {
		list_for_each_entry(win, &msc->win_list, entry) {
			count += scnprintf(buf + count, PAGE_SIZE - count,
					   "%u%c", win->nr_blocks,
					   msc_is_last_win(win) ? '\n' : ',');
		}
	} else {
		count = scnprintf(buf, PAGE_SIZE, "unsupported\n");
	}

	mutex_unlock(&msc->buf_mutex);

	return count;
}

static ssize_t
nr_pages_store(struct device *dev, struct device_attribute *attr,
	       const char *buf, size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val, *win = NULL, *rewin;
	size_t len = size;
	const char *p = buf;
	char *end, *s;
	int ret, nr_wins = 0;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	ret = msc_buffer_free_unless_used(msc);
	if (ret)
		return ret;

	/* scan the comma-separated list of allocation sizes */
	end = memchr(buf, '\n', len);
	if (end)
		len = end - buf;

	do {
		end = memchr(p, ',', len);
		s = kstrndup(p, end ? end - p : len, GFP_KERNEL);
		if (!s) {
			ret = -ENOMEM;
			goto free_win;
		}

		ret = kstrtoul(s, 10, &val);
		kfree(s);

		if (ret)
			goto free_win;

		/* a zero-sized window is not a valid allocation */
		if (!val) {
			ret = -EINVAL;
			goto free_win;
		}

		if (nr_wins && msc->mode == MSC_MODE_SINGLE) {
			ret = -EINVAL;
			goto free_win;
		}

		nr_wins++;
		rewin = krealloc(win, sizeof(*win) * nr_wins, GFP_KERNEL);
		if (!rewin) {
			kfree(win);
			return -ENOMEM;
		}

		win = rewin;
		win[nr_wins - 1] = val;

		if (!end)
			break;

		/* consume the number and the following comma, hence +1 */
		len -= end - p + 1;
		p = end + 1;
	} while (len);

	mutex_lock(&msc->buf_mutex);
	ret = msc_buffer_alloc(msc, win, nr_wins);
	mutex_unlock(&msc->buf_mutex);

free_win:
	kfree(win);

	return ret ? ret : size;
}

static DEVICE_ATTR_RW(nr_pages);

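/*
 * Editor's usage sketch (not in the original source; sysfs paths follow
 * the examples in Documentation/trace/intel_th.rst). Configuring a
 * two-window multiblock buffer from a shell:
 *
 *	# echo multi > /sys/bus/intel_th/devices/0-msc0/mode
 *	# echo 64,64 > /sys/bus/intel_th/devices/0-msc0/nr_pages
 *
 * Reading nr_pages back then gives "64,64"; in single mode it is one
 * number.
 */
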
static ssize_t
win_switch_store(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;

	if (val != 1)
		return -EINVAL;

	mutex_lock(&msc->buf_mutex);
	if (msc->mode != MSC_MODE_MULTI)
		ret = -ENOTSUPP;
	else
		msc_win_switch(msc);
	mutex_unlock(&msc->buf_mutex);

	return ret ? ret : size;
}

static DEVICE_ATTR_WO(win_switch);

static struct attribute *msc_output_attrs[] = {
	&dev_attr_wrap.attr,
	&dev_attr_mode.attr,
	&dev_attr_nr_pages.attr,
	&dev_attr_win_switch.attr,
	NULL,
};

static struct attribute_group msc_output_group = {
	.attrs	= msc_output_attrs,
};

static int intel_th_msc_probe(struct intel_th_device *thdev)
{
	struct device *dev = &thdev->dev;
	struct resource *res;
	struct msc *msc;
	void __iomem *base;
	int err;

	res = intel_th_device_get_resource(thdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	base = devm_ioremap(dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	msc = devm_kzalloc(dev, sizeof(*msc), GFP_KERNEL);
	if (!msc)
		return -ENOMEM;

	res = intel_th_device_get_resource(thdev, IORESOURCE_IRQ, 1);
	if (!res)
		msc->do_irq = 1;

	msc->index = thdev->id;

	msc->thdev = thdev;
	msc->reg_base = base + msc->index * 0x100;
	msc->msu_base = base;

	err = intel_th_msu_init(msc);
	if (err)
		return err;

	err = intel_th_msc_init(msc);
	if (err)
		return err;

	dev_set_drvdata(dev, msc);

	return 0;
}

static void intel_th_msc_remove(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	int ret;

	intel_th_msc_deactivate(thdev);
	intel_th_msu_deinit(msc);

	/*
	 * Buffers should not be used at this point except if the
	 * output character device is still open and the parent
	 * device gets detached from its bus, which is a FIXME.
	 */
	ret = msc_buffer_free_unless_used(msc);
	WARN_ON_ONCE(ret);
}

static struct intel_th_driver intel_th_msc_driver = {
	.probe		= intel_th_msc_probe,
	.remove		= intel_th_msc_remove,
	.irq		= intel_th_msc_interrupt,
	.wait_empty	= intel_th_msc_wait_empty,
	.activate	= intel_th_msc_activate,
	.deactivate	= intel_th_msc_deactivate,
	.fops		= &intel_th_msc_fops,
	.attr_group	= &msc_output_group,
	.driver	= {
		.name	= "msc",
		.owner	= THIS_MODULE,
	},
};

module_driver(intel_th_msc_driver,
	      intel_th_driver_register,
	      intel_th_driver_unregister);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel(R) Trace Hub Memory Storage Unit driver");
MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");