xref: /linux/drivers/hid/intel-ish-hid/ishtp/client.c (revision 021bc4b9)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * ISHTP client logic
4  *
5  * Copyright (c) 2003-2016, Intel Corporation.
6  */
7 
8 #include <linux/slab.h>
9 #include <linux/sched.h>
10 #include <linux/wait.h>
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <asm/cacheflush.h>
14 #include "hbm.h"
15 #include "client.h"
16 
17 int ishtp_cl_get_tx_free_buffer_size(struct ishtp_cl *cl)
18 {
19 	unsigned long tx_free_flags;
20 	int size;
21 
22 	spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
23 	size = cl->tx_ring_free_size * cl->device->fw_client->props.max_msg_length;
24 	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
25 
26 	return size;
27 }
28 EXPORT_SYMBOL(ishtp_cl_get_tx_free_buffer_size);
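/*
 * Illustrative arithmetic (hypothetical values, not from this driver): with
 * 16 free tx ring entries and a firmware client max_msg_length of 4960
 * bytes, this reports 16 * 4960 = 79360 bytes of free TX buffer space.
 */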
29 
30 int ishtp_cl_get_tx_free_rings(struct ishtp_cl *cl)
31 {
32 	return cl->tx_ring_free_size;
33 }
34 EXPORT_SYMBOL(ishtp_cl_get_tx_free_rings);
35 
36 /**
37  * ishtp_read_list_flush() - Flush read queue
38  * @cl: ishtp client instance
39  *
40  * Used to remove all entries from the read queue for a client
41  */
42 static void ishtp_read_list_flush(struct ishtp_cl *cl)
43 {
44 	struct ishtp_cl_rb *rb;
45 	struct ishtp_cl_rb *next;
46 	unsigned long	flags;
47 
48 	spin_lock_irqsave(&cl->dev->read_list_spinlock, flags);
49 	list_for_each_entry_safe(rb, next, &cl->dev->read_list.list, list)
50 		if (rb->cl && ishtp_cl_cmp_id(cl, rb->cl)) {
51 			list_del(&rb->list);
52 			ishtp_io_rb_free(rb);
53 		}
54 	spin_unlock_irqrestore(&cl->dev->read_list_spinlock, flags);
55 }
56 
57 /**
58  * ishtp_cl_flush_queues() - Flush all queues for a client
59  * @cl: ishtp client instance
60  *
61  * Used to flush all queues for a client. This is called when a client device
62  * needs to be reset due to an error, S3 resume or module removal
63  *
64  * Return: 0 on success else -EINVAL if device is NULL
65  */
66 int ishtp_cl_flush_queues(struct ishtp_cl *cl)
67 {
68 	if (WARN_ON(!cl || !cl->dev))
69 		return -EINVAL;
70 
71 	ishtp_read_list_flush(cl);
72 
73 	return 0;
74 }
75 EXPORT_SYMBOL(ishtp_cl_flush_queues);
76 
77 /**
78  * ishtp_cl_init() - Initialize all fields of a client device
79  * @cl: ishtp client instance
80  * @dev: ishtp device
81  *
82  * Initializes the client device fields: spinlocks, queues, etc.
83  * This function is called during new client creation
84  */
85 static void ishtp_cl_init(struct ishtp_cl *cl, struct ishtp_device *dev)
86 {
87 	memset(cl, 0, sizeof(struct ishtp_cl));
88 	init_waitqueue_head(&cl->wait_ctrl_res);
89 	spin_lock_init(&cl->free_list_spinlock);
90 	spin_lock_init(&cl->in_process_spinlock);
91 	spin_lock_init(&cl->tx_list_spinlock);
92 	spin_lock_init(&cl->tx_free_list_spinlock);
93 	spin_lock_init(&cl->fc_spinlock);
94 	INIT_LIST_HEAD(&cl->link);
95 	cl->dev = dev;
96 
97 	INIT_LIST_HEAD(&cl->free_rb_list.list);
98 	INIT_LIST_HEAD(&cl->tx_list.list);
99 	INIT_LIST_HEAD(&cl->tx_free_list.list);
100 	INIT_LIST_HEAD(&cl->in_process_list.list);
101 
102 	cl->rx_ring_size = CL_DEF_RX_RING_SIZE;
103 	cl->tx_ring_size = CL_DEF_TX_RING_SIZE;
104 	cl->tx_ring_free_size = cl->tx_ring_size;
105 
106 	/* dma */
107 	cl->last_tx_path = CL_TX_PATH_IPC;
108 	cl->last_dma_acked = 1;
109 	cl->last_dma_addr = NULL;
110 	cl->last_ipc_acked = 1;
111 }
112 
113 /**
114  * ishtp_cl_allocate() - allocates client structure and sets it up.
115  * @cl_device: ishtp client device
116  *
117  * Allocate memory for a new client device and initialize all of its fields.
118  *
119  * Return: The allocated client instance or NULL on failure
120  */
121 struct ishtp_cl *ishtp_cl_allocate(struct ishtp_cl_device *cl_device)
122 {
123 	struct ishtp_cl *cl;
124 
125 	cl = kmalloc(sizeof(struct ishtp_cl), GFP_KERNEL);
126 	if (!cl)
127 		return NULL;
128 
129 	ishtp_cl_init(cl, cl_device->ishtp_dev);
130 	return cl;
131 }
132 EXPORT_SYMBOL(ishtp_cl_allocate);
133 
134 /**
135  * ishtp_cl_free() - Frees a client device
136  * @cl: client device instance
137  *
138  * Frees a client device
139  */
140 void	ishtp_cl_free(struct ishtp_cl *cl)
141 {
142 	struct ishtp_device *dev;
143 	unsigned long flags;
144 
145 	if (!cl)
146 		return;
147 
148 	dev = cl->dev;
149 	if (!dev)
150 		return;
151 
152 	spin_lock_irqsave(&dev->cl_list_lock, flags);
153 	ishtp_cl_free_rx_ring(cl);
154 	ishtp_cl_free_tx_ring(cl);
155 	kfree(cl);
156 	spin_unlock_irqrestore(&dev->cl_list_lock, flags);
157 }
158 EXPORT_SYMBOL(ishtp_cl_free);
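/*
 * Usage sketch (not part of this driver, placeholders only): a client driver
 * is expected to pair ishtp_cl_allocate() with ishtp_cl_free(). "cl_device"
 * below stands for the caller's struct ishtp_cl_device.
 *
 *	struct ishtp_cl *cl;
 *
 *	cl = ishtp_cl_allocate(cl_device);
 *	if (!cl)
 *		return -ENOMEM;
 *	...
 *	ishtp_cl_free(cl);	(on error paths and in remove())
 */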
159 
160 /**
161  * ishtp_cl_link() - Reserve a host id and link the client instance
162  * @cl: client device instance
163  *
164  * This allocates a single bit in the hostmap and makes sure that the number
165  * of simultaneously open client sessions stays within the limit. Once a host
166  * id is allocated, the client device instance is added to the ishtp device's
167  * current client list
168  *
169  * Return: 0 or error code on failure
170  */
171 int ishtp_cl_link(struct ishtp_cl *cl)
172 {
173 	struct ishtp_device *dev;
174 	unsigned long flags, flags_cl;
175 	int id, ret = 0;
176 
177 	if (WARN_ON(!cl || !cl->dev))
178 		return -EINVAL;
179 
180 	dev = cl->dev;
181 
182 	spin_lock_irqsave(&dev->device_lock, flags);
183 
184 	if (dev->open_handle_count >= ISHTP_MAX_OPEN_HANDLE_COUNT) {
185 		ret = -EMFILE;
186 		goto unlock_dev;
187 	}
188 
189 	id = find_first_zero_bit(dev->host_clients_map, ISHTP_CLIENTS_MAX);
190 
191 	if (id >= ISHTP_CLIENTS_MAX) {
192 		spin_unlock_irqrestore(&dev->device_lock, flags);
193 		dev_err(&cl->device->dev, "id exceeded %d", ISHTP_CLIENTS_MAX);
194 		return -ENOENT;
195 	}
196 
197 	dev->open_handle_count++;
198 	cl->host_client_id = id;
199 	spin_lock_irqsave(&dev->cl_list_lock, flags_cl);
200 	if (dev->dev_state != ISHTP_DEV_ENABLED) {
201 		ret = -ENODEV;
202 		goto unlock_cl;
203 	}
204 	list_add_tail(&cl->link, &dev->cl_list);
205 	set_bit(id, dev->host_clients_map);
206 	cl->state = ISHTP_CL_INITIALIZING;
207 
208 unlock_cl:
209 	spin_unlock_irqrestore(&dev->cl_list_lock, flags_cl);
210 unlock_dev:
211 	spin_unlock_irqrestore(&dev->device_lock, flags);
212 	return ret;
213 }
214 EXPORT_SYMBOL(ishtp_cl_link);
215 
216 /**
217  * ishtp_cl_unlink() - remove fw_cl from the client device list
218  * @cl: client device instance
219  *
220  * Remove a previously linked client from the ishtp device's client list
221  */
222 void ishtp_cl_unlink(struct ishtp_cl *cl)
223 {
224 	struct ishtp_device *dev;
225 	struct ishtp_cl *pos;
226 	unsigned long	flags;
227 
228 	/* don't shout on error exit path */
229 	if (!cl || !cl->dev)
230 		return;
231 
232 	dev = cl->dev;
233 
234 	spin_lock_irqsave(&dev->device_lock, flags);
235 	if (dev->open_handle_count > 0) {
236 		clear_bit(cl->host_client_id, dev->host_clients_map);
237 		dev->open_handle_count--;
238 	}
239 	spin_unlock_irqrestore(&dev->device_lock, flags);
240 
241 	/*
242 	 * This checks that 'cl' is actually linked into device's structure,
243 	 * before attempting 'list_del'
244 	 */
245 	spin_lock_irqsave(&dev->cl_list_lock, flags);
246 	list_for_each_entry(pos, &dev->cl_list, link)
247 		if (cl->host_client_id == pos->host_client_id) {
248 			list_del_init(&pos->link);
249 			break;
250 		}
251 	spin_unlock_irqrestore(&dev->cl_list_lock, flags);
252 }
253 EXPORT_SYMBOL(ishtp_cl_unlink);
254 
255 /**
256  * ishtp_cl_disconnect() - Send disconnect request to firmware
257  * @cl: client device instance
258  *
259  * Send a disconnect request for a client to firmware.
260  *
261  * Return: 0 on a successful disconnect response from the firmware, or an
262  * error code on failure
263  */
264 int ishtp_cl_disconnect(struct ishtp_cl *cl)
265 {
266 	struct ishtp_device *dev;
267 
268 	if (WARN_ON(!cl || !cl->dev))
269 		return -ENODEV;
270 
271 	dev = cl->dev;
272 
273 	dev->print_log(dev, "%s() state %d\n", __func__, cl->state);
274 
275 	if (cl->state != ISHTP_CL_DISCONNECTING) {
276 		dev->print_log(dev, "%s() Disconnect in progress\n", __func__);
277 		return 0;
278 	}
279 
280 	if (ishtp_hbm_cl_disconnect_req(dev, cl)) {
281 		dev->print_log(dev, "%s() Failed to disconnect\n", __func__);
282 		dev_err(&cl->device->dev, "failed to disconnect.\n");
283 		return -ENODEV;
284 	}
285 
286 	wait_event_interruptible_timeout(cl->wait_ctrl_res,
287 			(dev->dev_state != ISHTP_DEV_ENABLED ||
288 			cl->state == ISHTP_CL_DISCONNECTED),
289 			ishtp_secs_to_jiffies(ISHTP_CL_CONNECT_TIMEOUT));
290 
291 	/*
292 	 * If an FW reset arrived, this can happen. Don't dereference 'cl'
293 	 * members here, as 'cl' may already be freed
294 	 */
295 	if (dev->dev_state != ISHTP_DEV_ENABLED) {
296 		dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n",
297 			       __func__);
298 		return -ENODEV;
299 	}
300 
301 	if (cl->state == ISHTP_CL_DISCONNECTED) {
302 		dev->print_log(dev, "%s() successful\n", __func__);
303 		return 0;
304 	}
305 
306 	return -ENODEV;
307 }
308 EXPORT_SYMBOL(ishtp_cl_disconnect);
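/*
 * Usage sketch (not part of this driver): callers normally move the client
 * to ISHTP_CL_DISCONNECTING before calling ishtp_cl_disconnect(), exactly as
 * ishtp_cl_destroy_connection() does below; otherwise the call is a no-op
 * that returns 0.
 *
 *	ishtp_set_connection_state(cl, ISHTP_CL_DISCONNECTING);
 *	ishtp_cl_disconnect(cl);
 */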
309 
310 /**
311  * ishtp_cl_is_other_connecting() - Check if another client is connecting
312  * @cl: client device instance
313  *
314  * Checks if another client with the same fw client id is currently connecting
315  *
316  * Return: true if another client is connecting, else false
317  */
318 static bool ishtp_cl_is_other_connecting(struct ishtp_cl *cl)
319 {
320 	struct ishtp_device *dev;
321 	struct ishtp_cl *pos;
322 	unsigned long	flags;
323 
324 	if (WARN_ON(!cl || !cl->dev))
325 		return false;
326 
327 	dev = cl->dev;
328 	spin_lock_irqsave(&dev->cl_list_lock, flags);
329 	list_for_each_entry(pos, &dev->cl_list, link) {
330 		if ((pos->state == ISHTP_CL_CONNECTING) && (pos != cl) &&
331 				cl->fw_client_id == pos->fw_client_id) {
332 			spin_unlock_irqrestore(&dev->cl_list_lock, flags);
333 			return true;
334 		}
335 	}
336 	spin_unlock_irqrestore(&dev->cl_list_lock, flags);
337 
338 	return false;
339 }
340 
341 /**
342  * ishtp_cl_connect_to_fw() - Send connect request to firmware
343  * @cl: client device instance
344  *
345  * Send a connect request to the firmware and wait for the firmware
346  * response. On a successful connection response from the firmware, change
347  * the client state to ISHTP_CL_CONNECTED and bind the client to the related
348  * firmware client_id.
349  *
350  * Return: 0 for success and error code on failure
351  */
352 static int ishtp_cl_connect_to_fw(struct ishtp_cl *cl)
353 {
354 	struct ishtp_device *dev;
355 	int rets;
356 
357 	if (WARN_ON(!cl || !cl->dev))
358 		return -ENODEV;
359 
360 	dev = cl->dev;
361 
362 	if (ishtp_cl_is_other_connecting(cl)) {
363 		dev->print_log(dev, "%s() Busy\n", __func__);
364 		return	-EBUSY;
365 	}
366 
367 	if (ishtp_hbm_cl_connect_req(dev, cl)) {
368 		dev->print_log(dev, "%s() HBM connect req fail\n", __func__);
369 		return -ENODEV;
370 	}
371 
372 	rets = wait_event_interruptible_timeout(cl->wait_ctrl_res,
373 				(dev->dev_state == ISHTP_DEV_ENABLED &&
374 				(cl->state == ISHTP_CL_CONNECTED ||
375 				 cl->state == ISHTP_CL_DISCONNECTED)),
376 				ishtp_secs_to_jiffies(
377 					ISHTP_CL_CONNECT_TIMEOUT));
378 	/*
379 	 * If an FW reset arrived, this can happen. Don't dereference 'cl'
380 	 * members here, as 'cl' may already be freed
381 	 */
382 	if (dev->dev_state != ISHTP_DEV_ENABLED) {
383 		dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n",
384 			       __func__);
385 		return -EFAULT;
386 	}
387 
388 	if (cl->state != ISHTP_CL_CONNECTED) {
389 		dev->print_log(dev, "%s() state != ISHTP_CL_CONNECTED\n",
390 			       __func__);
391 		return -EFAULT;
392 	}
393 
394 	rets = cl->status;
395 	if (rets) {
396 		dev->print_log(dev, "%s() Invalid status\n", __func__);
397 		return rets;
398 	}
399 
400 	rets = ishtp_cl_device_bind(cl);
401 	if (rets) {
402 		dev->print_log(dev, "%s() Bind error\n", __func__);
403 		ishtp_cl_disconnect(cl);
404 		return rets;
405 	}
406 
407 	return rets;
408 }
409 
410 /**
411  * ishtp_cl_connect() - Build connection with firmware
412  * @cl: client device instance
413  *
414  * Call ishtp_cl_connect_to_fw() to connect and bind to the firmware. If
415  * successful, allocate the RX and TX ring buffers and start flow control
416  * with the firmware to begin communication.
417  *
418  * Return: 0 on successful connection and ring buffer allocation, or an
419  * error code on failure.
420  */
421 int ishtp_cl_connect(struct ishtp_cl *cl)
422 {
423 	struct ishtp_device *dev;
424 	int rets;
425 
426 	if (!cl || !cl->dev)
427 		return -ENODEV;
428 
429 	dev = cl->dev;
430 
431 	dev->print_log(dev, "%s() current_state = %d\n", __func__, cl->state);
432 
433 	rets = ishtp_cl_connect_to_fw(cl);
434 	if (rets) {
435 		dev->print_log(dev, "%s() Connect to fw failed\n", __func__);
436 		return rets;
437 	}
438 
439 	rets = ishtp_cl_alloc_rx_ring(cl);
440 	if (rets) {
441 		dev->print_log(dev, "%s() Alloc RX ring failed\n", __func__);
442 		/* if failed allocation, disconnect */
443 		ishtp_cl_disconnect(cl);
444 		return rets;
445 	}
446 
447 	rets = ishtp_cl_alloc_tx_ring(cl);
448 	if (rets) {
449 		dev->print_log(dev, "%s() Alloc TX ring failed\n", __func__);
450 		/* if failed allocation, disconnect */
451 		ishtp_cl_free_rx_ring(cl);
452 		ishtp_cl_disconnect(cl);
453 		return rets;
454 	}
455 
456 	/*
457 	 * Upon successful connection and allocation, start flow-control.
458 	 */
459 	rets = ishtp_cl_read_start(cl);
460 
461 	return rets;
462 }
463 EXPORT_SYMBOL(ishtp_cl_connect);
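/*
 * Usage sketch (not part of this driver): the manual connect sequence a
 * client driver would follow before calling ishtp_cl_connect(); this is
 * essentially what ishtp_cl_establish_connection() below automates.
 * "my_uuid" is a hypothetical client uuid owned by the caller.
 *
 *	struct ishtp_fw_client *fw_client;
 *	int rv;
 *
 *	rv = ishtp_cl_link(cl);
 *	if (rv)
 *		return rv;
 *	fw_client = ishtp_fw_cl_get_client(ishtp_get_ishtp_device(cl), my_uuid);
 *	if (!fw_client)
 *		return -ENOENT;
 *	ishtp_cl_set_fw_client_id(cl, ishtp_get_fw_client_id(fw_client));
 *	ishtp_set_connection_state(cl, ISHTP_CL_CONNECTING);
 *	rv = ishtp_cl_connect(cl);
 */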
464 
465 /**
466  * ishtp_cl_establish_connection() - Establish connection with the firmware
467  * @cl: client device instance
468  * @uuid: uuid of the client to search
469  * @tx_size: TX ring buffer size
470  * @rx_size: RX ring buffer size
471  * @reset: true if called for reset connection, otherwise for first connection
472  *
473  * This is a helper function for a client driver to build a connection with
474  * the firmware. When connecting for the first time, set reset to false; this
475  * function will link the client to the bus, find the client id and send a
476  * connect request to the firmware.
477  *
478  * When called from a reset handler, where the client lost its connection
479  * after a firmware reset, set reset to true; this function will reinit the
480  * client state and establish the connection again. In this case it reuses the
481  * current client structure and ring buffers to avoid allocation failures and fragmentation.
482  *
483  * Return: 0 for successful connection with the firmware,
484  * or error code on failure
485  */
486 int ishtp_cl_establish_connection(struct ishtp_cl *cl, const guid_t *uuid,
487 				  int tx_size, int rx_size, bool reset)
488 {
489 	struct ishtp_device *dev;
490 	struct ishtp_fw_client *fw_client;
491 	int rets;
492 
493 	if (!cl || !cl->dev)
494 		return -ENODEV;
495 
496 	dev = cl->dev;
497 
498 	ishtp_set_connection_state(cl, ISHTP_CL_INITIALIZING);
499 
500 	/* reinit the ishtp_cl structure if called for reset */
501 	if (reset) {
502 		cl->host_client_id = 0;
503 		cl->fw_client_id = 0;
504 		cl->ishtp_flow_ctrl_creds = 0;
505 		cl->out_flow_ctrl_creds = 0;
506 
507 		cl->last_tx_path = CL_TX_PATH_IPC;
508 		cl->last_dma_acked = 1;
509 		cl->last_dma_addr = NULL;
510 		cl->last_ipc_acked = 1;
511 
512 		cl->sending = 0;
513 		cl->err_send_msg = 0;
514 		cl->err_send_fc = 0;
515 
516 		cl->send_msg_cnt_ipc = 0;
517 		cl->send_msg_cnt_dma = 0;
518 		cl->recv_msg_cnt_ipc = 0;
519 		cl->recv_msg_cnt_dma = 0;
520 		cl->recv_msg_num_frags = 0;
521 		cl->ishtp_flow_ctrl_cnt = 0;
522 		cl->out_flow_ctrl_cnt = 0;
523 	}
524 
525 	/* link to bus */
526 	rets = ishtp_cl_link(cl);
527 	if (rets) {
528 		dev->print_log(dev, "%s() ishtp_cl_link failed\n", __func__);
529 		return rets;
530 	}
531 
532 	/* find firmware client */
533 	fw_client = ishtp_fw_cl_get_client(dev, uuid);
534 	if (!fw_client) {
535 		dev->print_log(dev,
536 			       "%s() ish client uuid not found\n", __func__);
537 		return -ENOENT;
538 	}
539 
540 	ishtp_set_tx_ring_size(cl, tx_size);
541 	ishtp_set_rx_ring_size(cl, rx_size);
542 
543 	ishtp_cl_set_fw_client_id(cl, ishtp_get_fw_client_id(fw_client));
544 
545 	ishtp_set_connection_state(cl, ISHTP_CL_CONNECTING);
546 
547 	/*
548 	 * For the reset case, don't allocate the tx/rx ring buffers; they were
549 	 * already allocated by ishtp_cl_connect() during the first connection.
550 	 */
551 	if (reset) {
552 		rets = ishtp_cl_connect_to_fw(cl);
553 		if (!rets)
554 			rets = ishtp_cl_read_start(cl);
555 		else
556 			dev->print_log(dev,
557 				"%s() connect to fw failed\n", __func__);
558 	} else {
559 		rets = ishtp_cl_connect(cl);
560 	}
561 
562 	return rets;
563 }
564 EXPORT_SYMBOL(ishtp_cl_establish_connection);
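/*
 * Usage sketch (not part of this driver): a client driver typically calls
 * this helper from its probe path with reset == false and from its
 * firmware-reset handler with reset == true. "my_uuid", "TX_SIZE" and
 * "RX_SIZE" are hypothetical placeholders.
 *
 *	probe:
 *	rv = ishtp_cl_establish_connection(cl, my_uuid, TX_SIZE, RX_SIZE, false);
 *
 *	reset handler:
 *	ishtp_cl_destroy_connection(cl, true);
 *	rv = ishtp_cl_establish_connection(cl, my_uuid, TX_SIZE, RX_SIZE, true);
 */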
565 
566 /**
567  * ishtp_cl_destroy_connection() - Disconnect with the firmware
568  * @cl: client device instance
569  * @reset: true if called for firmware reset, false for normal disconnection
570  *
571  * This is a helper function for a client driver to disconnect from the
572  * firmware, unlink from the bus and flush the message queues.
573  */
574 void ishtp_cl_destroy_connection(struct ishtp_cl *cl, bool reset)
575 {
576 	if (!cl)
577 		return;
578 
579 	if (reset) {
580 		/*
581 		 * For the reset case, the connection was already lost during the
582 		 * fw reset. Setting the state to DISCONNECTED is enough.
583 		 */
584 		ishtp_set_connection_state(cl, ISHTP_CL_DISCONNECTED);
585 	} else {
586 		if (cl->state != ISHTP_CL_DISCONNECTED) {
587 			ishtp_set_connection_state(cl, ISHTP_CL_DISCONNECTING);
588 			ishtp_cl_disconnect(cl);
589 		}
590 	}
591 
592 	ishtp_cl_unlink(cl);
593 	ishtp_cl_flush_queues(cl);
594 }
595 EXPORT_SYMBOL(ishtp_cl_destroy_connection);
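/*
 * Usage sketch (not part of this driver): on normal driver removal the
 * expected teardown order is to destroy the connection first and only then
 * free the client structure.
 *
 *	ishtp_cl_destroy_connection(cl, false);
 *	ishtp_cl_free(cl);
 */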
596 
597 /**
598  * ishtp_cl_read_start() - Prepare to read client message
599  * @cl: client device instance
600  *
601  * Get a free buffer from the pool of free read buffers and add it to the
602  * device read list to receive contents. Send a flow control request to the
603  * firmware so that it can send the next message.
604  *
605  * Return: 0 if successful or error code on failure
606  */
607 int ishtp_cl_read_start(struct ishtp_cl *cl)
608 {
609 	struct ishtp_device *dev;
610 	struct ishtp_cl_rb *rb;
611 	int rets;
612 	int i;
613 	unsigned long	flags;
614 	unsigned long	dev_flags;
615 
616 	if (WARN_ON(!cl || !cl->dev))
617 		return -ENODEV;
618 
619 	dev = cl->dev;
620 
621 	if (cl->state != ISHTP_CL_CONNECTED)
622 		return -ENODEV;
623 
624 	if (dev->dev_state != ISHTP_DEV_ENABLED)
625 		return -ENODEV;
626 
627 	i = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
628 	if (i < 0) {
629 		dev_err(&cl->device->dev, "no such fw client %d\n",
630 			cl->fw_client_id);
631 		return -ENODEV;
632 	}
633 
634 	/* The current rb is the head of the free rb list */
635 	spin_lock_irqsave(&cl->free_list_spinlock, flags);
636 	if (list_empty(&cl->free_rb_list.list)) {
637 		dev_warn(&cl->device->dev,
638 			 "[ishtp-ish] Rx buffers pool is empty\n");
639 		rets = -ENOMEM;
640 		rb = NULL;
641 		spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
642 		goto out;
643 	}
644 	rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb, list);
645 	list_del_init(&rb->list);
646 	spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
647 
648 	rb->cl = cl;
649 	rb->buf_idx = 0;
650 
651 	INIT_LIST_HEAD(&rb->list);
652 	rets = 0;
653 
654 	/*
655 	 * This must be BEFORE sending flow control -
656 	 * response in ISR may come too fast...
657 	 */
658 	spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
659 	list_add_tail(&rb->list, &dev->read_list.list);
660 	spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);
661 	if (ishtp_hbm_cl_flow_control_req(dev, cl)) {
662 		rets = -ENODEV;
663 		goto out;
664 	}
665 out:
666 	/* if ishtp_hbm_cl_flow_control_req failed, return rb to free list */
667 	if (rets && rb) {
668 		spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
669 		list_del(&rb->list);
670 		spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);
671 
672 		spin_lock_irqsave(&cl->free_list_spinlock, flags);
673 		list_add_tail(&rb->list, &cl->free_rb_list.list);
674 		spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
675 	}
676 	return rets;
677 }
678 
679 /**
680  * ishtp_cl_send() - Send a message to firmware
681  * @cl: client device instance
682  * @buf: message buffer
683  * @length: length of message
684  *
685  * If the client is in the correct state to send a message, this function
686  * gets a buffer from the tx ring, copies the message data into it and sends
687  * the message using ishtp_cl_send_msg()
688  *
689  * Return: 0 if successful or error code on failure
690  */
691 int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length)
692 {
693 	struct ishtp_device	*dev;
694 	int	id;
695 	struct ishtp_cl_tx_ring	*cl_msg;
696 	int	have_msg_to_send = 0;
697 	unsigned long	tx_flags, tx_free_flags;
698 
699 	if (WARN_ON(!cl || !cl->dev))
700 		return -ENODEV;
701 
702 	dev = cl->dev;
703 
704 	if (cl->state != ISHTP_CL_CONNECTED) {
705 		++cl->err_send_msg;
706 		return -EPIPE;
707 	}
708 
709 	if (dev->dev_state != ISHTP_DEV_ENABLED) {
710 		++cl->err_send_msg;
711 		return -ENODEV;
712 	}
713 
714 	/* Check if we have fw client device */
715 	id = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
716 	if (id < 0) {
717 		++cl->err_send_msg;
718 		return -ENOENT;
719 	}
720 
721 	if (length > dev->fw_clients[id].props.max_msg_length) {
722 		++cl->err_send_msg;
723 		return -EMSGSIZE;
724 	}
725 
726 	/* No free bufs */
727 	spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
728 	if (list_empty(&cl->tx_free_list.list)) {
729 		spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
730 			tx_free_flags);
731 		++cl->err_send_msg;
732 		return	-ENOMEM;
733 	}
734 
735 	cl_msg = list_first_entry(&cl->tx_free_list.list,
736 		struct ishtp_cl_tx_ring, list);
737 	if (!cl_msg->send_buf.data) {
738 		spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
739 			tx_free_flags);
740 		return	-EIO;
741 		/* Should not happen, as free list is pre-allocated */
742 	}
743 	/*
744 	 * This is safe, as 'length' is already checked for not exceeding
745 	 * max ISHTP message size per client
746 	 */
747 	list_del_init(&cl_msg->list);
748 	--cl->tx_ring_free_size;
749 
750 	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
751 	memcpy(cl_msg->send_buf.data, buf, length);
752 	cl_msg->send_buf.size = length;
753 	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
754 	have_msg_to_send = !list_empty(&cl->tx_list.list);
755 	list_add_tail(&cl_msg->list, &cl->tx_list.list);
756 	spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
757 
758 	if (!have_msg_to_send && cl->ishtp_flow_ctrl_creds > 0)
759 		ishtp_cl_send_msg(dev, cl);
760 
761 	return	0;
762 }
763 EXPORT_SYMBOL(ishtp_cl_send);
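/*
 * Usage sketch (not part of this driver): a sender may check the free TX
 * space first to avoid the -ENOMEM path when the tx ring is exhausted.
 * "out_buf" and "out_len" are hypothetical.
 *
 *	if (ishtp_cl_get_tx_free_buffer_size(cl) < out_len)
 *		return -ENOMEM;
 *	rv = ishtp_cl_send(cl, out_buf, out_len);
 */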
764 
765 /**
766  * ishtp_cl_read_complete() - read complete
767  * @rb: Pointer to client request block
768  *
769  * If the message is completely received, call ishtp_cl_bus_rx_event()
770  * to process it
771  */
772 static void ishtp_cl_read_complete(struct ishtp_cl_rb *rb)
773 {
774 	unsigned long	flags;
775 	int	schedule_work_flag = 0;
776 	struct ishtp_cl	*cl = rb->cl;
777 
778 	spin_lock_irqsave(&cl->in_process_spinlock, flags);
779 	/*
780 	 * if in-process list is empty, then need to schedule
781 	 * the processing thread
782 	 */
783 	schedule_work_flag = list_empty(&cl->in_process_list.list);
784 	list_add_tail(&rb->list, &cl->in_process_list.list);
785 	spin_unlock_irqrestore(&cl->in_process_spinlock, flags);
786 
787 	if (schedule_work_flag)
788 		ishtp_cl_bus_rx_event(cl->device);
789 }
790 
791 /**
792  * ipc_tx_send() - IPC tx send function
793  * @prm: Pointer to client device instance
794  *
795  * Send a message over IPC. The message is split into fragments
796  * if its size is bigger than the IPC MTU, and all
797  * fragments are sent one by one.
798  */
799 static void ipc_tx_send(void *prm)
800 {
801 	struct ishtp_cl	*cl = prm;
802 	struct ishtp_cl_tx_ring	*cl_msg;
803 	size_t	rem;
804 	struct ishtp_device	*dev = (cl ? cl->dev : NULL);
805 	struct ishtp_msg_hdr	ishtp_hdr;
806 	unsigned long	tx_flags, tx_free_flags;
807 	unsigned char	*pmsg;
808 
809 	if (!dev)
810 		return;
811 
812 	/*
813 	 * Bail out if some critical error has
814 	 * occurred before this callback was called
815 	 */
816 	if (dev->dev_state != ISHTP_DEV_ENABLED)
817 		return;
818 
819 	if (cl->state != ISHTP_CL_CONNECTED)
820 		return;
821 
822 	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
823 	if (list_empty(&cl->tx_list.list)) {
824 		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
825 		return;
826 	}
827 
828 	if (cl->ishtp_flow_ctrl_creds != 1 && !cl->sending) {
829 		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
830 		return;
831 	}
832 
833 	if (!cl->sending) {
834 		--cl->ishtp_flow_ctrl_creds;
835 		cl->last_ipc_acked = 0;
836 		cl->last_tx_path = CL_TX_PATH_IPC;
837 		cl->sending = 1;
838 	}
839 
840 	cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
841 			    list);
842 	rem = cl_msg->send_buf.size - cl->tx_offs;
843 
844 	while (rem > 0) {
845 		ishtp_hdr.host_addr = cl->host_client_id;
846 		ishtp_hdr.fw_addr = cl->fw_client_id;
847 		ishtp_hdr.reserved = 0;
848 		pmsg = cl_msg->send_buf.data + cl->tx_offs;
849 
850 		if (rem <= dev->mtu) {
851 			/* Last fragment or only one packet */
852 			ishtp_hdr.length = rem;
853 			ishtp_hdr.msg_complete = 1;
854 			/* Submit to IPC queue with no callback */
855 			ishtp_write_message(dev, &ishtp_hdr, pmsg);
856 			cl->tx_offs = 0;
857 			cl->sending = 0;
858 
859 			break;
860 		} else {
861 			/* Send ipc fragment */
862 			ishtp_hdr.length = dev->mtu;
863 			ishtp_hdr.msg_complete = 0;
864 			/* All fragments are submitted to the IPC queue with no callback */
865 			ishtp_write_message(dev, &ishtp_hdr, pmsg);
866 			cl->tx_offs += dev->mtu;
867 			rem = cl_msg->send_buf.size - cl->tx_offs;
868 		}
869 	}
870 
871 	list_del_init(&cl_msg->list);
872 	spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
873 
874 	spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
875 	list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
876 	++cl->tx_ring_free_size;
877 	spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
878 		tx_free_flags);
879 }
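/*
 * Fragmentation example (illustrative values): with dev->mtu of, say, 128
 * bytes, a 300-byte message is sent by the loop above as three IPC
 * fragments of 128, 128 and 44 bytes; only the last fragment has
 * msg_complete set.
 */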
880 
881 /**
882  * ishtp_cl_send_msg_ipc() - Send message using IPC
883  * @dev: ISHTP device instance
884  * @cl: Pointer to client device instance
885  *
886  * Send message over IPC not using DMA
887  */
888 static void ishtp_cl_send_msg_ipc(struct ishtp_device *dev,
889 				  struct ishtp_cl *cl)
890 {
891 	/* If last DMA message wasn't acked yet, leave this one in Tx queue */
892 	if (cl->last_tx_path == CL_TX_PATH_DMA && cl->last_dma_acked == 0)
893 		return;
894 
895 	cl->tx_offs = 0;
896 	ipc_tx_send(cl);
897 	++cl->send_msg_cnt_ipc;
898 }
899 
900 /**
901  * ishtp_cl_send_msg_dma() - Send message using DMA
902  * @dev: ISHTP device instance
903  * @cl: Pointer to client device instance
904  *
905  * Send message using DMA
906  */
907 static void ishtp_cl_send_msg_dma(struct ishtp_device *dev,
908 	struct ishtp_cl *cl)
909 {
910 	struct ishtp_msg_hdr	hdr;
911 	struct dma_xfer_hbm	dma_xfer;
912 	unsigned char	*msg_addr;
913 	int off;
914 	struct ishtp_cl_tx_ring	*cl_msg;
915 	unsigned long tx_flags, tx_free_flags;
916 
917 	/* If last IPC message wasn't acked yet, leave this one in Tx queue */
918 	if (cl->last_tx_path == CL_TX_PATH_IPC && cl->last_ipc_acked == 0)
919 		return;
920 
921 	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
922 	if (list_empty(&cl->tx_list.list)) {
923 		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
924 		return;
925 	}
926 
927 	cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
928 		list);
929 
930 	msg_addr = ishtp_cl_get_dma_send_buf(dev, cl_msg->send_buf.size);
931 	if (!msg_addr) {
932 		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
933 		if (dev->transfer_path == CL_TX_PATH_DEFAULT)
934 			ishtp_cl_send_msg_ipc(dev, cl);
935 		return;
936 	}
937 
938 	list_del_init(&cl_msg->list);	/* Must be before write */
939 	spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
940 
941 	--cl->ishtp_flow_ctrl_creds;
942 	cl->last_dma_acked = 0;
943 	cl->last_dma_addr = msg_addr;
944 	cl->last_tx_path = CL_TX_PATH_DMA;
945 
946 	/* write msg to dma buf */
947 	memcpy(msg_addr, cl_msg->send_buf.data, cl_msg->send_buf.size);
948 
949 	/*
950 	 * If the current fw doesn't support cache snooping, the driver has to
951 	 * flush the cache manually.
952 	 */
953 	if (dev->ops->dma_no_cache_snooping &&
954 		dev->ops->dma_no_cache_snooping(dev))
955 		clflush_cache_range(msg_addr, cl_msg->send_buf.size);
956 
957 	/* send dma_xfer hbm msg */
958 	off = msg_addr - (unsigned char *)dev->ishtp_host_dma_tx_buf;
959 	ishtp_hbm_hdr(&hdr, sizeof(struct dma_xfer_hbm));
960 	dma_xfer.hbm = DMA_XFER;
961 	dma_xfer.fw_client_id = cl->fw_client_id;
962 	dma_xfer.host_client_id = cl->host_client_id;
963 	dma_xfer.reserved = 0;
964 	dma_xfer.msg_addr = dev->ishtp_host_dma_tx_buf_phys + off;
965 	dma_xfer.msg_length = cl_msg->send_buf.size;
966 	dma_xfer.reserved2 = 0;
967 	ishtp_write_message(dev, &hdr, (unsigned char *)&dma_xfer);
968 	spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
969 	list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
970 	++cl->tx_ring_free_size;
971 	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
972 	++cl->send_msg_cnt_dma;
973 }
974 
975 /**
976  * ishtp_cl_send_msg() - Send message using DMA or IPC
977  * @dev: ISHTP device instance
978  * @cl: Pointer to client device instance
979  *
980  * Send message using DMA or IPC based on transfer_path
981  */
982 void ishtp_cl_send_msg(struct ishtp_device *dev, struct ishtp_cl *cl)
983 {
984 	if (dev->transfer_path == CL_TX_PATH_DMA)
985 		ishtp_cl_send_msg_dma(dev, cl);
986 	else
987 		ishtp_cl_send_msg_ipc(dev, cl);
988 }
989 
990 /**
991  * recv_ishtp_cl_msg() - Receive client message
992  * @dev: ISHTP device instance
993  * @ishtp_hdr: Pointer to message header
994  *
995  * Receive and dispatch ISHTP client messages. This function executes in ISR
996  * or work queue context
997  */
998 void recv_ishtp_cl_msg(struct ishtp_device *dev,
999 		       struct ishtp_msg_hdr *ishtp_hdr)
1000 {
1001 	struct ishtp_cl *cl;
1002 	struct ishtp_cl_rb *rb;
1003 	struct ishtp_cl_rb *new_rb;
1004 	unsigned char *buffer = NULL;
1005 	struct ishtp_cl_rb *complete_rb = NULL;
1006 	unsigned long	flags;
1007 
1008 	if (ishtp_hdr->reserved) {
1009 		dev_err(dev->devc, "corrupted message header.\n");
1010 		goto	eoi;
1011 	}
1012 
1013 	if (ishtp_hdr->length > IPC_PAYLOAD_SIZE) {
1014 		dev_err(dev->devc,
1015 			"ISHTP message length in hdr exceeds IPC MTU\n");
1016 		goto	eoi;
1017 	}
1018 
1019 	spin_lock_irqsave(&dev->read_list_spinlock, flags);
1020 	list_for_each_entry(rb, &dev->read_list.list, list) {
1021 		cl = rb->cl;
1022 		if (!cl || !(cl->host_client_id == ishtp_hdr->host_addr &&
1023 				cl->fw_client_id == ishtp_hdr->fw_addr) ||
1024 				!(cl->state == ISHTP_CL_CONNECTED))
1025 			continue;
1026 
1027 		 /* If no Rx buffer is allocated, disband the rb */
1028 		/* If no Rx buffer is allocated, disband the rb */
1029 			spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
1030 			dev_err(&cl->device->dev,
1031 				"Rx buffer is not allocated.\n");
1032 			list_del(&rb->list);
1033 			ishtp_io_rb_free(rb);
1034 			cl->status = -ENOMEM;
1035 			goto	eoi;
1036 		}
1037 
1038 		/*
1039 		 * If the message overflows the buffer (exceeds the max. client msg
1040 		 * size), drop the message and return the buffer to the free list.
1041 		 * Do we need to disconnect such a client? (We don't send
1042 		 * back FC, so communication will be stuck anyway)
1043 		 */
1044 		if (rb->buffer.size < ishtp_hdr->length + rb->buf_idx) {
1045 			spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
1046 			dev_err(&cl->device->dev,
1047 				"message overflow. size %d len %d idx %ld\n",
1048 				rb->buffer.size, ishtp_hdr->length,
1049 				rb->buf_idx);
1050 			list_del(&rb->list);
1051 			ishtp_cl_io_rb_recycle(rb);
1052 			cl->status = -EIO;
1053 			goto	eoi;
1054 		}
1055 
1056 		buffer = rb->buffer.data + rb->buf_idx;
1057 		dev->ops->ishtp_read(dev, buffer, ishtp_hdr->length);
1058 
1059 		rb->buf_idx += ishtp_hdr->length;
1060 		if (ishtp_hdr->msg_complete) {
1061 			/* Last fragment in message - it's complete */
1062 			cl->status = 0;
1063 			list_del(&rb->list);
1064 			complete_rb = rb;
1065 
1066 			--cl->out_flow_ctrl_creds;
1067 			/*
1068 			 * the whole msg arrived, send a new FC, and add a new
1069 			 * rb buffer for the next coming msg
1070 			 */
1071 			spin_lock(&cl->free_list_spinlock);
1072 
1073 			if (!list_empty(&cl->free_rb_list.list)) {
1074 				new_rb = list_entry(cl->free_rb_list.list.next,
1075 					struct ishtp_cl_rb, list);
1076 				list_del_init(&new_rb->list);
1077 				spin_unlock(&cl->free_list_spinlock);
1078 				new_rb->cl = cl;
1079 				new_rb->buf_idx = 0;
1080 				INIT_LIST_HEAD(&new_rb->list);
1081 				list_add_tail(&new_rb->list,
1082 					&dev->read_list.list);
1083 
1084 				ishtp_hbm_cl_flow_control_req(dev, cl);
1085 			} else {
1086 				spin_unlock(&cl->free_list_spinlock);
1087 			}
1088 		}
1089 		/* One more fragment in message (even if this was last) */
1090 		++cl->recv_msg_num_frags;
1091 
1092 		/*
1093 		 * We can safely break here (and in BH too),
1094 		 * a single input message can go only to a single request!
1095 		 */
1096 		break;
1097 	}
1098 
1099 	spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
1100 	/* If it's nobody's message, just read and discard it */
1101 	if (!buffer) {
1102 		uint8_t	rd_msg_buf[ISHTP_RD_MSG_BUF_SIZE];
1103 
1104 		dev_err(dev->devc, "Dropped Rx msg - no request\n");
1105 		dev->ops->ishtp_read(dev, rd_msg_buf, ishtp_hdr->length);
1106 		goto	eoi;
1107 	}
1108 
1109 	if (complete_rb) {
1110 		cl = complete_rb->cl;
1111 		cl->ts_rx = ktime_get();
1112 		++cl->recv_msg_cnt_ipc;
1113 		ishtp_cl_read_complete(complete_rb);
1114 	}
1115 eoi:
1116 	return;
1117 }
1118 
1119 /**
1120  * recv_ishtp_cl_msg_dma() - Receive client message
1121  * @dev: ISHTP device instance
1122  * @msg: message pointer
1123  * @hbm: hbm buffer
1124  *
1125  * Receive and dispatch ISHTP client messages using DMA. This function executes
1126  * in ISR or work queue context
1127  */
1128 void recv_ishtp_cl_msg_dma(struct ishtp_device *dev, void *msg,
1129 			   struct dma_xfer_hbm *hbm)
1130 {
1131 	struct ishtp_cl *cl;
1132 	struct ishtp_cl_rb *rb;
1133 	struct ishtp_cl_rb *new_rb;
1134 	unsigned char *buffer = NULL;
1135 	struct ishtp_cl_rb *complete_rb = NULL;
1136 	unsigned long	flags;
1137 
1138 	spin_lock_irqsave(&dev->read_list_spinlock, flags);
1139 
1140 	list_for_each_entry(rb, &dev->read_list.list, list) {
1141 		cl = rb->cl;
1142 		if (!cl || !(cl->host_client_id == hbm->host_client_id &&
1143 				cl->fw_client_id == hbm->fw_client_id) ||
1144 				!(cl->state == ISHTP_CL_CONNECTED))
1145 			continue;
1146 
1147 		/*
1148 		 * If no Rx buffer is allocated, disband the rb
1149 		 */
1150 		if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
1151 			spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
1152 			dev_err(&cl->device->dev,
1153 				"response buffer is not allocated.\n");
1154 			list_del(&rb->list);
1155 			ishtp_io_rb_free(rb);
1156 			cl->status = -ENOMEM;
1157 			goto	eoi;
1158 		}
1159 
1160 		/*
1161 		 * If the message overflows the buffer (exceeds the max. client msg
1162 		 * size), drop the message and return the buffer to the free list.
1163 		 * Do we need to disconnect such a client? (We don't send
1164 		 * back FC, so communication will be stuck anyway)
1165 		 */
1166 		if (rb->buffer.size < hbm->msg_length) {
1167 			spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
1168 			dev_err(&cl->device->dev,
1169 				"message overflow. size %d len %d idx %ld\n",
1170 				rb->buffer.size, hbm->msg_length, rb->buf_idx);
1171 			list_del(&rb->list);
1172 			ishtp_cl_io_rb_recycle(rb);
1173 			cl->status = -EIO;
1174 			goto	eoi;
1175 		}
1176 
1177 		buffer = rb->buffer.data;
1178 
1179 		/*
1180 		 * If the current fw doesn't support cache snooping, the driver
1181 		 * has to flush the cache manually.
1182 		 */
1183 		if (dev->ops->dma_no_cache_snooping &&
1184 			dev->ops->dma_no_cache_snooping(dev))
1185 			clflush_cache_range(msg, hbm->msg_length);
1186 
1187 		memcpy(buffer, msg, hbm->msg_length);
1188 		rb->buf_idx = hbm->msg_length;
1189 
1190 		/* Last fragment in message - it's complete */
1191 		cl->status = 0;
1192 		list_del(&rb->list);
1193 		complete_rb = rb;
1194 
1195 		--cl->out_flow_ctrl_creds;
1196 		/*
1197 		 * the whole msg arrived, send a new FC, and add a new
1198 		 * rb buffer for the next coming msg
1199 		 */
1200 		spin_lock(&cl->free_list_spinlock);
1201 
1202 		if (!list_empty(&cl->free_rb_list.list)) {
1203 			new_rb = list_entry(cl->free_rb_list.list.next,
1204 				struct ishtp_cl_rb, list);
1205 			list_del_init(&new_rb->list);
1206 			spin_unlock(&cl->free_list_spinlock);
1207 			new_rb->cl = cl;
1208 			new_rb->buf_idx = 0;
1209 			INIT_LIST_HEAD(&new_rb->list);
1210 			list_add_tail(&new_rb->list,
1211 				&dev->read_list.list);
1212 
1213 			ishtp_hbm_cl_flow_control_req(dev, cl);
1214 		} else {
1215 			spin_unlock(&cl->free_list_spinlock);
1216 		}
1217 
1218 		/* One more fragment in message (this is always last) */
1219 		++cl->recv_msg_num_frags;
1220 
1221 		/*
1222 		 * We can safely break here (and in BH too),
1223 		 * a single input message can go only to a single request!
1224 		 */
1225 		break;
1226 	}
1227 
1228 	spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
1229 	/* If it's nobody's message, just read and discard it */
1230 	if (!buffer) {
1231 		dev_err(dev->devc, "Dropped Rx (DMA) msg - no request\n");
1232 		goto	eoi;
1233 	}
1234 
1235 	if (complete_rb) {
1236 		cl = complete_rb->cl;
1237 		cl->ts_rx = ktime_get();
1238 		++cl->recv_msg_cnt_dma;
1239 		ishtp_cl_read_complete(complete_rb);
1240 	}
1241 eoi:
1242 	return;
1243 }
1244 
1245 void *ishtp_get_client_data(struct ishtp_cl *cl)
1246 {
1247 	return cl->client_data;
1248 }
1249 EXPORT_SYMBOL(ishtp_get_client_data);
1250 
1251 void ishtp_set_client_data(struct ishtp_cl *cl, void *data)
1252 {
1253 	cl->client_data = data;
1254 }
1255 EXPORT_SYMBOL(ishtp_set_client_data);
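/*
 * Usage sketch (not part of this driver): client drivers typically stash
 * their private context at probe time and retrieve it in event callbacks.
 * "struct my_ctx" and "ctx" are hypothetical.
 *
 *	ishtp_set_client_data(cl, ctx);
 *	...
 *	struct my_ctx *ctx = ishtp_get_client_data(cl);
 */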
1256 
1257 struct ishtp_device *ishtp_get_ishtp_device(struct ishtp_cl *cl)
1258 {
1259 	return cl->dev;
1260 }
1261 EXPORT_SYMBOL(ishtp_get_ishtp_device);
1262 
1263 void ishtp_set_tx_ring_size(struct ishtp_cl *cl, int size)
1264 {
1265 	cl->tx_ring_size = size;
1266 }
1267 EXPORT_SYMBOL(ishtp_set_tx_ring_size);
1268 
1269 void ishtp_set_rx_ring_size(struct ishtp_cl *cl, int size)
1270 {
1271 	cl->rx_ring_size = size;
1272 }
1273 EXPORT_SYMBOL(ishtp_set_rx_ring_size);
1274 
1275 void ishtp_set_connection_state(struct ishtp_cl *cl, int state)
1276 {
1277 	cl->state = state;
1278 }
1279 EXPORT_SYMBOL(ishtp_set_connection_state);
1280 
1281 void ishtp_cl_set_fw_client_id(struct ishtp_cl *cl, int fw_client_id)
1282 {
1283 	cl->fw_client_id = fw_client_id;
1284 }
1285 EXPORT_SYMBOL(ishtp_cl_set_fw_client_id);
1286