/* SPDX-License-Identifier: GPL-2.0-only */
/*******************************************************************************
  Header File to describe Normal/enhanced descriptor functions used for RING
  and CHAINED modes.

  Copyright(C) 2011  STMicroelectronics Ltd

  It defines all the functions used to handle the normal/enhanced
  descriptors in case the DMA is configured to work in chained or
  in ring mode.

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#ifndef __DESC_COM_H__
#define __DESC_COM_H__

/* Specific functions used for Ring mode */

/* Enhanced descriptors */
static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end,
					   int bfsize)
{
	/* A 16KiB receive buffer cannot be described by buffer 1 alone:
	 * advertise the second 8KiB through buffer 2 of the same descriptor.
	 */
	if (bfsize == BUF_SIZE_16KiB)
		p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
				<< ERDES1_BUFFER2_SIZE_SHIFT)
			   & ERDES1_BUFFER2_SIZE_MASK);

	if (end)
		p->des1 |= cpu_to_le32(ERDES1_END_RING);
}

static inline void enh_desc_end_tx_desc_on_ring(struct dma_desc *p, int end)
{
	if (end)
		p->des0 |= cpu_to_le32(ETDES0_END_RING);
	else
		p->des0 &= cpu_to_le32(~ETDES0_END_RING);
}

static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
{
	/* Frames larger than 4KiB are split: the first 4KiB go in
	 * buffer 1 and the remainder in buffer 2 of the same descriptor.
	 */
	if (unlikely(len > BUF_SIZE_4KiB)) {
		p->des1 |= cpu_to_le32((((len - BUF_SIZE_4KiB)
					<< ETDES1_BUFFER2_SIZE_SHIFT)
			    & ETDES1_BUFFER2_SIZE_MASK) | (BUF_SIZE_4KiB
			    & ETDES1_BUFFER1_SIZE_MASK));
	} else {
		p->des1 |= cpu_to_le32((len & ETDES1_BUFFER1_SIZE_MASK));
	}
}
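
/* Illustrative sketch only, not part of the original header: it shows how a
 * hypothetical caller could program an RX ring of enhanced descriptors with
 * the helper above, marking only the last entry with the end-of-ring bit.
 * The descriptor array, ring size and buffer size are assumptions; the real
 * driver performs this from its own descriptor-initialisation loops.
 */
static inline void example_init_enh_rx_ring(struct dma_desc *ring,
					    unsigned int size, int bfsize)
{
	unsigned int i;

	for (i = 0; i < size; i++)
		ehn_desc_rx_set_on_ring(&ring[i], i == size - 1, bfsize);
}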

/* Normal descriptors */
static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end, int bfsize)
{
	if (bfsize >= BUF_SIZE_2KiB) {
		int bfsize2;

		/* Buffer 1 is limited to 2KiB - 1 bytes; describe the
		 * remainder (also capped at 2KiB - 1) through buffer 2.
		 */
		bfsize2 = min(bfsize - BUF_SIZE_2KiB + 1, BUF_SIZE_2KiB - 1);
		p->des1 |= cpu_to_le32((bfsize2 << RDES1_BUFFER2_SIZE_SHIFT)
			    & RDES1_BUFFER2_SIZE_MASK);
	}

	if (end)
		p->des1 |= cpu_to_le32(RDES1_END_RING);
}

static inline void ndesc_end_tx_desc_on_ring(struct dma_desc *p, int end)
{
	if (end)
		p->des1 |= cpu_to_le32(TDES1_END_RING);
	else
		p->des1 &= cpu_to_le32(~TDES1_END_RING);
}

static inline void norm_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
{
	/* Frames larger than buffer 1 can hold (2KiB - 1 bytes) are
	 * split, with the remainder described by buffer 2.
	 */
	if (unlikely(len > BUF_SIZE_2KiB)) {
		unsigned int buffer1 = (BUF_SIZE_2KiB - 1)
					& TDES1_BUFFER1_SIZE_MASK;
		p->des1 |= cpu_to_le32((((len - buffer1)
					<< TDES1_BUFFER2_SIZE_SHIFT)
				& TDES1_BUFFER2_SIZE_MASK) | buffer1);
	} else {
		p->des1 |= cpu_to_le32((len & TDES1_BUFFER1_SIZE_MASK));
	}
}
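
/* Illustrative sketch only, not part of the original header: preparing one
 * normal TX descriptor on a ring for a hypothetical 3000-byte frame.  With
 * BUF_SIZE_2KiB == 2048, the helper above puts 2047 bytes in buffer 1 and
 * the remaining 953 bytes in buffer 2; the end-of-ring bit is then handled
 * by ndesc_end_tx_desc_on_ring().  The function name and the "last"
 * parameter are assumptions made for this example.
 */
static inline void example_prep_norm_tx_desc(struct dma_desc *p, int last)
{
	norm_set_tx_desc_len_on_ring(p, 3000);
	ndesc_end_tx_desc_on_ring(p, last);
}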

/* Specific functions used for Chain mode */

/* Enhanced descriptors */
static inline void ehn_desc_rx_set_on_chain(struct dma_desc *p)
{
	p->des1 |= cpu_to_le32(ERDES1_SECOND_ADDRESS_CHAINED);
}

static inline void enh_desc_end_tx_desc_on_chain(struct dma_desc *p)
{
	p->des0 |= cpu_to_le32(ETDES0_SECOND_ADDRESS_CHAINED);
}

static inline void enh_set_tx_desc_len_on_chain(struct dma_desc *p, int len)
{
	p->des1 |= cpu_to_le32(len & ETDES1_BUFFER1_SIZE_MASK);
}

/* Normal descriptors */
static inline void ndesc_rx_set_on_chain(struct dma_desc *p, int end)
{
	p->des1 |= cpu_to_le32(RDES1_SECOND_ADDRESS_CHAINED);
}

static inline void ndesc_tx_set_on_chain(struct dma_desc *p)
{
	p->des1 |= cpu_to_le32(TDES1_SECOND_ADDRESS_CHAINED);
}

static inline void norm_set_tx_desc_len_on_chain(struct dma_desc *p, int len)
{
	p->des1 |= cpu_to_le32(len & TDES1_BUFFER1_SIZE_MASK);
}
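
/* Illustrative sketch only, not part of the original header: in chain mode
 * each descriptor merely gets its "second address chained" flag set by the
 * helpers above; writing the DMA address of the next descriptor (the actual
 * chaining) is done elsewhere in the driver and is intentionally omitted.
 * The array and size parameters below are assumptions for this example.
 */
static inline void example_init_norm_rx_chain(struct dma_desc *chain,
					      unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++)
		ndesc_rx_set_on_chain(&chain[i], i == size - 1);
}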
#endif /* __DESC_COM_H__ */