1 // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
2 /*
3  * Wave5 series multi-standard codec IP - low level access functions
4  *
5  * Copyright (C) 2021-2023 CHIPS&MEDIA INC
6  */
7 
8 #include <linux/bug.h>
9 #include "wave5-vdi.h"
10 #include "wave5-vpu.h"
11 #include "wave5-regdefine.h"
12 #include <linux/delay.h>
13 
14 static int wave5_vdi_allocate_common_memory(struct device *dev)
15 {
16 	struct vpu_device *vpu_dev = dev_get_drvdata(dev);
17 
18 	if (!vpu_dev->common_mem.vaddr) {
19 		int ret;
20 
21 		vpu_dev->common_mem.size = SIZE_COMMON;
22 		ret = wave5_vdi_allocate_dma_memory(vpu_dev, &vpu_dev->common_mem);
23 		if (ret) {
24 			dev_err(dev, "unable to allocate common buffer\n");
25 			return ret;
26 		}
27 	}
28 
29 	dev_dbg(dev, "[VDI] common_mem: daddr=%pad size=%zu vaddr=0x%p\n",
30 		&vpu_dev->common_mem.daddr, vpu_dev->common_mem.size, vpu_dev->common_mem.vaddr);
31 
32 	return 0;
33 }
34 
35 int wave5_vdi_init(struct device *dev)
36 {
37 	struct vpu_device *vpu_dev = dev_get_drvdata(dev);
38 	int ret;
39 
40 	ret = wave5_vdi_allocate_common_memory(dev);
41 	if (ret < 0) {
42 		dev_err(dev, "[VDI] failed to get vpu common buffer from driver\n");
43 		return ret;
44 	}
45 
46 	if (!PRODUCT_CODE_W_SERIES(vpu_dev->product_code)) {
47 		WARN_ONCE(1, "unsupported product code: 0x%x\n", vpu_dev->product_code);
48 		return -EOPNOTSUPP;
49 	}
50 
51 	/* if BIT processor is not running. */
52 	if (wave5_vdi_read_register(vpu_dev, W5_VCPU_CUR_PC) == 0) {
53 		int i;
54 
55 		for (i = 0; i < 64; i++)
56 			wave5_vdi_write_register(vpu_dev, (i * 4) + 0x100, 0x0);
57 	}
58 
59 	dev_dbg(dev, "[VDI] driver initialized successfully\n");
60 
61 	return 0;
62 }
63 
64 int wave5_vdi_release(struct device *dev)
65 {
66 	struct vpu_device *vpu_dev = dev_get_drvdata(dev);
67 
68 	vpu_dev->vdb_register = NULL;
69 	wave5_vdi_free_dma_memory(vpu_dev, &vpu_dev->common_mem);
70 
71 	return 0;
72 }
73 
/* Write a 32-bit value to the VPU register at byte offset @addr. */
void wave5_vdi_write_register(struct vpu_device *vpu_dev, u32 addr, u32 data)
{
	writel(data, vpu_dev->vdb_register + addr);
}
78 
/* Read the 32-bit VPU register at byte offset @addr. */
unsigned int wave5_vdi_read_register(struct vpu_device *vpu_dev, u32 addr)
{
	return readl(vpu_dev->vdb_register + addr);
}
83 
84 int wave5_vdi_clear_memory(struct vpu_device *vpu_dev, struct vpu_buf *vb)
85 {
86 	if (!vb || !vb->vaddr) {
87 		dev_err(vpu_dev->dev, "%s: unable to clear unmapped buffer\n", __func__);
88 		return -EINVAL;
89 	}
90 
91 	memset(vb->vaddr, 0, vb->size);
92 	return vb->size;
93 }
94 
95 int wave5_vdi_write_memory(struct vpu_device *vpu_dev, struct vpu_buf *vb, size_t offset,
96 			   u8 *data, size_t len)
97 {
98 	if (!vb || !vb->vaddr) {
99 		dev_err(vpu_dev->dev, "%s: unable to write to unmapped buffer\n", __func__);
100 		return -EINVAL;
101 	}
102 
103 	if (offset > vb->size || len > vb->size || offset + len > vb->size) {
104 		dev_err(vpu_dev->dev, "%s: buffer too small\n", __func__);
105 		return -ENOSPC;
106 	}
107 
108 	memcpy(vb->vaddr + offset, data, len);
109 
110 	return len;
111 }
112 
113 int wave5_vdi_allocate_dma_memory(struct vpu_device *vpu_dev, struct vpu_buf *vb)
114 {
115 	void *vaddr;
116 	dma_addr_t daddr;
117 
118 	if (!vb->size) {
119 		dev_err(vpu_dev->dev, "%s: requested size==0\n", __func__);
120 		return -EINVAL;
121 	}
122 
123 	vaddr = dma_alloc_coherent(vpu_dev->dev, vb->size, &daddr, GFP_KERNEL);
124 	if (!vaddr)
125 		return -ENOMEM;
126 	vb->vaddr = vaddr;
127 	vb->daddr = daddr;
128 
129 	return 0;
130 }
131 
132 int wave5_vdi_free_dma_memory(struct vpu_device *vpu_dev, struct vpu_buf *vb)
133 {
134 	if (vb->size == 0)
135 		return -EINVAL;
136 
137 	if (!vb->vaddr)
138 		dev_err(vpu_dev->dev, "%s: requested free of unmapped buffer\n", __func__);
139 	else
140 		dma_free_coherent(vpu_dev->dev, vb->size, vb->vaddr, vb->daddr);
141 
142 	memset(vb, 0, sizeof(*vb));
143 
144 	return 0;
145 }
146 
147 int wave5_vdi_allocate_array(struct vpu_device *vpu_dev, struct vpu_buf *array, unsigned int count,
148 			     size_t size)
149 {
150 	struct vpu_buf vb_buf;
151 	int i, ret = 0;
152 
153 	vb_buf.size = size;
154 
155 	for (i = 0; i < count; i++) {
156 		if (array[i].size == size)
157 			continue;
158 
159 		if (array[i].size != 0)
160 			wave5_vdi_free_dma_memory(vpu_dev, &array[i]);
161 
162 		ret = wave5_vdi_allocate_dma_memory(vpu_dev, &vb_buf);
163 		if (ret)
164 			return -ENOMEM;
165 		array[i] = vb_buf;
166 	}
167 
168 	for (i = count; i < MAX_REG_FRAME; i++)
169 		wave5_vdi_free_dma_memory(vpu_dev, &array[i]);
170 
171 	return 0;
172 }
173 
174 void wave5_vdi_allocate_sram(struct vpu_device *vpu_dev)
175 {
176 	struct vpu_buf *vb = &vpu_dev->sram_buf;
177 
178 	if (!vpu_dev->sram_pool || !vpu_dev->sram_size)
179 		return;
180 
181 	if (!vb->vaddr) {
182 		vb->size = vpu_dev->sram_size;
183 		vb->vaddr = gen_pool_dma_alloc(vpu_dev->sram_pool, vb->size,
184 					       &vb->daddr);
185 		if (!vb->vaddr)
186 			vb->size = 0;
187 	}
188 
189 	dev_dbg(vpu_dev->dev, "%s: sram daddr: %pad, size: %zu, vaddr: 0x%p\n",
190 		__func__, &vb->daddr, vb->size, vb->vaddr);
191 }
192 
193 void wave5_vdi_free_sram(struct vpu_device *vpu_dev)
194 {
195 	struct vpu_buf *vb = &vpu_dev->sram_buf;
196 
197 	if (!vb->size || !vb->vaddr)
198 		return;
199 
200 	if (vb->vaddr)
201 		gen_pool_free(vpu_dev->sram_pool, (unsigned long)vb->vaddr,
202 			      vb->size);
203 
204 	memset(vb, 0, sizeof(*vb));
205 }
206