// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2019, The Linux Foundation. All rights reserved.
 */

#include <linux/adreno-smmu-priv.h>
#include <linux/of_device.h>
#include <linux/qcom_scm.h>

#include "arm-smmu.h"

struct qcom_smmu {
	struct arm_smmu_device smmu;
	bool bypass_quirk;
	u8 bypass_cbndx;
};

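/*
 * The arm_smmu_device is embedded as the first member of qcom_smmu (the
 * allocation is resized in place in qcom_smmu_create()), so container_of()
 * recovers the wrapper from the core driver's pointer.
 */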
static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu)
{
	return container_of(smmu, struct qcom_smmu, smmu);
}

static void qcom_adreno_smmu_write_sctlr(struct arm_smmu_device *smmu, int idx,
		u32 reg)
{
	/*
	 * On the GPU device we want to process subsequent transactions after a
	 * fault to keep the GPU from hanging
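	 * (SCTLR.HUPCF, "hit under previous context fault": with the bit set
	 * the SMMU keeps servicing client transactions while a context fault
	 * is outstanding instead of stalling or terminating them)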
	 */
	reg |= ARM_SMMU_SCTLR_HUPCF;

	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
}

#define QCOM_ADRENO_SMMU_GPU_SID 0

static bool qcom_adreno_smmu_is_gpu_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	int i;

	/*
	 * The GPU will always use SID 0 so that is a handy way to uniquely
	 * identify it and configure it for per-instance pagetables
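	 *
	 * (fwspec->ids carries the SMR-encoded streamid cells taken from the
	 * DT, so extract just the SID field before comparing)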
	 */
	for (i = 0; i < fwspec->num_ids; i++) {
		u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);

		if (sid == QCOM_ADRENO_SMMU_GPU_SID)
			return true;
	}

	return false;
}

static const struct io_pgtable_cfg *qcom_adreno_smmu_get_ttbr1_cfg(
		const void *cookie)
{
	struct arm_smmu_domain *smmu_domain = (void *)cookie;
	struct io_pgtable *pgtable =
		io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops);
	return &pgtable->cfg;
}

/*
 * Local implementation to configure TTBR0 with the specified pagetable config.
 * The GPU driver will call this to enable TTBR0 when per-instance pagetables
 * are active
 */

static int qcom_adreno_smmu_set_ttbr0_cfg(const void *cookie,
		const struct io_pgtable_cfg *pgtbl_cfg)
{
	struct arm_smmu_domain *smmu_domain = (void *)cookie;
	struct io_pgtable *pgtable = io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];

	/* The domain must have split pagetables already enabled */
	if (cb->tcr[0] & ARM_SMMU_TCR_EPD1)
		return -EINVAL;

	/* If the pagetable config is NULL, disable TTBR0 */
	if (!pgtbl_cfg) {
		/* Do nothing if it is already disabled */
		if (cb->tcr[0] & ARM_SMMU_TCR_EPD0)
			return -EINVAL;

		/* Set TCR to the original configuration */
		cb->tcr[0] = arm_smmu_lpae_tcr(&pgtable->cfg);
		cb->ttbr[0] = FIELD_PREP(ARM_SMMU_TTBRn_ASID, cb->cfg->asid);
	} else {
		u32 tcr = cb->tcr[0];

		/* Don't call this again if TTBR0 is already enabled */
		if (!(cb->tcr[0] & ARM_SMMU_TCR_EPD0))
			return -EINVAL;

		tcr |= arm_smmu_lpae_tcr(pgtbl_cfg);
		tcr &= ~(ARM_SMMU_TCR_EPD0 | ARM_SMMU_TCR_EPD1);

		cb->tcr[0] = tcr;
		cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
		cb->ttbr[0] |= FIELD_PREP(ARM_SMMU_TTBRn_ASID, cb->cfg->asid);
	}

	arm_smmu_write_context_bank(smmu_domain->smmu, cb->cfg->cbndx);

	return 0;
}
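
/*
 * Hedged usage sketch (not part of this driver): a GPU driver holding the
 * adreno_smmu_priv handle might flip between the default and a per-instance
 * pagetable roughly like this; the priv pointer and the pagetable allocation
 * step are assumptions:
 *
 *	const struct io_pgtable_cfg *ttbr1_cfg = priv->get_ttbr1_cfg(priv->cookie);
 *	struct io_pgtable_cfg ttbr0_cfg = *ttbr1_cfg;
 *
 *	ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
 *	// ...allocate a TTBR0 pagetable with ttbr0_cfg...
 *	priv->set_ttbr0_cfg(priv->cookie, &ttbr0_cfg);	// enable TTBR0
 *	// ...
 *	priv->set_ttbr0_cfg(priv->cookie, NULL);	// back to the default
 */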

static int qcom_adreno_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
					       struct arm_smmu_device *smmu,
					       struct device *dev, int start)
{
	int count;

	/*
	 * Assign context bank 0 to the GPU device so the GPU hardware can
	 * switch pagetables
	 */
	if (qcom_adreno_smmu_is_gpu_device(dev)) {
		start = 0;
		count = 1;
	} else {
		start = 1;
		count = smmu->num_context_banks;
	}

	return __arm_smmu_alloc_bitmap(smmu->context_map, start, count);
}

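/*
 * Hedged sketch of the consumer side (an assumption, not code in this file):
 * before attaching its IOMMU domain the GPU driver is expected to install a
 * struct adreno_smmu_priv (include/linux/adreno-smmu-priv.h) as the device's
 * drvdata, e.g.:
 *
 *	static struct adreno_smmu_priv adreno_smmu;
 *
 *	platform_set_drvdata(pdev, &adreno_smmu);
 *
 * init_context() below then fills in the cookie and the get_ttbr1_cfg /
 * set_ttbr0_cfg callbacks for the driver to use.
 */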
static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
		struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
{
	struct adreno_smmu_priv *priv;

	/* Only enable split pagetables for the GPU device (SID 0) */
	if (!qcom_adreno_smmu_is_gpu_device(dev))
		return 0;

	/*
	 * All targets that use the qcom,adreno-smmu compatible string *should*
	 * be AARCH64 stage 1 but double check because the arm-smmu code assumes
	 * that is the case when the TTBR1 quirk is enabled
	 */
	if ((smmu_domain->stage == ARM_SMMU_DOMAIN_S1) &&
	    (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64))
		pgtbl_cfg->quirks |= IO_PGTABLE_QUIRK_ARM_TTBR1;

	/*
	 * Initialize private interface with GPU:
	 */

	priv = dev_get_drvdata(dev);
	priv->cookie = smmu_domain;
	priv->get_ttbr1_cfg = qcom_adreno_smmu_get_ttbr1_cfg;
	priv->set_ttbr0_cfg = qcom_adreno_smmu_set_ttbr0_cfg;

	return 0;
}

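/*
 * Clients matching one of these compatibles get an identity default domain
 * from qcom_smmu_def_domain_type() below.
 */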
static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
	{ .compatible = "qcom,adreno" },
	{ .compatible = "qcom,mdp4" },
	{ .compatible = "qcom,mdss" },
	{ .compatible = "qcom,sc7180-mdss" },
	{ .compatible = "qcom,sc7180-mss-pil" },
	{ .compatible = "qcom,sc8180x-mdss" },
	{ .compatible = "qcom,sdm845-mdss" },
	{ .compatible = "qcom,sdm845-mss-pil" },
	{ }
};

static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned int last_s2cr = ARM_SMMU_GR0_S2CR(smmu->num_mapping_groups - 1);
	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
	u32 reg;
	u32 smr;
	int i;

	/*
	 * With some firmware versions writes to S2CR of type FAULT are
	 * ignored, and writing BYPASS will end up written as FAULT in the
	 * register. Perform a write to S2CR to detect if this is the case and
	 * if so reserve a context bank to emulate bypass streams.
	 */
	reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, S2CR_TYPE_BYPASS) |
	      FIELD_PREP(ARM_SMMU_S2CR_CBNDX, 0xff) |
	      FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, S2CR_PRIVCFG_DEFAULT);
	arm_smmu_gr0_write(smmu, last_s2cr, reg);
	reg = arm_smmu_gr0_read(smmu, last_s2cr);
	if (FIELD_GET(ARM_SMMU_S2CR_TYPE, reg) != S2CR_TYPE_BYPASS) {
		qsmmu->bypass_quirk = true;
		qsmmu->bypass_cbndx = smmu->num_context_banks - 1;

		set_bit(qsmmu->bypass_cbndx, smmu->context_map);

		arm_smmu_cb_write(smmu, qsmmu->bypass_cbndx, ARM_SMMU_CB_SCTLR, 0);

		reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, CBAR_TYPE_S1_TRANS_S2_BYPASS);
		arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(qsmmu->bypass_cbndx), reg);
	}

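	/*
	 * Read back the stream mappings left behind by the bootloader (e.g.
	 * for a splash screen that is still scanning out) and mirror them as
	 * valid bypass entries so they survive the driver taking over.
	 */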
	for (i = 0; i < smmu->num_mapping_groups; i++) {
		smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));

		if (FIELD_GET(ARM_SMMU_SMR_VALID, smr)) {
			/* Ignore valid bit for SMR mask extraction. */
			smr &= ~ARM_SMMU_SMR_VALID;
			smmu->smrs[i].id = FIELD_GET(ARM_SMMU_SMR_ID, smr);
			smmu->smrs[i].mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
			smmu->smrs[i].valid = true;

			smmu->s2crs[i].type = S2CR_TYPE_BYPASS;
			smmu->s2crs[i].privcfg = S2CR_PRIVCFG_DEFAULT;
			smmu->s2crs[i].cbndx = 0xff;
		}
	}

	return 0;
}

static void qcom_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
	u32 cbndx = s2cr->cbndx;
	u32 type = s2cr->type;
	u32 reg;

	if (qsmmu->bypass_quirk) {
		if (type == S2CR_TYPE_BYPASS) {
			/*
			 * Firmware with quirky S2CR handling will substitute
			 * BYPASS writes with FAULT, so point the stream to the
			 * reserved context bank and ask for translation on the
			 * stream
			 */
			type = S2CR_TYPE_TRANS;
			cbndx = qsmmu->bypass_cbndx;
		} else if (type == S2CR_TYPE_FAULT) {
			/*
			 * Firmware with quirky S2CR handling will ignore FAULT
			 * writes, so trick it to write FAULT by asking for a
			 * BYPASS.
			 */
			type = S2CR_TYPE_BYPASS;
			cbndx = 0xff;
		}
	}

	reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, type) |
	      FIELD_PREP(ARM_SMMU_S2CR_CBNDX, cbndx) |
	      FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
}

static int qcom_smmu_def_domain_type(struct device *dev)
{
	const struct of_device_id *match =
		of_match_device(qcom_smmu_client_of_match, dev);

	return match ? IOMMU_DOMAIN_IDENTITY : 0;
}

static int qcom_sdm845_smmu500_reset(struct arm_smmu_device *smmu)
{
	int ret;

	/*
	 * To address performance degradation in non-real time clients,
	 * such as USB and UFS, turn off wait-for-safe on sdm845 based boards,
	 * such as MTP and db845, whose firmwares implement secure monitor
	 * call handlers to turn on/off the wait-for-safe logic.
	 */
	ret = qcom_scm_qsmmu500_wait_safe_toggle(0);
	if (ret)
		dev_warn(smmu->dev, "Failed to turn off SAFE logic\n");

	return ret;
}

static int qcom_smmu500_reset(struct arm_smmu_device *smmu)
{
	const struct device_node *np = smmu->dev->of_node;

	arm_mmu500_reset(smmu);

	if (of_device_is_compatible(np, "qcom,sdm845-smmu-500"))
		return qcom_sdm845_smmu500_reset(smmu);

	return 0;
}

static const struct arm_smmu_impl qcom_smmu_impl = {
	.cfg_probe = qcom_smmu_cfg_probe,
	.def_domain_type = qcom_smmu_def_domain_type,
	.reset = qcom_smmu500_reset,
	.write_s2cr = qcom_smmu_write_s2cr,
};

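/*
 * The Adreno GPU variant layers the per-instance pagetable hooks (context
 * bank 0 allocation, the TTBR0/TTBR1 handshake, SCTLR.HUPCF) on top of the
 * shared reset and default-domain handling.
 */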
static const struct arm_smmu_impl qcom_adreno_smmu_impl = {
	.init_context = qcom_adreno_smmu_init_context,
	.def_domain_type = qcom_smmu_def_domain_type,
	.reset = qcom_smmu500_reset,
	.alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
	.write_sctlr = qcom_adreno_smmu_write_sctlr,
};

static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
		const struct arm_smmu_impl *impl)
{
	struct qcom_smmu *qsmmu;

	/* Check to make sure qcom_scm has finished probing */
	if (!qcom_scm_is_available())
		return ERR_PTR(-EPROBE_DEFER);

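	/*
	 * Resize the devm allocation in place so that the impl-specific
	 * qcom_smmu wraps the arm_smmu_device as its first member, which is
	 * what to_qcom_smmu() relies on.
	 */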
	qsmmu = devm_krealloc(smmu->dev, smmu, sizeof(*qsmmu), GFP_KERNEL);
	if (!qsmmu)
		return ERR_PTR(-ENOMEM);

	qsmmu->smmu.impl = impl;

	return &qsmmu->smmu;
}

static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
	{ .compatible = "qcom,msm8998-smmu-v2" },
	{ .compatible = "qcom,sc7180-smmu-500" },
	{ .compatible = "qcom,sc8180x-smmu-500" },
	{ .compatible = "qcom,sdm630-smmu-v2" },
	{ .compatible = "qcom,sdm845-smmu-500" },
	{ .compatible = "qcom,sm8150-smmu-500" },
	{ .compatible = "qcom,sm8250-smmu-500" },
	{ .compatible = "qcom,sm8350-smmu-500" },
	{ }
};

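/*
 * Entry point from the generic impl dispatch (arm-smmu-impl.c): pick the
 * right implementation for the compatible, or hand the device back
 * untouched.
 */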
struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
{
	const struct device_node *np = smmu->dev->of_node;

	if (of_match_node(qcom_smmu_impl_of_match, np))
		return qcom_smmu_create(smmu, &qcom_smmu_impl);

	if (of_device_is_compatible(np, "qcom,adreno-smmu"))
		return qcom_smmu_create(smmu, &qcom_adreno_smmu_impl);

	return smmu;
}