xref: /linux/drivers/infiniband/hw/mlx5/cmd.c (revision 2e8e631d)
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2017-2020, Mellanox Technologies inc. All rights reserved.
 */

#include "cmd.h"

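/*
 * Query the firmware for the special mkeys advertised in the general
 * capabilities (terminate scatter list, dump fill and null mkey) and
 * cache them in dev->mkeys. If none are supported, only the static
 * terminate scatter list lkey is set and no command is issued.
 */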
int mlx5r_cmd_query_special_mkeys(struct mlx5_ib_dev *dev)
{
	u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
	bool is_terminate, is_dump, is_null;
	int err;

	is_terminate = MLX5_CAP_GEN(dev->mdev, terminate_scatter_list_mkey);
	is_dump = MLX5_CAP_GEN(dev->mdev, dump_fill_mkey);
	is_null = MLX5_CAP_GEN(dev->mdev, null_mkey);

	dev->mkeys.terminate_scatter_list_mkey = MLX5_TERMINATE_SCATTER_LIST_LKEY;
	if (!is_terminate && !is_dump && !is_null)
		return 0;

	MLX5_SET(query_special_contexts_in, in, opcode,
		 MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
	err = mlx5_cmd_exec_inout(dev->mdev, query_special_contexts, in, out);
	if (err)
		return err;

	if (is_dump)
		dev->mkeys.dump_fill_mkey = MLX5_GET(query_special_contexts_out,
						     out, dump_fill_mkey);

	if (is_null)
		dev->mkeys.null_mkey = cpu_to_be32(
			MLX5_GET(query_special_contexts_out, out, null_mkey));

	if (is_terminate)
		dev->mkeys.terminate_scatter_list_mkey =
			cpu_to_be32(MLX5_GET(query_special_contexts_out, out,
					     terminate_scatter_list_mkey));

	return 0;
}

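/*
 * Query the congestion control parameters of the given congestion
 * protocol (@cong_point); @out must be sized by the caller for the
 * query_cong_params output layout.
 */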
int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
			       void *out)
{
	u32 in[MLX5_ST_SZ_DW(query_cong_params_in)] = {};

	MLX5_SET(query_cong_params_in, in, opcode,
		 MLX5_CMD_OP_QUERY_CONG_PARAMS);
	MLX5_SET(query_cong_params_in, in, cong_protocol, cong_point);

	return mlx5_cmd_exec_inout(dev, query_cong_params, in, out);
}

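/*
 * Teardown helpers: each issues the corresponding DESTROY/DEALLOC command
 * for the given object number, passing the uid of the user context under
 * which the object was created.
 */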
void mlx5_cmd_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};

	MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
	MLX5_SET(destroy_tir_in, in, tirn, tirn);
	MLX5_SET(destroy_tir_in, in, uid, uid);
	mlx5_cmd_exec_in(dev, destroy_tir, in);
}

void mlx5_cmd_destroy_tis(struct mlx5_core_dev *dev, u32 tisn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {};

	MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
	MLX5_SET(destroy_tis_in, in, tisn, tisn);
	MLX5_SET(destroy_tis_in, in, uid, uid);
	mlx5_cmd_exec_in(dev, destroy_tis, in);
}

int mlx5_cmd_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};

	MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
	MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
	MLX5_SET(destroy_rqt_in, in, uid, uid);
	return mlx5_cmd_exec_in(dev, destroy_rqt, in);
}

int mlx5_cmd_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn,
				    u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {};
	u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {};
	int err;

	MLX5_SET(alloc_transport_domain_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
	MLX5_SET(alloc_transport_domain_in, in, uid, uid);

	err = mlx5_cmd_exec_inout(dev, alloc_transport_domain, in, out);
	if (!err)
		*tdn = MLX5_GET(alloc_transport_domain_out, out,
				transport_domain);

	return err;
}

void mlx5_cmd_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn,
				       u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {};

	MLX5_SET(dealloc_transport_domain_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
	MLX5_SET(dealloc_transport_domain_in, in, uid, uid);
	MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
	mlx5_cmd_exec_in(dev, dealloc_transport_domain, in);
}

int mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {};

	MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
	MLX5_SET(dealloc_pd_in, in, pd, pdn);
	MLX5_SET(dealloc_pd_in, in, uid, uid);
	return mlx5_cmd_exec_in(dev, dealloc_pd, in);
}

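/*
 * Attach QP @qpn to the multicast group identified by @mgid;
 * mlx5_cmd_detach_mcg() below is the mirror operation.
 */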
int mlx5_cmd_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
			u32 qpn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)] = {};
	void *gid;

	MLX5_SET(attach_to_mcg_in, in, opcode, MLX5_CMD_OP_ATTACH_TO_MCG);
	MLX5_SET(attach_to_mcg_in, in, qpn, qpn);
	MLX5_SET(attach_to_mcg_in, in, uid, uid);
	gid = MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid);
	memcpy(gid, mgid, sizeof(*mgid));
	return mlx5_cmd_exec_in(dev, attach_to_mcg, in);
}

int mlx5_cmd_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
			u32 qpn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)] = {};
	void *gid;

	MLX5_SET(detach_from_mcg_in, in, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
	MLX5_SET(detach_from_mcg_in, in, qpn, qpn);
	MLX5_SET(detach_from_mcg_in, in, uid, uid);
	gid = MLX5_ADDR_OF(detach_from_mcg_in, in, multicast_gid);
	memcpy(gid, mgid, sizeof(*mgid));
	return mlx5_cmd_exec_in(dev, detach_from_mcg, in);
}

int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid)
{
	u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {};
	int err;

	MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
	MLX5_SET(alloc_xrcd_in, in, uid, uid);
	err = mlx5_cmd_exec_inout(dev, alloc_xrcd, in, out);
	if (!err)
		*xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
	return err;
}

int mlx5_cmd_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {};

	MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
	MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
	MLX5_SET(dealloc_xrcd_in, in, uid, uid);
	return mlx5_cmd_exec_in(dev, dealloc_xrcd, in);
}

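/*
 * Execute a MAD through the firmware MAD_IFC command: the MAD in @inb is
 * copied into the command mailbox and the response MAD is copied back to
 * @outb. For SMI plane devices the plane index is passed separately and
 * the port is translated to the native port number.
 */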
int mlx5_cmd_mad_ifc(struct mlx5_ib_dev *dev, const void *inb, void *outb,
		     u16 opmod, u8 port)
{
	int outlen = MLX5_ST_SZ_BYTES(mad_ifc_out);
	int inlen = MLX5_ST_SZ_BYTES(mad_ifc_in);
	int err = -ENOMEM;
	void *data;
	void *resp;
	u32 *out;
	u32 *in;

	in = kzalloc(inlen, GFP_KERNEL);
	out = kzalloc(outlen, GFP_KERNEL);
	if (!in || !out)
		goto out;

	MLX5_SET(mad_ifc_in, in, opcode, MLX5_CMD_OP_MAD_IFC);
	MLX5_SET(mad_ifc_in, in, op_mod, opmod);
	if (dev->ib_dev.type == RDMA_DEVICE_TYPE_SMI) {
		MLX5_SET(mad_ifc_in, in, plane_index, port);
		MLX5_SET(mad_ifc_in, in, port,
			 smi_to_native_portnum(dev, port));
	} else {
		MLX5_SET(mad_ifc_in, in, port, port);
	}

	data = MLX5_ADDR_OF(mad_ifc_in, in, mad);
	memcpy(data, inb, MLX5_FLD_SZ_BYTES(mad_ifc_in, mad));

	err = mlx5_cmd_exec_inout(dev->mdev, mad_ifc, in, out);
	if (err)
		goto out;

	resp = MLX5_ADDR_OF(mad_ifc_out, out, response_mad_packet);
	memcpy(outb, resp,
	       MLX5_FLD_SZ_BYTES(mad_ifc_out, response_mad_packet));

out:
	kfree(out);
	kfree(in);
	return err;
}

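/*
 * Allocate a UAR (User Access Region) index on behalf of @uid and return
 * it in @uarn; mlx5_cmd_uar_dealloc() releases it.
 */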
int mlx5_cmd_uar_alloc(struct mlx5_core_dev *dev, u32 *uarn, u16 uid)
{
	u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_uar_in)] = {};
	int err;

	MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR);
	MLX5_SET(alloc_uar_in, in, uid, uid);
	err = mlx5_cmd_exec_inout(dev, alloc_uar, in, out);
	if (err)
		return err;

	*uarn = MLX5_GET(alloc_uar_out, out, uar);
	return 0;
}

int mlx5_cmd_uar_dealloc(struct mlx5_core_dev *dev, u32 uarn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)] = {};

	MLX5_SET(dealloc_uar_in, in, opcode, MLX5_CMD_OP_DEALLOC_UAR);
	MLX5_SET(dealloc_uar_in, in, uar, uarn);
	MLX5_SET(dealloc_uar_in, in, uid, uid);
	return mlx5_cmd_exec_in(dev, dealloc_uar, in);
}

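/*
 * Query the device VUID string (of the data direct device when
 * @data_direct is set) and copy it into @out_vuid, which must hold at
 * least MLX5_ST_SZ_BYTES(array1024_auto) bytes.
 */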
int mlx5_cmd_query_vuid(struct mlx5_core_dev *dev, bool data_direct,
			char *out_vuid)
{
	u8 out[MLX5_ST_SZ_BYTES(query_vuid_out) +
		MLX5_ST_SZ_BYTES(array1024_auto)] = {};
	u8 in[MLX5_ST_SZ_BYTES(query_vuid_in)] = {};
	char *vuid;
	int err;

	MLX5_SET(query_vuid_in, in, opcode, MLX5_CMD_OPCODE_QUERY_VUID);
	MLX5_SET(query_vuid_in, in, vhca_id, MLX5_CAP_GEN(dev, vhca_id));
	MLX5_SET(query_vuid_in, in, data_direct, data_direct);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	vuid = MLX5_ADDR_OF(query_vuid_out, out, vuid);
	memcpy(out_vuid, vuid, MLX5_ST_SZ_BYTES(array1024_auto));
	return 0;
}