xref: /freebsd/sys/dev/smartpqi/smartpqi_ioctl.c (revision 780fb4a2)
1 /*-
2  * Copyright (c) 2018 Microsemi Corporation.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 /* $FreeBSD$ */
28 
29 /*
30  * Management interface for smartpqi driver
31  */
32 
33 #include "smartpqi_includes.h"
34 
/*
 * Copy "size" bytes from a kernel buffer out to a user-space buffer.
 * The softs and mode arguments are accepted for interface symmetry
 * with other OS ports but are not used by the FreeBSD implementation.
 * Returns the copyout(9) result (0 on success, EFAULT on bad address).
 */
int os_copy_to_user(struct pqisrc_softstate *softs, void *dest_buf,
		void *src_buf, int size, int mode)
{
	int rval;

	rval = copyout(src_buf, dest_buf, size);
	return rval;
}
43 
/*
 * Copy "size" bytes from a user-space buffer into a kernel buffer.
 * The softs and mode arguments are accepted for interface symmetry
 * with other OS ports but are not used by the FreeBSD implementation.
 * Returns the copyin(9) result (0 on success, EFAULT on bad address).
 */
int os_copy_from_user(struct pqisrc_softstate *softs, void *dest_buf,
		void *src_buf, int size, int mode)
{
	int rval;

	rval = copyin(src_buf, dest_buf, size);
	return rval;
}
52 
53 /*
54  * Device open function for ioctl entry
55  */
56 static int smartpqi_open(struct cdev *cdev, int flags, int devtype,
57 		struct thread *td)
58 {
59 	int error = PQI_STATUS_SUCCESS;
60 
61 	return error;
62 }
63 
64 /*
65  * Device close function for ioctl entry
66  */
67 static int smartpqi_close(struct cdev *cdev, int flags, int devtype,
68 		struct thread *td)
69 {
70 	int error = PQI_STATUS_SUCCESS;
71 
72 	return error;
73 }
74 
75 /*
76  * ioctl for getting driver info
77  */
78 static void smartpqi_get_driver_info_ioctl(caddr_t udata, struct cdev *cdev)
79 {
80 	struct pqisrc_softstate *softs = cdev->si_drv1;
81 	pdriver_info driver_info = (pdriver_info)udata;
82 
83 	DBG_FUNC("IN udata = %p cdev = %p\n", udata, cdev);
84 
85 	driver_info->major_version = PQISRC_DRIVER_MAJOR;
86 	driver_info->minor_version = PQISRC_DRIVER_MINOR;
87 	driver_info->release_version = PQISRC_DRIVER_RELEASE;
88 	driver_info->build_revision = PQISRC_DRIVER_REVISION;
89 	driver_info->max_targets = PQI_MAX_DEVICES - 1;
90 	driver_info->max_io = softs->max_io_for_scsi_ml;
91 	driver_info->max_transfer_length = softs->pqi_cap.max_transfer_size;
92 
93 	DBG_FUNC("OUT\n");
94 }
95 
96 /*
97  * ioctl for getting controller info
98  */
99 static void smartpqi_get_pci_info_ioctl(caddr_t udata, struct cdev *cdev)
100 {
101 	struct pqisrc_softstate *softs = cdev->si_drv1;
102 	device_t dev = softs->os_specific.pqi_dev;
103 	pqi_pci_info_t *pci_info = (pqi_pci_info_t *)udata;
104 	uint32_t sub_vendor = 0;
105 	uint32_t sub_device = 0;
106 	uint32_t vendor = 0;
107 	uint32_t device = 0;
108 
109 	DBG_FUNC("IN udata = %p cdev = %p\n", udata, cdev);
110 
111 	pci_info->bus = pci_get_bus(dev);
112 	pci_info->dev_fn = pci_get_function(dev);
113 	pci_info->domain = pci_get_domain(dev);
114 	sub_vendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
115 	sub_device = pci_read_config(dev, PCIR_SUBDEV_0, 2);
116 	pci_info->board_id = ((sub_device << 16) & 0xffff0000) | sub_vendor;
117 	vendor = pci_get_vendor(dev);
118 	device =  pci_get_device(dev);
119 	pci_info->chip_id = ((device << 16) & 0xffff0000) | vendor;
120 	DBG_FUNC("OUT\n");
121 }
122 
123 
124 /*
125  * ioctl entry point for user
126  */
127 static int smartpqi_ioctl(struct cdev *cdev, u_long cmd, caddr_t udata,
128 		int flags, struct thread *td)
129 {
130 	int error = PQI_STATUS_SUCCESS;
131 	struct pqisrc_softstate *softs = cdev->si_drv1;
132 
133 	DBG_FUNC("IN cmd = 0x%lx udata = %p cdev = %p\n", cmd, udata, cdev);
134 
135 	if (!udata) {
136 		DBG_ERR("udata is null !!\n");
137 	}
138 
139 	if (pqisrc_ctrl_offline(softs)){
140 		DBG_ERR("Controller s offline !!\n");
141 		return ENOTTY;
142 	}
143 
144 	switch (cmd) {
145 		case CCISS_GETDRIVVER:
146 			smartpqi_get_driver_info_ioctl(udata, cdev);
147 			break;
148 		case CCISS_GETPCIINFO:
149 			smartpqi_get_pci_info_ioctl(udata, cdev);
150 			break;
151 		case SMARTPQI_PASS_THRU:
152 		case CCISS_PASSTHRU:
153 			error = pqisrc_passthru_ioctl(softs, udata, 0);
154 			error = PQI_STATUS_SUCCESS;
155 			break;
156 		case CCISS_REGNEWD:
157 			error = pqisrc_scan_devices(softs);
158 			break;
159 		default:
160 			DBG_WARN( "!IOCTL cmd 0x%lx not supported", cmd);
161 			error = ENOTTY;
162 			break;
163 	}
164 
165 	DBG_FUNC("OUT error = %d\n", error);
166 	return error;
167 }
168 
/* Forward declarations of the character-device methods defined above. */
static d_open_t         smartpqi_open;
static d_ioctl_t        smartpqi_ioctl;
static d_close_t        smartpqi_close;

/*
 * Character device switch for the /dev/smartpqiN management node;
 * only open, close, and ioctl are implemented.
 */
static struct cdevsw smartpqi_cdevsw =
{
	.d_version = D_VERSION,
	.d_open    = smartpqi_open,
	.d_close   = smartpqi_close,
	.d_ioctl   = smartpqi_ioctl,
	.d_name    = "smartpqi",
};
181 
182 /*
183  * Function to create device node for ioctl
184  */
185 int create_char_dev(struct pqisrc_softstate *softs, int card_index)
186 {
187 	int error = PQI_STATUS_SUCCESS;
188 
189 	DBG_FUNC("IN idx = %d\n", card_index);
190 
191 	softs->os_specific.cdev = make_dev(&smartpqi_cdevsw, card_index,
192 				UID_ROOT, GID_OPERATOR, 0640,
193 				"smartpqi%u", card_index);
194 	if(softs->os_specific.cdev) {
195 		softs->os_specific.cdev->si_drv1 = softs;
196 	} else {
197 		error = PQI_STATUS_FAILURE;
198 	}
199 
200 	DBG_FUNC("OUT error = %d\n", error);
201 	return error;
202 }
203 
204 /*
205  * Function to destroy device node for ioctl
206  */
207 void destroy_char_dev(struct pqisrc_softstate *softs)
208 {
209 	DBG_FUNC("IN\n");
210 	if (softs->os_specific.cdev) {
211 		destroy_dev(softs->os_specific.cdev);
212 		softs->os_specific.cdev = NULL;
213 	}
214 	DBG_FUNC("OUT\n");
215 }
216 
/*
 * Function used to send passthru commands to adapter
 * to support management tools. For eg. ssacli, sscon.
 *
 * Builds a RAID-path IU from the user's IOCTL_Command_struct, bounces
 * the data phase through a freshly allocated DMA buffer, submits the
 * request, waits for completion, and copies sense/response data and
 * (for reads) the data buffer back to the caller.  Returns
 * PQI_STATUS_SUCCESS or PQI_STATUS_FAILURE.
 */
int
pqisrc_passthru_ioctl(struct pqisrc_softstate *softs, void *arg, int mode)
{
	int ret = PQI_STATUS_SUCCESS;
	char *drv_buf = NULL;
	uint32_t tag = 0;
	IOCTL_Command_struct *iocommand = (IOCTL_Command_struct *)arg;
	dma_mem_t ioctl_dma_buf;
	pqisrc_raid_req_t request;
	raid_path_error_info_elem_t error_info;
	/* Default RAID-path inbound queue for submission and the matching
	 * outbound queue for the response (same default queue index). */
	ib_queue_t *ib_q = &softs->op_raid_ib_q[PQI_DEFAULT_IB_QUEUE];
	ob_queue_t *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE];
	rcb_t *rcb = NULL;

	memset(&request, 0, sizeof(request));
	memset(&error_info, 0, sizeof(error_info));

	DBG_FUNC("IN");

	if (pqisrc_ctrl_offline(softs))
		return PQI_STATUS_FAILURE;

	/* Validate the request before touching the hardware. */
	if (!arg)
		return (PQI_STATUS_FAILURE);

	/* A data transfer was requested but no buffer length supplied. */
	if (iocommand->buf_size < 1 &&
		iocommand->Request.Type.Direction != PQIIOCTL_NONE)
		return PQI_STATUS_FAILURE;
	/* The CDB must fit in the request's fixed-size cdb field. */
	if (iocommand->Request.CDBLen > sizeof(request.cdb))
		return PQI_STATUS_FAILURE;

	/* Only the four known transfer directions are accepted. */
	switch (iocommand->Request.Type.Direction) {
		case PQIIOCTL_NONE:
		case PQIIOCTL_WRITE:
		case PQIIOCTL_READ:
		case PQIIOCTL_BIDIRECTIONAL:
			break;
		default:
			return PQI_STATUS_FAILURE;
	}

	if (iocommand->buf_size > 0) {
		/* Allocate a DMA-able bounce buffer for the data phase. */
		memset(&ioctl_dma_buf, 0, sizeof(struct dma_mem));
		ioctl_dma_buf.tag = "Ioctl_PassthruCmd_Buffer";
		ioctl_dma_buf.size = iocommand->buf_size;
		ioctl_dma_buf.align = PQISRC_DEFAULT_DMA_ALIGN;
		/* allocate memory */
		ret = os_dma_mem_alloc(softs, &ioctl_dma_buf);
		if (ret) {
			DBG_ERR("Failed to Allocate dma mem for Ioctl PassthruCmd Buffer : %d\n", ret);
			ret = PQI_STATUS_FAILURE;
			goto out;
		}

		DBG_INFO("ioctl_dma_buf.dma_addr  = %p\n",(void*)ioctl_dma_buf.dma_addr);
		DBG_INFO("ioctl_dma_buf.virt_addr = %p\n",(void*)ioctl_dma_buf.virt_addr);

		drv_buf = (char *)ioctl_dma_buf.virt_addr;
		/* Host-to-device transfer: stage the user data into the
		 * bounce buffer before submitting the command. */
		if (iocommand->Request.Type.Direction & PQIIOCTL_WRITE) {
        		if ((ret = os_copy_from_user(softs, (void *)drv_buf, (void *)iocommand->buf,
						iocommand->buf_size, mode)) != 0) {
				ret = PQI_STATUS_FAILURE;
				goto free_mem;
			}
		}
	}

	/* Build the RAID-path IU: header length covers everything up to
	 * and including the single SG descriptor used below. */
	request.header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST;
	request.header.iu_length = offsetof(pqisrc_raid_req_t, sg_descriptors[1]) -
									PQI_REQUEST_HEADER_LENGTH;
	memcpy(request.lun_number, iocommand->LUN_info.LunAddrBytes,
		sizeof(request.lun_number));
	memcpy(request.cdb, iocommand->Request.CDB, iocommand->Request.CDBLen);
	request.additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_0;

	/* Map the CCISS direction to the SOP data direction.
	 * NOTE(review): WRITE maps to FROM_DEVICE and READ to TO_DEVICE
	 * here; direction appears to be named from the device's point of
	 * view — confirm against the SOP/PQI spec before changing. */
	switch (iocommand->Request.Type.Direction) {
	case PQIIOCTL_NONE:
		request.data_direction = SOP_DATA_DIR_NONE;
		break;
	case PQIIOCTL_WRITE:
		request.data_direction = SOP_DATA_DIR_FROM_DEVICE;
		break;
	case PQIIOCTL_READ:
		request.data_direction = SOP_DATA_DIR_TO_DEVICE;
		break;
	case PQIIOCTL_BIDIRECTIONAL:
		request.data_direction = SOP_DATA_DIR_BIDIRECTIONAL;
		break;
	}

	request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	/* Single SG element describing the whole bounce buffer. */
	if (iocommand->buf_size > 0) {
		request.buffer_length = iocommand->buf_size;
		request.sg_descriptors[0].addr = ioctl_dma_buf.dma_addr;
		request.sg_descriptors[0].len = iocommand->buf_size;
		request.sg_descriptors[0].flags =  SG_FLAG_LAST;
	}
	/* Reserve a command tag; it doubles as the rcb/error-info index. */
	tag = pqisrc_get_tag(&softs->taglist);
	if (INVALID_ELEM == tag) {
		DBG_ERR("Tag not available\n");
		ret = PQI_STATUS_FAILURE;
		goto free_mem;
	}
	request.request_id = tag;
	request.response_queue_id = ob_q->q_id;
	request.error_index = request.request_id;
	rcb = &softs->rcb[tag];

	/* Arm the completion callbacks and pending flag before submit so
	 * the response handler finds a fully initialized rcb. */
	rcb->success_cmp_callback = pqisrc_process_internal_raid_response_success;
	rcb->error_cmp_callback = pqisrc_process_internal_raid_response_error;
	rcb->tag = tag;
	rcb->req_pending = true;
	/* Submit Command */
	ret = pqisrc_submit_cmnd(softs, ib_q, &request);
	if (ret != PQI_STATUS_SUCCESS) {
		DBG_ERR("Unable to submit command\n");
		goto err_out;
	}

	/* Block until the firmware completes the request (or timeout). */
	ret = pqisrc_wait_on_condition(softs, rcb);
	if (ret != PQI_STATUS_SUCCESS) {
		DBG_ERR("Passthru IOCTL cmd timed out !!\n");
		goto err_out;
	}

	memset(&iocommand->error_info, 0, sizeof(iocommand->error_info));


	/* On error completion, translate firmware error info into the
	 * caller's CCISS-style error/sense fields. */
	if (rcb->status) {
		size_t sense_data_length;

		memcpy(&error_info, rcb->error_info, sizeof(error_info));
		iocommand->error_info.ScsiStatus = error_info.status;
		sense_data_length = error_info.sense_data_len;

		/* Fall back to response data when no sense data present. */
		if (!sense_data_length)
			sense_data_length = error_info.resp_data_len;

		/* Clamp to the firmware error-info data area ... */
		if (sense_data_length &&
			(sense_data_length > sizeof(error_info.data)))
				sense_data_length = sizeof(error_info.data);

		if (sense_data_length) {
			/* ... and to the caller's SenseInfo field. */
			if (sense_data_length >
				sizeof(iocommand->error_info.SenseInfo))
				sense_data_length =
					sizeof(iocommand->error_info.SenseInfo);
			memcpy (iocommand->error_info.SenseInfo,
					error_info.data, sense_data_length);
			iocommand->error_info.SenseLen = sense_data_length;
		}

		/* A data underflow still carries valid data; treat the
		 * request as successful so the copy-out below happens. */
		if (error_info.data_out_result ==
				PQI_RAID_DATA_IN_OUT_UNDERFLOW){
			rcb->status = REQUEST_SUCCESS;
		}
	}

	/* Device-to-host transfer: return the bounce buffer contents. */
	if (rcb->status == REQUEST_SUCCESS && iocommand->buf_size > 0 &&
		(iocommand->Request.Type.Direction & PQIIOCTL_READ)) {

		if ((ret = os_copy_to_user(softs, (void*)iocommand->buf,
			(void*)drv_buf, iocommand->buf_size, mode)) != 0) {
				DBG_ERR("Failed to copy the response\n");
				goto err_out;
		}
	}

	/* Success path: release rcb, tag, and the DMA buffer. */
	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist, request.request_id);
	if (iocommand->buf_size > 0)
			os_dma_mem_free(softs,&ioctl_dma_buf);

	DBG_FUNC("OUT\n");
	return ret;
/* Unwind labels: err_out also releases rcb/tag, then falls through to
 * free_mem (DMA buffer) and out (log + failure return). */
err_out:
	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist, request.request_id);

free_mem:
	if (iocommand->buf_size > 0)
		os_dma_mem_free(softs, &ioctl_dma_buf);

out:
	DBG_FUNC("Failed OUT\n");
	return PQI_STATUS_FAILURE;
}
408