// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 * Copyright (c) 2017-2021 Christoph Hellwig.
 */
#include <linux/ptrace.h>	/* for force_successful_syscall_return */
#include <linux/nvme_ioctl.h>
#include <linux/io_uring/cmd.h>
#include "nvme.h"

enum {
	NVME_IOCTL_VEC		= (1 << 0),
	NVME_IOCTL_PARTITION	= (1 << 1),
};

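/*
 * Decide whether an unprivileged caller may pass the given command through
 * to the device.  Anything not explicitly allowed below falls back to
 * requiring CAP_SYS_ADMIN.
 */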
static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
		unsigned int flags, bool open_for_write)
{
	u32 effects;

	/*
	 * Do not allow unprivileged passthrough on partitions, as that allows an
	 * escape from the containment of the partition.
	 */
	if (flags & NVME_IOCTL_PARTITION)
		goto admin;

	/*
	 * Do not allow unprivileged processes to send vendor specific or fabrics
	 * commands as we can't be sure about their effects.
	 */
	if (c->common.opcode >= nvme_cmd_vendor_start ||
	    c->common.opcode == nvme_fabrics_command)
		goto admin;

	/*
	 * Do not allow unprivileged passthrough of admin commands except
	 * for a subset of identify commands that contain information required
	 * to form proper I/O commands in userspace and do not expose any
	 * potentially sensitive information.
	 */
	if (!ns) {
		if (c->common.opcode == nvme_admin_identify) {
			switch (c->identify.cns) {
			case NVME_ID_CNS_NS:
			case NVME_ID_CNS_CS_NS:
			case NVME_ID_CNS_NS_CS_INDEP:
			case NVME_ID_CNS_CS_CTRL:
			case NVME_ID_CNS_CTRL:
				return true;
			}
		}
		goto admin;
	}

	/*
	 * Check if the controller provides a Commands Supported and Effects log
	 * and marks this command as supported.  If not, reject unprivileged
	 * passthrough.
	 */
	effects = nvme_command_effects(ns->ctrl, ns, c->common.opcode);
	if (!(effects & NVME_CMD_EFFECTS_CSUPP))
		goto admin;

	/*
	 * Don't allow passthrough for commands that have intrusive (or unknown)
	 * effects.
	 */
	if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
			NVME_CMD_EFFECTS_UUID_SEL |
			NVME_CMD_EFFECTS_SCOPE_MASK))
		goto admin;

	/*
	 * Only allow I/O commands that transfer data to the controller or that
	 * change the logical block contents if the file descriptor is open for
	 * writing.
	 */
	if ((nvme_is_write(c) || (effects & NVME_CMD_EFFECTS_LBCC)) &&
	    !open_for_write)
		goto admin;

	return true;
admin:
	return capable(CAP_SYS_ADMIN);
}

/*
 * Convert integer values from ioctl structures to user pointers, silently
 * ignoring the upper bits in the compat case to match behaviour of 32-bit
 * kernels.
 */
static void __user *nvme_to_user_ptr(uintptr_t ptrval)
{
	if (in_compat_syscall())
		ptrval = (compat_uptr_t)ptrval;
	return (void __user *)ptrval;
}

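/*
 * Allocate a request for the passthrough command and mark it as coming
 * from userspace.
 */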
static struct request *nvme_alloc_user_request(struct request_queue *q,
		struct nvme_command *cmd, blk_opf_t rq_flags,
		blk_mq_req_flags_t blk_flags)
{
	struct request *req;

	req = blk_mq_alloc_request(q, nvme_req_op(cmd) | rq_flags, blk_flags);
	if (IS_ERR(req))
		return req;
	nvme_init_request(req, cmd);
	nvme_req(req)->flags |= NVME_REQ_USERCMD;
	return req;
}

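/*
 * Map the user data buffer (plain, vectored or io_uring fixed buffer) and,
 * if present, the user metadata buffer into the request.  The request is
 * freed on failure.
 */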
static int nvme_map_user_request(struct request *req, u64 ubuffer,
		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
		u32 meta_seed, struct io_uring_cmd *ioucmd, unsigned int flags)
{
	struct request_queue *q = req->q;
	struct nvme_ns *ns = q->queuedata;
	struct block_device *bdev = ns ? ns->disk->part0 : NULL;
	struct bio *bio = NULL;
	int ret;

	if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
		struct iov_iter iter;

		/* fixedbufs is only for non-vectored io */
		if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC))
			return -EINVAL;
		ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
				rq_data_dir(req), &iter, ioucmd);
		if (ret < 0)
			goto out;
		ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
	} else {
		ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer),
				bufflen, GFP_KERNEL, flags & NVME_IOCTL_VEC, 0,
				0, rq_data_dir(req));
	}

	if (ret)
		goto out;

	bio = req->bio;
	if (bdev) {
		bio_set_dev(bio, bdev);
		if (meta_buffer && meta_len) {
			ret = bio_integrity_map_user(bio, meta_buffer, meta_len,
						     meta_seed);
			if (ret)
				goto out_unmap;
			req->cmd_flags |= REQ_INTEGRITY;
		}
	}

	return ret;

out_unmap:
	if (bio)
		blk_rq_unmap_user(bio);
out:
	blk_mq_free_request(req);
	return ret;
}

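/*
 * Synchronous passthrough path used by the ioctl handlers: allocate and map
 * the request, execute it, copy back the completion result and clean up.
 */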
static int nvme_submit_user_cmd(struct request_queue *q,
		struct nvme_command *cmd, u64 ubuffer, unsigned bufflen,
		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
		u64 *result, unsigned timeout, unsigned int flags)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_ctrl *ctrl;
	struct request *req;
	struct bio *bio;
	u32 effects;
	int ret;

	req = nvme_alloc_user_request(q, cmd, 0, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout;
	if (ubuffer && bufflen) {
		ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
				meta_len, meta_seed, NULL, flags);
		if (ret)
			return ret;
	}

	bio = req->bio;
	ctrl = nvme_req(req)->ctrl;

	effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
	ret = nvme_execute_rq(req, false);
	if (result)
		*result = le64_to_cpu(nvme_req(req)->result.u64);
	if (bio)
		blk_rq_unmap_user(bio);
	blk_mq_free_request(req);

	if (effects)
		nvme_passthru_end(ctrl, ns, effects, cmd, ret);

	return ret;
}

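/*
 * Handle NVME_IOCTL_SUBMIT_IO: build a read, write or compare command from
 * the legacy struct nvme_user_io format.
 */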
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;
	void __user *metadata;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	if (io.flags)
		return -EINVAL;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		break;
	default:
		return -EINVAL;
	}

	length = (io.nblocks + 1) << ns->head->lba_shift;

	if ((io.control & NVME_RW_PRINFO_PRACT) &&
	    (ns->head->ms == ns->head->pi_size)) {
		/*
		 * Protection information is stripped/inserted by the
		 * controller.
		 */
		if (nvme_to_user_ptr(io.metadata))
			return -EINVAL;
		meta_len = 0;
		metadata = NULL;
	} else {
		meta_len = (io.nblocks + 1) * ns->head->ms;
		metadata = nvme_to_user_ptr(io.metadata);
	}

	if (ns->head->features & NVME_NS_EXT_LBAS) {
		length += meta_len;
		meta_len = 0;
	} else if (meta_len) {
		if ((io.metadata & 3) || !io.metadata)
			return -EINVAL;
	}

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->head->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);

	return nvme_submit_user_cmd(ns->queue, &c, io.addr, length, metadata,
			meta_len, lower_32_bits(io.slba), NULL, 0, 0);
}

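/*
 * A passthrough command issued on a namespace node must target that
 * namespace; reject anything else.
 */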
static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
					struct nvme_ns *ns, __u32 nsid)
{
	if (ns && nsid != ns->head->ns_id) {
		dev_err(ctrl->device,
			"%s: nsid (%u) in cmd does not match nsid (%u) "
			"of namespace\n",
			current->comm, nsid, ns->head->ns_id);
		return false;
	}

	return true;
}

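/*
 * Handle the struct nvme_passthru_cmd based ioctls (NVME_IOCTL_ADMIN_CMD
 * and NVME_IOCTL_IO_CMD) with a 32-bit result field.
 */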
static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
		struct nvme_passthru_cmd __user *ucmd, unsigned int flags,
		bool open_for_write)
{
	struct nvme_passthru_cmd cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	u64 result;
	int status;

	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;
	if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
	c.common.cdw15 = cpu_to_le32(cmd.cdw15);

	if (!nvme_cmd_allowed(ns, &c, 0, open_for_write))
		return -EACCES;

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
			cmd.metadata_len, 0, &result, timeout, 0);

	if (status >= 0) {
		if (put_user(result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

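/*
 * Same as nvme_user_cmd(), but for the struct nvme_passthru_cmd64 ioctls
 * that carry a 64-bit result and can request vectored I/O via the flags
 * argument.
 */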
static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
		struct nvme_passthru_cmd64 __user *ucmd, unsigned int flags,
		bool open_for_write)
{
	struct nvme_passthru_cmd64 cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	int status;

	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;
	if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
	c.common.cdw15 = cpu_to_le32(cmd.cdw15);

	if (!nvme_cmd_allowed(ns, &c, flags, open_for_write))
		return -EACCES;

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
			cmd.metadata_len, 0, &cmd.result, timeout, flags);

	if (status >= 0) {
		if (put_user(cmd.result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

struct nvme_uring_data {
	__u64	metadata;
	__u64	addr;
	__u32	data_len;
	__u32	metadata_len;
	__u32	timeout_ms;
};

/*
 * This overlays struct io_uring_cmd pdu.
 * Expect build errors if this grows larger than that.
 */
struct nvme_uring_cmd_pdu {
	struct request *req;
	struct bio *bio;
	u64 result;
	int status;
};

static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
		struct io_uring_cmd *ioucmd)
{
	return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu;
}

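/* Task-work completion: unmap the user buffer and post the completion. */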
static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
			       unsigned issue_flags)
{
	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);

	if (pdu->bio)
		blk_rq_unmap_user(pdu->bio);
	io_uring_cmd_done(ioucmd, pdu->status, pdu->result, issue_flags);
}

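/*
 * Request completion handler: stash status and result in the pdu, then
 * complete inline for polled queues or defer to task work otherwise.
 */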
static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
						blk_status_t err)
{
	struct io_uring_cmd *ioucmd = req->end_io_data;
	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);

	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		pdu->status = -EINTR;
	else
		pdu->status = nvme_req(req)->status;
	pdu->result = le64_to_cpu(nvme_req(req)->result.u64);

	/*
	 * For iopoll, complete it directly. Note that using the uring_cmd
	 * helper for this is safe only because we check blk_rq_is_poll().
	 * As that returns false if we're NOT on a polled queue, then it's
	 * safe to use the polled completion helper.
	 *
	 * Otherwise, move the completion to task work.
	 */
	if (blk_rq_is_poll(req)) {
		if (pdu->bio)
			blk_rq_unmap_user(pdu->bio);
		io_uring_cmd_iopoll_done(ioucmd, pdu->result, pdu->status);
	} else {
		io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
	}

	return RQ_END_IO_FREE;
}

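/*
 * io_uring passthrough issue path: build the NVMe command from the big SQE,
 * map the user buffers and submit the request asynchronously.
 */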
static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
		struct io_uring_cmd *ioucmd, unsigned int issue_flags, bool vec)
{
	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
	const struct nvme_uring_cmd *cmd = io_uring_sqe_cmd(ioucmd->sqe);
	struct request_queue *q = ns ? ns->queue : ctrl->admin_q;
	struct nvme_uring_data d;
	struct nvme_command c;
	struct request *req;
	blk_opf_t rq_flags = REQ_ALLOC_CACHE;
	blk_mq_req_flags_t blk_flags = 0;
	int ret;

	c.common.opcode = READ_ONCE(cmd->opcode);
	c.common.flags = READ_ONCE(cmd->flags);
	if (c.common.flags)
		return -EINVAL;

	c.common.command_id = 0;
	c.common.nsid = cpu_to_le32(cmd->nsid);
	if (!nvme_validate_passthru_nsid(ctrl, ns, le32_to_cpu(c.common.nsid)))
		return -EINVAL;

	c.common.cdw2[0] = cpu_to_le32(READ_ONCE(cmd->cdw2));
	c.common.cdw2[1] = cpu_to_le32(READ_ONCE(cmd->cdw3));
	c.common.metadata = 0;
	c.common.dptr.prp1 = c.common.dptr.prp2 = 0;
	c.common.cdw10 = cpu_to_le32(READ_ONCE(cmd->cdw10));
	c.common.cdw11 = cpu_to_le32(READ_ONCE(cmd->cdw11));
	c.common.cdw12 = cpu_to_le32(READ_ONCE(cmd->cdw12));
	c.common.cdw13 = cpu_to_le32(READ_ONCE(cmd->cdw13));
	c.common.cdw14 = cpu_to_le32(READ_ONCE(cmd->cdw14));
	c.common.cdw15 = cpu_to_le32(READ_ONCE(cmd->cdw15));

	if (!nvme_cmd_allowed(ns, &c, 0, ioucmd->file->f_mode & FMODE_WRITE))
		return -EACCES;

	d.metadata = READ_ONCE(cmd->metadata);
	d.addr = READ_ONCE(cmd->addr);
	d.data_len = READ_ONCE(cmd->data_len);
	d.metadata_len = READ_ONCE(cmd->metadata_len);
	d.timeout_ms = READ_ONCE(cmd->timeout_ms);

	if (issue_flags & IO_URING_F_NONBLOCK) {
		rq_flags |= REQ_NOWAIT;
		blk_flags = BLK_MQ_REQ_NOWAIT;
	}
	if (issue_flags & IO_URING_F_IOPOLL)
		rq_flags |= REQ_POLLED;

	req = nvme_alloc_user_request(q, &c, rq_flags, blk_flags);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->timeout = d.timeout_ms ? msecs_to_jiffies(d.timeout_ms) : 0;

	if (d.addr && d.data_len) {
		ret = nvme_map_user_request(req, d.addr,
			d.data_len, nvme_to_user_ptr(d.metadata),
			d.metadata_len, 0, ioucmd, vec);
		if (ret)
			return ret;
	}

	/* to free bio on completion, as req->bio will be null at that time */
	pdu->bio = req->bio;
	pdu->req = req;
	req->end_io_data = ioucmd;
	req->end_io = nvme_uring_cmd_end_io;
	blk_execute_rq_nowait(req, false);
	return -EIOCBQUEUED;
}

static bool is_ctrl_ioctl(unsigned int cmd)
{
	if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
		return true;
	if (is_sed_ioctl(cmd))
		return true;
	return false;
}

static int nvme_ctrl_ioctl(struct nvme_ctrl *ctrl, unsigned int cmd,
		void __user *argp, bool open_for_write)
{
	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ctrl, NULL, argp, 0, open_for_write);
	case NVME_IOCTL_ADMIN64_CMD:
		return nvme_user_cmd64(ctrl, NULL, argp, 0, open_for_write);
	default:
		return sed_ioctl(ctrl->opal_dev, cmd, argp);
	}
}

#ifdef COMPAT_FOR_U64_ALIGNMENT
struct nvme_user_io32 {
	__u8	opcode;
	__u8	flags;
	__u16	control;
	__u16	nblocks;
	__u16	rsvd;
	__u64	metadata;
	__u64	addr;
	__u64	slba;
	__u32	dsmgmt;
	__u32	reftag;
	__u16	apptag;
	__u16	appmask;
} __attribute__((__packed__));
#define NVME_IOCTL_SUBMIT_IO32	_IOW('N', 0x42, struct nvme_user_io32)
#endif /* COMPAT_FOR_U64_ALIGNMENT */

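/* Dispatch the namespace-scoped ioctls. */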
static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
		void __user *argp, unsigned int flags, bool open_for_write)
{
	switch (cmd) {
	case NVME_IOCTL_ID:
		force_successful_syscall_return();
		return ns->head->ns_id;
	case NVME_IOCTL_IO_CMD:
		return nvme_user_cmd(ns->ctrl, ns, argp, flags, open_for_write);
	/*
	 * struct nvme_user_io can have different padding on some 32-bit ABIs.
	 * Just accept the compat version as all fields that are used are the
	 * same size and at the same offset.
	 */
#ifdef COMPAT_FOR_U64_ALIGNMENT
	case NVME_IOCTL_SUBMIT_IO32:
#endif
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, argp);
	case NVME_IOCTL_IO64_CMD_VEC:
		flags |= NVME_IOCTL_VEC;
		fallthrough;
	case NVME_IOCTL_IO64_CMD:
		return nvme_user_cmd64(ns->ctrl, ns, argp, flags,
				       open_for_write);
	default:
		return -ENOTTY;
	}
}

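/* ioctl entry point for the per-namespace block device. */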
int nvme_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;
	bool open_for_write = mode & BLK_OPEN_WRITE;
	void __user *argp = (void __user *)arg;
	unsigned int flags = 0;

	if (bdev_is_partition(bdev))
		flags |= NVME_IOCTL_PARTITION;

	if (is_ctrl_ioctl(cmd))
		return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write);
	return nvme_ns_ioctl(ns, cmd, argp, flags, open_for_write);
}

long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns =
		container_of(file_inode(file)->i_cdev, struct nvme_ns, cdev);
	bool open_for_write = file->f_mode & FMODE_WRITE;
	void __user *argp = (void __user *)arg;

	if (is_ctrl_ioctl(cmd))
		return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write);
	return nvme_ns_ioctl(ns, cmd, argp, 0, open_for_write);
}

static int nvme_uring_cmd_checks(unsigned int issue_flags)
{
	/* NVMe passthrough requires big SQE/CQE support */
	if ((issue_flags & (IO_URING_F_SQE128|IO_URING_F_CQE32)) !=
	    (IO_URING_F_SQE128|IO_URING_F_CQE32))
		return -EOPNOTSUPP;
	return 0;
}

static int nvme_ns_uring_cmd(struct nvme_ns *ns, struct io_uring_cmd *ioucmd,
			     unsigned int issue_flags)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	int ret;

	BUILD_BUG_ON(sizeof(struct nvme_uring_cmd_pdu) > sizeof(ioucmd->pdu));

	ret = nvme_uring_cmd_checks(issue_flags);
	if (ret)
		return ret;

	switch (ioucmd->cmd_op) {
	case NVME_URING_CMD_IO:
		ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, false);
		break;
	case NVME_URING_CMD_IO_VEC:
		ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, true);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
	struct nvme_ns *ns = container_of(file_inode(ioucmd->file)->i_cdev,
			struct nvme_ns, cdev);

	return nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
}

int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
				 struct io_comp_batch *iob,
				 unsigned int poll_flags)
{
	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
	struct request *req = pdu->req;

	if (req && blk_rq_is_poll(req))
		return blk_rq_poll(req, iob, poll_flags);
	return 0;
}
#ifdef CONFIG_NVME_MULTIPATH
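/*
 * Controller ioctls issued on the ns_head node: take a controller reference
 * and drop the SRCU read lock before issuing the command.
 */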
static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
		void __user *argp, struct nvme_ns_head *head, int srcu_idx,
		bool open_for_write)
	__releases(&head->srcu)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	int ret;

	nvme_get_ctrl(ns->ctrl);
	srcu_read_unlock(&head->srcu, srcu_idx);
	ret = nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write);

	nvme_put_ctrl(ctrl);
	return ret;
}

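/* ioctl entry point for the multipath (ns_head) block device. */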
int nvme_ns_head_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns_head *head = bdev->bd_disk->private_data;
	bool open_for_write = mode & BLK_OPEN_WRITE;
	void __user *argp = (void __user *)arg;
	struct nvme_ns *ns;
	int srcu_idx, ret = -EWOULDBLOCK;
	unsigned int flags = 0;

	if (bdev_is_partition(bdev))
		flags |= NVME_IOCTL_PARTITION;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (!ns)
		goto out_unlock;

	/*
	 * Handle ioctls that apply to the controller instead of the namespace
	 * separately and drop the ns SRCU reference early.  This avoids a
	 * deadlock when deleting namespaces using the passthrough interface.
	 */
	if (is_ctrl_ioctl(cmd))
		return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx,
					       open_for_write);

	ret = nvme_ns_ioctl(ns, cmd, argp, flags, open_for_write);
out_unlock:
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	bool open_for_write = file->f_mode & FMODE_WRITE;
	struct cdev *cdev = file_inode(file)->i_cdev;
	struct nvme_ns_head *head =
		container_of(cdev, struct nvme_ns_head, cdev);
	void __user *argp = (void __user *)arg;
	struct nvme_ns *ns;
	int srcu_idx, ret = -EWOULDBLOCK;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (!ns)
		goto out_unlock;

	if (is_ctrl_ioctl(cmd))
		return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx,
				open_for_write);

	ret = nvme_ns_ioctl(ns, cmd, argp, 0, open_for_write);
out_unlock:
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
		unsigned int issue_flags)
{
	struct cdev *cdev = file_inode(ioucmd->file)->i_cdev;
	struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev);
	int srcu_idx = srcu_read_lock(&head->srcu);
	struct nvme_ns *ns = nvme_find_path(head);
	int ret = -EINVAL;

	if (ns)
		ret = nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}
#endif /* CONFIG_NVME_MULTIPATH */

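/*
 * io_uring passthrough entry point for admin commands on the controller
 * character device.
 */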
int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
	struct nvme_ctrl *ctrl = ioucmd->file->private_data;
	int ret;

	/* IOPOLL not supported yet */
	if (issue_flags & IO_URING_F_IOPOLL)
		return -EOPNOTSUPP;

	ret = nvme_uring_cmd_checks(issue_flags);
	if (ret)
		return ret;

	switch (ioucmd->cmd_op) {
	case NVME_URING_CMD_ADMIN:
		ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, false);
		break;
	case NVME_URING_CMD_ADMIN_VEC:
		ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, true);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

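/*
 * Deprecated NVME_IOCTL_IO_CMD on the controller character device; only
 * supported when the controller has exactly one namespace.
 */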
static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp,
		bool open_for_write)
{
	struct nvme_ns *ns;
	int ret, srcu_idx;

	srcu_idx = srcu_read_lock(&ctrl->srcu);
	if (list_empty(&ctrl->namespaces)) {
		ret = -ENOTTY;
		goto out_unlock;
	}

	ns = list_first_or_null_rcu(&ctrl->namespaces, struct nvme_ns, list);
	if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
		dev_warn(ctrl->device,
			"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	dev_warn(ctrl->device,
		"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
	if (!nvme_get_ns(ns)) {
		ret = -ENXIO;
		goto out_unlock;
	}
	srcu_read_unlock(&ctrl->srcu, srcu_idx);

	ret = nvme_user_cmd(ctrl, ns, argp, 0, open_for_write);
	nvme_put_ns(ns);
	return ret;

out_unlock:
	srcu_read_unlock(&ctrl->srcu, srcu_idx);
	return ret;
}

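/* ioctl entry point for the controller character device. */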
long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	bool open_for_write = file->f_mode & FMODE_WRITE;
	struct nvme_ctrl *ctrl = file->private_data;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ctrl, NULL, argp, 0, open_for_write);
	case NVME_IOCTL_ADMIN64_CMD:
		return nvme_user_cmd64(ctrl, NULL, argp, 0, open_for_write);
	case NVME_IOCTL_IO_CMD:
		return nvme_dev_user_cmd(ctrl, argp, open_for_write);
	case NVME_IOCTL_RESET:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		dev_warn(ctrl->device, "resetting controller\n");
		return nvme_reset_ctrl_sync(ctrl);
	case NVME_IOCTL_SUBSYS_RESET:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		return nvme_reset_subsystem(ctrl);
	case NVME_IOCTL_RESCAN:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		nvme_queue_scan(ctrl);
		return 0;
	default:
		return -ENOTTY;
	}
}