1 /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
2 /* Copyright (c) 2019 Mellanox Technologies. */
3 
4 #include "ecpf.h"
5 
6 bool mlx5_read_embedded_cpu(struct mlx5_core_dev *dev)
7 {
8 	return (ioread32be(&dev->iseg->initializing) >> MLX5_ECPU_BIT_NUM) & 1;
9 }
10 
11 static int mlx5_peer_pf_enable_hca(struct mlx5_core_dev *dev)
12 {
13 	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {};
14 	u32 in[MLX5_ST_SZ_DW(enable_hca_in)]   = {};
15 
16 	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
17 	MLX5_SET(enable_hca_in, in, function_id, 0);
18 	MLX5_SET(enable_hca_in, in, embedded_cpu_function, 0);
19 	return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
20 }
21 
22 static int mlx5_peer_pf_disable_hca(struct mlx5_core_dev *dev)
23 {
24 	u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {};
25 	u32 in[MLX5_ST_SZ_DW(disable_hca_in)]   = {};
26 
27 	MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
28 	MLX5_SET(disable_hca_in, in, function_id, 0);
29 	MLX5_SET(disable_hca_in, in, embedded_cpu_function, 0);
30 	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
31 }
32 
33 static int mlx5_peer_pf_init(struct mlx5_core_dev *dev)
34 {
35 	int err;
36 
37 	err = mlx5_peer_pf_enable_hca(dev);
38 	if (err)
39 		mlx5_core_err(dev, "Failed to enable peer PF HCA err(%d)\n",
40 			      err);
41 
42 	return err;
43 }
44 
45 static void mlx5_peer_pf_cleanup(struct mlx5_core_dev *dev)
46 {
47 	int err;
48 
49 	err = mlx5_peer_pf_disable_hca(dev);
50 	if (err) {
51 		mlx5_core_err(dev, "Failed to disable peer PF HCA err(%d)\n",
52 			      err);
53 		return;
54 	}
55 
56 	err = mlx5_wait_for_pages(dev, &dev->priv.peer_pf_pages);
57 	if (err)
58 		mlx5_core_warn(dev, "Timeout reclaiming peer PF pages err(%d)\n",
59 			       err);
60 }
61 
62 int mlx5_ec_init(struct mlx5_core_dev *dev)
63 {
64 	int err = 0;
65 
66 	if (!mlx5_core_is_ecpf(dev))
67 		return 0;
68 
69 	/* ECPF shall enable HCA for peer PF in the same way a PF
70 	 * does this for its VFs.
71 	 */
72 	err = mlx5_peer_pf_init(dev);
73 	if (err)
74 		return err;
75 
76 	return 0;
77 }
78 
79 void mlx5_ec_cleanup(struct mlx5_core_dev *dev)
80 {
81 	if (!mlx5_core_is_ecpf(dev))
82 		return;
83 
84 	mlx5_peer_pf_cleanup(dev);
85 }
86 
87 static int mlx5_query_host_params_context(struct mlx5_core_dev *dev,
88 					  u32 *out, int outlen)
89 {
90 	u32 in[MLX5_ST_SZ_DW(query_host_params_in)] = {};
91 
92 	MLX5_SET(query_host_params_in, in, opcode,
93 		 MLX5_CMD_OP_QUERY_HOST_PARAMS);
94 
95 	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
96 }
97 
98 int mlx5_query_host_params_num_vfs(struct mlx5_core_dev *dev, int *num_vf)
99 {
100 	u32 out[MLX5_ST_SZ_DW(query_host_params_out)] = {};
101 	int err;
102 
103 	err = mlx5_query_host_params_context(dev, out, sizeof(out));
104 	if (err)
105 		return err;
106 
107 	*num_vf = MLX5_GET(query_host_params_out, out,
108 			   host_params_context.host_num_of_vfs);
109 	mlx5_core_dbg(dev, "host_num_of_vfs %d\n", *num_vf);
110 
111 	return 0;
112 }
113