/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <dev/mlx5/driver.h>

#include "mlx5_core.h"

/* Handling for queue buffers -- we allocate a bunch of memory and
 * register it in a memory region at HCA virtual address 0. If the
 * requested size is > max_direct, we split the allocation into
 * multiple pages, so we don't require too much contiguous memory.
 */
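
/*
 * Illustrative sketch only -- a hypothetical consumer, not part of this
 * driver. The usual life cycle of a queue buffer: allocate it, convert the
 * bus addresses of its backing pages into the big-endian PAS array carried
 * by the firmware command that creates the queue, and free it on teardown.
 * Note that the max_direct argument is retained for Linux API compatibility
 * and is not consulted by this implementation.
 */
#if 0
static int
example_queue_buf(struct mlx5_core_dev *dev)
{
	struct mlx5_buf buf;
	__be64 pas[4];
	int err;

	/* Four pages of queue memory; buf.npages will be 4. */
	err = mlx5_buf_alloc(dev, 4 * PAGE_SIZE, PAGE_SIZE, &buf);
	if (err != 0)
		return (err);

	/* One big-endian bus address per backing page. */
	mlx5_fill_page_array(&buf, pas);

	/* ... pass "pas" to the queue-create firmware command ... */

	mlx5_buf_free(dev, &buf);
	return (0);
}
#endif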

/*
 * Callback invoked by bus_dmamap_load(); records the bus address of every
 * backing page and reports the outcome through buf->load_done.
 */
static void
mlx5_buf_load_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct mlx5_buf *buf;
	uint8_t owned;
	int x;

	buf = (struct mlx5_buf *)arg;
	owned = MLX5_DMA_OWNED(buf->dev);

	if (!owned)
		MLX5_DMA_LOCK(buf->dev);

	if (error == 0) {
		for (x = 0; x != nseg; x++) {
			buf->page_list[x] = segs[x].ds_addr;
			KASSERT(segs[x].ds_len == PAGE_SIZE, ("Invalid segment size"));
		}
		buf->load_done = MLX5_LOAD_ST_SUCCESS;
	} else {
		buf->load_done = MLX5_LOAD_ST_FAILURE;
	}
	MLX5_DMA_DONE(buf->dev);

	if (!owned)
		MLX5_DMA_UNLOCK(buf->dev);
}

int
mlx5_buf_alloc(struct mlx5_core_dev *dev, int size,
    int max_direct, struct mlx5_buf *buf)
{
	int err;

	buf->npages = howmany(size, PAGE_SIZE);
	buf->page_shift = PAGE_SHIFT;
	buf->load_done = MLX5_LOAD_ST_NONE;
	buf->dev = dev;
	buf->page_list = kcalloc(buf->npages, sizeof(*buf->page_list),
	    GFP_KERNEL);

	err = -bus_dma_tag_create(
	    bus_get_dma_tag(dev->pdev->dev.bsddev),
	    PAGE_SIZE,		/* alignment */
	    0,			/* no boundary */
	    BUS_SPACE_MAXADDR,	/* lowaddr */
	    BUS_SPACE_MAXADDR,	/* highaddr */
	    NULL, NULL,		/* filter, filterarg */
	    PAGE_SIZE * buf->npages,	/* maxsize */
	    buf->npages,	/* nsegments */
	    PAGE_SIZE,		/* maxsegsize */
	    0,			/* flags */
	    NULL, NULL,		/* lockfunc, lockfuncarg */
	    &buf->dma_tag);

	if (err != 0)
		goto err_dma_tag;

	/* allocate memory */
	err = -bus_dmamem_alloc(buf->dma_tag, &buf->direct.buf,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT, &buf->dma_map);
	if (err != 0)
		goto err_dma_alloc;

	/* load memory into DMA; the result is reported by the callback */
	MLX5_DMA_LOCK(dev);
	(void) bus_dmamap_load(
	    buf->dma_tag, buf->dma_map, buf->direct.buf,
	    PAGE_SIZE * buf->npages, &mlx5_buf_load_mem_cb,
	    buf, BUS_DMA_WAITOK | BUS_DMA_COHERENT);

	while (buf->load_done == MLX5_LOAD_ST_NONE)
		MLX5_DMA_WAIT(dev);
	MLX5_DMA_UNLOCK(dev);

	/* check for error */
	if (buf->load_done != MLX5_LOAD_ST_SUCCESS) {
		err = -ENOMEM;
		goto err_dma_load;
	}

	/* clean memory */
	memset(buf->direct.buf, 0, PAGE_SIZE * buf->npages);

	/* flush memory to RAM, using the buffer's own DMA tag */
	bus_dmamap_sync(buf->dma_tag, buf->dma_map, BUS_DMASYNC_PREWRITE);
	return (0);

err_dma_load:
	bus_dmamem_free(buf->dma_tag, buf->direct.buf, buf->dma_map);
err_dma_alloc:
	bus_dma_tag_destroy(buf->dma_tag);
err_dma_tag:
	kfree(buf->page_list);
	return (err);
}

void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
{

	bus_dmamap_unload(buf->dma_tag, buf->dma_map);
	bus_dmamem_free(buf->dma_tag, buf->direct.buf, buf->dma_map);
	bus_dma_tag_destroy(buf->dma_tag);
	kfree(buf->page_list);
}
EXPORT_SYMBOL_GPL(mlx5_buf_free);

/*
 * Allocate one firmware page to back a new directory of doorbell records.
 */
static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
						 int node)
{
	struct mlx5_db_pgdir *pgdir;

	pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);

	bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE);

	pgdir->fw_page = mlx5_fwp_alloc(dev, GFP_KERNEL, 1);
	if (pgdir->fw_page != NULL) {
		pgdir->db_page = pgdir->fw_page->virt_addr;
		pgdir->db_dma = pgdir->fw_page->dma_addr;

		/* clean allocated memory */
		memset(pgdir->db_page, 0, MLX5_ADAPTER_PAGE_SIZE);

		/* flush memory to RAM */
		mlx5_fwp_flush(pgdir->fw_page);
	}
	if (!pgdir->db_page) {
		kfree(pgdir);
		return NULL;
	}

	return pgdir;
}

static int
mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
			 struct mlx5_db *db)
{
	int offset;
	int i;

	i = find_first_bit(pgdir->bitmap, MLX5_DB_PER_PAGE);
	if (i >= MLX5_DB_PER_PAGE)
		return -ENOMEM;

	__clear_bit(i, pgdir->bitmap);

	db->u.pgdir = pgdir;
	db->index = i;
	offset = db->index * L1_CACHE_BYTES;
	db->db = pgdir->db_page + offset / sizeof(*pgdir->db_page);
	db->dma = pgdir->db_dma + offset;

	db->db[0] = 0;
	db->db[1] = 0;

	return 0;
}

int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, int node)
{
	struct mlx5_db_pgdir *pgdir;
	int ret = 0;

	mutex_lock(&dev->priv.pgdir_mutex);

	list_for_each_entry(pgdir, &dev->priv.pgdir_list, list)
		if (!mlx5_alloc_db_from_pgdir(pgdir, db))
			goto out;

	pgdir = mlx5_alloc_db_pgdir(dev, node);
	if (!pgdir) {
		ret = -ENOMEM;
		goto out;
	}

	list_add(&pgdir->list, &dev->priv.pgdir_list);

	/* This should never fail -- we just allocated an empty page: */
	WARN_ON(mlx5_alloc_db_from_pgdir(pgdir, db));

out:
	mutex_unlock(&dev->priv.pgdir_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mlx5_db_alloc_node);

int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
	return mlx5_db_alloc_node(dev, db, dev->priv.numa_node);
}
EXPORT_SYMBOL_GPL(mlx5_db_alloc);

void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
	mutex_lock(&dev->priv.pgdir_mutex);

	__set_bit(db->index, db->u.pgdir->bitmap);

	if (bitmap_full(db->u.pgdir->bitmap, MLX5_DB_PER_PAGE)) {
		mlx5_fwp_free(db->u.pgdir->fw_page);
		list_del(&db->u.pgdir->list);
		kfree(db->u.pgdir);
	}

	mutex_unlock(&dev->priv.pgdir_mutex);
}
EXPORT_SYMBOL_GPL(mlx5_db_free);

void
mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas)
{
	int i;

	for (i = 0; i != buf->npages; i++)
		pas[i] = cpu_to_be64(buf->page_list[i]);
}
EXPORT_SYMBOL_GPL(mlx5_fill_page_array);
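
/*
 * Illustrative sketch only -- a hypothetical consumer, not part of this
 * driver. Doorbell records are carved out of shared firmware pages in
 * L1_CACHE_BYTES-sized slots: mlx5_db_alloc() hands back a zeroed record,
 * db.db points at its two 32-bit doorbell words and db.dma is the bus
 * address given to hardware; mlx5_db_free() returns the slot and releases
 * the page once every slot in it is free again.
 */
#if 0
static int
example_doorbell(struct mlx5_core_dev *dev)
{
	struct mlx5_db db;
	uint32_t cons_index = 0;
	int err;

	err = mlx5_db_alloc(dev, &db);
	if (err != 0)
		return (err);

	/* Publish a (made-up) consumer index through the doorbell record. */
	db.db[0] = cpu_to_be32(cons_index & 0xffffff);

	/* ... hand db.dma to the queue-create firmware command ... */

	mlx5_db_free(dev, &db);
	return (0);
}
#endif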