/*
 * Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/math64.h>

#include <dev/mlx4/cmd.h>

#include "mlx4.h"
#include "icm.h"
#include "fw.h"

/*
 * We allocate in as big chunks as we can, up to a maximum of 256 KB
 * per chunk.
 */
enum {
        MLX4_ICM_ALLOC_SIZE     = 1 << 18,
        MLX4_TABLE_CHUNK_SIZE   = 1 << 18
};
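
/*
 * Sizing note (illustrative, assuming 4 KB pages; PAGE_SIZE is
 * architecture-dependent): 1 << 18 is 256 KB, so
 * get_order(MLX4_ICM_ALLOC_SIZE) is 6 and a whole chunk can be
 * satisfied by a single order-6 allocation of 64 contiguous pages.
 * When a high-order allocation fails, mlx4_alloc_icm() below retries
 * at progressively smaller orders, using up to MLX4_ICM_CHUNK_LEN
 * scatterlist entries per chunk before starting a new chunk.
 */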

static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
        int i;

        if (chunk->nsg > 0)
                pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages,
                             PCI_DMA_BIDIRECTIONAL);

        for (i = 0; i < chunk->npages; ++i)
                __free_pages(sg_page(&chunk->mem[i]),
                             get_order(chunk->mem[i].length));
}

static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
        int i;

        for (i = 0; i < chunk->npages; ++i)
                dma_free_coherent(&dev->persist->pdev->dev,
                                  chunk->mem[i].length,
                                  lowmem_page_address(sg_page(&chunk->mem[i])),
                                  sg_dma_address(&chunk->mem[i]));
}

void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
{
        struct mlx4_icm_chunk *chunk, *tmp;

        if (!icm)
                return;

        list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
                if (coherent)
                        mlx4_free_icm_coherent(dev, chunk);
                else
                        mlx4_free_icm_pages(dev, chunk);

                kfree(chunk);
        }

        kfree(icm);
}

static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order,
                                gfp_t gfp_mask, int node)
{
        struct page *page;

        page = alloc_pages_node(node, gfp_mask, order);
        if (!page) {
                page = alloc_pages(gfp_mask, order);
                if (!page)
                        return -ENOMEM;
        }

        sg_set_page(mem, page, PAGE_SIZE << order, 0);
        return 0;
}

static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
                                   int order, gfp_t gfp_mask)
{
        void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order,
                                       &sg_dma_address(mem), gfp_mask);
        if (!buf)
                return -ENOMEM;

        sg_set_buf(mem, buf, PAGE_SIZE << order);
        BUG_ON(mem->offset);
        sg_dma_len(mem) = PAGE_SIZE << order;
        return 0;
}

struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
                                gfp_t gfp_mask, int coherent)
{
        struct mlx4_icm *icm;
        struct mlx4_icm_chunk *chunk = NULL;
        int cur_order;
        int ret;

        /* We use sg_set_buf for coherent allocs, which assumes low memory */
        BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));

        icm = kmalloc_node(sizeof(*icm),
                           gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN),
                           dev->numa_node);
        if (!icm) {
                icm = kmalloc(sizeof(*icm),
                              gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
                if (!icm)
                        return NULL;
        }

        icm->refcount = 0;
        INIT_LIST_HEAD(&icm->chunk_list);

        cur_order = get_order(MLX4_ICM_ALLOC_SIZE);

        while (npages > 0) {
                if (!chunk) {
                        chunk = kmalloc_node(sizeof(*chunk),
                                             gfp_mask & ~(__GFP_HIGHMEM |
                                                          __GFP_NOWARN),
                                             dev->numa_node);
                        if (!chunk) {
                                chunk = kmalloc(sizeof(*chunk),
                                                gfp_mask & ~(__GFP_HIGHMEM |
                                                             __GFP_NOWARN));
                                if (!chunk)
                                        goto fail;
                        }

                        sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
                        chunk->npages = 0;
                        chunk->nsg    = 0;
                        list_add_tail(&chunk->list, &icm->chunk_list);
                }

                /* Never allocate a higher order than we still need. */
                while (1 << cur_order > npages)
                        --cur_order;

                if (coherent)
                        ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev,
                                                      &chunk->mem[chunk->npages],
                                                      cur_order, gfp_mask);
                else
                        ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
                                                   cur_order, gfp_mask,
                                                   dev->numa_node);

                if (ret) {
                        /* Fall back to a smaller order and retry. */
                        if (--cur_order < 0)
                                goto fail;
                        else
                                continue;
                }

                ++chunk->npages;

                if (coherent)
                        ++chunk->nsg;
                else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
                        chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
                                                chunk->npages,
                                                PCI_DMA_BIDIRECTIONAL);

                        if (chunk->nsg <= 0)
                                goto fail;
                }

                if (chunk->npages == MLX4_ICM_CHUNK_LEN)
                        chunk = NULL;

                npages -= 1 << cur_order;
        }

        /* Map the last, partially filled chunk. */
        if (!coherent && chunk) {
                chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
                                        chunk->npages,
                                        PCI_DMA_BIDIRECTIONAL);

                if (chunk->nsg <= 0)
                        goto fail;
        }

        return icm;

fail:
        mlx4_free_icm(dev, icm, coherent);
        return NULL;
}

static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
{
        return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
}

static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
{
        return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
                        MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
        return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
}

int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
{
        return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX,
                        MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj,
                   gfp_t gfp)
{
        u32 i = (obj & (table->num_obj - 1)) /
                (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
        int ret = 0;

        mutex_lock(&table->mutex);

        if (table->icm[i]) {
                ++table->icm[i]->refcount;
                goto out;
        }

        table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
                                       (table->lowmem ? gfp : GFP_HIGHUSER) |
                                       __GFP_NOWARN, table->coherent);
        if (!table->icm[i]) {
                ret = -ENOMEM;
                goto out;
        }

        if (mlx4_MAP_ICM(dev, table->icm[i], table->virt +
                         (u64) i * MLX4_TABLE_CHUNK_SIZE)) {
                mlx4_free_icm(dev, table->icm[i], table->coherent);
                table->icm[i] = NULL;
                ret = -ENOMEM;
                goto out;
        }

        ++table->icm[i]->refcount;

out:
        mutex_unlock(&table->mutex);
        return ret;
}

void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)
{
        u32 i;
        u64 offset;

        i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);

        mutex_lock(&table->mutex);

        if (--table->icm[i]->refcount == 0) {
                offset = (u64) i * MLX4_TABLE_CHUNK_SIZE;
                mlx4_UNMAP_ICM(dev, table->virt + offset,
                               MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
                mlx4_free_icm(dev, table->icm[i], table->coherent);
                table->icm[i] = NULL;
        }

        mutex_unlock(&table->mutex);
}

void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj,
                      dma_addr_t *dma_handle)
{
        int offset, dma_offset, i;
        u64 idx;
        struct mlx4_icm_chunk *chunk;
        struct mlx4_icm *icm;
        struct page *page = NULL;

        if (!table->lowmem)
                return NULL;

        mutex_lock(&table->mutex);

        idx = (u64) (obj & (table->num_obj - 1)) * table->obj_size;
        icm = table->icm[idx / MLX4_TABLE_CHUNK_SIZE];
        dma_offset = offset = idx % MLX4_TABLE_CHUNK_SIZE;

        if (!icm)
                goto out;

        list_for_each_entry(chunk, &icm->chunk_list, list) {
                for (i = 0; i < chunk->npages; ++i) {
                        if (dma_handle && dma_offset >= 0) {
                                if (sg_dma_len(&chunk->mem[i]) > dma_offset)
                                        *dma_handle = sg_dma_address(&chunk->mem[i]) +
                                                dma_offset;
                                dma_offset -= sg_dma_len(&chunk->mem[i]);
                        }
                        /*
                         * DMA mapping can merge pages but never split
                         * them, so if we reach the page that holds the
                         * object, *dma_handle has already been set in
                         * the check above.
                         */
                        if (chunk->mem[i].length > offset) {
                                page = sg_page(&chunk->mem[i]);
                                goto out;
                        }
                        offset -= chunk->mem[i].length;
                }
        }

out:
        mutex_unlock(&table->mutex);
        return page ? lowmem_page_address(page) + offset : NULL;
}
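
/*
 * Worked example for mlx4_table_find() above (illustrative; assumes
 * obj < num_obj and a hypothetical obj_size of 64 bytes): obj 10000
 * gives idx = 640000, so the object lives in table->icm[2] (since
 * 640000 / MLX4_TABLE_CHUNK_SIZE == 2) at byte offset
 * 640000 % MLX4_TABLE_CHUNK_SIZE == 115712 within that area.  The
 * scatterlist walk subtracts each entry's length from that offset
 * until it reaches the entry containing the object, by which point
 * *dma_handle has already been assigned, since DMA mapping may merge
 * entries but never split them.
 */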

int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
                         u32 start, u32 end)
{
        int inc = MLX4_TABLE_CHUNK_SIZE / table->obj_size;
        int err;
        u32 i;

        for (i = start; i <= end; i += inc) {
                err = mlx4_table_get(dev, table, i, GFP_KERNEL);
                if (err)
                        goto fail;
        }

        return 0;

fail:
        while (i > start) {
                i -= inc;
                mlx4_table_put(dev, table, i);
        }

        return err;
}

void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
                          u32 start, u32 end)
{
        u32 i;

        for (i = start; i <= end; i += MLX4_TABLE_CHUNK_SIZE / table->obj_size)
                mlx4_table_put(dev, table, i);
}

int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
                        u64 virt, int obj_size, u32 nobj, int reserved,
                        int use_lowmem, int use_coherent)
{
        int obj_per_chunk;
        int num_icm;
        unsigned chunk_size;
        int i;
        u64 size;

        obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
        num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;

        table->icm = kcalloc(num_icm, sizeof *table->icm, GFP_KERNEL);
        if (!table->icm)
                return -ENOMEM;
        table->virt     = virt;
        table->num_icm  = num_icm;
        table->num_obj  = nobj;
        table->obj_size = obj_size;
        table->lowmem   = use_lowmem;
        table->coherent = use_coherent;
        mutex_init(&table->mutex);

        size = (u64) nobj * obj_size;
        for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
                chunk_size = MLX4_TABLE_CHUNK_SIZE;
                if ((u64) (i + 1) * MLX4_TABLE_CHUNK_SIZE > size)
                        chunk_size = PAGE_ALIGN(size -
                                                i * MLX4_TABLE_CHUNK_SIZE);

                table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
                                               (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
                                               __GFP_NOWARN, use_coherent);
                if (!table->icm[i])
                        goto err;
                if (mlx4_MAP_ICM(dev, table->icm[i], virt + i * MLX4_TABLE_CHUNK_SIZE)) {
                        mlx4_free_icm(dev, table->icm[i], use_coherent);
                        table->icm[i] = NULL;
                        goto err;
                }

                /*
                 * Add a reference to this ICM chunk so that it never
                 * gets freed (since it contains reserved firmware objects).
                 */
                ++table->icm[i]->refcount;
        }

        return 0;

err:
        for (i = 0; i < num_icm; ++i)
                if (table->icm[i]) {
                        mlx4_UNMAP_ICM(dev, virt + i * MLX4_TABLE_CHUNK_SIZE,
                                       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
                        mlx4_free_icm(dev, table->icm[i], use_coherent);
                }

        kfree(table->icm);

        return -ENOMEM;
}

void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
{
        int i;

        for (i = 0; i < table->num_icm; ++i)
                if (table->icm[i]) {
                        mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
                                       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
                        mlx4_free_icm(dev, table->icm[i], table->coherent);
                }

        kfree(table->icm);
}
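
/*
 * Typical table lifecycle, as a sketch rather than driver code
 * (icm_virt, obj_size, nobj and nreserved are hypothetical values
 * that normally come from the firmware's ICM layout):
 *
 *	struct mlx4_icm_table table;
 *	int err;
 *
 *	err = mlx4_init_icm_table(dev, &table, icm_virt, obj_size,
 *				  nobj, nreserved, 1, 0);
 *	if (err)
 *		return err;
 *
 *	err = mlx4_table_get(dev, &table, obj, GFP_KERNEL);
 *	if (!err) {
 *		... use the object's ICM backing ...
 *		mlx4_table_put(dev, &table, obj);
 *	}
 *
 *	mlx4_cleanup_icm_table(dev, &table);
 */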