/*
 * Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/math64.h>

#include <dev/mlx4/cmd.h>

#include "mlx4.h"
#include "icm.h"
#include "fw.h"

/*
 * We allocate in as big chunks as we can, up to a maximum of 256 KB
 * per chunk.
 */
enum {
	MLX4_ICM_ALLOC_SIZE	= 1 << 18,
	MLX4_TABLE_CHUNK_SIZE	= 1 << 18
};

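/*
 * Release a non-coherent ICM chunk: undo the PCI scatterlist mapping
 * (if the chunk was ever mapped) and free each page block backing it.
 */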
static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
	int i;

	if (chunk->nsg > 0)
		pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages,
			     PCI_DMA_BIDIRECTIONAL);

	for (i = 0; i < chunk->npages; ++i)
		__free_pages(sg_page(&chunk->mem[i]),
			     get_order(chunk->mem[i].length));
}

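/*
 * Release a coherent ICM chunk: each scatterlist entry holds a buffer
 * that came from dma_alloc_coherent(), so freeing it also drops its
 * DMA mapping; no separate unmap step is needed.
 */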
static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->npages; ++i)
		dma_free_coherent(&dev->persist->pdev->dev,
				  chunk->mem[i].length,
				  lowmem_page_address(sg_page(&chunk->mem[i])),
				  sg_dma_address(&chunk->mem[i]));
}

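/*
 * Free an entire ICM area: walk its chunk list, release every chunk via
 * the coherent or paged path, and free the bookkeeping structures.
 * A NULL icm pointer is accepted as a no-op.
 */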
void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
{
	struct mlx4_icm_chunk *chunk, *tmp;

	if (!icm)
		return;

	list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
		if (coherent)
			mlx4_free_icm_coherent(dev, chunk);
		else
			mlx4_free_icm_pages(dev, chunk);

		kfree(chunk);
	}

	kfree(icm);
}

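/*
 * Allocate one physically contiguous block of 2^order pages for a
 * scatterlist entry, preferring the requested NUMA node and falling
 * back to any node before giving up.
 */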
static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order,
				gfp_t gfp_mask, int node)
{
	struct page *page;

	page = alloc_pages_node(node, gfp_mask, order);
	if (!page) {
		page = alloc_pages(gfp_mask, order);
		if (!page)
			return -ENOMEM;
	}

	sg_set_page(mem, page, PAGE_SIZE << order, 0);
	return 0;
}

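/*
 * Allocate one coherent DMA buffer of 2^order pages and record both its
 * kernel virtual address and its bus address in the scatterlist entry.
 */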
static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
				   int order, gfp_t gfp_mask)
{
	void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order,
				       &sg_dma_address(mem), gfp_mask);
	if (!buf)
		return -ENOMEM;

	sg_set_buf(mem, buf, PAGE_SIZE << order);
	BUG_ON(mem->offset);
	sg_dma_len(mem) = PAGE_SIZE << order;
	return 0;
}

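/*
 * Build an ICM area covering at least npages pages.  The area is a list
 * of chunks, each holding up to MLX4_ICM_CHUNK_LEN scatterlist entries.
 * Allocation starts with MLX4_ICM_ALLOC_SIZE (256 KB) blocks and drops
 * to smaller orders when higher-order allocations fail.  Non-coherent
 * chunks are DMA-mapped with pci_map_sg() once they fill up (and once
 * more for the final partial chunk); coherent entries are mapped as
 * they are allocated.  On failure everything allocated so far is freed
 * and NULL is returned.
 */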
struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
				gfp_t gfp_mask, int coherent)
{
	struct mlx4_icm *icm;
	struct mlx4_icm_chunk *chunk = NULL;
	int cur_order;
	int ret;

	/* We use sg_set_buf for coherent allocs, which assumes low memory */
	BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));

	icm = kmalloc_node(sizeof(*icm),
			   gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN),
			   dev->numa_node);
	if (!icm) {
		icm = kmalloc(sizeof(*icm),
			      gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
		if (!icm)
			return NULL;
	}

	icm->refcount = 0;
	INIT_LIST_HEAD(&icm->chunk_list);

	cur_order = get_order(MLX4_ICM_ALLOC_SIZE);

	while (npages > 0) {
		if (!chunk) {
			chunk = kmalloc_node(sizeof(*chunk),
					     gfp_mask & ~(__GFP_HIGHMEM |
							  __GFP_NOWARN),
					     dev->numa_node);
			if (!chunk) {
				chunk = kmalloc(sizeof(*chunk),
						gfp_mask & ~(__GFP_HIGHMEM |
							     __GFP_NOWARN));
				if (!chunk)
					goto fail;
			}

			sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
			chunk->npages = 0;
			chunk->nsg    = 0;
			list_add_tail(&chunk->list, &icm->chunk_list);
		}

		while (1 << cur_order > npages)
			--cur_order;

		if (coherent)
			ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev,
						      &chunk->mem[chunk->npages],
						      cur_order, gfp_mask);
		else
			ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
						   cur_order, gfp_mask,
						   dev->numa_node);

		if (ret) {
			if (--cur_order < 0)
				goto fail;
			else
				continue;
		}

		++chunk->npages;

		if (coherent)
			++chunk->nsg;
		else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
			chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
						chunk->npages,
						PCI_DMA_BIDIRECTIONAL);

			if (chunk->nsg <= 0)
				goto fail;
		}

		if (chunk->npages == MLX4_ICM_CHUNK_LEN)
			chunk = NULL;

		npages -= 1 << cur_order;
	}

	if (!coherent && chunk) {
		chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
					chunk->npages,
					PCI_DMA_BIDIRECTIONAL);

		if (chunk->nsg <= 0)
			goto fail;
	}

	return icm;

fail:
	mlx4_free_icm(dev, icm, coherent);
	return NULL;
}

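/*
 * Thin wrappers around the firmware commands that attach (MAP_ICM,
 * MAP_ICM_AUX) and detach (UNMAP_ICM, UNMAP_ICM_AUX) ICM memory at a
 * given device virtual address.
 */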
static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
}

static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
{
	return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
}

int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

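/*
 * Take a reference on the ICM chunk that backs table object 'obj',
 * allocating and mapping the chunk to the firmware if this is the
 * first reference to it.
 */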
int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj,
		   gfp_t gfp)
{
	u32 i = (obj & (table->num_obj - 1)) /
			(MLX4_TABLE_CHUNK_SIZE / table->obj_size);
	int ret = 0;

	mutex_lock(&table->mutex);

	if (table->icm[i]) {
		++table->icm[i]->refcount;
		goto out;
	}

	table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
				       (table->lowmem ? gfp : GFP_HIGHUSER) |
				       __GFP_NOWARN, table->coherent);
	if (!table->icm[i]) {
		ret = -ENOMEM;
		goto out;
	}

	if (mlx4_MAP_ICM(dev, table->icm[i], table->virt +
			 (u64) i * MLX4_TABLE_CHUNK_SIZE)) {
		mlx4_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
		ret = -ENOMEM;
		goto out;
	}

	++table->icm[i]->refcount;

out:
	mutex_unlock(&table->mutex);
	return ret;
}

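/*
 * Drop a reference on the ICM chunk backing table object 'obj'.  When
 * the last reference goes away the chunk is unmapped from the firmware
 * and freed.
 */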
void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)
{
	u32 i;
	u64 offset;

	i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);

	mutex_lock(&table->mutex);

	if (--table->icm[i]->refcount == 0) {
		offset = (u64) i * MLX4_TABLE_CHUNK_SIZE;
		mlx4_UNMAP_ICM(dev, table->virt + offset,
			       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
		mlx4_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
	}

	mutex_unlock(&table->mutex);
}

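/*
 * Look up the kernel virtual address of table object 'obj' and,
 * optionally, its DMA address.  Only works for lowmem tables; returns
 * NULL if the table is highmem or the object's chunk is not present.
 */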
void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj,
			dma_addr_t *dma_handle)
{
	int offset, dma_offset, i;
	u64 idx;
	struct mlx4_icm_chunk *chunk;
	struct mlx4_icm *icm;
	struct page *page = NULL;

	if (!table->lowmem)
		return NULL;

	mutex_lock(&table->mutex);

	idx = (u64) (obj & (table->num_obj - 1)) * table->obj_size;
	icm = table->icm[idx / MLX4_TABLE_CHUNK_SIZE];
	dma_offset = offset = idx % MLX4_TABLE_CHUNK_SIZE;

	if (!icm)
		goto out;

	list_for_each_entry(chunk, &icm->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i) {
			if (dma_handle && dma_offset >= 0) {
				if (sg_dma_len(&chunk->mem[i]) > dma_offset)
					*dma_handle = sg_dma_address(&chunk->mem[i]) +
						dma_offset;
				dma_offset -= sg_dma_len(&chunk->mem[i]);
			}
			/*
			 * DMA mapping can merge pages but not split them,
			 * so if we found the page, dma_handle has already
			 * been assigned to.
			 */
			if (chunk->mem[i].length > offset) {
				page = sg_page(&chunk->mem[i]);
				goto out;
			}
			offset -= chunk->mem[i].length;
		}
	}

out:
	mutex_unlock(&table->mutex);
	return page ? lowmem_page_address(page) + offset : NULL;
}

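/*
 * Take references on every ICM chunk covering objects in [start, end],
 * rolling back the references already taken if any allocation fails.
 */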
int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			 u32 start, u32 end)
{
	int inc = MLX4_TABLE_CHUNK_SIZE / table->obj_size;
	int err;
	u32 i;

	for (i = start; i <= end; i += inc) {
		err = mlx4_table_get(dev, table, i, GFP_KERNEL);
		if (err)
			goto fail;
	}

	return 0;

fail:
	while (i > start) {
		i -= inc;
		mlx4_table_put(dev, table, i);
	}

	return err;
}

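/*
 * Release the references taken by mlx4_table_get_range() on the chunks
 * covering objects in [start, end].
 */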
void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			  u32 start, u32 end)
{
	u32 i;

	for (i = start; i <= end; i += MLX4_TABLE_CHUNK_SIZE / table->obj_size)
		mlx4_table_put(dev, table, i);
}

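/*
 * Initialize an ICM table: size the per-chunk pointer array, record the
 * table geometry, then pre-allocate and map the chunks that hold the
 * 'reserved' firmware-owned objects, pinning each with an extra
 * reference so it is never freed at runtime.
 */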
int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			u64 virt, int obj_size, u32 nobj, int reserved,
			int use_lowmem, int use_coherent)
{
	int obj_per_chunk;
	int num_icm;
	unsigned chunk_size;
	int i;
	u64 size;

	obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
	num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;

	table->icm      = kcalloc(num_icm, sizeof *table->icm, GFP_KERNEL);
	if (!table->icm)
		return -ENOMEM;
	table->virt     = virt;
	table->num_icm  = num_icm;
	table->num_obj  = nobj;
	table->obj_size = obj_size;
	table->lowmem   = use_lowmem;
	table->coherent = use_coherent;
	mutex_init(&table->mutex);

	size = (u64) nobj * obj_size;
	for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
		chunk_size = MLX4_TABLE_CHUNK_SIZE;
		if ((u64) (i + 1) * MLX4_TABLE_CHUNK_SIZE > size)
			chunk_size = PAGE_ALIGN(size -
					i * MLX4_TABLE_CHUNK_SIZE);

		table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
					       (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
					       __GFP_NOWARN, use_coherent);
		if (!table->icm[i])
			goto err;
		if (mlx4_MAP_ICM(dev, table->icm[i], virt + i * MLX4_TABLE_CHUNK_SIZE)) {
			mlx4_free_icm(dev, table->icm[i], use_coherent);
			table->icm[i] = NULL;
			goto err;
		}

		/*
		 * Add a reference to this ICM chunk so that it never
		 * gets freed (since it contains reserved firmware objects).
		 */
		++table->icm[i]->refcount;
	}

	return 0;

err:
	for (i = 0; i < num_icm; ++i)
		if (table->icm[i]) {
			mlx4_UNMAP_ICM(dev, virt + i * MLX4_TABLE_CHUNK_SIZE,
				       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
			mlx4_free_icm(dev, table->icm[i], use_coherent);
		}

	kfree(table->icm);

	return -ENOMEM;
}

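/*
 * Tear down an ICM table: unmap and free every chunk that is still
 * allocated, then release the chunk pointer array.
 */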
void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
{
	int i;

	for (i = 0; i < table->num_icm; ++i)
		if (table->icm[i]) {
			mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
				       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
			mlx4_free_icm(dev, table->icm[i], table->coherent);
		}

	kfree(table->icm);
}