xref: /linux/drivers/net/ethernet/chelsio/cxgb4/smt.c (revision 8bf9d8ea)
13bdb376eSKumar Sanghvi /*
23bdb376eSKumar Sanghvi  * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
33bdb376eSKumar Sanghvi  *
43bdb376eSKumar Sanghvi  * Copyright (c) 2017 Chelsio Communications, Inc. All rights reserved.
53bdb376eSKumar Sanghvi  *
63bdb376eSKumar Sanghvi  * This software is available to you under a choice of one of two
73bdb376eSKumar Sanghvi  * licenses.  You may choose to be licensed under the terms of the GNU
83bdb376eSKumar Sanghvi  * General Public License (GPL) Version 2, available from the file
93bdb376eSKumar Sanghvi  * COPYING in the main directory of this source tree, or the
103bdb376eSKumar Sanghvi  * OpenIB.org BSD license below:
113bdb376eSKumar Sanghvi  *
123bdb376eSKumar Sanghvi  *     Redistribution and use in source and binary forms, with or
133bdb376eSKumar Sanghvi  *     without modification, are permitted provided that the following
143bdb376eSKumar Sanghvi  *     conditions are met:
153bdb376eSKumar Sanghvi  *
163bdb376eSKumar Sanghvi  *      - Redistributions of source code must retain the above
173bdb376eSKumar Sanghvi  *        copyright notice, this list of conditions and the following
183bdb376eSKumar Sanghvi  *        disclaimer.
193bdb376eSKumar Sanghvi  *
203bdb376eSKumar Sanghvi  *      - Redistributions in binary form must reproduce the above
213bdb376eSKumar Sanghvi  *        copyright notice, this list of conditions and the following
223bdb376eSKumar Sanghvi  *        disclaimer in the documentation and/or other materials
233bdb376eSKumar Sanghvi  *        provided with the distribution.
243bdb376eSKumar Sanghvi  *
253bdb376eSKumar Sanghvi  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
263bdb376eSKumar Sanghvi  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
273bdb376eSKumar Sanghvi  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
283bdb376eSKumar Sanghvi  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
293bdb376eSKumar Sanghvi  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
303bdb376eSKumar Sanghvi  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
313bdb376eSKumar Sanghvi  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
323bdb376eSKumar Sanghvi  * SOFTWARE.
333bdb376eSKumar Sanghvi  */
343bdb376eSKumar Sanghvi 
353bdb376eSKumar Sanghvi #include "cxgb4.h"
363bdb376eSKumar Sanghvi #include "smt.h"
373bdb376eSKumar Sanghvi #include "t4_msg.h"
383bdb376eSKumar Sanghvi #include "t4fw_api.h"
393bdb376eSKumar Sanghvi #include "t4_regs.h"
403bdb376eSKumar Sanghvi #include "t4_values.h"
413bdb376eSKumar Sanghvi 
t4_init_smt(void)423bdb376eSKumar Sanghvi struct smt_data *t4_init_smt(void)
433bdb376eSKumar Sanghvi {
443bdb376eSKumar Sanghvi 	unsigned int smt_size;
453bdb376eSKumar Sanghvi 	struct smt_data *s;
463bdb376eSKumar Sanghvi 	int i;
473bdb376eSKumar Sanghvi 
483bdb376eSKumar Sanghvi 	smt_size = SMT_SIZE;
493bdb376eSKumar Sanghvi 
50c49f0ce0SGustavo A. R. Silva 	s = kvzalloc(struct_size(s, smtab, smt_size), GFP_KERNEL);
513bdb376eSKumar Sanghvi 	if (!s)
523bdb376eSKumar Sanghvi 		return NULL;
533bdb376eSKumar Sanghvi 	s->smt_size = smt_size;
543bdb376eSKumar Sanghvi 	rwlock_init(&s->lock);
553bdb376eSKumar Sanghvi 	for (i = 0; i < s->smt_size; ++i) {
563bdb376eSKumar Sanghvi 		s->smtab[i].idx = i;
573bdb376eSKumar Sanghvi 		s->smtab[i].state = SMT_STATE_UNUSED;
58*8bf9d8eaSMiaohe Lin 		eth_zero_addr(s->smtab[i].src_mac);
593bdb376eSKumar Sanghvi 		spin_lock_init(&s->smtab[i].lock);
60ad2dcba0SChuhong Yuan 		s->smtab[i].refcnt = 0;
613bdb376eSKumar Sanghvi 	}
623bdb376eSKumar Sanghvi 	return s;
633bdb376eSKumar Sanghvi }
643bdb376eSKumar Sanghvi 
/* Look for an SMT entry usable for @smac: prefer an in-use SWITCHING
 * entry that already carries the same source MAC (shared re-use),
 * otherwise claim the first free slot.  Caller holds s->lock.
 * Returns NULL if the table is completely full.
 */
static struct smt_entry *find_or_alloc_smte(struct smt_data *s, u8 *smac)
{
	struct smt_entry *end = &s->smtab[s->smt_size];
	struct smt_entry *first_free = NULL;
	struct smt_entry *e;

	for (e = &s->smtab[0]; e != end; ++e) {
		if (e->refcnt == 0) {
			/* Remember the earliest free slot as a fallback. */
			if (!first_free)
				first_free = e;
			continue;
		}
		/* Entry is in use; it can be shared only if it is a
		 * SWITCHING entry with a matching source MAC.
		 */
		if (e->state == SMT_STATE_SWITCHING &&
		    memcmp(e->src_mac, smac, ETH_ALEN) == 0)
			return e;
	}

	if (!first_free)
		return NULL;

	first_free->state = SMT_STATE_UNUSED;
	return first_free;
}
973bdb376eSKumar Sanghvi 
t4_smte_free(struct smt_entry * e)983bdb376eSKumar Sanghvi static void t4_smte_free(struct smt_entry *e)
993bdb376eSKumar Sanghvi {
100ad2dcba0SChuhong Yuan 	if (e->refcnt == 0) {  /* hasn't been recycled */
1013bdb376eSKumar Sanghvi 		e->state = SMT_STATE_UNUSED;
1023bdb376eSKumar Sanghvi 	}
1033bdb376eSKumar Sanghvi }
1043bdb376eSKumar Sanghvi 
1053bdb376eSKumar Sanghvi /**
10629bbf5d7SRahul Lakkireddy  * cxgb4_smt_release - Release SMT entry
1073bdb376eSKumar Sanghvi  * @e: smt entry to release
1083bdb376eSKumar Sanghvi  *
1093bdb376eSKumar Sanghvi  * Releases ref count and frees up an smt entry from SMT table
1103bdb376eSKumar Sanghvi  */
cxgb4_smt_release(struct smt_entry * e)1113bdb376eSKumar Sanghvi void cxgb4_smt_release(struct smt_entry *e)
1123bdb376eSKumar Sanghvi {
1134a8937b8SChuhong Yuan 	spin_lock_bh(&e->lock);
114ad2dcba0SChuhong Yuan 	if ((--e->refcnt) == 0)
1153bdb376eSKumar Sanghvi 		t4_smte_free(e);
1164a8937b8SChuhong Yuan 	spin_unlock_bh(&e->lock);
1173bdb376eSKumar Sanghvi }
1183bdb376eSKumar Sanghvi EXPORT_SYMBOL(cxgb4_smt_release);
1193bdb376eSKumar Sanghvi 
do_smt_write_rpl(struct adapter * adap,const struct cpl_smt_write_rpl * rpl)1203bdb376eSKumar Sanghvi void do_smt_write_rpl(struct adapter *adap, const struct cpl_smt_write_rpl *rpl)
1213bdb376eSKumar Sanghvi {
1223bdb376eSKumar Sanghvi 	unsigned int smtidx = TID_TID_G(GET_TID(rpl));
1233bdb376eSKumar Sanghvi 	struct smt_data *s = adap->smt;
1243bdb376eSKumar Sanghvi 
1253bdb376eSKumar Sanghvi 	if (unlikely(rpl->status != CPL_ERR_NONE)) {
1263bdb376eSKumar Sanghvi 		struct smt_entry *e = &s->smtab[smtidx];
1273bdb376eSKumar Sanghvi 
1283bdb376eSKumar Sanghvi 		dev_err(adap->pdev_dev,
1293bdb376eSKumar Sanghvi 			"Unexpected SMT_WRITE_RPL status %u for entry %u\n",
1303bdb376eSKumar Sanghvi 			rpl->status, smtidx);
1313bdb376eSKumar Sanghvi 		spin_lock(&e->lock);
1323bdb376eSKumar Sanghvi 		e->state = SMT_STATE_ERROR;
1333bdb376eSKumar Sanghvi 		spin_unlock(&e->lock);
1343bdb376eSKumar Sanghvi 		return;
1353bdb376eSKumar Sanghvi 	}
1363bdb376eSKumar Sanghvi }
1373bdb376eSKumar Sanghvi 
/* Program entry @e's source MAC into the hardware SMT by posting a
 * CPL_SMT_WRITE_REQ (CPL_T6_SMT_WRITE_REQ on T6+) on the management
 * queue.  Returns 0 on success, -ENOMEM if no skb could be allocated.
 *
 * On pre-T6 chips the SMT is organized as rows holding a pair of SMAC
 * entries, so the sibling entry of @e's row must be rewritten from the
 * software shadow table alongside @e.
 */
static int write_smt_entry(struct adapter *adapter, struct smt_entry *e)
{
	struct cpl_t6_smt_write_req *t6req;
	struct smt_data *s = adapter->smt;
	struct cpl_smt_write_req *req;
	struct sk_buff *skb;
	int size;
	u8 row;

	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
		size = sizeof(*req);
		/* GFP_ATOMIC: may be called with spinlocks held */
		skb = alloc_skb(size, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;
		/* Source MAC Table (SMT) contains 256 SMAC entries
		 * organized in 128 rows of 2 entries each.
		 */
		req = (struct cpl_smt_write_req *)__skb_put(skb, size);
		INIT_TP_WR(req, 0);

		/* Each row contains an SMAC pair.
		 * LSB selects the SMAC entry within a row
		 */
		row = (e->idx >> 1);
		if (e->idx & 1) {
			req->pfvf1 = 0x0;
			memcpy(req->src_mac1, e->src_mac, ETH_ALEN);

			/* fill pfvf0/src_mac0 with entry
			 * at prev index from smt-tab.
			 */
			req->pfvf0 = 0x0;
			memcpy(req->src_mac0, s->smtab[e->idx - 1].src_mac,
			       ETH_ALEN);
		} else {
			req->pfvf0 = 0x0;
			memcpy(req->src_mac0, e->src_mac, ETH_ALEN);

			/* fill pfvf1/src_mac1 with entry
			 * at next index from smt-tab
			 */
			req->pfvf1 = 0x0;
			memcpy(req->src_mac1, s->smtab[e->idx + 1].src_mac,
			       ETH_ALEN);
		}
	} else {
		size = sizeof(*t6req);
		skb = alloc_skb(size, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;
		/* Source MAC Table (SMT) contains 256 SMAC entries */
		t6req = (struct cpl_t6_smt_write_req *)__skb_put(skb, size);
		INIT_TP_WR(t6req, 0);
		/* The T6 request shares the leading layout of the legacy
		 * request, so fill the common fields through *req.
		 */
		req = (struct cpl_smt_write_req *)t6req;

		/* fill pfvf0/src_mac0 from smt-tab */
		req->pfvf0 = 0x0;
		memcpy(req->src_mac0, s->smtab[e->idx].src_mac, ETH_ALEN);
		/* T6: one entry per row, so the row is the index itself */
		row = e->idx;
	}

	/* Route the completion (SMT_WRITE_RPL) to the firmware event queue */
	OPCODE_TID(req) =
		htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, e->idx |
				    TID_QID_V(adapter->sge.fw_evtq.abs_id)));
	req->params = htonl(SMTW_NORPL_V(0) |
			    SMTW_IDX_V(row) |
			    SMTW_OVLAN_IDX_V(0));
	t4_mgmt_tx(adapter, skb);
	return 0;
}
2083bdb376eSKumar Sanghvi 
/* Allocate (or share) a SWITCHING SMT entry for @smac on behalf of
 * @pfvf.  Takes a reference on the returned entry; a freshly claimed
 * entry is initialized and programmed into hardware before being
 * handed out.  Returns NULL if the SMT is exhausted.
 */
static struct smt_entry *t4_smt_alloc_switching(struct adapter *adap, u16 pfvf,
						u8 *smac)
{
	struct smt_data *s = adap->smt;
	struct smt_entry *e;

	write_lock_bh(&s->lock);
	e = find_or_alloc_smte(s, smac);
	if (!e)
		goto out;

	spin_lock(&e->lock);
	if (e->refcnt) {
		/* Sharing an existing SWITCHING entry: just add a ref. */
		++e->refcnt;
	} else {
		e->refcnt = 1;
		e->state = SMT_STATE_SWITCHING;
		e->pfvf = pfvf;
		memcpy(e->src_mac, smac, ETH_ALEN);
		write_smt_entry(adap, e);
	}
	spin_unlock(&e->lock);
out:
	write_unlock_bh(&s->lock);
	return e;
}
2333bdb376eSKumar Sanghvi 
2343bdb376eSKumar Sanghvi /**
23529bbf5d7SRahul Lakkireddy  * cxgb4_smt_alloc_switching - Allocates an SMT entry for switch filters.
2363bdb376eSKumar Sanghvi  * @dev: net_device pointer
2373bdb376eSKumar Sanghvi  * @smac: MAC address to add to SMT
2383bdb376eSKumar Sanghvi  * Returns pointer to the SMT entry created
2393bdb376eSKumar Sanghvi  *
2403bdb376eSKumar Sanghvi  * Allocates an SMT entry to be used by switching rule of a filter.
2413bdb376eSKumar Sanghvi  */
cxgb4_smt_alloc_switching(struct net_device * dev,u8 * smac)2423bdb376eSKumar Sanghvi struct smt_entry *cxgb4_smt_alloc_switching(struct net_device *dev, u8 *smac)
2433bdb376eSKumar Sanghvi {
2443bdb376eSKumar Sanghvi 	struct adapter *adap = netdev2adap(dev);
2453bdb376eSKumar Sanghvi 
2463bdb376eSKumar Sanghvi 	return t4_smt_alloc_switching(adap, 0x0, smac);
2473bdb376eSKumar Sanghvi }
2483bdb376eSKumar Sanghvi EXPORT_SYMBOL(cxgb4_smt_alloc_switching);
249