Commit 2280740f authored by Vishwanathapura, Niranjana; committed by Doug Ledford

IB/hfi1: Virtual Network Interface Controller (VNIC) HW support



HFI1 HW-specific support for VNIC functionality.
Dynamically allocate a set of contexts for VNIC when the first VNIC
port is instantiated. Allocate VNIC contexts from the user contexts
pool and return them to the same pool when they are freed. Set aside
enough MSI-X interrupts for VNIC contexts and assign them when the
contexts are allocated. On the receive side, use an RSM rule to
spread TCP/UDP streams among VNIC contexts.
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Signed-off-by: Andrzej Kacprowski <andrzej.kacprowski@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent d4829ea6
/*
* Copyright(c) 2015, 2016 Intel Corporation.
* Copyright(c) 2015-2017 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -229,14 +229,17 @@ static inline void aspm_ctx_timer_function(unsigned long data)
spin_unlock_irqrestore(&rcd->aspm_lock, flags);
}
/* Disable interrupt processing for verbs contexts when PSM contexts are open */
/*
* Disable interrupt processing for verbs contexts when PSM or VNIC contexts
* are open.
*/
static inline void aspm_disable_all(struct hfi1_devdata *dd)
{
struct hfi1_ctxtdata *rcd;
unsigned long flags;
unsigned i;
for (i = 0; i < dd->first_user_ctxt; i++) {
for (i = 0; i < dd->first_dyn_alloc_ctxt; i++) {
rcd = dd->rcd[i];
del_timer_sync(&rcd->aspm_timer);
spin_lock_irqsave(&rcd->aspm_lock, flags);
@@ -260,7 +263,7 @@ static inline void aspm_enable_all(struct hfi1_devdata *dd)
if (aspm_mode != ASPM_MODE_DYNAMIC)
return;
for (i = 0; i < dd->first_user_ctxt; i++) {
for (i = 0; i < dd->first_dyn_alloc_ctxt; i++) {
rcd = dd->rcd[i];
spin_lock_irqsave(&rcd->aspm_lock, flags);
rcd->aspm_intr_enable = true;
@@ -276,7 +279,7 @@ static inline void aspm_ctx_init(struct hfi1_ctxtdata *rcd)
(unsigned long)rcd);
rcd->aspm_intr_supported = rcd->dd->aspm_supported &&
aspm_mode == ASPM_MODE_DYNAMIC &&
rcd->ctxt < rcd->dd->first_user_ctxt;
rcd->ctxt < rcd->dd->first_dyn_alloc_ctxt;
}
static inline void aspm_init(struct hfi1_devdata *dd)
@@ -286,7 +289,7 @@ static inline void aspm_init(struct hfi1_devdata *dd)
spin_lock_init(&dd->aspm_lock);
dd->aspm_supported = aspm_hw_l1_supported(dd);
for (i = 0; i < dd->first_user_ctxt; i++)
for (i = 0; i < dd->first_dyn_alloc_ctxt; i++)
aspm_ctx_init(dd->rcd[i]);
/* Start with ASPM disabled */
......
@@ -126,9 +126,16 @@ struct flag_table {
#define DEFAULT_KRCVQS 2
#define MIN_KERNEL_KCTXTS 2
#define FIRST_KERNEL_KCTXT 1
/* sizes for both the QP and RSM map tables */
#define NUM_MAP_ENTRIES 256
#define NUM_MAP_REGS 32
/*
* RSM instance allocation
* 0 - Verbs
* 1 - User Fecn Handling
* 2 - Vnic
*/
#define RSM_INS_VERBS 0
#define RSM_INS_FECN 1
#define RSM_INS_VNIC 2
/* Bit offset into the GUID which carries HFI id information */
#define GUID_HFI_INDEX_SHIFT 39
@@ -139,8 +146,7 @@ struct flag_table {
#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
/* RSM fields */
/* RSM fields for Verbs */
/* packet type */
#define IB_PACKET_TYPE 2ull
#define QW_SHIFT 6ull
@@ -170,6 +176,28 @@ struct flag_table {
/* QPN[m+n:1] QW 1, OFFSET 1 */
#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
/* RSM fields for Vnic */
/* L2_TYPE: QW 0, OFFSET 61 - for match */
#define L2_TYPE_QW 0ull
#define L2_TYPE_BIT_OFFSET 61ull
#define L2_TYPE_OFFSET(off) ((L2_TYPE_QW << QW_SHIFT) | (off))
#define L2_TYPE_MATCH_OFFSET L2_TYPE_OFFSET(L2_TYPE_BIT_OFFSET)
#define L2_TYPE_MASK 3ull
#define L2_16B_VALUE 2ull
/* L4_TYPE QW 1, OFFSET 0 - for match */
#define L4_TYPE_QW 1ull
#define L4_TYPE_BIT_OFFSET 0ull
#define L4_TYPE_OFFSET(off) ((L4_TYPE_QW << QW_SHIFT) | (off))
#define L4_TYPE_MATCH_OFFSET L4_TYPE_OFFSET(L4_TYPE_BIT_OFFSET)
#define L4_16B_TYPE_MASK 0xFFull
#define L4_16B_ETH_VALUE 0x78ull
/* 16B VESWID - for select */
#define L4_16B_HDR_VESWID_OFFSET ((2 << QW_SHIFT) | (16ull))
/* 16B ENTROPY - for select */
#define L2_16B_ENTROPY_OFFSET ((1 << QW_SHIFT) | (32ull))
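Each of these *_OFFSET values packs two fields: a quadword index in the
upper bits (via QW_SHIFT, defined above as 6) and a bit offset within that
quadword in the low 6 bits. A minimal decode sketch, not part of the patch
(helper names are hypothetical):

static inline u8 rsm_off_qw(u64 off)
{
	return off >> QW_SHIFT;		/* which 64-bit header word */
}

static inline u8 rsm_off_bit(u64 off)
{
	return off & 0x3full;		/* bit position inside that word */
}

/*
 * Examples: L4_16B_HDR_VESWID_OFFSET decodes to QW 2, bit 16;
 * L2_16B_ENTROPY_OFFSET decodes to QW 1, bit 32.
 */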
/* defines to build power on SC2VL table */
#define SC2VL_VAL( \
num, \
@@ -1047,6 +1075,7 @@ static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
unsigned int *np);
static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index);
/*
* Error interrupt table entry. This is used as input to the interrupt
@@ -6703,7 +6732,13 @@ static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
int i;
/* enable all kernel contexts */
for (i = 0; i < dd->n_krcv_queues; i++) {
for (i = 0; i < dd->num_rcv_contexts; i++) {
struct hfi1_ctxtdata *rcd = dd->rcd[i];
/* Ensure all non-user contexts (including vnic) are enabled */
if (!rcd || !rcd->sc || (rcd->sc->type == SC_USER))
continue;
rcvmask = HFI1_RCVCTRL_CTXT_ENB;
/* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
@@ -8000,7 +8035,9 @@ static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
if (likely(source < dd->num_rcv_contexts)) {
rcd = dd->rcd[source];
if (rcd) {
if (source < dd->first_user_ctxt)
/* Check for non-user contexts, including vnic */
if ((source < dd->first_dyn_alloc_ctxt) ||
(rcd->sc && (rcd->sc->type == SC_KERNEL)))
rcd->do_interrupt(rcd, 0);
else
handle_user_interrupt(rcd);
@@ -8028,7 +8065,8 @@ static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
rcd = dd->rcd[source];
if (rcd) {
/* only pay attention to user urgent interrupts */
if (source >= dd->first_user_ctxt)
if ((source >= dd->first_dyn_alloc_ctxt) &&
(!rcd->sc || (rcd->sc->type == SC_USER)))
handle_user_interrupt(rcd);
return; /* OK */
}
@@ -12842,7 +12880,10 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
first_sdma = last_general;
last_sdma = first_sdma + dd->num_sdma;
first_rx = last_sdma;
last_rx = first_rx + dd->n_krcv_queues;
last_rx = first_rx + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT;
/* VNIC MSIx interrupts get mapped when VNIC contexts are created */
dd->first_dyn_msix_idx = first_rx + dd->n_krcv_queues;
/*
* Sanity check - the code expects all SDMA chip source
@@ -12856,7 +12897,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
const char *err_info;
irq_handler_t handler;
irq_handler_t thread = NULL;
void *arg;
void *arg = NULL;
int idx;
struct hfi1_ctxtdata *rcd = NULL;
struct sdma_engine *sde = NULL;
@@ -12883,24 +12924,25 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
} else if (first_rx <= i && i < last_rx) {
idx = i - first_rx;
rcd = dd->rcd[idx];
/* no interrupt if no rcd */
if (!rcd)
continue;
/*
* Set the interrupt register and mask for this
* context's interrupt.
*/
rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
rcd->imask = ((u64)1) <<
((IS_RCVAVAIL_START + idx) % 64);
handler = receive_context_interrupt;
thread = receive_context_thread;
arg = rcd;
snprintf(me->name, sizeof(me->name),
DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
err_info = "receive context";
remap_intr(dd, IS_RCVAVAIL_START + idx, i);
me->type = IRQ_RCVCTXT;
if (rcd) {
/*
* Set the interrupt register and mask for this
* context's interrupt.
*/
rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
rcd->imask = ((u64)1) <<
((IS_RCVAVAIL_START + idx) % 64);
handler = receive_context_interrupt;
thread = receive_context_thread;
arg = rcd;
snprintf(me->name, sizeof(me->name),
DRIVER_NAME "_%d kctxt%d",
dd->unit, idx);
err_info = "receive context";
remap_intr(dd, IS_RCVAVAIL_START + idx, i);
me->type = IRQ_RCVCTXT;
rcd->msix_intr = i;
}
} else {
/* not in our expected range - complain, then
* ignore it
@@ -12938,6 +12980,84 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
return ret;
}
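The resulting MSI-X vector layout, summarized as an editorial sketch of
the allocation code above (the counts are the driver's own variables):

/*
 * MSI-X vector layout after request_msix_irqs() (illustrative summary):
 *
 *   vector 0                        - general interrupt
 *   vectors 1 .. num_sdma           - one per SDMA engine
 *   next n_krcv_queues vectors      - kernel receive contexts
 *   last HFI1_NUM_VNIC_CTXT vectors - reserved; mapped later by
 *                                     hfi1_set_vnic_msix_info(), starting
 *                                     at dd->first_dyn_msix_idx
 */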
void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd)
{
int i;
if (!dd->num_msix_entries) {
synchronize_irq(dd->pcidev->irq);
return;
}
for (i = 0; i < dd->vnic.num_ctxt; i++) {
struct hfi1_ctxtdata *rcd = dd->vnic.ctxt[i];
struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
synchronize_irq(me->msix.vector);
}
}
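A hypothetical caller sketch (the real call sites live in the vnic code,
which is not part of this diff), showing why this quiesce helper exists:
a vnic context's handler may still be running on another CPU when the
port is being torn down.

/* Sketch only; vnic_stop_example() is a hypothetical teardown path. */
static void vnic_stop_example(struct hfi1_devdata *dd)
{
	/* stop queuing new RX work first (not shown) */
	hfi1_vnic_synchronize_irq(dd);	/* wait out in-flight handlers */
	/* now safe to drain rings and free vnic contexts */
}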
void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd)
{
struct hfi1_devdata *dd = rcd->dd;
struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
if (!me->arg) /* => no irq, no affinity */
return;
hfi1_put_irq_affinity(dd, me);
free_irq(me->msix.vector, me->arg);
me->arg = NULL;
}
void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd)
{
struct hfi1_devdata *dd = rcd->dd;
struct hfi1_msix_entry *me;
int idx = rcd->ctxt;
void *arg = rcd;
int ret;
rcd->msix_intr = dd->vnic.msix_idx++;
me = &dd->msix_entries[rcd->msix_intr];
/*
* Set the interrupt register and mask for this
* context's interrupt.
*/
rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
rcd->imask = ((u64)1) <<
((IS_RCVAVAIL_START + idx) % 64);
snprintf(me->name, sizeof(me->name),
DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
me->name[sizeof(me->name) - 1] = 0;
me->type = IRQ_RCVCTXT;
remap_intr(dd, IS_RCVAVAIL_START + idx, rcd->msix_intr);
ret = request_threaded_irq(me->msix.vector, receive_context_interrupt,
receive_context_thread, 0, me->name, arg);
if (ret) {
dd_dev_err(dd, "vnic irq request (vector %d, idx %d) fail %d\n",
me->msix.vector, idx, ret);
return;
}
/*
* assign arg after request_irq call, so it will be
* cleaned up
*/
me->arg = arg;
ret = hfi1_get_irq_affinity(dd, me);
if (ret) {
dd_dev_err(dd,
"unable to pin IRQ %d\n", ret);
free_irq(me->msix.vector, me->arg);
}
}
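hfi1_set_vnic_msix_info() mirrors the receive-context branch of
request_msix_irqs() for contexts created after driver init. The expected
pairing, as an illustrative sketch (the allocator and free helpers are
hypothetical stand-ins for the real vnic context management code):

static void vnic_msix_lifecycle_example(struct hfi1_devdata *dd)
{
	struct hfi1_ctxtdata *rcd;

	rcd = vnic_alloc_ctxt(dd);	/* hypothetical allocator */
	hfi1_set_vnic_msix_info(rcd);	/* remap source, request vector, pin */
	/* ... vnic port traffic runs here ... */
	hfi1_reset_vnic_msix_info(rcd);	/* drop affinity, free_irq */
	vnic_free_ctxt(dd, rcd);	/* hypothetical: back to the pool */
}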
/*
* Set the general handler to accept all interrupts, remap all
* chip interrupts back to MSI-X 0.
@@ -12969,7 +13089,7 @@ static int set_up_interrupts(struct hfi1_devdata *dd)
* N interrupts - one per used SDMA engine
* M interrupt - one per kernel receive context
*/
total = 1 + dd->num_sdma + dd->n_krcv_queues;
total = 1 + dd->num_sdma + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT;
entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
if (!entries) {
@@ -13034,7 +13154,8 @@ static int set_up_interrupts(struct hfi1_devdata *dd)
*
* num_rcv_contexts - number of contexts being used
* n_krcv_queues - number of kernel contexts
* first_user_ctxt - first non-kernel context in array of contexts
* first_dyn_alloc_ctxt - first dynamically allocated context
* in array of contexts
* freectxts - number of free user contexts
* num_send_contexts - number of PIO send contexts being used
*/
@@ -13111,10 +13232,14 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
total_contexts = num_kernel_contexts + num_user_contexts;
}
/* the first N are kernel contexts, the rest are user contexts */
/* Accommodate VNIC contexts */
if ((total_contexts + HFI1_NUM_VNIC_CTXT) <= dd->chip_rcv_contexts)
total_contexts += HFI1_NUM_VNIC_CTXT;
/* the first N are kernel contexts, the rest are user/vnic contexts */
dd->num_rcv_contexts = total_contexts;
dd->n_krcv_queues = num_kernel_contexts;
dd->first_user_ctxt = num_kernel_contexts;
dd->first_dyn_alloc_ctxt = num_kernel_contexts;
dd->num_user_contexts = num_user_contexts;
dd->freectxts = num_user_contexts;
dd_dev_info(dd,
@@ -13570,11 +13695,8 @@ static void reset_rxe_csrs(struct hfi1_devdata *dd)
write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) {
write_csr(dd, RCV_RSM_CFG + (8 * i), 0);
write_csr(dd, RCV_RSM_SELECT + (8 * i), 0);
write_csr(dd, RCV_RSM_MATCH + (8 * i), 0);
}
for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++)
clear_rsm_rule(dd, i);
for (i = 0; i < 32; i++)
write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
@@ -13933,6 +14055,16 @@ static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
(u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
}
/*
* Clear a receive side mapping rule.
*/
static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
{
write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 0);
write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 0);
write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 0);
}
/* return the number of RSM map table entries that will be used for QOS */
static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
unsigned int *np)
@@ -14048,7 +14180,7 @@ static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
rrd.value2 = LRH_SC_VALUE;
/* add rule 0 */
add_rsm_rule(dd, 0, &rrd);
add_rsm_rule(dd, RSM_INS_VERBS, &rrd);
/* mark RSM map entries as used */
rmt->used += rmt_entries;
@@ -14078,7 +14210,7 @@ static void init_user_fecn_handling(struct hfi1_devdata *dd,
/*
* RSM will extract the destination context as an index into the
* map table. The destination contexts are a sequential block
* in the range first_user_ctxt...num_rcv_contexts-1 (inclusive).
* in the range first_dyn_alloc_ctxt...num_rcv_contexts-1 (inclusive).
* Map entries are accessed as offset + extracted value. Adjust
* the added offset so this sequence can be placed anywhere in
* the table - as long as the entries themselves do not wrap.
@@ -14086,9 +14218,9 @@ static void init_user_fecn_handling(struct hfi1_devdata *dd,
* start with that to allow for a "negative" offset.
*/
offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
(int)dd->first_user_ctxt);
(int)dd->first_dyn_alloc_ctxt);
for (i = dd->first_user_ctxt, idx = rmt->used;
for (i = dd->first_dyn_alloc_ctxt, idx = rmt->used;
i < dd->num_rcv_contexts; i++, idx++) {
/* replace with identity mapping */
regoff = (idx % 8) * 8;
@@ -14122,11 +14254,84 @@ static void init_user_fecn_handling(struct hfi1_devdata *dd,
rrd.value2 = 1;
/* add rule 1 */
add_rsm_rule(dd, 1, &rrd);
add_rsm_rule(dd, RSM_INS_FECN, &rrd);
rmt->used += dd->num_user_contexts;
}
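A worked example of the mod-256 "negative offset" arithmetic above, with
illustrative values only:

/*
 * Worked example (illustrative values): with NUM_MAP_ENTRIES = 256,
 * rmt->used = 32 and first_dyn_alloc_ctxt = 40,
 * offset = (u8)(256 + 32 - 40) = 248.  RSM computes map index
 * (offset + extracted ctxt) mod 256, so context 40 selects entry
 * (248 + 40) % 256 = 32 -- the first map entry this function filled.
 */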
/* Initialize RSM for VNIC */
void hfi1_init_vnic_rsm(struct hfi1_devdata *dd)
{
u8 i, j;
u8 ctx_id = 0;
u64 reg;
u32 regoff;
struct rsm_rule_data rrd;
if (hfi1_vnic_is_rsm_full(dd, NUM_VNIC_MAP_ENTRIES)) {
dd_dev_err(dd, "Vnic RSM disabled, rmt entries used = %d\n",
dd->vnic.rmt_start);
return;
}
dev_dbg(&(dd)->pcidev->dev, "Vnic rsm start = %d, end %d\n",
dd->vnic.rmt_start,
dd->vnic.rmt_start + NUM_VNIC_MAP_ENTRIES);
/* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */
regoff = RCV_RSM_MAP_TABLE + (dd->vnic.rmt_start / 8) * 8;
reg = read_csr(dd, regoff);
for (i = 0; i < NUM_VNIC_MAP_ENTRIES; i++) {
/* Update map register with vnic context */
j = (dd->vnic.rmt_start + i) % 8;
reg &= ~(0xffllu << (j * 8));
reg |= (u64)dd->vnic.ctxt[ctx_id++]->ctxt << (j * 8);
/* Wrap around vnic ctx index */
ctx_id %= dd->vnic.num_ctxt;
/* Write back map register */
if (j == 7 || ((i + 1) == NUM_VNIC_MAP_ENTRIES)) {
dev_dbg(&(dd)->pcidev->dev,
"Vnic rsm map reg[%d] =0x%llx\n",
regoff - RCV_RSM_MAP_TABLE, reg);
write_csr(dd, regoff, reg);
regoff += 8;
if (i < (NUM_VNIC_MAP_ENTRIES - 1))
reg = read_csr(dd, regoff);
}
}
/* Add rule for vnic */
rrd.offset = dd->vnic.rmt_start;
rrd.pkt_type = 4;
/* Match 16B packets */
rrd.field1_off = L2_TYPE_MATCH_OFFSET;
rrd.mask1 = L2_TYPE_MASK;
rrd.value1 = L2_16B_VALUE;
/* Match ETH L4 packets */
rrd.field2_off = L4_TYPE_MATCH_OFFSET;
rrd.mask2 = L4_16B_TYPE_MASK;
rrd.value2 = L4_16B_ETH_VALUE;
/* Calc context from veswid and entropy */
rrd.index1_off = L4_16B_HDR_VESWID_OFFSET;
rrd.index1_width = ilog2(NUM_VNIC_MAP_ENTRIES);
rrd.index2_off = L2_16B_ENTROPY_OFFSET;
rrd.index2_width = ilog2(NUM_VNIC_MAP_ENTRIES);
add_rsm_rule(dd, RSM_INS_VNIC, &rrd);
/* Enable RSM if not already enabled */
add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
}
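The read-modify-write loop above packs one context number per byte, eight
per map register. A worked example with illustrative values (the real
NUM_VNIC_MAP_ENTRIES and context numbers may differ):

/*
 * Example: rmt_start = 12, two vnic contexts numbered {5, 6}, and
 * NUM_VNIC_MAP_ENTRIES = 4 for illustration.  Entry 0 lands in map
 * register 12 / 8 = 1 at byte lane j = 12 % 8 = 4, so bytes 4..7 of
 * that register become 5, 6, 5, 6 while bytes 0..3 keep their prior
 * contents -- hence the initial read_csr() before the write-back.
 */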
void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
{
clear_rsm_rule(dd, RSM_INS_VNIC);
/* Disable RSM if used only by vnic */
if (dd->vnic.rmt_start == 0)
clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
}
static void init_rxe(struct hfi1_devdata *dd)
{
struct rsm_map_table *rmt;
@@ -14139,6 +14344,8 @@ static void init_rxe(struct hfi1_devdata *dd)
init_qos(dd, rmt);
init_user_fecn_handling(dd, rmt);
complete_rsm_map_table(dd, rmt);
/* record number of used rsm map entries for vnic */
dd->vnic.rmt_start = rmt->used;
kfree(rmt);
/*
......
@@ -1362,6 +1362,8 @@ int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt);
int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey);
int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt);
void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality);
void hfi1_init_vnic_rsm(struct hfi1_devdata *dd);
void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd);
/*
* Interrupt source table.
......
#ifdef CONFIG_DEBUG_FS
/*
* Copyright(c) 2015, 2016 Intel Corporation.
* Copyright(c) 2015-2017 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -174,7 +174,7 @@ static int _opcode_stats_seq_show(struct seq_file *s, void *v)
struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
struct hfi1_devdata *dd = dd_from_dev(ibd);
for (j = 0; j < dd->first_user_ctxt; j++) {
for (j = 0; j < dd->first_dyn_alloc_ctxt; j++) {
if (!dd->rcd[j])
continue;
n_packets += dd->rcd[j]->opstats->stats[i].n_packets;
@@ -200,7 +200,7 @@ static void *_ctx_stats_seq_start(struct seq_file *s, loff_t *pos)
if (!*pos)
return SEQ_START_TOKEN;
if (*pos >= dd->first_user_ctxt)
if (*pos >= dd->first_dyn_alloc_ctxt)
return NULL;
return pos;
}
@@ -214,7 +214,7 @@ static void *_ctx_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
return pos;
++*pos;
if (*pos >= dd->first_user_ctxt)
if (*pos >= dd->first_dyn_alloc_ctxt)
return NULL;
return pos;
}
@@ -1099,7 +1099,7 @@ static int _fault_stats_seq_show(struct seq_file *s, void *v)
struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
struct hfi1_devdata *dd = dd_from_dev(ibd);
for (j = 0; j < dd->first_user_ctxt; j++) {
for (j = 0; j < dd->first_dyn_alloc_ctxt; j++) {
if (!dd->rcd[j])
continue;
n_packets += dd->rcd[j]->opstats->stats[i].n_packets;
......
@@ -874,20 +874,42 @@ int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread)
return last;
}
static inline void set_all_nodma_rtail(struct hfi1_devdata *dd)
static inline void set_nodma_rtail(struct hfi1_devdata *dd, u8 ctxt)
{
int i;
for (i = HFI1_CTRL_CTXT + 1; i < dd->first_user_ctxt; i++)
/*
* For dynamically allocated kernel contexts (like vnic) switch
* interrupt handler only for that context. Otherwise, switch
* interrupt handler for all statically allocated kernel contexts.
*/
if (ctxt >= dd->first_dyn_alloc_ctxt) {
dd->rcd[ctxt]->do_interrupt =
&handle_receive_interrupt_nodma_rtail;
return;
}
for (i = HFI1_CTRL_CTXT + 1; i < dd->first_dyn_alloc_ctxt; i++)
dd->rcd[i]->do_interrupt =
&handle_receive_interrupt_nodma_rtail;
}
static inline void set_all_dma_rtail(struct hfi1_devdata *dd)
static inline void set_dma_rtail(struct hfi1_devdata *dd, u8 ctxt)
{
int i;
for (i = HFI1_CTRL_CTXT + 1; i < dd->first_user_ctxt; i++)
/*
* For dynamically allocated kernel contexts (like vnic) switch
* interrupt handler only for that context. Otherwise, switch
* interrupt handler for all statically allocated kernel contexts.
*/
if (ctxt >= dd->first_dyn_alloc_ctxt) {
dd->rcd[ctxt]->do_interrupt =
&handle_receive_interrupt_dma_rtail;
return;
}
for (i = HFI1_CTRL_CTXT + 1; i < dd->first_dyn_alloc_ctxt; i++)
dd->rcd[i]->do_interrupt =
&handle_receive_interrupt_dma_rtail;
}
@@ -897,8 +919,13 @@ void set_all_slowpath(struct hfi1_devdata *dd)
int i;
/* HFI1_CTRL_CTXT must always use the slow path interrupt handler */
for (i = HFI1_CTRL_CTXT + 1; i < dd->first_user_ctxt; i++)
dd->rcd[i]->do_interrupt = &handle_receive_interrupt;
for (i = HFI1_CTRL_CTXT + 1; i < dd->num_rcv_contexts; i++) {
struct hfi1_ctxtdata *rcd = dd->rcd[i];
if ((i < dd->first_dyn_alloc_ctxt) ||
(rcd && rcd->sc && (rcd->sc->type == SC_KERNEL)))
rcd->do_interrupt = &handle_receive_interrupt;
}
}
static inline int set_armed_to_active(struct hfi1_ctxtdata *rcd,
@@ -1008,7 +1035,7 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
last = RCV_PKT_DONE;
if (needset) {
dd_dev_info(dd, "Switching to NO_DMA_RTAIL\n");
set_all_nodma_rtail(dd);
set_nodma_rtail(dd, rcd->ctxt);