Commit e1943424, authored Apr 19, 2011 by David S. Miller

    Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

    Conflicts:
        drivers/net/bnx2x/bnx2x_ethtool.c

Parents: 88230fd5, 0553c891
Changes: 44 files

MAINTAINERS

@@ -151,6 +151,7 @@ S: Maintained
 F:	drivers/net/hamradio/6pack.c
 
 8169 10/100/1000 GIGABIT ETHERNET DRIVER
+M:	Realtek linux nic maintainers <nic_swsd@realtek.com>
 M:	Francois Romieu <romieu@fr.zoreil.com>
 L:	netdev@vger.kernel.org
 S:	Maintained

drivers/connector/connector.c

@@ -142,6 +142,7 @@ static int cn_call_callback(struct sk_buff *skb)
 		cbq->callback(msg, nsp);
+		kfree_skb(skb);
 		cn_queue_release_callback(cbq);
 		err = 0;
 	}
 
 	return err;

drivers/net/bna/bfa_ioc.c

@@ -38,6 +38,8 @@
 #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
 #define bfa_ioc_notify_fail(__ioc)		\
 			((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
+#define bfa_ioc_sync_start(__ioc)		\
+			((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
 #define bfa_ioc_sync_join(__ioc)		\
 			((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
 #define bfa_ioc_sync_leave(__ioc)		\

@@ -602,7 +604,7 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
 	switch (event) {
 	case IOCPF_E_SEMLOCKED:
 		if (bfa_ioc_firmware_lock(ioc)) {
-			if (bfa_ioc_sync_complete(ioc)) {
+			if (bfa_ioc_sync_start(ioc)) {
 				iocpf->retry_count = 0;
 				bfa_ioc_sync_join(ioc);
 				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);

@@ -1313,7 +1315,7 @@ bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
  * execution context (driver/bios) must match.
  */
 static bool
-bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
+bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
 {
 	struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;

@@ -1324,7 +1326,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
 	if (fwhdr.signature != drv_fwhdr->signature)
 		return false;
 
-	if (fwhdr.exec != drv_fwhdr->exec)
+	if (swab32(fwhdr.param) != boot_env)
 		return false;
 
 	return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);

@@ -1351,9 +1353,12 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
 {
 	enum bfi_ioc_state ioc_fwstate;
 	bool fwvalid;
+	u32 boot_env;
 
 	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
 
+	boot_env = BFI_BOOT_LOADER_OS;
+
 	if (force)
 		ioc_fwstate = BFI_IOC_UNINIT;

@@ -1361,10 +1366,10 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
 	 * check if firmware is valid
 	 */
 	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
-		false : bfa_ioc_fwver_valid(ioc);
+		false : bfa_ioc_fwver_valid(ioc, boot_env);
 
 	if (!fwvalid) {
-		bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
+		bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
 		return;
 	}

@@ -1395,7 +1400,7 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
 	/**
 	 * Initialize the h/w for any other states.
 	 */
-	bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
+	bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
 }
 
 void

@@ -1505,7 +1510,7 @@ bfa_ioc_hb_stop(struct bfa_ioc *ioc)
  */
 static void
 bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
-		    u32 boot_param)
+		    u32 boot_env)
 {
 	u32 *fwimg;
 	u32 pgnum;

@@ -1556,10 +1561,10 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
 	/*
 	 * Set boot type and boot param at the end.
 	 */
-	writel((swab32(swab32(boot_type))), ((ioc->ioc_regs.smem_page_start)
+	writel(boot_type, ((ioc->ioc_regs.smem_page_start)
 			+ (BFI_BOOT_TYPE_OFF)));
-	writel((swab32(swab32(boot_param))), ((ioc->ioc_regs.smem_page_start)
-			+ (BFI_BOOT_PARAM_OFF)));
+	writel(boot_env, ((ioc->ioc_regs.smem_page_start)
+			+ (BFI_BOOT_LOADER_OFF)));
 }
 
 static void

@@ -1719,7 +1724,7 @@ bfa_ioc_pll_init(struct bfa_ioc *ioc)
  * as the entry vector.
  */
 static void
-bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
+bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_env)
 {
 	void __iomem *rb;

@@ -1732,7 +1737,7 @@ bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
 	 * Initialize IOC state of all functions on a chip reset.
 	 */
 	rb = ioc->pcidev.pci_bar_kva;
-	if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
+	if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
 		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
 		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
 	} else {

@@ -1741,7 +1746,7 @@ bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
 	}
 
 	bfa_ioc_msgflush(ioc);
-	bfa_ioc_download_fw(ioc, boot_type, boot_param);
+	bfa_ioc_download_fw(ioc, boot_type, boot_env);
 
 	/**
 	 * Enable interrupts just before starting LPU

drivers/net/bna/bfa_ioc.h

@@ -194,6 +194,7 @@ struct bfa_ioc_hwif {
 						bool msix);
 	void		(*ioc_notify_fail)	(struct bfa_ioc *ioc);
 	void		(*ioc_ownership_reset)	(struct bfa_ioc *ioc);
+	bool		(*ioc_sync_start)	(struct bfa_ioc *ioc);
 	void		(*ioc_sync_join)	(struct bfa_ioc *ioc);
 	void		(*ioc_sync_leave)	(struct bfa_ioc *ioc);
 	void		(*ioc_sync_ack)		(struct bfa_ioc *ioc);

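The new ioc_sync_start member follows the driver's usual layout: each ASIC variant fills a bfa_ioc_hwif table of function pointers, and common code dispatches through thin macro wrappers (see the bfa_ioc.c hunk above). The stand-alone sketch below only illustrates that ops-table idiom; every name in it is invented for the example and none of it is Brocade driver code.

/* Illustrative sketch of the ops-table pattern, not driver code. */
#include <stdbool.h>
#include <stdio.h>

struct ioc;					/* opaque device context */

struct ioc_hwif {
	bool (*sync_start)(struct ioc *ioc);	/* the newly added hook */
	void (*sync_join)(struct ioc *ioc);
};

struct ioc {
	const struct ioc_hwif *hwif;
};

/* Thin dispatch macros, in the spirit of bfa_ioc_sync_start() */
#define ioc_sync_start(__ioc)	((__ioc)->hwif->sync_start(__ioc))
#define ioc_sync_join(__ioc)	((__ioc)->hwif->sync_join(__ioc))

static bool ct_sync_start(struct ioc *ioc)
{
	(void)ioc;
	puts("ct: sync_start");
	return true;			/* e.g. "cleanup done, proceed" */
}

static void ct_sync_join(struct ioc *ioc)
{
	(void)ioc;
	puts("ct: sync_join");
}

static const struct ioc_hwif ct_hwif = {
	.sync_start	= ct_sync_start,
	.sync_join	= ct_sync_join,
};

int main(void)
{
	struct ioc ioc = { .hwif = &ct_hwif };

	if (ioc_sync_start(&ioc))	/* variant-specific code behind a common call */
		ioc_sync_join(&ioc);
	return 0;
}
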
drivers/net/bna/bfa_ioc_ct.c

@@ -41,6 +41,7 @@ static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
 static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
 static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
 static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
+static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
 static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
 static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
 static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);

@@ -63,6 +64,7 @@ bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
 	nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
 	nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
 	nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
+	nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
 	nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
 	nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
 	nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;

@@ -342,6 +344,32 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
 	bfa_nw_ioc_hw_sem_release(ioc);
 }
 
+/**
+ * Synchronized IOC failure processing routines
+ */
+static bool
+bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
+{
+	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+	u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
+
+	/*
+	 * Driver load time.  If the sync required bit for this PCI fn
+	 * is set, it is due to an unclean exit by the driver for this
+	 * PCI fn in the previous incarnation. Whoever comes here first
+	 * should clean it up, no matter which PCI fn.
+	 */
+	if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
+		writel(0, ioc->ioc_regs.ioc_fail_sync);
+		writel(1, ioc->ioc_regs.ioc_usage_reg);
+		writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
+		writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
+		return true;
+	}
+
+	return bfa_ioc_ct_sync_complete(ioc);
+}
+
 /**
  * Synchronized IOC failure processing routines
  */

drivers/net/bna/bfi.h

@@ -184,12 +184,14 @@ enum bfi_mclass {
 #define BFI_IOC_MSGLEN_MAX	32	/* 32 bytes */
 
 #define BFI_BOOT_TYPE_OFF		8
-#define BFI_BOOT_PARAM_OFF		12
+#define BFI_BOOT_LOADER_OFF		12
 
-#define BFI_BOOT_TYPE_NORMAL	0	/* param is device id */
+#define BFI_BOOT_TYPE_NORMAL	0
 #define BFI_BOOT_TYPE_FLASH	1
 #define BFI_BOOT_TYPE_MEMTEST	2
 
+#define BFI_BOOT_LOADER_OS	0
+
 #define BFI_BOOT_MEMTEST_RES_ADDR   0x900
 #define BFI_BOOT_MEMTEST_RES_SIG    0xA0A1A2A3

drivers/net/bna/bnad.c

@@ -1837,7 +1837,6 @@ bnad_setup_rx(struct bnad *bnad, uint rx_id)
 	/* Initialize the Rx event handlers */
 	rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
-	rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
+	rx_cbfn.rcb_destroy_cbfn = NULL;
 	rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
 	rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
 	rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;

drivers/net/bnx2x/bnx2x_ethtool.c

@@ -2030,20 +2030,19 @@ static int bnx2x_set_phys_id(struct net_device *dev,
 	case ETHTOOL_ID_ON:
 		bnx2x_set_led(&bp->link_params, &bp->link_vars,
-			      LED_MODE_OPER, SPEED_1000);
+			      LED_MODE_ON, SPEED_1000);
 		break;
 
 	case ETHTOOL_ID_OFF:
 		bnx2x_set_led(&bp->link_params, &bp->link_vars,
-			      LED_MODE_OFF, 0);
+			      LED_MODE_FRONT_PANEL_OFF, 0);
 		break;
 
 	case ETHTOOL_ID_INACTIVE:
-		if (bp->link_vars.link_up)
-			bnx2x_set_led(&bp->link_params, &bp->link_vars,
-				      LED_MODE_OPER,
-				      bp->link_vars.line_speed);
+		bnx2x_set_led(&bp->link_params, &bp->link_vars,
+			      LED_MODE_OPER,
+			      bp->link_vars.line_speed);
 	}
 
 	return 0;

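For context, bnx2x_set_phys_id() implements the ethtool physical-identification contract: the core first calls the driver with ETHTOOL_ID_ACTIVE, a positive return value asks the core to do the blinking itself by calling back with alternating ETHTOOL_ID_ON / ETHTOOL_ID_OFF at roughly that many on/off cycles per second, and ETHTOOL_ID_INACTIVE finally restores normal LED behaviour. A minimal sketch of such a callback follows; example_set_phys_id, example_led_set and example_ethtool_ops are hypothetical names, only the ETHTOOL_ID_* states and the .set_phys_id hook are the real API.

#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Stand-in for real LED control; a hypothetical helper for this sketch. */
static void example_led_set(struct net_device *dev, bool on)
{
	netdev_info(dev, "identify LED %s\n", on ? "on" : "off");
}

static int example_set_phys_id(struct net_device *dev,
			       enum ethtool_phys_id_state state)
{
	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* ask the core to cycle on/off once per second */
	case ETHTOOL_ID_ON:
		example_led_set(dev, true);
		break;
	case ETHTOOL_ID_OFF:
		example_led_set(dev, false);
		break;
	case ETHTOOL_ID_INACTIVE:
		example_led_set(dev, false);	/* back to normal operation */
		break;
	}
	return 0;
}

static const struct ethtool_ops example_ethtool_ops = {
	.set_phys_id	= example_set_phys_id,
};
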
drivers/net/can/mscan/mpc5xxx_can.c

@@ -260,7 +260,7 @@ static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev)
 	if (!ofdev->dev.of_match)
 		return -EINVAL;
-	data = (struct mpc5xxx_can_data *)of_dev->dev.of_match->data;
+	data = (struct mpc5xxx_can_data *)ofdev->dev.of_match->data;
 
 	base = of_iomap(np, 0);
 	if (!base) {

drivers/net/loopback.c

@@ -173,7 +173,8 @@ static void loopback_setup(struct net_device *dev)
 		| NETIF_F_RXCSUM
 		| NETIF_F_HIGHDMA
 		| NETIF_F_LLTX
-		| NETIF_F_NETNS_LOCAL;
+		| NETIF_F_NETNS_LOCAL
+		| NETIF_F_VLAN_CHALLENGED;
 	dev->ethtool_ops	= &loopback_ethtool_ops;
 	dev->header_ops		= &eth_header_ops;
 	dev->netdev_ops		= &loopback_ops;

drivers/net/natsemi.c

@@ -860,6 +860,9 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
 		prev_eedata = eedata;
 	}
 
+	/* Store MAC Address in perm_addr */
+	memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
+
 	dev->base_addr = (unsigned long __force) ioaddr;
 	dev->irq = irq;

drivers/net/qlcnic/qlcnic.h

@@ -99,6 +99,7 @@
 #define TX_UDPV6_PKT	0x0c
 
 /* Tx defines */
+#define QLCNIC_MAX_FRAGS_PER_TX	14
 #define MAX_TSO_HEADER_DESC	2
 #define MGMT_CMD_DESC_RESV	4
 #define TX_STOP_THRESH		((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \

drivers/net/qlcnic/qlcnic_main.c

@@ -2138,6 +2138,7 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	struct cmd_desc_type0 *hwdesc, *first_desc;
 	struct pci_dev *pdev;
 	struct ethhdr *phdr;
+	int delta = 0;
 	int i, k;
 
 	u32 producer;

@@ -2157,6 +2158,19 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	}
 
 	frag_count = skb_shinfo(skb)->nr_frags + 1;
 
+	/* 14 frags supported for normal packet and
+	 * 32 frags supported for TSO packet
+	 */
+	if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
+
+		for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
+			delta += skb_shinfo(skb)->frags[i].size;
+
+		if (!__pskb_pull_tail(skb, delta))
+			goto drop_packet;
+
+		frag_count = 1 + skb_shinfo(skb)->nr_frags;
+	}
+
 	if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
 		netif_stop_queue(netdev);

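The qlcnic change above enforces the 14-fragment limit for non-TSO skbs by pulling the bytes of the excess page fragments into the linear area, which lowers nr_frags before descriptors are built. The same idea, factored into a helper purely as an illustration (MAX_HW_FRAGS and limit_skb_frags() are invented for the sketch; the skb helpers are the real era-appropriate APIs, with frags[i].size as it was before skb_frag_size()):

#include <linux/skbuff.h>
#include <linux/errno.h>

#define MAX_HW_FRAGS	14	/* hypothetical per-packet descriptor limit */

static int limit_skb_frags(struct sk_buff *skb)
{
	int excess = skb_shinfo(skb)->nr_frags + 1 - MAX_HW_FRAGS;
	int delta = 0;
	int i;

	if (skb_is_gso(skb) || excess <= 0)
		return 0;			/* within the limit, nothing to do */

	/* Total bytes held by the fragments we want to get rid of. */
	for (i = 0; i < excess; i++)
		delta += skb_shinfo(skb)->frags[i].size;

	/* Copy that much paged data into the linear area; the emptied
	 * fragments are released and nr_frags drops accordingly. */
	if (!__pskb_pull_tail(skb, delta))
		return -ENOMEM;			/* caller should drop the packet */

	return 0;
}
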
drivers/net/sfc/efx.c

@@ -328,7 +328,8 @@ static int efx_poll(struct napi_struct *napi, int budget)
  * processing to finish, then directly poll (and ack ) the eventq.
  * Finally reenable NAPI and interrupts.
  *
- * Since we are touching interrupts the caller should hold the suspend lock
+ * This is for use only during a loopback self-test.  It must not
+ * deliver any packets up the stack as this can result in deadlock.
  */
 void efx_process_channel_now(struct efx_channel *channel)
 {

@@ -336,6 +337,7 @@ void efx_process_channel_now(struct efx_channel *channel)
 	BUG_ON(channel->channel >= efx->n_channels);
 	BUG_ON(!channel->enabled);
+	BUG_ON(!efx->loopback_selftest);
 
 	/* Disable interrupts and wait for ISRs to complete */
 	efx_nic_disable_interrupts(efx);

@@ -1436,7 +1438,7 @@ static void efx_start_all(struct efx_nic *efx)
 	 * restart the transmit interface early so the watchdog timer stops */
 	efx_start_port(efx);
 
-	if (efx_dev_registered(efx))
+	if (efx_dev_registered(efx) && !efx->port_inhibited)
 		netif_tx_wake_all_queues(efx->net_dev);
 
 	efx_for_each_channel(channel, efx)

drivers/net/sfc/io.h

@@ -152,6 +152,7 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
 	spin_lock_irqsave(&efx->biu_lock, flags);
 	value->u32[0] = _efx_readd(efx, reg + 0);
+	rmb();
 	value->u32[1] = _efx_readd(efx, reg + 4);
 	value->u32[2] = _efx_readd(efx, reg + 8);
 	value->u32[3] = _efx_readd(efx, reg + 12);

@@ -174,6 +175,7 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
 	value->u64[0] = (__force __le64)__raw_readq(membase + addr);
 #else
 	value->u32[0] = (__force __le32)__raw_readl(membase + addr);
+	rmb();
 	value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
 #endif
 	spin_unlock_irqrestore(&efx->biu_lock, flags);

drivers/net/sfc/net_driver.h

@@ -330,7 +330,6 @@ enum efx_rx_alloc_method {
  * @eventq_mask: Event queue pointer mask
  * @eventq_read_ptr: Event queue read pointer
  * @last_eventq_read_ptr: Last event queue read pointer value.
- * @magic_count: Event queue test event count
  * @irq_count: Number of IRQs since last adaptive moderation decision
  * @irq_mod_score: IRQ moderation score
  * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors

@@ -360,7 +359,6 @@ struct efx_channel {
 	unsigned int eventq_mask;
 	unsigned int eventq_read_ptr;
 	unsigned int last_eventq_read_ptr;
-	unsigned int magic_count;
 	unsigned int irq_count;
 	unsigned int irq_mod_score;

drivers/net/sfc/nic.c

@@ -84,7 +84,8 @@ static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
 static inline efx_qword_t *efx_event(struct efx_channel *channel,
 				     unsigned int index)
 {
-	return ((efx_qword_t *) (channel->eventq.addr)) + index;
+	return ((efx_qword_t *) (channel->eventq.addr)) +
+		(index & channel->eventq_mask);
 }
 
 /* See if an event is present

@@ -673,7 +674,8 @@ void efx_nic_eventq_read_ack(struct efx_channel *channel)
 	efx_dword_t reg;
 	struct efx_nic *efx = channel->efx;
 
-	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr);
+	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
+			     channel->eventq_read_ptr & channel->eventq_mask);
 	efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
 			 channel->channel);
 }

@@ -906,7 +908,7 @@ efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
 	code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
 	if (code == EFX_CHANNEL_MAGIC_TEST(channel))
-		++channel->magic_count;
+		; /* ignore */
 	else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
 		/* The queue must be empty, so we won't receive any rx
 		 * events, so efx_process_channel() won't refill the

@@ -1013,8 +1015,7 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
 		/* Clear this event by marking it all ones */
 		EFX_SET_QWORD(*p_event);
 
-		/* Increment read pointer */
-		read_ptr = (read_ptr + 1) & channel->eventq_mask;
+		++read_ptr;
 
 		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);

@@ -1058,6 +1059,13 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
 	return spent;
 }
 
+/* Check whether an event is present in the eventq at the current
+ * read pointer.  Only useful for self-test.
+ */
+bool efx_nic_event_present(struct efx_channel *channel)
+{
+	return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
+}
 
 /* Allocate buffer table entries for event queue */
 int efx_nic_probe_eventq(struct efx_channel *channel)

@@ -1163,7 +1171,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
 	struct efx_tx_queue *tx_queue;
 	struct efx_rx_queue *rx_queue;
 	unsigned int read_ptr = channel->eventq_read_ptr;
-	unsigned int end_ptr = (read_ptr - 1) & channel->eventq_mask;
+	unsigned int end_ptr = read_ptr + channel->eventq_mask - 1;
 
 	do {
 		efx_qword_t *event = efx_event(channel, read_ptr);

@@ -1203,7 +1211,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
 		 * it's ok to throw away every non-flush event */
 		EFX_SET_QWORD(*event);
 
-		read_ptr = (read_ptr + 1) & channel->eventq_mask;
+		++read_ptr;
 	} while (read_ptr != end_ptr);
 
 	channel->eventq_read_ptr = read_ptr;

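The sfc hunks above stop wrapping eventq_read_ptr at every increment: the pointer now runs freely and is masked with eventq_mask (ring size minus one, the ring being a power of two) only where it is actually used, as an index in efx_event() or when written to the RPTR register. The unmasked counter then doubles as a progress indicator, which the self-test changes below rely on. A small stand-alone sketch of that ring-indexing idiom, not tied to the driver:

#include <stdio.h>

#define RING_SIZE	8u		/* must be a power of two */
#define RING_MASK	(RING_SIZE - 1u)

static int ring[RING_SIZE];

static int *ring_entry(unsigned int index)
{
	return &ring[index & RING_MASK];	/* mask only at point of use */
}

int main(void)
{
	unsigned int read_ptr = 0;

	for (int i = 0; i < 20; i++) {
		*ring_entry(read_ptr) = i;
		++read_ptr;			/* free-running, never masked here */
	}

	/* read_ptr records total progress; the masked value gives the slot. */
	printf("events processed: %u, current slot: %u\n",
	       read_ptr, read_ptr & RING_MASK);
	return 0;
}
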
drivers/net/sfc/nic.h

@@ -184,6 +184,7 @@ extern void efx_nic_fini_eventq(struct efx_channel *channel);
 extern void efx_nic_remove_eventq(struct efx_channel *channel);
 extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota);
 extern void efx_nic_eventq_read_ack(struct efx_channel *channel);
+extern bool efx_nic_event_present(struct efx_channel *channel);
 
 /* MAC/PHY */
 extern void falcon_drain_tx_fifo(struct efx_nic *efx);

drivers/net/sfc/selftest.c

@@ -131,8 +131,6 @@ static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
 static int efx_test_interrupts(struct efx_nic *efx,
 			       struct efx_self_tests *tests)
 {
-	struct efx_channel *channel;
-
 	netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
 	tests->interrupt = -1;

@@ -140,15 +138,6 @@ static int efx_test_interrupts(struct efx_nic *efx,
 	efx->last_irq_cpu = -1;
 	smp_wmb();
 
-	/* ACK each interrupting event queue. Receiving an interrupt due to
-	 * traffic before a test event is raised is considered a pass */
-	efx_for_each_channel(channel, efx) {
-		if (channel->work_pending)
-			efx_process_channel_now(channel);
-		if (efx->last_irq_cpu >= 0)
-			goto success;
-	}
-
 	efx_nic_generate_interrupt(efx);
 
 	/* Wait for arrival of test interrupt. */

@@ -173,13 +162,13 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
 			       struct efx_self_tests *tests)
 {
 	struct efx_nic *efx = channel->efx;
-	unsigned int magic_count, count;
+	unsigned int read_ptr, count;
 
 	tests->eventq_dma[channel->channel] = -1;
 	tests->eventq_int[channel->channel] = -1;
 	tests->eventq_poll[channel->channel] = -1;
 
-	magic_count = channel->magic_count;
+	read_ptr = channel->eventq_read_ptr;
 	channel->efx->last_irq_cpu = -1;
 	smp_wmb();

@@ -190,10 +179,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
 	do {
 		schedule_timeout_uninterruptible(HZ / 100);
 
-		if (channel->work_pending)
-			efx_process_channel_now(channel);
-
-		if (channel->magic_count != magic_count)
+		if (ACCESS_ONCE(channel->eventq_read_ptr) != read_ptr)
 			goto eventq_ok;
 	} while (++count < 2);

@@ -211,8 +197,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
 	}
 
 	/* Check to see if event was received even if interrupt wasn't */
-	efx_process_channel_now(channel);
-	if (channel->magic_count != magic_count) {
+	if (efx_nic_event_present(channel)) {
 		netif_err(efx, drv, efx->net_dev,
 			  "channel %d event was generated, but "
 			  "failed to trigger an interrupt\n", channel->channel);

@@ -770,6 +755,8 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
 	__efx_reconfigure_port(efx);
 	mutex_unlock(&efx->mac_lock);
 
+	netif_tx_wake_all_queues(efx->net_dev);
+
 	return rc_test;
 }
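
With magic_count removed, the self-test above detects a processed event by watching channel->eventq_read_ptr advance, re-reading it on every iteration through ACCESS_ONCE() so the compiler cannot cache the value across the polling loop. Below is a user-space stand-in for that pattern; the macro mirrors the kernel's definition, while the thread and sleeps are purely illustrative scaffolding.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Same idea as the kernel macro: a volatile access forces a fresh load. */
#define ACCESS_ONCE(x)	(*(volatile __typeof__(x) *)&(x))

static unsigned int eventq_read_ptr;	/* advanced by the "event" thread */

static void *event_thread(void *arg)
{
	(void)arg;
	usleep(10000);			/* pretend the event arrives a bit later */
	ACCESS_ONCE(eventq_read_ptr) = 1;
	return NULL;
}

int main(void)
{
	pthread_t tid;
	unsigned int snapshot = eventq_read_ptr;

	pthread_create(&tid, NULL, event_thread, NULL);

	/* Poll for progress, much like efx_test_eventq_irq() now does. */
	while (ACCESS_ONCE(eventq_read_ptr) == snapshot)
		usleep(1000);

	puts("read pointer moved: the event was processed");
	pthread_join(tid, NULL);
	return 0;
}
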
drivers/net/sfc/tx.c

@@ -435,7 +435,8 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 	 * queue state. */
 	smp_mb();
 	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
-	    likely(efx->port_enabled)) {
+	    likely(efx->port_enabled) &&
+	    likely(!efx->port_inhibited)) {
 		fill_level = tx_queue->insert_count - tx_queue->read_count;
 		if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
 			EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));