dhd_linux.c 29.9 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/etherdevice.h>
19
#include <linux/module.h>
20
21
22
23
24
25
26
27
28
#include <net/cfg80211.h>
#include <net/rtnetlink.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>

#include "dhd.h"
#include "dhd_bus.h"
#include "dhd_proto.h"
#include "dhd_dbg.h"
29
#include "fwil_types.h"
30
#include "p2p.h"
31
#include "wl_cfg80211.h"
32
#include "fwil.h"
33
#include "fwsignal.h"
34
35

MODULE_AUTHOR("Broadcom Corporation");
Hante Meuleman's avatar
Hante Meuleman committed
36
MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver.");
37
38
MODULE_LICENSE("Dual BSD/GPL");

39
#define MAX_WAIT_FOR_8021X_TX		50	/* msecs */
40

41
42
43
44
45
46
47
48
49
50
51
52
53
/* AMPDU rx reordering definitions */
#define BRCMF_RXREORDER_FLOWID_OFFSET		0
#define BRCMF_RXREORDER_MAXIDX_OFFSET		2
#define BRCMF_RXREORDER_FLAGS_OFFSET		4
#define BRCMF_RXREORDER_CURIDX_OFFSET		6
#define BRCMF_RXREORDER_EXPIDX_OFFSET		8

#define BRCMF_RXREORDER_DEL_FLOW		0x01
#define BRCMF_RXREORDER_FLUSH_ALL		0x02
#define BRCMF_RXREORDER_CURIDX_VALID		0x04
#define BRCMF_RXREORDER_EXPIDX_VALID		0x08
#define BRCMF_RXREORDER_NEW_HOLE		0x10

54
/* Error bits */
55
int brcmf_msg_level;
56
57
module_param_named(debug, brcmf_msg_level, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(debug, "level of debug output");
58

59
60
61
62
63
64
/* P2P0 enable */
static int brcmf_p2p_enable;
#ifdef CONFIG_BRCMDBG
module_param_named(p2pon, brcmf_p2p_enable, int, 0);
MODULE_PARM_DESC(p2pon, "enable p2p management functionality");
#endif
65
66
67
68

char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx)
{
	if (ifidx < 0 || ifidx >= BRCMF_MAX_IFS) {
69
		brcmf_err("ifidx %d out of range\n", ifidx);
70
71
72
		return "<if_bad>";
	}

73
	if (drvr->iflist[ifidx] == NULL) {
74
		brcmf_err("null i/f %d\n", ifidx);
75
76
77
		return "<if_null>";
	}

78
79
	if (drvr->iflist[ifidx]->ndev)
		return drvr->iflist[ifidx]->ndev->name;
80
81
82
83
84
85

	return "<if_none>";
}

static void _brcmf_set_multicast_list(struct work_struct *work)
{
86
	struct brcmf_if *ifp;
87
88
	struct net_device *ndev;
	struct netdev_hw_addr *ha;
89
	u32 cmd_value, cnt;
90
91
	__le32 cnt_le;
	char *buf, *bufp;
92
93
	u32 buflen;
	s32 err;
94

95
	ifp = container_of(work, struct brcmf_if, multicast_work);
96

97
	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
98

99
	ndev = ifp->ndev;
100
101

	/* Determine initial value of allmulti flag */
102
	cmd_value = (ndev->flags & IFF_ALLMULTI) ? true : false;
103
104

	/* Send down the multicast list first. */
105
106
107
108
	cnt = netdev_mc_count(ndev);
	buflen = sizeof(cnt) + (cnt * ETH_ALEN);
	buf = kmalloc(buflen, GFP_ATOMIC);
	if (!buf)
109
		return;
110
	bufp = buf;
111
112

	cnt_le = cpu_to_le32(cnt);
113
	memcpy(bufp, &cnt_le, sizeof(cnt_le));
114
115
116
117
118
119
120
121
122
123
	bufp += sizeof(cnt_le);

	netdev_for_each_mc_addr(ha, ndev) {
		if (!cnt)
			break;
		memcpy(bufp, ha->addr, ETH_ALEN);
		bufp += ETH_ALEN;
		cnt--;
	}

124
125
	err = brcmf_fil_iovar_data_set(ifp, "mcast_list", buf, buflen);
	if (err < 0) {
126
		brcmf_err("Setting mcast_list failed, %d\n", err);
127
		cmd_value = cnt ? true : cmd_value;
128
129
130
131
	}

	kfree(buf);

132
133
	/*
	 * Now send the allmulti setting.  This is based on the setting in the
134
135
136
	 * net_device flags, but might be modified above to be turned on if we
	 * were trying to set some addresses and dongle rejected it...
	 */
137
138
	err = brcmf_fil_iovar_int_set(ifp, "allmulti", cmd_value);
	if (err < 0)
139
		brcmf_err("Setting allmulti failed, %d\n", err);
140
141
142
143
144

	/*Finally, pick up the PROMISC flag */
	cmd_value = (ndev->flags & IFF_PROMISC) ? true : false;
	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PROMISC, cmd_value);
	if (err < 0)
145
		brcmf_err("Setting BRCMF_C_SET_PROMISC failed, %d\n",
146
			  err);
147
148
149
150
151
}

static void
_brcmf_set_mac_address(struct work_struct *work)
{
152
153
	struct brcmf_if *ifp;
	s32 err;
154

155
	ifp = container_of(work, struct brcmf_if, setmacaddr_work);
156

157
	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
158

159
	err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", ifp->mac_addr,
160
161
				       ETH_ALEN);
	if (err < 0) {
162
		brcmf_err("Setting cur_etheraddr failed, %d\n", err);
163
164
	} else {
		brcmf_dbg(TRACE, "MAC address updated to %pM\n",
165
166
			  ifp->mac_addr);
		memcpy(ifp->ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
167
	}
168
169
170
171
}

static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr)
{
172
	struct brcmf_if *ifp = netdev_priv(ndev);
173
174
	struct sockaddr *sa = (struct sockaddr *)addr;

175
176
	memcpy(&ifp->mac_addr, sa->sa_data, ETH_ALEN);
	schedule_work(&ifp->setmacaddr_work);
177
178
179
180
181
	return 0;
}

static void brcmf_netdev_set_multicast_list(struct net_device *ndev)
{
182
	struct brcmf_if *ifp = netdev_priv(ndev);
183

184
	schedule_work(&ifp->multicast_work);
185
186
}

187
188
static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
					   struct net_device *ndev)
189
190
{
	int ret;
191
	struct brcmf_if *ifp = netdev_priv(ndev);
192
	struct brcmf_pub *drvr = ifp->drvr;
193
	struct ethhdr *eh;
194

195
	brcmf_dbg(DATA, "Enter, idx=%d\n", ifp->bssidx);
196

197
198
199
	/* Can the device send data? */
	if (drvr->bus_if->state != BRCMF_BUS_DATA) {
		brcmf_err("xmit rejected state=%d\n", drvr->bus_if->state);
200
		netif_stop_queue(ndev);
201
202
203
		dev_kfree_skb(skb);
		ret = -ENODEV;
		goto done;
204
205
	}

206
207
	if (!drvr->iflist[ifp->bssidx]) {
		brcmf_err("bad ifidx %d\n", ifp->bssidx);
208
		netif_stop_queue(ndev);
209
210
211
		dev_kfree_skb(skb);
		ret = -ENODEV;
		goto done;
212
213
214
	}

	/* Make sure there's enough room for any header */
215
	if (skb_headroom(skb) < drvr->hdrlen) {
216
217
218
		struct sk_buff *skb2;

		brcmf_dbg(INFO, "%s: insufficient headroom\n",
219
			  brcmf_ifname(drvr, ifp->bssidx));
220
		drvr->bus_if->tx_realloc++;
221
		skb2 = skb_realloc_headroom(skb, drvr->hdrlen);
222
223
224
		dev_kfree_skb(skb);
		skb = skb2;
		if (skb == NULL) {
225
			brcmf_err("%s: skb_realloc_headroom failed\n",
226
				  brcmf_ifname(drvr, ifp->bssidx));
227
228
229
230
231
			ret = -ENOMEM;
			goto done;
		}
	}

232
233
234
235
236
	/* validate length for ether packet */
	if (skb->len < sizeof(*eh)) {
		ret = -EINVAL;
		dev_kfree_skb(skb);
		goto done;
237
238
	}

239
	ret = brcmf_fws_process_skb(ifp, skb);
240
241

done:
242
243
244
245
246
247
	if (ret) {
		ifp->stats.tx_dropped++;
	} else {
		ifp->stats.tx_packets++;
		ifp->stats.tx_bytes += skb->len;
	}
248
249

	/* Return ok: we always eat the packet */
250
	return NETDEV_TX_OK;
251
252
}

253
254
255
/* Stop or wake an interface's tx queue for a given reason.
 *
 * Reasons accumulate as bits in ifp->netif_stop: the queue is stopped
 * on the first reason set and only woken when every reason is cleared.
 * Protected by netif_stop_lock (callable from atomic context).
 */
void brcmf_txflowblock_if(struct brcmf_if *ifp,
			  enum brcmf_netif_stop_reason reason, bool state)
{
	unsigned long flags;

	if (!ifp || !ifp->ndev)
		return;

	brcmf_dbg(TRACE, "enter: idx=%d stop=0x%X reason=%d state=%d\n",
		  ifp->bssidx, ifp->netif_stop, reason, state);

	spin_lock_irqsave(&ifp->netif_stop_lock, flags);
	if (state) {
		if (!ifp->netif_stop)
			netif_stop_queue(ifp->ndev);
		ifp->netif_stop |= reason;
	} else {
		ifp->netif_stop &= ~reason;
		if (!ifp->netif_stop)
			netif_wake_queue(ifp->ndev);
	}
	spin_unlock_irqrestore(&ifp->netif_stop_lock, flags);
}

277
/* Bus-level flow-control notification: forward the blocked/unblocked
 * state to the firmware-signalling layer.
 */
void brcmf_txflowblock(struct device *dev, bool state)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;

	brcmf_dbg(TRACE, "Enter\n");

	brcmf_fws_bus_blocked(drvr, state);
}

287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
/* Deliver one received frame to the network stack.
 *
 * Event packets are handed to the firmware-event handler before the
 * IFF_UP check, so events are processed even on a down interface;
 * data frames on a down interface are dropped.
 */
static void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
{
	skb->dev = ifp->ndev;
	skb->protocol = eth_type_trans(skb, skb->dev);

	if (skb->pkt_type == PACKET_MULTICAST)
		ifp->stats.multicast++;

	/* Process special event packets */
	brcmf_fweh_process_skb(ifp->drvr, skb);

	if (!(ifp->ndev->flags & IFF_UP)) {
		brcmu_pkt_buf_free_skb(skb);
		return;
	}

	ifp->stats.rx_bytes += skb->len;
	ifp->stats.rx_packets++;

	brcmf_dbg(DATA, "rx proto=0x%X\n", ntohs(skb->protocol));
	if (in_interrupt())
		netif_rx(skb);
	else
		/* If the receive is not processed inside an ISR,
		 * the softirqd must be woken explicitly to service
		 * the NET_RX_SOFTIRQ.  This is handled by netif_rx_ni().
		 */
		netif_rx_ni(skb);
}

/* Collect pending packets from the reorder ring into @skb_list.
 *
 * Walks slots [start, end) circularly (wrapping past max_idx) and moves
 * every occupied slot onto the list. Note start == end means a full
 * wrap: the do/while visits every slot once before terminating, which
 * is how callers flush the entire flow.
 */
static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
					 u8 start, u8 end,
					 struct sk_buff_head *skb_list)
{
	/* initialize return list */
	__skb_queue_head_init(skb_list);

	if (rfi->pend_pkts == 0) {
		brcmf_dbg(INFO, "no packets in reorder queue\n");
		return;
	}

	do {
		if (rfi->pktslots[start]) {
			__skb_queue_tail(skb_list, rfi->pktslots[start]);
			rfi->pktslots[start] = NULL;
		}
		start++;
		if (start > rfi->max_idx)
			start = 0;
	} while (start != end);
	rfi->pend_pkts -= skb_queue_len(skb_list);
}

/* AMPDU rx reordering state machine.
 *
 * @reorder_data is the firmware-supplied metadata blob (offsets
 * BRCMF_RXREORDER_*). Depending on the flags the packet either creates
 * or deletes a flow, plugs a hole in the reorder window, or moves the
 * window; any packets that become in-order are flushed to
 * brcmf_netif_rx() via reorder_list.
 *
 * Fix vs. original: the "both moved" branch declared a local
 * 'u8 end_idx' shadowing the function-scope variable of the same name;
 * the shadow is removed (behavior unchanged, silences -Wshadow).
 */
static void brcmf_rxreorder_process_info(struct brcmf_if *ifp, u8 *reorder_data,
					 struct sk_buff *pkt)
{
	u8 flow_id, max_idx, cur_idx, exp_idx, end_idx;
	struct brcmf_ampdu_rx_reorder *rfi;
	struct sk_buff_head reorder_list;
	struct sk_buff *pnext;
	u8 flags;
	u32 buf_size;

	flow_id = reorder_data[BRCMF_RXREORDER_FLOWID_OFFSET];
	flags = reorder_data[BRCMF_RXREORDER_FLAGS_OFFSET];

	/* validate flags and flow id */
	if (flags == 0xFF) {
		brcmf_err("invalid flags...so ignore this packet\n");
		brcmf_netif_rx(ifp, pkt);
		return;
	}

	rfi = ifp->drvr->reorder_flows[flow_id];
	if (flags & BRCMF_RXREORDER_DEL_FLOW) {
		brcmf_dbg(INFO, "flow-%d: delete\n",
			  flow_id);

		if (rfi == NULL) {
			brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
				  flow_id);
			brcmf_netif_rx(ifp, pkt);
			return;
		}

		/* exp_idx..exp_idx == full wrap: flush everything */
		brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, rfi->exp_idx,
					     &reorder_list);
		/* add the last packet */
		__skb_queue_tail(&reorder_list, pkt);
		kfree(rfi);
		ifp->drvr->reorder_flows[flow_id] = NULL;
		goto netif_rx;
	}
	/* from here on we need a flow reorder instance */
	if (rfi == NULL) {
		buf_size = sizeof(*rfi);
		max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];

		/* pktslots array is allocated inline after the struct */
		buf_size += (max_idx + 1) * sizeof(pkt);

		/* allocate space for flow reorder info */
		brcmf_dbg(INFO, "flow-%d: start, maxidx %d\n",
			  flow_id, max_idx);
		rfi = kzalloc(buf_size, GFP_ATOMIC);
		if (rfi == NULL) {
			brcmf_err("failed to alloc buffer\n");
			brcmf_netif_rx(ifp, pkt);
			return;
		}

		ifp->drvr->reorder_flows[flow_id] = rfi;
		rfi->pktslots = (struct sk_buff **)(rfi+1);
		rfi->max_idx = max_idx;
	}
	if (flags & BRCMF_RXREORDER_NEW_HOLE)  {
		if (rfi->pend_pkts) {
			brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx,
						     rfi->exp_idx,
						     &reorder_list);
			WARN_ON(rfi->pend_pkts);
		} else {
			__skb_queue_head_init(&reorder_list);
		}
		rfi->cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
		rfi->exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
		rfi->max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
		rfi->pktslots[rfi->cur_idx] = pkt;
		rfi->pend_pkts++;
		brcmf_dbg(DATA, "flow-%d: new hole %d (%d), pending %d\n",
			  flow_id, rfi->cur_idx, rfi->exp_idx, rfi->pend_pkts);
	} else if (flags & BRCMF_RXREORDER_CURIDX_VALID) {
		cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
		exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];

		if ((exp_idx == rfi->exp_idx) && (cur_idx != rfi->exp_idx)) {
			/* still in the current hole */
			/* enqueue the current on the buffer chain */
			if (rfi->pktslots[cur_idx] != NULL) {
				brcmf_dbg(INFO, "HOLE: ERROR buffer pending..free it\n");
				brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
				rfi->pktslots[cur_idx] = NULL;
			}
			rfi->pktslots[cur_idx] = pkt;
			rfi->pend_pkts++;
			rfi->cur_idx = cur_idx;
			brcmf_dbg(DATA, "flow-%d: store pkt %d (%d), pending %d\n",
				  flow_id, cur_idx, exp_idx, rfi->pend_pkts);

			/* can return now as there is no reorder
			 * list to process.
			 */
			return;
		}
		if (rfi->exp_idx == cur_idx) {
			if (rfi->pktslots[cur_idx] != NULL) {
				brcmf_dbg(INFO, "error buffer pending..free it\n");
				brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
				rfi->pktslots[cur_idx] = NULL;
			}
			rfi->pktslots[cur_idx] = pkt;
			rfi->pend_pkts++;

			/* got the expected one. flush from current to expected
			 * and update expected
			 */
			brcmf_dbg(DATA, "flow-%d: expected %d (%d), pending %d\n",
				  flow_id, cur_idx, exp_idx, rfi->pend_pkts);

			rfi->cur_idx = cur_idx;
			rfi->exp_idx = exp_idx;

			brcmf_rxreorder_get_skb_list(rfi, cur_idx, exp_idx,
						     &reorder_list);
			brcmf_dbg(DATA, "flow-%d: freeing buffers %d, pending %d\n",
				  flow_id, skb_queue_len(&reorder_list),
				  rfi->pend_pkts);
		} else {
			brcmf_dbg(DATA, "flow-%d (0x%x): both moved, old %d/%d, new %d/%d\n",
				  flow_id, flags, rfi->cur_idx, rfi->exp_idx,
				  cur_idx, exp_idx);
			if (flags & BRCMF_RXREORDER_FLUSH_ALL)
				end_idx = rfi->exp_idx;
			else
				end_idx = exp_idx;

			/* flush pkts first */
			brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
						     &reorder_list);

			if (exp_idx == ((cur_idx + 1) % (rfi->max_idx + 1))) {
				__skb_queue_tail(&reorder_list, pkt);
			} else {
				rfi->pktslots[cur_idx] = pkt;
				rfi->pend_pkts++;
			}
			rfi->exp_idx = exp_idx;
			rfi->cur_idx = cur_idx;
		}
	} else {
		/* explicity window move updating the expected index */
		exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];

		brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n",
			  flow_id, flags, rfi->exp_idx, exp_idx);
		if (flags & BRCMF_RXREORDER_FLUSH_ALL)
			end_idx =  rfi->exp_idx;
		else
			end_idx =  exp_idx;

		brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
					     &reorder_list);
		__skb_queue_tail(&reorder_list, pkt);
		/* set the new expected idx */
		rfi->exp_idx = exp_idx;
	}
netif_rx:
	skb_queue_walk_safe(&reorder_list, pkt, pnext) {
		__skb_unlink(pkt, &reorder_list);
		brcmf_netif_rx(ifp, pkt);
	}
}

512
void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
513
{
514
	struct sk_buff *skb, *pnext;
515
	struct brcmf_if *ifp;
516
517
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;
518
	struct brcmf_skb_reorder_data *rd;
519
520
	u8 ifidx;
	int ret;
521

522
523
	brcmf_dbg(DATA, "Enter: %s: count=%u\n", dev_name(dev),
		  skb_queue_len(skb_list));
524

525
526
	skb_queue_walk_safe(skb_list, skb, pnext) {
		skb_unlink(skb, skb_list);
527

528
		/* process and remove protocol-specific header */
529
		ret = brcmf_proto_hdrpull(drvr, true, &ifidx, skb);
530
531
532
533
534
		ifp = drvr->iflist[ifidx];

		if (ret || !ifp || !ifp->ndev) {
			if ((ret != -ENODATA) && ifp)
				ifp->stats.rx_errors++;
535
536
537
538
			brcmu_pkt_buf_free_skb(skb);
			continue;
		}

539
540
541
		rd = (struct brcmf_skb_reorder_data *)skb->cb;
		if (rd->reorder)
			brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
542
		else
543
			brcmf_netif_rx(ifp, skb);
544
545
546
	}
}

547
548
/* Finish transmission of a packet: account for pending 802.1x frames
 * (waking any waiter), update error statistics and free the skb.
 *
 * Hardening vs. original: ifidx from a failed hdrpull is not trusted;
 * the iflist[] lookup is bounds-checked.
 */
void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
		      bool success)
{
	struct brcmf_if *ifp;
	struct ethhdr *eh;
	u8 ifidx;
	u16 type;
	int res;

	res = brcmf_proto_hdrpull(drvr, false, &ifidx, txp);

	ifp = (ifidx < BRCMF_MAX_IFS) ? drvr->iflist[ifidx] : NULL;
	if (!ifp)
		goto done;

	if (res == 0) {
		eh = (struct ethhdr *)(txp->data);
		type = ntohs(eh->h_proto);

		if (type == ETH_P_PAE) {
			atomic_dec(&ifp->pend_8021x_cnt);
			if (waitqueue_active(&ifp->pend_8021x_wait))
				wake_up(&ifp->pend_8021x_wait);
		}
	}
	if (!success)
		ifp->stats.tx_errors++;
done:
	brcmu_pkt_buf_free_skb(txp);
}
577

578
579
580
581
582
/* Bus tx-complete notification. With firmware flow-control active the
 * final status arrives later as a txstatus signal, so only failures are
 * reported to fws here; otherwise finalize the packet immediately.
 */
void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;

	/* await txstatus signal for firmware if active */
	if (brcmf_fws_fc_active(drvr->fws)) {
		if (!success)
			brcmf_fws_bustxfail(drvr->fws, txp);
	} else {
		brcmf_txfinalize(drvr, txp, success);
	}
}

static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
{
594
	struct brcmf_if *ifp = netdev_priv(ndev);
595

596
	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
597
598
599
600

	return &ifp->stats;
}

601
602
603
604
605
/*
 * Set current toe component enables in toe_ol iovar,
 * and set toe global enable iovar
 */
static int brcmf_toe_set(struct brcmf_if *ifp, u32 toe_ol)
{
	s32 err;

	err = brcmf_fil_iovar_int_set(ifp, "toe_ol", toe_ol);
	if (err < 0) {
		brcmf_err("Setting toe_ol failed, %d\n", err);
		return err;
	}

	/* global enable tracks whether any component is on */
	err = brcmf_fil_iovar_int_set(ifp, "toe", (toe_ol != 0));
	if (err < 0)
		brcmf_err("Setting toe failed, %d\n", err);

	return err;
}

static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
				    struct ethtool_drvinfo *info)
{
626
	struct brcmf_if *ifp = netdev_priv(ndev);
627
	struct brcmf_pub *drvr = ifp->drvr;
628

629
630
631
632
633
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	snprintf(info->version, sizeof(info->version), "%lu",
		 drvr->drv_version);
	strlcpy(info->bus_info, dev_name(drvr->bus_if->dev),
		sizeof(info->bus_info));
634
635
}

636
637
/* ethtool operations exposed on the primary interfaces */
static const struct ethtool_ops brcmf_ethtool_ops = {
	.get_drvinfo = brcmf_ethtool_get_drvinfo,
};

640
/* Legacy SIOCETHTOOL handler (reached via ndo_do_ioctl).
 *
 * Supports GDRVINFO plus get/set of the TCP offload-engine checksum
 * components ("toe_ol" iovar). @uaddr is the userspace ethtool request.
 * Returns 0 on success or a negative errno.
 *
 * Fix vs. original: the numeric drv_version sprintf unconditionally
 * overwrote info.version, clobbering BRCMF_VERSION_STR that the
 * "?dhd" branch had just written. It now runs only in the else branch.
 */
static int brcmf_ethtool(struct brcmf_if *ifp, void __user *uaddr)
{
	struct brcmf_pub *drvr = ifp->drvr;
	struct ethtool_drvinfo info;
	char drvname[sizeof(info.driver)];
	u32 cmd;
	struct ethtool_value edata;
	u32 toe_cmpnt, csum_dir;
	int ret;

	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

	/* all ethtool calls start with a cmd word */
	if (copy_from_user(&cmd, uaddr, sizeof(u32)))
		return -EFAULT;

	switch (cmd) {
	case ETHTOOL_GDRVINFO:
		/* Copy out any request driver name */
		if (copy_from_user(&info, uaddr, sizeof(info)))
			return -EFAULT;
		strncpy(drvname, info.driver, sizeof(info.driver));
		drvname[sizeof(info.driver) - 1] = '\0';

		/* clear struct for return */
		memset(&info, 0, sizeof(info));
		info.cmd = cmd;

		/* if requested, identify ourselves */
		if (strcmp(drvname, "?dhd") == 0) {
			sprintf(info.driver, "dhd");
			strcpy(info.version, BRCMF_VERSION_STR);
		} else {
			/* report dongle driver type */
			sprintf(info.driver, "wl");
			sprintf(info.version, "%lu", drvr->drv_version);
		}

		if (copy_to_user(uaddr, &info, sizeof(info)))
			return -EFAULT;
		brcmf_dbg(TRACE, "given %*s, returning %s\n",
			  (int)sizeof(drvname), drvname, info.driver);
		break;

		/* Get toe offload components from dongle */
	case ETHTOOL_GRXCSUM:
	case ETHTOOL_GTXCSUM:
		ret = brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_cmpnt);
		if (ret < 0)
			return ret;

		csum_dir =
		    (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;

		edata.cmd = cmd;
		edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;

		if (copy_to_user(uaddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;

		/* Set toe offload components in dongle */
	case ETHTOOL_SRXCSUM:
	case ETHTOOL_STXCSUM:
		if (copy_from_user(&edata, uaddr, sizeof(edata)))
			return -EFAULT;

		/* Read the current settings, update and write back */
		ret = brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_cmpnt);
		if (ret < 0)
			return ret;

		csum_dir =
		    (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;

		if (edata.data != 0)
			toe_cmpnt |= csum_dir;
		else
			toe_cmpnt &= ~csum_dir;

		ret = brcmf_toe_set(ifp, toe_cmpnt);
		if (ret < 0)
			return ret;

		/* If setting TX checksum mode, tell Linux the new mode */
		if (cmd == ETHTOOL_STXCSUM) {
			if (edata.data)
				ifp->ndev->features |= NETIF_F_IP_CSUM;
			else
				ifp->ndev->features &= ~NETIF_F_IP_CSUM;
		}

		break;

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int brcmf_netdev_ioctl_entry(struct net_device *ndev, struct ifreq *ifr,
				    int cmd)
{
744
	struct brcmf_if *ifp = netdev_priv(ndev);
745
	struct brcmf_pub *drvr = ifp->drvr;
746

747
	brcmf_dbg(TRACE, "Enter, idx=%d, cmd=0x%04x\n", ifp->bssidx, cmd);
748

749
	if (!drvr->iflist[ifp->bssidx])
750
751
752
		return -1;

	if (cmd == SIOCETHTOOL)
753
		return brcmf_ethtool(ifp, ifr->ifr_data);
754
755
756
757
758
759

	return -EOPNOTSUPP;
}

static int brcmf_netdev_stop(struct net_device *ndev)
{
760
	struct brcmf_if *ifp = netdev_priv(ndev);
761

762
	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
763

764
	brcmf_cfg80211_down(ndev);
765

766
767
768
769
770
771
772
773
	/* Set state and stop OS transmissions */
	netif_stop_queue(ndev);

	return 0;
}

static int brcmf_netdev_open(struct net_device *ndev)
{
774
	struct brcmf_if *ifp = netdev_priv(ndev);
775
	struct brcmf_pub *drvr = ifp->drvr;
776
	struct brcmf_bus *bus_if = drvr->bus_if;
777
778
779
	u32 toe_ol;
	s32 ret = 0;

780
	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
781

782
783
	/* If bus is not ready, can't continue */
	if (bus_if->state != BRCMF_BUS_DATA) {
784
		brcmf_err("failed bus is not ready\n");
785
786
		return -EAGAIN;
	}
787

788
	atomic_set(&ifp->pend_8021x_cnt, 0);
789

790
791
792
	/* Get current TOE mode from dongle */
	if (brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_ol) >= 0
	    && (toe_ol & TOE_TX_CSUM_OL) != 0)
793
		ndev->features |= NETIF_F_IP_CSUM;
794
	else
795
		ndev->features &= ~NETIF_F_IP_CSUM;
796

797
798
	/* Allow transmit calls */
	netif_start_queue(ndev);
799
	if (brcmf_cfg80211_up(ndev)) {
800
		brcmf_err("failed to bring up cfg80211\n");
801
802
803
804
805
806
		return -1;
	}

	return ret;
}

807
808
809
810
811
812
813
814
815
816
/* netdevice callbacks for the primary interfaces */
static const struct net_device_ops brcmf_netdev_ops_pri = {
	.ndo_open = brcmf_netdev_open,
	.ndo_stop = brcmf_netdev_stop,
	.ndo_get_stats = brcmf_netdev_get_stats,
	.ndo_do_ioctl = brcmf_netdev_ioctl_entry,
	.ndo_start_xmit = brcmf_netdev_start_xmit,
	.ndo_set_mac_address = brcmf_netdev_set_mac_address,
	.ndo_set_rx_mode = brcmf_netdev_set_multicast_list
};

817
/* Finish setting up an interface's net_device and register it with the
 * network stack.
 *
 * @rtnl_locked: caller already holds the rtnl lock (use
 * register_netdevice instead of register_netdev).
 * Returns 0 on success; on failure the netdev is freed, the iflist slot
 * cleared, and -EBADE is returned.
 */
int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
{
	struct brcmf_pub *drvr = ifp->drvr;
	struct net_device *ndev;
	s32 err;

	brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
		  ifp->mac_addr);
	ndev = ifp->ndev;

	/* set appropriate operations */
	ndev->netdev_ops = &brcmf_netdev_ops_pri;

	/* reserve room for the bus/protocol headers */
	ndev->hard_header_len += drvr->hdrlen;
	ndev->ethtool_ops = &brcmf_ethtool_ops;

	drvr->rxsz = ndev->mtu + ndev->hard_header_len +
			      drvr->hdrlen;

	/* set the mac address */
	memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);

	INIT_WORK(&ifp->setmacaddr_work, _brcmf_set_mac_address);
	INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list);

	if (rtnl_locked)
		err = register_netdevice(ndev);
	else
		err = register_netdev(ndev);
	if (err != 0) {
		brcmf_err("couldn't register the net device\n");
		goto fail;
	}

	brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);

	/* let unregister_netdev free the netdev for us */
	ndev->destructor = free_netdev;
	return 0;

fail:
	drvr->iflist[ifp->bssidx] = NULL;
	ndev->netdev_ops = NULL;
	free_netdev(ndev);
	return -EBADE;
}

863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
/* ndo_open for the P2P device: just bring cfg80211 up. */
static int brcmf_net_p2p_open(struct net_device *ndev)
{
	brcmf_dbg(TRACE, "Enter\n");

	return brcmf_cfg80211_up(ndev);
}

/* ndo_stop for the P2P device: just bring cfg80211 down. */
static int brcmf_net_p2p_stop(struct net_device *ndev)
{
	brcmf_dbg(TRACE, "Enter\n");

	return brcmf_cfg80211_down(ndev);
}

/* ndo_do_ioctl for the P2P device: no ioctls are supported; trace and
 * report success.
 */
static int brcmf_net_p2p_do_ioctl(struct net_device *ndev,
				  struct ifreq *ifr, int cmd)
{
	brcmf_dbg(TRACE, "Enter\n");
	return 0;
}

static netdev_tx_t brcmf_net_p2p_start_xmit(struct sk_buff *skb,
					    struct net_device *ndev)
{
	if (skb)
		dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}

/* netdevice callbacks for the P2P device interface */
static const struct net_device_ops brcmf_netdev_ops_p2p = {
	.ndo_open = brcmf_net_p2p_open,
	.ndo_stop = brcmf_net_p2p_stop,
	.ndo_do_ioctl = brcmf_net_p2p_do_ioctl,
	.ndo_start_xmit = brcmf_net_p2p_start_xmit
};

static int brcmf_net_p2p_attach(struct brcmf_if *ifp)
{
	struct net_device *ndev;

	brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
		  ifp->mac_addr);
	ndev = ifp->ndev;

	ndev->netdev_ops = &brcmf_netdev_ops_p2p;

	/* set the mac address */
	memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);

	if (register_netdev(ndev) != 0) {
		brcmf_err("couldn't register the p2p net device\n");
		goto fail;
	}

	brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);

	return 0;

fail:
923
924
925
	ifp->drvr->iflist[ifp->bssidx] = NULL;
	ndev->netdev_ops = NULL;
	free_netdev(ndev);
926
927
928
	return -EBADE;
}

929
930
/* Allocate and initialize the driver state for a firmware interface.
 *
 * @bssidx: index into drvr->iflist; @ifidx: firmware interface index.
 * For bssidx 1 without p2pon, a netdev-less P2P_DEVICE ifp is created;
 * otherwise a full net_device (registered later by brcmf_net_attach).
 * Returns the new ifp or an ERR_PTR on failure.
 */
struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
			      char *name, u8 *mac_addr)
{
	struct brcmf_if *ifp;
	struct net_device *ndev;

	brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifidx);

	ifp = drvr->iflist[bssidx];
	/*
	 * Delete the existing interface before overwriting it
	 * in case we missed the BRCMF_E_IF_DEL event.
	 */
	if (ifp) {
		/* NOTE(review): if the stale ifp is a netdev-less P2P
		 * interface, ifp->ndev is NULL here — confirm this path
		 * cannot be reached for bssidx 1 without p2pon.
		 */
		brcmf_err("ERROR: netdev:%s already exists\n",
			  ifp->ndev->name);
		if (ifidx) {
			netif_stop_queue(ifp->ndev);
			unregister_netdev(ifp->ndev);
			free_netdev(ifp->ndev);
			drvr->iflist[bssidx] = NULL;
		} else {
			brcmf_err("ignore IF event\n");
			return ERR_PTR(-EINVAL);
		}
	}

	if (!brcmf_p2p_enable && bssidx == 1) {
		/* this is P2P_DEVICE interface */
		brcmf_dbg(INFO, "allocate non-netdev interface\n");
		ifp = kzalloc(sizeof(*ifp), GFP_KERNEL);
		if (!ifp)
			return ERR_PTR(-ENOMEM);
	} else {
		brcmf_dbg(INFO, "allocate netdev interface\n");
		/* Allocate netdev, including space for private structure */
		ndev = alloc_netdev(sizeof(*ifp), name, ether_setup);
		if (!ndev)
			return ERR_PTR(-ENOMEM);

		ifp = netdev_priv(ndev);
		ifp->ndev = ndev;
	}

	ifp->drvr = drvr;
	drvr->iflist[bssidx] = ifp;
	ifp->ifidx = ifidx;
	ifp->bssidx = bssidx;

	init_waitqueue_head(&ifp->pend_8021x_wait);
	spin_lock_init(&ifp->netif_stop_lock);

	if (mac_addr != NULL)
		memcpy(ifp->mac_addr, mac_addr, ETH_ALEN);

	brcmf_dbg(TRACE, " ==== pid:%x, if:%s (%pM) created ===\n",
		  current->pid, name, ifp->mac_addr);

	return ifp;
}

990
void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx)
991
992
993
{
	struct brcmf_if *ifp;

994
	ifp = drvr->iflist[bssidx];
995
	drvr->iflist[bssidx] = NULL;
996
	if (!ifp) {
997
		brcmf_err("Null interface, idx=%d\n", bssidx);
998
999
		return;
	}
1000
	brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifp->ifidx);