/* cnic.c: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/in.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>

#include "cnic_if.h"
#include "bnx2.h"
#include "bnx2x/bnx2x_reg.h"
#include "bnx2x/bnx2x_fw_defs.h"
#include "bnx2x/bnx2x_hsi.h"
#include "../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "cnic.h"
#include "cnic_defs.h"

#define DRV_MODULE_NAME		"cnic"

static char version[] __devinitdata =
	"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
	      "Chen (zongxi@broadcom.com)");
MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);

/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
static LIST_HEAD(cnic_dev_list);
static LIST_HEAD(cnic_udev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);

static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];

/* helper function, assuming cnic_lock is held */
static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
{
	return rcu_dereference_protected(cnic_ulp_tbl[type],
					 lockdep_is_held(&cnic_lock));
}

static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);

static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,
	.cnic_ctl	= cnic_ctl,
};

static struct cnic_ops cnic_bnx2x_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2x,
	.cnic_ctl	= cnic_ctl,
};

static struct workqueue_struct *cnic_wq;

static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);

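/* UIO open handler: a single privileged opener claims the device and the
 * L2 rings are re-initialized for it.
 */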
static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;
	struct cnic_dev *dev;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (udev->uio_dev != -1)
		return -EBUSY;

	rtnl_lock();
	dev = udev->dev;

	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		rtnl_unlock();
		return -ENODEV;
	}

	udev->uio_dev = iminor(inode);

	cnic_shutdown_rings(dev);
	cnic_init_rings(dev);
	rtnl_unlock();

	return 0;
}

static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;

	udev->uio_dev = -1;

	return 0;
}

static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}

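/* Find the cnic_dev bound to a netdev and take a reference on it. */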
static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}

static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
	atomic_inc(&ulp_ops->ref_count);
}

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
	atomic_dec(&ulp_ops->ref_count);
}

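/* The helpers below forward context/register accesses and L2 ring control
 * to the underlying bnx2/bnx2x driver through the drv_ctl() callback.
 */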
static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
	io->offset = off;
	io->dma_addr = addr;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_l2_ring *ring = &info.data.ring;

	if (start)
		info.cmd = DRV_CTL_START_L2_CMD;
	else
		info.cmd = DRV_CTL_STOP_L2_CMD;

	ring->cid = cid;
	ring->client_id = cl_id;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);
	return io->data;
}

static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}

static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	info.cmd = cmd;
	info.data.credit.credit_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
	u32 i;

	for (i = 0; i < cp->max_cid_space; i++) {
		if (cp->ctx_tbl[i].cid == cid) {
			*l5_cid = i;
			return 0;
		}
	}
	return -EINVAL;
}

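/* Send an iSCSI netlink event to the L5 ULP: a PATH_REQ built from the
 * socket when csk is non-NULL, otherwise IF_DOWN.  PATH_REQ delivery is
 * retried up to three times.
 */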
static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;
	struct cnic_uio_dev *udev = cp->udev;
	int rc = 0, retry = 0;

	if (!udev || udev->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	while (retry < 3) {
		rc = 0;
		rcu_read_lock();
		ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
		if (ulp_ops)
			rc = ulp_ops->iscsi_nl_send_msg(
				cp->ulp_handle[CNIC_ULP_ISCSI],
				msg_type, buf, len);
		rcu_read_unlock();
		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
			break;

		msleep(100);
		retry++;
	}
	return 0;
}

static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);

static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	int rc = -EINVAL;

	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))
			break;

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

		rcu_read_lock();
		if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
			rc = -ENODEV;
			rcu_read_unlock();
			break;
		}
		csk = &cp->csk_tbl[l5_cid];
		csk_hold(csk);
		if (cnic_in_use(csk) &&
		    test_bit(SK_F_CONNECT_START, &csk->flags)) {

			memcpy(csk->ha, path_resp->mac_addr, 6);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));

			if (is_valid_ether_addr(csk->ha)) {
				cnic_cm_set_pg(csk);
			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
				!test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {

				cnic_cm_upcall(cp, csk,
					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
				clear_bit(SK_F_CONNECT_START, &csk->flags);
			}
		}
		csk_put(csk);
		rcu_read_unlock();
		rc = 0;
	}
	}

	return rc;
}

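/* Offload/close/abort preparation helpers.  SK_F_OFFLD_SCHED serializes an
 * offload request against a concurrent close or abort on the same socket.
 */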
static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}

static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}

static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}

int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type)) {
		pr_err("%s: Type %d has already been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	atomic_set(&ulp_ops->ref_count, 0);
	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	rtnl_lock();
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	rtnl_unlock();

	return 0;
}

int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;
	struct cnic_ulp_ops *ulp_ops;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	if (!ulp_ops) {
		pr_err("%s: Type %d has not been registered\n",
		       __func__, ulp_type);
		goto out_unlock;
	}
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
			pr_err("%s: Type %d still has devices registered\n",
			       __func__, ulp_type);
			read_unlock(&cnic_dev_lock);
			goto out_unlock;
		}
	}
	read_unlock(&cnic_dev_lock);

	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);
	synchronize_rcu();
	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
		msleep(100);
		i++;
	}

	if (atomic_read(&ulp_ops->ref_count) != 0)
		netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");

	return 0;

out_unlock:
	mutex_unlock(&cnic_lock);
	return -EINVAL;
}

static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);

static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
		pr_err("%s: Driver with type %d has not been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EAGAIN;
	}
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		pr_err("%s: Type %d has already been registered to this device\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
	cnic_hold(dev);

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);

	mutex_unlock(&cnic_lock);

	return 0;

}
EXPORT_SYMBOL(cnic_register_driver);

static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
		cnic_put(dev);
	} else {
		pr_err("%s: device not registered to this ulp type %d\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EINVAL;
	}
	mutex_unlock(&cnic_lock);

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);

	synchronize_rcu();

	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
	       i < 20) {
		msleep(100);
		i++;
	}
	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");

	return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);

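/* Simple bitmap-based ID allocator used for connection IDs. */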
static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = 0;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}

static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;
}

static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return ret;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);
	return ret;
}

/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
		id += id_tbl->start;
	}

	spin_unlock(&id_tbl->lock);

	return id;
}

static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	if (id == -1)
		return;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}

static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE,
					  dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
				  dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
	dma->num_pages = 0;
}

static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian format. */
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
	}
}

static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in little endian format. */
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
	}
}

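/* Allocate an array of coherent DMA pages and, if use_pg_tbl is set, a page
 * table describing them (filled in by the chip-specific setup_pgtbl op).
 */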
static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
						    BCM_PAGE_SIZE,
						    &dma->pg_map_arr[i],
						    GFP_ATOMIC);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}
	if (!use_pg_tbl)
		return 0;

	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
			  ~(BCM_PAGE_SIZE - 1);
	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
					&dma->pgtbl_map, GFP_ATOMIC);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}

static void cnic_free_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					  cp->ctx_arr[i].ctx,
					  cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;
		}
	}
}

static void __cnic_free_uio(struct cnic_uio_dev *udev)
{
	uio_unregister_device(&udev->cnic_uinfo);

	if (udev->l2_buf) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
				  udev->l2_buf, udev->l2_buf_map);
		udev->l2_buf = NULL;
	}

	if (udev->l2_ring) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
				  udev->l2_ring, udev->l2_ring_map);
		udev->l2_ring = NULL;
	}

	pci_dev_put(udev->pdev);
	kfree(udev);
}

static void cnic_free_uio(struct cnic_uio_dev *udev)
{
	if (!udev)
		return;

	write_lock(&cnic_dev_lock);
	list_del_init(&udev->list);
	write_unlock(&cnic_dev_lock);
	__cnic_free_uio(udev);
}

static void cnic_free_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;

	if (udev) {
		udev->dev = NULL;
		cp->udev = NULL;
	}

	cnic_free_context(dev);
	kfree(cp->ctx_arr);
	cp->ctx_arr = NULL;
	cp->ctx_blks = 0;

	cnic_free_dma(dev, &cp->gbl_buf_info);
	cnic_free_dma(dev, &cp->conn_buf_info);
	cnic_free_dma(dev, &cp->kwq_info);
	cnic_free_dma(dev, &cp->kwq_16_data_info);
	cnic_free_dma(dev, &cp->kcq2.dma);
	cnic_free_dma(dev, &cp->kcq1.dma);
	kfree(cp->iscsi_tbl);
	cp->iscsi_tbl = NULL;
	kfree(cp->ctx_tbl);
	cp->ctx_tbl = NULL;

	cnic_free_id_tbl(&cp->fcoe_cid_tbl);
	cnic_free_id_tbl(&cp->cid_tbl);
}

static int cnic_alloc_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
		int i, k, arr_size;

		cp->ctx_blk_size = BCM_PAGE_SIZE;
		cp->cids_per_blk = BCM_PAGE_SIZE / 128;
		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
			   sizeof(struct cnic_ctx);
		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
		if (cp->ctx_arr == NULL)
			return -ENOMEM;

		k = 0;
		for (i = 0; i < 2; i++) {
			u32 j, reg, off, lo, hi;

			if (i == 0)
				off = BNX2_PG_CTX_MAP;
			else
				off = BNX2_ISCSI_CTX_MAP;

			reg = cnic_reg_rd_ind(dev, off);
			lo = reg >> 16;
			hi = reg & 0xffff;
			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
				cp->ctx_arr[k].cid = j;
		}

		cp->ctx_blks = k;
		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
			cp->ctx_blks = 0;
			return -ENOMEM;
		}

		for (i = 0; i < cp->ctx_blks; i++) {
			cp->ctx_arr[i].ctx =
				dma_alloc_coherent(&dev->pcidev->dev,
						   BCM_PAGE_SIZE,
						   &cp->ctx_arr[i].mapping,
						   GFP_KERNEL);
			if (cp->ctx_arr[i].ctx == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}

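/* Allocate the kernel completion queue (KCQ) pages.  On bnx2x the pages are
 * chained together through the bnx2x_bd_chain_next entry at the end of each
 * page.
 */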
static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info)
{
	int err, i, is_bnx2 = 0;
	struct kcqe **kcq;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags))
		is_bnx2 = 1;

	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, is_bnx2);
	if (err)
		return err;

	kcq = (struct kcqe **) info->dma.pg_arr;
	info->kcq = kcq;

	if (is_bnx2)
		return 0;

	for (i = 0; i < KCQ_PAGE_CNT; i++) {
		struct bnx2x_bd_chain_next *next =
			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
		int j = i + 1;

		if (j >= KCQ_PAGE_CNT)
			j = 0;
		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
	}
	return 0;
}

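/* Allocate (or reuse, if another cnic_dev shares the same PCI device) the
 * UIO memory exported to userspace: the L2 ring and the L2 receive buffers.
 */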
static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(udev, &cnic_udev_list, list) {
		if (udev->pdev == dev->pcidev) {
			udev->dev = dev;
			cp->udev = udev;
			read_unlock(&cnic_dev_lock);
			return 0;
		}
	}
	read_unlock(&cnic_dev_lock);

	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
	if (!udev)
		return -ENOMEM;

	udev->uio_dev = -1;

	udev->dev = dev;
	udev->pdev = dev->pcidev;
	udev->l2_ring_size = pages * BCM_PAGE_SIZE;
	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
					   &udev->l2_ring_map,
					   GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_ring)
		goto err_udev;

	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
	udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
					  &udev->l2_buf_map,
					  GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_buf)
		goto err_dma;

	write_lock(&cnic_dev_lock);
	list_add(&udev->list, &cnic_udev_list);
	write_unlock(&cnic_dev_lock);

	pci_dev_get(udev->pdev);

	cp->udev = udev;

	return 0;

 err_dma:
	dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
			  udev->l2_ring, udev->l2_ring_map);
 err_udev:
	kfree(udev);
	return -ENOMEM;
}

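/* Describe the register window, status block, L2 ring and L2 buffers as UIO
 * memory regions and register the UIO device.
 */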
static int cnic_init_uio(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	struct uio_info *uinfo;
	int ret = 0;

	if (!udev)
		return -ENOMEM;

	uinfo = &udev->cnic_uinfo;

	uinfo->mem[0].addr = dev->netdev->base_addr;
	uinfo->mem[0].internal_addr = dev->regview;
	uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
	uinfo->mem[0].memtype = UIO_MEM_PHYS;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
					PAGE_MASK;
		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
		else
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;

		uinfo->name = "bnx2_cnic";
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
			PAGE_MASK;
		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);

		uinfo->name = "bnx2x_cnic";
	}

	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
	uinfo->mem[2].size = udev->l2_ring_size;
	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
	uinfo->mem[3].size = udev->l2_buf_size;
	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;

	uinfo->version = CNIC_MODULE_VERSION;
	uinfo->irq = UIO_IRQ_CUSTOM;

	uinfo->open = cnic_uio_open;
	uinfo->release = cnic_uio_close;

	if (udev->uio_dev == -1) {
		if (!uinfo->priv) {
			uinfo->priv = udev;

			ret = uio_register_device(&udev->pdev->dev, uinfo);
		}
	} else {
		cnic_init_rings(dev);
	}

	return ret;
}

static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret;

	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;

	ret = cnic_alloc_kcq(dev, &cp->kcq1);
	if (ret)
		goto error;

	ret = cnic_alloc_context(dev);
	if (ret)
		goto error;

	ret = cnic_alloc_uio_rings(dev, 2);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return ret;
}

static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ctx_blk_size = cp->ethdev->ctx_blk_size;
	int total_mem, blks, i;

	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
	blks = total_mem / ctx_blk_size;
	if (total_mem % ctx_blk_size)
		blks++;

	if (blks > cp->ethdev->ctx_tbl_len)
		return -ENOMEM;

	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
	if (cp->ctx_arr == NULL)
		return -ENOMEM;

	cp->ctx_blks = blks;
	cp->ctx_blk_size = ctx_blk_size;
	if (!BNX2X_CHIP_IS_57710(cp->chip_id))
		cp->ctx_align = 0;
	else
		cp->ctx_align = ctx_blk_size;

	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;

	for (i = 0; i < blks; i++) {
		cp->ctx_arr[i].ctx =
			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					   &cp->ctx_arr[i].mapping,
					   GFP_KERNEL);
		if (cp->ctx_arr[i].ctx == NULL)
			return -ENOMEM;

		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
				cnic_free_context(dev);
				cp->ctx_blk_size += cp->ctx_align;
				i = -1;
				continue;
			}
		}
	}
	return 0;
}

static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_cid = ethdev->starting_cid;
	int i, j, n, ret, pages;
	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;

	cp->iro_arr = ethdev->iro_arr;

	cp->max_cid_space = MAX_ISCSI_TBL_SZ + BNX2X_FCOE_NUM_CONNECTIONS;
	cp->iscsi_start_cid = start_cid;
	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;

	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
		cp->max_cid_space += BNX2X_FCOE_NUM_CONNECTIONS;
		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
		if (!cp->fcoe_init_cid)
			cp->fcoe_init_cid = 0x10;
	}

	if (start_cid < BNX2X_ISCSI_START_CID) {
		u32 delta = BNX2X_ISCSI_START_CID - start_cid;

		cp->iscsi_start_cid = BNX2X_ISCSI_START_CID;
		cp->fcoe_start_cid += delta;
		cp->max_cid_space += delta;
	}

	cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
				GFP_KERNEL);
	if (!cp->iscsi_tbl)
		goto error;

	cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
				cp->max_cid_space, GFP_KERNEL);
	if (!cp->ctx_tbl)
		goto error;

	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
	}

	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;

	pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
		PAGE_SIZE;

	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
	if (ret)
		return -ENOMEM;

	n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
		long off = CNIC_KWQ16_DATA_SIZE * (i % n);

		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
						   off;

		if ((i % n) == (n - 1))
			j++;
	}

	ret = cnic_alloc_kcq(dev, &cp->kcq1);
	if (ret)
		goto error;

	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
		ret = cnic_alloc_kcq(dev, &cp->kcq2);
		if (ret)
			goto error;
	}

	pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS *
			   BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->conn_buf_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
	if (ret)
		goto error;

	ret = cnic_alloc_bnx2x_context(dev);
	if (ret)
		goto error;

	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;

	cp->l2_rx_ring_size = 15;

	ret = cnic_alloc_uio_rings(dev, 4);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return -ENOMEM;
}

static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
	return cp->max_kwq_idx -
		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}

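/* Copy kernel work queue entries (KWQEs) into the bnx2 KWQ and ring the
 * doorbell.
 */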
static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kwqe *prod_qe;
	u16 prod, sw_prod, i;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	spin_lock_bh(&cp->cnic_ulp_lock);
	if (num_wqes > cnic_kwq_avail(cp) &&
	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
		spin_unlock_bh(&cp->cnic_ulp_lock);
		return -EAGAIN;
	}

	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	prod = cp->kwq_prod_idx;
	sw_prod = prod & MAX_KWQ_IDX;
	for (i = 0; i < num_wqes; i++) {
		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
		prod++;
		sw_prod = prod & MAX_KWQ_IDX;
	}
	cp->kwq_prod_idx = prod;

	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);

	spin_unlock_bh(&cp->cnic_ulp_lock);
	return 0;
}

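/* bnx2x path: build a single slow-path element (SPE) for the given
 * connection and submit it through the bnx2x driver.
 */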
static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
				   union l5cm_specific_data *l5_data)
{
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	dma_addr_t map;

	map = ctx->kwqe_data_mapping;
	l5_data->phy_address.lo = (u64) map & 0xffffffff;
	l5_data->phy_address.hi = (u64) map >> 32;
	return ctx->kwqe_data;
}

static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
				u32 type, union l5cm_specific_data *l5_data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l5cm_spe kwqe;
	struct kwqe_16 *kwq[1];
	u16 type_16;
	int ret;

	kwqe.hdr.conn_and_cmd_data =
		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
			     BNX2X_HW_CID(cp, cid)));

	type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
	type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
		   SPE_HDR_FUNCTION_ID;

	kwqe.hdr.type = cpu_to_le16(type_16);
	kwqe.hdr.reserved1 = 0;
	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);

	kwq[0] = (struct kwqe_16 *) &kwqe;

	spin_lock_bh(&cp->cnic_ulp_lock);
	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
	spin_unlock_bh(&cp->cnic_ulp_lock);

	if (ret == 1)
		return 0;

	return -EBUSY;
}

static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
				   struct kcqe *cqes[], u32 num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (likely(ulp_ops)) {
		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
					  cqes, num_cqes);
	}
	rcu_read_unlock();
}

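/* ISCSI_INIT1: size the task array, R2T queue and HQ from the request and
 * program the per-function iSCSI parameters into the storm internal RAM.
 */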
static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
	int hq_bds, pages;
	u32 pfid = cp->pfid;

	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
	cp->num_ccells = req1->num_ccells_per_conn;
	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
			      cp->num_iscsi_tasks;
	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
			BNX2X_ISCSI_R2TQE_SIZE;
	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
	cp->num_cqs = req1->num_cqs;

	if (!dev->max_iscsi_conn)
		return 0;

	/* init Tstorm RAM */
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);

	/* init Ustorm RAM */
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
		  req1->rq_buffer_size);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Xstorm RAM */
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Cstorm RAM */
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);

	return 0;
}

static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	if (!dev->max_iscsi_conn) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
		goto done;
	}

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);