/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#define pr_fmt(fmt)    "iommu: " fmt

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/bitops.h>
#include <linux/property.h>
#include <trace/events/iommu.h>

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);
static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA;

struct iommu_callback_data {
	const struct iommu_ops *ops;
};

struct iommu_group {
	struct kobject kobj;
	struct kobject *devices_kobj;
	struct list_head devices;
	struct mutex mutex;
	struct blocking_notifier_head notifier;
	void *iommu_data;
	void (*iommu_data_release)(void *iommu_data);
	char *name;
	int id;
	struct iommu_domain *default_domain;
	struct iommu_domain *domain;
};

struct group_device {
	struct list_head list;
	struct device *dev;
	char *name;
};

struct iommu_group_attribute {
	struct attribute attr;
	ssize_t (*show)(struct iommu_group *group, char *buf);
	ssize_t (*store)(struct iommu_group *group,
			 const char *buf, size_t count);
};

static const char * const iommu_group_resv_type_string[] = {
	[IOMMU_RESV_DIRECT]	= "direct",
	[IOMMU_RESV_RESERVED]	= "reserved",
	[IOMMU_RESV_MSI]	= "msi",
	[IOMMU_RESV_SW_MSI]	= "msi",
};

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
struct iommu_group_attribute iommu_group_attr_##_name =		\
	__ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)	\
	container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)		\
	container_of(_kobj, struct iommu_group, kobj)

static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);

int iommu_device_register(struct iommu_device *iommu)
{
	spin_lock(&iommu_device_lock);
	list_add_tail(&iommu->list, &iommu_device_list);
	spin_unlock(&iommu_device_lock);

	return 0;
}

void iommu_device_unregister(struct iommu_device *iommu)
{
	spin_lock(&iommu_device_lock);
	list_del(&iommu->list);
	spin_unlock(&iommu_device_lock);
}

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group);
static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group);

static int __init iommu_set_def_domain_type(char *str)
{
	bool pt;

	if (!str || strtobool(str, &pt))
		return -EINVAL;

	iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA;
	return 0;
}
early_param("iommu.passthrough", iommu_set_def_domain_type);
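
/*
 * Example (illustrative, not from the original source): booting with
 * "iommu.passthrough=1" makes newly allocated default domains use
 * IOMMU_DOMAIN_IDENTITY (pass-through), while "iommu.passthrough=0"
 * keeps the translated IOMMU_DOMAIN_DMA default set above.
 */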

static ssize_t iommu_group_attr_show(struct kobject *kobj,
				     struct attribute *__attr, char *buf)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->show)
		ret = attr->show(group, buf);
	return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
				      struct attribute *__attr,
				      const char *buf, size_t count)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->store)
		ret = attr->store(group, buf, count);
	return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
	.show = iommu_group_attr_show,
	.store = iommu_group_attr_store,
};
static int iommu_group_create_file(struct iommu_group *group,
				   struct iommu_group_attribute *attr)
{
	return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
				    struct iommu_group_attribute *attr)
{
	sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
	return sprintf(buf, "%s\n", group->name);
}

/**
 * iommu_insert_resv_region - Insert a new region in the
 * list of reserved regions.
 * @new: new region to insert
 * @regions: list of regions
 *
 * The new element is sorted by address with respect to the other
 * regions of the same type. In case it overlaps with another
 * region of the same type, regions are merged. In case it
 * overlaps with another region of different type, regions are
 * not merged.
 */
static int iommu_insert_resv_region(struct iommu_resv_region *new,
				    struct list_head *regions)
{
	struct iommu_resv_region *region;
	phys_addr_t start = new->start;
	phys_addr_t end = new->start + new->length - 1;
	struct list_head *pos = regions->next;

	while (pos != regions) {
		struct iommu_resv_region *entry =
			list_entry(pos, struct iommu_resv_region, list);
		phys_addr_t a = entry->start;
		phys_addr_t b = entry->start + entry->length - 1;
		int type = entry->type;

		if (end < a) {
			goto insert;
		} else if (start > b) {
			pos = pos->next;
		} else if ((start >= a) && (end <= b)) {
			if (new->type == type)
				goto done;
			else
				pos = pos->next;
		} else {
			if (new->type == type) {
				phys_addr_t new_start = min(a, start);
				phys_addr_t new_end = max(b, end);

				list_del(&entry->list);
				entry->start = new_start;
				entry->length = new_end - new_start + 1;
				iommu_insert_resv_region(entry, regions);
			} else {
				pos = pos->next;
			}
		}
	}
insert:
	region = iommu_alloc_resv_region(new->start, new->length,
					 new->prot, new->type);
	if (!region)
		return -ENOMEM;

	list_add_tail(&region->list, pos);
done:
	return 0;
}
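
/*
 * Worked example (illustrative, not from the original source): inserting a
 * direct region [0x8000, 0x9fff] into a list that already contains a direct
 * region [0x9000, 0xbfff] merges the two into a single direct region
 * [0x8000, 0xbfff]; an overlapping region of a different type (e.g. reserved)
 * would instead be kept as a separate list entry.
 */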

static int
iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
				 struct list_head *group_resv_regions)
{
	struct iommu_resv_region *entry;
	int ret = 0;

	list_for_each_entry(entry, dev_resv_regions, list) {
		ret = iommu_insert_resv_region(entry, group_resv_regions);
		if (ret)
			break;
	}
	return ret;
}

int iommu_get_group_resv_regions(struct iommu_group *group,
				 struct list_head *head)
{
	struct group_device *device;
	int ret = 0;

	mutex_lock(&group->mutex);
	list_for_each_entry(device, &group->devices, list) {
		struct list_head dev_resv_regions;

		INIT_LIST_HEAD(&dev_resv_regions);
		iommu_get_resv_regions(device->dev, &dev_resv_regions);
		ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
		iommu_put_resv_regions(device->dev, &dev_resv_regions);
		if (ret)
			break;
	}
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);

static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
					     char *buf)
{
	struct iommu_resv_region *region, *next;
	struct list_head group_resv_regions;
	char *str = buf;

	INIT_LIST_HEAD(&group_resv_regions);
	iommu_get_group_resv_regions(group, &group_resv_regions);

	list_for_each_entry_safe(region, next, &group_resv_regions, list) {
		str += sprintf(str, "0x%016llx 0x%016llx %s\n",
			       (long long int)region->start,
			       (long long int)(region->start +
						region->length - 1),
			       iommu_group_resv_type_string[region->type]);
		kfree(region);
	}

	return (str - buf);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static IOMMU_GROUP_ATTR(reserved_regions, 0444,
			iommu_group_show_resv_regions, NULL);

static void iommu_group_release(struct kobject *kobj)
{
	struct iommu_group *group = to_iommu_group(kobj);

	pr_debug("Releasing group %d\n", group->id);

	if (group->iommu_data_release)
		group->iommu_data_release(group->iommu_data);

	ida_simple_remove(&iommu_group_ida, group->id);

	if (group->default_domain)
		iommu_domain_free(group->default_domain);

	kfree(group->name);
	kfree(group);
}

static struct kobj_type iommu_group_ktype = {
	.sysfs_ops = &iommu_group_sysfs_ops,
	.release = iommu_group_release,
};

/**
 * iommu_group_alloc - Allocate a new group
 * @name: Optional name to associate with group, visible in sysfs
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group.  The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added.  Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
	struct iommu_group *group;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->kobj.kset = iommu_group_kset;
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->devices);
	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
	if (ret < 0) {
		kfree(group);
		return ERR_PTR(ret);
	}
	group->id = ret;

	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
				   NULL, "%d", group->id);
	if (ret) {
		ida_simple_remove(&iommu_group_ida, group->id);
		kfree(group);
		return ERR_PTR(ret);
	}

	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
	if (!group->devices_kobj) {
		kobject_put(&group->kobj); /* triggers .release & free */
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group.  We can therefore
	 * use the devices_kobj for reference counting.
	 */
	kobject_put(&group->kobj);

	ret = iommu_group_create_file(group,
				      &iommu_group_attr_reserved_regions);
	if (ret)
		return ERR_PTR(ret);

	pr_debug("Allocated group %d\n", group->id);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);
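
/*
 * Usage sketch (illustrative only, not part of the original file): an IOMMU
 * driver's ->add_device() callback might create a group and enroll the
 * device roughly as follows; "mydrv_add_device" is a hypothetical name.
 *
 *	static int mydrv_add_device(struct device *dev)
 *	{
 *		struct iommu_group *group;
 *		int ret;
 *
 *		group = iommu_group_alloc();
 *		if (IS_ERR(group))
 *			return PTR_ERR(group);
 *
 *		ret = iommu_group_add_device(group, dev);
 *		iommu_group_put(group);	// drop the allocation reference
 *		return ret;
 *	}
 */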

struct iommu_group *iommu_group_get_by_id(int id)
{
	struct kobject *group_kobj;
	struct iommu_group *group;
	const char *name;

	if (!iommu_group_kset)
		return NULL;

	name = kasprintf(GFP_KERNEL, "%d", id);
	if (!name)
		return NULL;

	group_kobj = kset_find_obj(iommu_group_kset, name);
	kfree(name);

	if (!group_kobj)
		return NULL;

	group = container_of(group_kobj, struct iommu_group, kobj);
	BUG_ON(group->id != id);

	kobject_get(group->devices_kobj);
	kobject_put(&group->kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_by_id);

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to retrieve it.  Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to set the data after
 * the group has been allocated.  Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
			       void (*release)(void *iommu_data))
{
	group->iommu_data = iommu_data;
	group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);

/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group.  When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
	int ret;

	if (group->name) {
		iommu_group_remove_file(group, &iommu_group_attr_name);
		kfree(group->name);
		group->name = NULL;
		if (!name)
			return 0;
	}

	group->name = kstrdup(name, GFP_KERNEL);
	if (!group->name)
		return -ENOMEM;

	ret = iommu_group_create_file(group, &iommu_group_attr_name);
	if (ret) {
		kfree(group->name);
		group->name = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);

static int iommu_group_create_direct_mappings(struct iommu_group *group,
					      struct device *dev)
{
	struct iommu_domain *domain = group->default_domain;
	struct iommu_resv_region *entry;
	struct list_head mappings;
	unsigned long pg_size;
	int ret = 0;

	if (!domain || domain->type != IOMMU_DOMAIN_DMA)
		return 0;

	BUG_ON(!domain->pgsize_bitmap);

	pg_size = 1UL << __ffs(domain->pgsize_bitmap);
	INIT_LIST_HEAD(&mappings);

	iommu_get_resv_regions(dev, &mappings);

	/* We need to consider overlapping regions for different devices */
	list_for_each_entry(entry, &mappings, list) {
		dma_addr_t start, end, addr;

		if (domain->ops->apply_resv_region)
			domain->ops->apply_resv_region(dev, domain, entry);

		start = ALIGN(entry->start, pg_size);
		end   = ALIGN(entry->start + entry->length, pg_size);

		if (entry->type != IOMMU_RESV_DIRECT)
			continue;

		for (addr = start; addr < end; addr += pg_size) {
			phys_addr_t phys_addr;

			phys_addr = iommu_iova_to_phys(domain, addr);
			if (phys_addr)
				continue;

			ret = iommu_map(domain, addr, addr, pg_size, entry->prot);
			if (ret)
				goto out;
		}

	}

out:
	iommu_put_resv_regions(dev, &mappings);

	return ret;
}

/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group.  Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
	int ret, i = 0;
	struct group_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	device->dev = dev;

	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
	if (ret)
		goto err_free_device;

	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
	if (!device->name) {
		ret = -ENOMEM;
		goto err_remove_link;
	}

	ret = sysfs_create_link_nowarn(group->devices_kobj,
				       &dev->kobj, device->name);
	if (ret) {
		if (ret == -EEXIST && i >= 0) {
			/*
			 * Account for the slim chance of collision
			 * and append an instance to the name.
			 */
			kfree(device->name);
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}
		goto err_free_name;
	}

	kobject_get(group->devices_kobj);

	dev->iommu_group = group;

	iommu_group_create_direct_mappings(group, dev);

	mutex_lock(&group->mutex);
	list_add_tail(&device->list, &group->devices);
	if (group->domain)
		ret = __iommu_attach_device(group->domain, dev);
	mutex_unlock(&group->mutex);
	if (ret)
		goto err_put_group;

	/* Notify any listeners about change to group. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);

	trace_add_device_to_group(group->id, dev);

	pr_info("Adding device %s to group %d\n", dev_name(dev), group->id);

	return 0;

err_put_group:
	mutex_lock(&group->mutex);
	list_del(&device->list);
	mutex_unlock(&group->mutex);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
err_free_name:
	kfree(device->name);
err_remove_link:
	sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
	kfree(device);
	pr_err("Failed to add device %s to group %d: %d\n", dev_name(dev), group->id, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group.  This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct group_device *tmp_device, *device = NULL;

	pr_info("Removing device %s from group %d\n", dev_name(dev), group->id);

	/* Pre-notify listeners that a device is being removed. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);

	mutex_lock(&group->mutex);
	list_for_each_entry(tmp_device, &group->devices, list) {
		if (tmp_device->dev == dev) {
			device = tmp_device;
			list_del(&device->list);
			break;
		}
	}
	mutex_unlock(&group->mutex);

	if (!device)
		return;

	sysfs_remove_link(group->devices_kobj, device->name);
	sysfs_remove_link(&dev->kobj, "iommu_group");

	trace_remove_device_from_group(group->id, dev);

	kfree(device->name);
	kfree(device);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

static int iommu_group_device_count(struct iommu_group *group)
{
	struct group_device *entry;
	int ret = 0;

	list_for_each_entry(entry, &group->devices, list)
		ret++;

	return ret;
}

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
				      int (*fn)(struct device *, void *))
{
	struct group_device *device;
	int ret = 0;

	list_for_each_entry(device, &group->devices, list) {
		ret = fn(device->dev, data);
		if (ret)
			break;
	}
	return ret;
}


int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_group_for_each_dev(group, data, fn);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
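
/*
 * Usage sketch (illustrative only): a group user can walk the devices in a
 * group with a callback; the hypothetical callback below simply counts them.
 * Returning a non-zero value from the callback stops the iteration.
 *
 *	static int count_dev(struct device *dev, void *data)
 *	{
 *		int *count = data;
 *
 *		(*count)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *	iommu_group_for_each_dev(group, &count, count_dev);
 */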

/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device.  If found, the group is returned and the group
 * reference is incremented, else NULL.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references on an
 * existing group.  Returns the given group for convenience.
 */
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
	kobject_get(group->devices_kobj);
	return group;
}

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group.  Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

/**
 * iommu_group_register_notifier - Register a notifier for group changes
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * This function allows iommu group users to track changes in a group.
 * See include/linux/iommu.h for actions sent via this notifier.  Caller
 * should hold a reference to the group throughout notifier registration.
 */
int iommu_group_register_notifier(struct iommu_group *group,
				  struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_register_notifier);

/**
 * iommu_group_unregister_notifier - Unregister a notifier
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * Unregister a previously registered group notifier block.
 */
int iommu_group_unregister_notifier(struct iommu_group *group,
				    struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);

/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns);

/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding.  This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as it passes through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups.  For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
 */
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
							unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
		return NULL;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus ||
		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
			continue;

		group = get_pci_alias_group(tmp, devfns);
		if (group) {
			pci_dev_put(tmp);
			return group;
		}
	}

	return NULL;
}

/*
 * Look for aliases to or from the given device for existing groups. DMA
 * aliases are only supported on the same bus, therefore the search
 * space is quite small (especially since we're really only looking at pcie
 * devices, and therefore only expect multiple slots on the root complex or
 * downstream switch ports).  It's conceivable though that a pair of
 * multifunction devices could have aliases between them that would cause a
 * loop.  To prevent this, we use a bitmap to track where we've been.
 */
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
		return NULL;

	group = iommu_group_get(&pdev->dev);
	if (group)
		return group;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus)
			continue;

		/* We alias them or they alias us */
		if (pci_devs_are_dma_aliases(pdev, tmp)) {
			group = get_pci_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}

			group = get_pci_function_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}
		}
	}

	return NULL;
}

struct group_for_pci_data {
	struct pci_dev *pdev;
	struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device.  Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct group_for_pci_data *data = opaque;

	data->pdev = pdev;
	data->group = iommu_group_get(&pdev->dev);

	return data->group != NULL;
}

/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per device.
 */
struct iommu_group *generic_device_group(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_alloc();
	if (IS_ERR(group))
		return NULL;

	return group;
}

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
struct iommu_group *pci_device_group(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct group_for_pci_data data;
	struct pci_bus *bus;
	struct iommu_group *group = NULL;
	u64 devfns[4] = { 0 };

	if (WARN_ON(!dev_is_pci(dev)))
		return ERR_PTR(-EINVAL);

	/*
	 * Find the upstream DMA alias for the device.  A device must not
	 * be aliased due to topology in order to have its own IOMMU group.
	 * If we find an alias along the way that already belongs to a
	 * group, use it.
	 */
	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
		return data.group;

	pdev = data.pdev;

	/*
	 * Continue upstream from the point of minimum IOMMU granularity
	 * due to aliases to the point where devices are protected from
	 * peer-to-peer DMA by PCI ACS.  Again, if we find an existing
	 * group, use it.
	 */
	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		if (!bus->self)
			continue;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		pdev = bus->self;

		group = iommu_group_get(&pdev->dev);
		if (group)
			return group;
	}

	/*
	 * Look for existing groups on device aliases.  If we alias another
	 * device or another device aliases us, use the same group.
	 */
	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/*
	 * Look for existing groups on non-isolated functions on the same
	 * slot and aliases of those functions, if any.  No need to clear
	 * the search bitmap, the tested devfns are still valid.
	 */
	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/* No shared group found, allocate new */
	group = iommu_group_alloc();
	if (IS_ERR(group))
		return NULL;

	return group;
}

/**
 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
 * @dev: target device
 *
 * This function is intended to be called by IOMMU drivers and extended to
 * support common, bus-defined algorithms when determining or creating the
 * IOMMU group for a device.  On success, the caller will hold a reference
 * to the returned IOMMU group, which will already include the provided
 * device.  The reference should be released with iommu_group_put().
 */
struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (group)
		return group;

	group = ERR_PTR(-EINVAL);

	if (ops && ops->device_group)
		group = ops->device_group(dev);

	if (IS_ERR(group))
		return group;

	/*
	 * Try to allocate a default domain - needs support from the
	 * IOMMU driver.
	 */
	if (!group->default_domain) {
		struct iommu_domain *dom;

		dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
		if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
			dev_warn(dev,
				 "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
				 iommu_def_domain_type);
			dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
		}

		group->default_domain = dom;
		if (!group->domain)
			group->domain = dom;
	}

	ret = iommu_group_add_device(group, dev);
	if (ret) {
		iommu_group_put(group);
		return ERR_PTR(ret);
	}

	return group;
}

struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
	return group->default_domain;
}

static int add_iommu_group(struct device *dev, void *data)
{
	struct iommu_callback_data *cb = data;
	const struct iommu_ops *ops = cb->ops;
	int ret;

	if (!ops->add_device)
		return 0;

	WARN_ON(dev->iommu_group);

	ret = ops->add_device(dev);

	/*
	 * We ignore -ENODEV errors for now, as they just mean that the
	 * device is not translated by an IOMMU. We still care about
	 * other errors and fail to initialize when they happen.
	 */
	if (ret == -ENODEV)
		ret = 0;

	return ret;
}

static int remove_iommu_group(struct device *dev, void *data)
{
	struct iommu_callback_data *cb = data;
	const struct iommu_ops *ops = cb->ops;

	if (ops->remove_device && dev->iommu_group)
		ops->remove_device(dev);

	return 0;
}

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct device *dev = data;
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	unsigned long group_action = 0;

	/*
	 * ADD/DEL call into iommu driver ops if provided, which may
	 * result in ADD/DEL notifiers to group->notifier
	 */
	if (action == BUS_NOTIFY_ADD_DEVICE) {
		if (ops->add_device)
			return ops->add_device(dev);
	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
		if (ops->remove_device && dev->iommu_group) {
			ops->remove_device(dev);
			return 0;
		}
	}

	/*
	 * Remaining BUS_NOTIFYs get filtered and republished to the
	 * group, if anyone is listening
	 */
	group = iommu_group_get(dev);
	if (!group)
		return 0;

	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
		break;
	case BUS_NOTIFY_BOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
		break;
	case BUS_NOTIFY_UNBIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
		break;
	case BUS_NOTIFY_UNBOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
		break;
	}

	if (group_action)
		blocking_notifier_call_chain(&group->notifier,
					     group_action, dev);

	iommu_group_put(group);
	return 0;
}

static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
{
	int err;
	struct notifier_block *nb;
	struct iommu_callback_data cb = {
		.ops = ops,
	};

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	nb->notifier_call = iommu_bus_notifier;

	err = bus_register_notifier(bus, nb);
	if (err)
		goto out_free;

	err = bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
	if (err)
		goto out_err;


	return 0;

out_err:
	/* Clean up */
	bus_for_each_dev(bus, NULL, &cb, remove_iommu_group);
	bus_unregister_notifier(bus, nb);

out_free:
	kfree(nb);

	return err;
}

/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: bus.
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus. Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up. With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */
int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
{
	int err;

	if (bus->iommu_ops != NULL)
		return -EBUSY;

	bus->iommu_ops = ops;

	/* Do IOMMU specific setup for this bus-type */
	err = iommu_bus_init(bus, ops);
	if (err)
		bus->iommu_ops = NULL;

	return err;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);
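
/*
 * Usage sketch (illustrative only): an IOMMU driver typically registers its
 * ops for a bus once during its own initialisation, e.g.
 *
 *	if (!iommu_present(&platform_bus_type))
 *		bus_set_iommu(&platform_bus_type, &mydrv_iommu_ops);
 *
 * "mydrv_iommu_ops" is a hypothetical struct iommu_ops; bus_set_iommu()
 * returns -EBUSY if ops are already set for that bus.
 */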

bool iommu_present(struct bus_type *bus)
{
	return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
	if (!bus->iommu_ops || !bus->iommu_ops->capable)
		return false;

	return bus->iommu_ops->capable(cap);
}
EXPORT_SYMBOL_GPL(iommu_capable);

/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
					iommu_fault_handler_t handler,
					void *token)
{
	BUG_ON(!domain);

	domain->handler = handler;
	domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
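
/*
 * Usage sketch (illustrative only): a domain owner can install a handler
 * matching iommu_fault_handler_t, for example:
 *
 *	static int mydrv_fault(struct iommu_domain *domain, struct device *dev,
 *			       unsigned long iova, int flags, void *token)
 *	{
 *		dev_err(dev, "unhandled fault at IOVA %lx\n", iova);
 *		return -ENOSYS;	// not handled here
 *	}
 *
 *	iommu_set_fault_handler(domain, mydrv_fault, NULL);
 *
 * "mydrv_fault" is a hypothetical name.
 */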

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type)
{
	struct iommu_domain *domain;

	if (bus == NULL || bus->iommu_ops == NULL)
		return NULL;

	domain = bus->iommu_ops->domain_alloc(type);
	if (!domain)
		return NULL;

	domain->ops  = bus->iommu_ops;
	domain->type = type;
	/* Assume all sizes by default; the driver may override this later */
	domain->pgsize_bitmap  = bus->iommu_ops->pgsize_bitmap;

	return domain;
}

struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
	domain->ops->domain_free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev)
{
	int ret;
	if (unlikely(domain->ops->attach_dev == NULL))
		return -ENODEV;

	ret = domain->ops->attach_dev(domain, dev);
	if (!ret)
		trace_attach_device_to_domain(dev);
	return ret;
}

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	/* FIXME: Remove this when groups are mandatory for iommu drivers */
	if (group == NULL)
		return __iommu_attach_device(domain, dev);

	/*
	 * We have a group - lock it to make sure the device-count doesn't
	 * change while we are attaching
	 */
	mutex_lock(&group->mutex);
	ret = -EINVAL;
	if (iommu_group_device_count(group) != 1)
		goto out_unlock;

	ret = __iommu_attach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device);
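
/*
 * Usage sketch (illustrative only): a typical consumer allocates an
 * unmanaged domain, attaches the device and maps memory into it.  "iova",
 * "phys" and "size" are placeholders for caller-chosen, page-aligned values.
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(dev->bus);
 *
 *	if (!domain)
 *		return -ENOMEM;
 *	if (!iommu_attach_device(domain, dev)) {
 *		iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE);
 *		...
 *		iommu_detach_device(domain, dev);
 *	}
 *	iommu_domain_free(domain);
 */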

static void __iommu_detach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	if (unlikely(domain->ops->detach_dev == NULL))
		return;

	domain->ops->detach_dev(domain, dev);
	trace_detach_device_from_domain(dev);
}

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	/* FIXME: Remove this when groups are mandatory for iommu drivers */
	if (group == NULL)
		return __iommu_detach_device(domain, dev);

	mutex_lock(&group->mutex);
	if (iommu_group_device_count(group) != 1) {
		WARN_ON(1);
		goto out_unlock;
	}

	__iommu_detach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);

struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	struct iommu_domain *domain;
	struct iommu_group *group;

	group = iommu_group_get(dev);
	/* FIXME: Remove this when groups are mandatory for iommu drivers */
	if (group == NULL)
		return NULL;

	domain = group->domain;

	iommu_group_put(group);

	return domain;
}
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);

/*
 * IOMMU groups are really the natural working unit of the IOMMU, but
 * the IOMMU API works on domains and devices.  Bridge that gap by
 * iterating over the devices in a group.  Ideally we'd have a single
 * device which represents the requestor ID of the group, but we also
 * allow IOMMU drivers to create policy defined minimum sets, where
 * the physical hardware may be able to distinguish members, but we
 * wish to group them at a higher level (ex. untrusted multi-function
 * PCI devices).  Thus we attach each device.
 */
static int iommu_group_do_attach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	return __iommu_attach_device(domain, dev);
}

static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group)
{
	int ret;

	if (group->default_domain && group->domain != group->default_domain)
		return -EBUSY;

	ret = __iommu_group_for_each_dev(group, domain,
					 iommu_group_do_attach_device);
	if (ret == 0)
		group->domain = domain;

	return ret;
}

int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_attach_group(domain, group);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_group);

static int iommu_group_do_detach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	__iommu_detach_device(domain, dev);

	return 0;
}

static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group)
{
	int ret;

	if (!group->default_domain) {
		__iommu_group_for_each_dev(group, domain,
					   iommu_group_do_detach_device);
		group->domain = NULL;
		return;
	}

	if (group->domain == group->default_domain)
		return;

	/* Detach by re-attaching to the default domain */
	ret = __iommu_group_for_each_dev(group, group->default_domain,
					 iommu_group_do_attach_device);
	if (ret != 0)
		WARN_ON(1);
	else
		group->domain = group->default_domain;
}

void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	mutex_lock(&group->mutex);
	__iommu_detach_group(domain, group);
	mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_detach_group);

phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	if (unlikely(domain->ops->iova_to_phys == NULL))
		return 0;

	return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

static size_t iommu_pgsize(struct iommu_domain *domain,
			   unsigned long addr_merge, size_t size)
{
	unsigned int pgsize_idx;
	size_t pgsize;

	/* Max page size that still fits into 'size' */
	pgsize_idx = __fls(size);

	/* need to consider alignment requirements ? */
	if (likely(addr_merge)) {
		/* Max page size allowed by address */
		unsigned int align_pgsize_idx = __ffs(addr_merge);
		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
	}

	/* build a mask of acceptable page sizes */
	pgsize = (1UL << (pgsize_idx + 1)) - 1;

	/* throw away page sizes not supported by the hardware */
	pgsize &= domain->pgsize_bitmap;

	/* make sure we're still sane */
	BUG_ON(!pgsize);

	/* pick the biggest page */
	pgsize_idx = __fls(pgsize);
	pgsize = 1UL << pgsize_idx;

	return pgsize;
}

int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot)
{
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
	phys_addr_t orig_paddr = paddr;
	int ret = 0;

	if (unlikely(domain->ops->map == NULL ||
		     domain->pgsize_bitmap == 0UL))
		return -ENODEV;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return -EINVAL;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs