/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "common.xml.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_iommu.h"
#include "etnaviv_mmu.h"

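/*
 * Fault handler registered with the IOMMU domain: just log the faulting
 * IOVA and fault flags for debugging.
 */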
static int etnaviv_fault_handler(struct iommu_domain *iommu, struct device *dev,
		unsigned long iova, int flags, void *arg)
{
	DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
	return 0;
}

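/*
 * Map a scatterlist into the IOMMU domain at consecutive device addresses
 * starting at @iova.  On error, unmap everything mapped so far and return
 * the error.
 */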
int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
		struct sg_table *sgt, unsigned len, int prot)
{
	struct iommu_domain *domain = iommu->domain;
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i, j;
	int ret;

	if (!domain || !sgt)
		return -EINVAL;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa = sg_dma_address(sg) - sg->offset;
		size_t bytes = sg_dma_len(sg) + sg->offset;

		VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);

		ret = iommu_map(domain, da, pa, bytes, prot);
		if (ret)
			goto fail;

		da += bytes;
	}

	return 0;

fail:
	da = iova;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes = sg_dma_len(sg) + sg->offset;

		iommu_unmap(domain, da, bytes);
		da += bytes;
	}
	return ret;
}

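/*
 * Unmap a scatterlist from the IOMMU domain, walking the same device
 * address range that etnaviv_iommu_map() populated.
 */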
int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
		struct sg_table *sgt, unsigned len)
{
	struct iommu_domain *domain = iommu->domain;
	struct scatterlist *sg;
	unsigned int da = iova;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = sg_dma_len(sg) + sg->offset;
		size_t unmapped;

		unmapped = iommu_unmap(domain, da, bytes);
		if (unmapped < bytes)
			return unmapped;

		VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);

		BUG_ON(!PAGE_ALIGNED(bytes));

		da += bytes;
	}

	return 0;
}

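/*
 * Tear down a GEM mapping: unmap the object's pages from the IOMMU and
 * give its IOVA range back to the drm_mm allocator.
 */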
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
	struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	etnaviv_iommu_unmap(mmu, mapping->vram_node.start,
			    etnaviv_obj->sgt, etnaviv_obj->base.size);
	drm_mm_remove_node(&mapping->vram_node);
}

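/*
 * Allocate an IOVA range of @size bytes for @node.  If the address space
 * is exhausted, retry from the bottom of the range and, failing that, use
 * the drm_mm scan API to evict unpinned mappings until the allocation
 * fits.  Must be called with mmu->lock held.
 */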
static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
				   struct drm_mm_node *node, size_t size)
{
	struct etnaviv_vram_mapping *free = NULL;
	int ret;

	lockdep_assert_held(&mmu->lock);

	while (1) {
		struct etnaviv_vram_mapping *m, *n;
		struct list_head list;
		bool found;

		/*
		 * XXX: The DRM_MM_SEARCH_BELOW is really a hack to trick
		 * drm_mm into giving out a low IOVA after address space
		 * rollover. This needs a proper fix.
		 */
		ret = drm_mm_insert_node_in_range(&mmu->mm, node,
			size, 0, mmu->last_iova, ~0UL,
			mmu->last_iova ? DRM_MM_SEARCH_DEFAULT : DRM_MM_SEARCH_BELOW);

		if (ret != -ENOSPC)
			break;

		/*
		 * If we did not search from the start of the MMU region,
		 * try again in case there are free slots.
		 */
		if (mmu->last_iova) {
			mmu->last_iova = 0;
			mmu->need_flush = true;
			continue;
		}

		/* Try to retire some entries */
		drm_mm_init_scan(&mmu->mm, size, 0, 0);

		found = false;
		INIT_LIST_HEAD(&list);
		list_for_each_entry(free, &mmu->mappings, mmu_node) {
			/* If this vram node has not been used, skip this. */
			if (!free->vram_node.mm)
				continue;

			/*
			 * If the iova is pinned, then it's in-use,
			 * so we must keep its mapping.
			 */
			if (free->use)
				continue;

			list_add(&free->scan_node, &list);
			if (drm_mm_scan_add_block(&free->vram_node)) {
				found = true;
				break;
			}
		}

		if (!found) {
			/* Nothing found, clean up and fail */
			list_for_each_entry_safe(m, n, &list, scan_node)
				BUG_ON(drm_mm_scan_remove_block(&m->vram_node));
			break;
		}

		/*
		 * drm_mm does not allow any other operations while
		 * scanning, so we have to remove all blocks first.
		 * If drm_mm_scan_remove_block() returns false, we
		 * can leave the block pinned.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node)
			if (!drm_mm_scan_remove_block(&m->vram_node))
				list_del_init(&m->scan_node);

		/*
		 * Unmap the blocks which need to be reaped from the MMU.
		 * Clear the mmu pointer to prevent mapping_get from finding
		 * this mapping.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node) {
			etnaviv_iommu_remove_mapping(mmu, m);
			m->mmu = NULL;
			list_del_init(&m->mmu_node);
			list_del_init(&m->scan_node);
		}

		/*
		 * We removed enough mappings so that the new allocation will
		 * succeed.  Ensure that the MMU will be flushed before the
		 * associated commit requesting this mapping, and retry the
		 * allocation one more time.
		 */
		mmu->need_flush = true;
	}

	return ret;
}

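/*
 * Map a GEM object into the GPU address space.  Contiguous buffers can be
 * used directly on MMUv1 by offsetting against memory_base; everything
 * else gets an IOVA range allocated and mapped through the IOMMU.
 */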
int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
	struct etnaviv_vram_mapping *mapping)
{
	struct sg_table *sgt = etnaviv_obj->sgt;
	struct drm_mm_node *node;
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	mutex_lock(&mmu->lock);

	/* v1 MMU can optimize single entry (contiguous) scatterlists */
	if (mmu->version == ETNAVIV_IOMMU_V1 &&
	    sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
		u32 iova;

		iova = sg_dma_address(sgt->sgl) - memory_base;
		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
			mapping->iova = iova;
			list_add_tail(&mapping->mmu_node, &mmu->mappings);
			mutex_unlock(&mmu->lock);
			return 0;
		}
	}

	node = &mapping->vram_node;

	ret = etnaviv_iommu_find_iova(mmu, node, etnaviv_obj->base.size);
	if (ret < 0) {
		mutex_unlock(&mmu->lock);
		return ret;
	}

	mmu->last_iova = node->start + etnaviv_obj->base.size;
	mapping->iova = node->start;
	ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
				IOMMU_READ | IOMMU_WRITE);

	if (ret < 0) {
		drm_mm_remove_node(node);
		mutex_unlock(&mmu->lock);
		return ret;
	}

	list_add_tail(&mapping->mmu_node, &mmu->mappings);
	mutex_unlock(&mmu->lock);

	return ret;
}

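/*
 * Drop a GEM mapping from the GPU address space.  Mappings that went
 * through the drm_mm allocator are unmapped and their node released.
 */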
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
	struct etnaviv_vram_mapping *mapping)
{
	WARN_ON(mapping->use);

	mutex_lock(&mmu->lock);

	/* If the vram node is on the mm, unmap and remove the node */
	if (mapping->vram_node.mm == &mmu->mm)
		etnaviv_iommu_remove_mapping(mmu, mapping);

	list_del(&mapping->mmu_node);
	mutex_unlock(&mmu->lock);
}

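/* Free the MMU object along with its drm_mm allocator and IOMMU domain. */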
void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
{
	drm_mm_takedown(&mmu->mm);
	iommu_domain_free(mmu->domain);
	kfree(mmu);
}

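/*
 * Allocate and initialize an MMU context for @gpu, picking the v1 or v2
 * page table implementation based on the MMU_VERSION feature bit and
 * sizing the drm_mm allocator to the domain's aperture.
 */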
struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu)
{
	enum etnaviv_iommu_version version;
	struct etnaviv_iommu *mmu;

	mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
	if (!mmu)
		return ERR_PTR(-ENOMEM);

	if (!(gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)) {
		mmu->domain = etnaviv_iommuv1_domain_alloc(gpu);
		version = ETNAVIV_IOMMU_V1;
	} else {
		mmu->domain = etnaviv_iommuv2_domain_alloc(gpu);
		version = ETNAVIV_IOMMU_V2;
	}

	if (!mmu->domain) {
		dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
		kfree(mmu);
		return ERR_PTR(-ENOMEM);
	}

	mmu->gpu = gpu;
	mmu->version = version;
	mutex_init(&mmu->lock);
	INIT_LIST_HEAD(&mmu->mappings);

	drm_mm_init(&mmu->mm, mmu->domain->geometry.aperture_start,
		    mmu->domain->geometry.aperture_end -
		    mmu->domain->geometry.aperture_start + 1);

	iommu_set_fault_handler(mmu->domain, etnaviv_fault_handler, gpu->dev);

	return mmu;
}

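/* Reprogram the MMU configuration on the GPU, dispatching on the MMU version. */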
void etnaviv_iommu_restore(struct etnaviv_gpu *gpu)
{
	if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
		etnaviv_iommuv1_restore(gpu);
	else
		etnaviv_iommuv2_restore(gpu);
}

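/*
 * Return the GPU virtual address of a command buffer.  On MMUv1 this is
 * simply the physical address offset by memory_base; on MMUv2 the buffer
 * is mapped into the IOMMU on first use, with a 64K gap after it to keep
 * the FE prefetcher away from stale TLB entries.
 */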
u32 etnaviv_iommu_get_cmdbuf_va(struct etnaviv_gpu *gpu,
				struct etnaviv_cmdbuf *buf)
{
	struct etnaviv_iommu *mmu = gpu->mmu;

	if (mmu->version == ETNAVIV_IOMMU_V1) {
		return buf->paddr - gpu->memory_base;
	} else {
		int ret;

		if (buf->vram_node.allocated)
			return (u32)buf->vram_node.start;

		mutex_lock(&mmu->lock);
		ret = etnaviv_iommu_find_iova(mmu, &buf->vram_node,
					      buf->size + SZ_64K);
		if (ret < 0) {
			mutex_unlock(&mmu->lock);
			return 0;
		}
		ret = iommu_map(mmu->domain, buf->vram_node.start, buf->paddr,
				buf->size, IOMMU_READ);
		if (ret < 0) {
			drm_mm_remove_node(&buf->vram_node);
			mutex_unlock(&mmu->lock);
			return 0;
		}
		/*
		 * At least on GC3000 the FE MMU doesn't properly flush old TLB
		 * entries. Make sure to space the command buffers out in a way
		 * that the FE MMU prefetch won't load invalid entries.
		 */
		mmu->last_iova = buf->vram_node.start + buf->size + SZ_64K;
		gpu->mmu->need_flush = true;
		mutex_unlock(&mmu->lock);

		return (u32)buf->vram_node.start;
	}
}

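/* Release the MMUv2 command buffer mapping set up by etnaviv_iommu_get_cmdbuf_va(). */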
void etnaviv_iommu_put_cmdbuf_va(struct etnaviv_gpu *gpu,
				 struct etnaviv_cmdbuf *buf)
{
	struct etnaviv_iommu *mmu = gpu->mmu;

	if (mmu->version == ETNAVIV_IOMMU_V2 && buf->vram_node.allocated) {
		mutex_lock(&mmu->lock);
		iommu_unmap(mmu->domain, buf->vram_node.start, buf->size);
		drm_mm_remove_node(&buf->vram_node);
		mutex_unlock(&mmu->lock);
	}
}
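
/*
 * Ask the per-version IOMMU implementation how much space a dump of the
 * page tables will need.
 */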
size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
{
	struct etnaviv_iommu_ops *ops;

	ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);

	return ops->dump_size(iommu->domain);
}

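/* Dump the page tables into @buf via the per-version IOMMU implementation. */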
void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
{
	struct etnaviv_iommu_ops *ops;

	ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);

	ops->dump(iommu->domain, buf);
}