/*
 *  kernel/cpuset.c
 *
 *  Processor and Memory placement constraints for sets of tasks.
 *
 *  Copyright (C) 2003 BULL SA.
 *  Copyright (C) 2004 Silicon Graphics, Inc.
 *
 *  Portions derived from Patrick Mochel's sysfs code.
 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
 *  Portions Copyright (c) 2004 Silicon Graphics, Inc.
 *
 *  2003-10-10 Written by Simon Derr <simon.derr@bull.net>
 *  2003-10-22 Updates by Stephen Hemminger.
 *  2004 May-July Rework by Paul Jackson <pj@sgi.com>
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of the Linux
 *  distribution for more details.
 */

#include <linux/config.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/backing-dev.h>
#include <linux/sort.h>

#include <asm/uaccess.h>
#include <asm/atomic.h>
#include <asm/semaphore.h>

#define CPUSET_SUPER_MAGIC		0x27e0eb

/*
 * Tracks how many cpusets are currently defined in the system.
 * When there is only one cpuset (the root cpuset) we can
 * short circuit some hooks.
 */
int number_of_cpusets;

/* See "Frequency meter" comments, below. */

struct fmeter {
	int cnt;		/* unprocessed events count */
	int val;		/* most recent output value */
	time_t time;		/* clock (secs) when val computed */
	spinlock_t lock;	/* guards read or write of above */
};

struct cpuset {
	unsigned long flags;		/* "unsigned long" so bitops work */
	cpumask_t cpus_allowed;		/* CPUs allowed to tasks in cpuset */
	nodemask_t mems_allowed;	/* Memory Nodes allowed to tasks */

	/*
	 * Count is atomic so can incr (fork) or decr (exit) without a lock.
	 */
	atomic_t count;			/* count tasks using this cpuset */

	/*
	 * We link our 'sibling' struct into our parent's 'children'.
	 * Our children link their 'sibling' into our 'children'.
	 */
	struct list_head sibling;	/* my parent's children */
	struct list_head children;	/* my children */

	struct cpuset *parent;		/* my parent */
	struct dentry *dentry;		/* cpuset fs entry */

	/*
	 * Copy of global cpuset_mems_generation as of the most
	 * recent time this cpuset changed its mems_allowed.
	 */
	int mems_generation;

	struct fmeter fmeter;		/* memory_pressure filter */
};

/* bits in struct cpuset flags field */
typedef enum {
	CS_CPU_EXCLUSIVE,
	CS_MEM_EXCLUSIVE,
	CS_MEMORY_MIGRATE,
	CS_REMOVED,
	CS_NOTIFY_ON_RELEASE
} cpuset_flagbits_t;

/* convenient tests for these bits */
static inline int is_cpu_exclusive(const struct cpuset *cs)
{
	return !!test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_exclusive(const struct cpuset *cs)
{
	return !!test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
}

static inline int is_removed(const struct cpuset *cs)
{
	return !!test_bit(CS_REMOVED, &cs->flags);
}

static inline int notify_on_release(const struct cpuset *cs)
{
	return !!test_bit(CS_NOTIFY_ON_RELEASE, &cs->flags);
}

static inline int is_memory_migrate(const struct cpuset *cs)
{
	return !!test_bit(CS_MEMORY_MIGRATE, &cs->flags);
}

/*
 * Increment this atomic integer every time any cpuset changes its
 * mems_allowed value.  Users of cpusets can track this generation
 * number, and avoid having to lock and reload mems_allowed unless
 * the cpuset they're using changes generation.
 *
 * A single, global generation is needed because attach_task() could
 * reattach a task to a different cpuset, which must not have its
 * generation numbers aliased with those of that task's previous cpuset.
 *
 * Generations are needed for mems_allowed because one task cannot
 * modify another's memory placement.  So we must enable every task,
 * on every visit to __alloc_pages(), to efficiently check whether
 * its current->cpuset->mems_allowed has changed, requiring an update
 * of its current->mems_allowed.
 */
static atomic_t cpuset_mems_generation = ATOMIC_INIT(1);
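
/*
 * A minimal sketch (illustrative only) of the generation check a
 * task performs; the real version, with locking, is
 * cpuset_update_task_memory_state() below:
 *
 *	if (current->cpuset->mems_generation !=
 *	    current->cpuset_mems_generation)
 *		reload current->mems_allowed and the saved generation
 */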

static struct cpuset top_cpuset = {
	.flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)),
	.cpus_allowed = CPU_MASK_ALL,
	.mems_allowed = NODE_MASK_ALL,
	.count = ATOMIC_INIT(0),
	.sibling = LIST_HEAD_INIT(top_cpuset.sibling),
	.children = LIST_HEAD_INIT(top_cpuset.children),
};

static struct vfsmount *cpuset_mount;
static struct super_block *cpuset_sb;

/*
 * We have two global cpuset semaphores below.  They can nest.
 * It is ok to first take manage_sem, then nest callback_sem.  We also
 * require taking task_lock() when dereferencing a task's cpuset pointer.
 * See "The task_lock() exception", at the end of this comment.
 *
 * A task must hold both semaphores to modify cpusets.  If a task
 * holds manage_sem, then it blocks others wanting that semaphore,
 * ensuring that it is the only task able to also acquire callback_sem
 * and be able to modify cpusets.  It can perform various checks on
 * the cpuset structure first, knowing nothing will change.  It can
 * also allocate memory while just holding manage_sem.  While it is
 * performing these checks, various callback routines can briefly
 * acquire callback_sem to query cpusets.  Once it is ready to make
 * the changes, it takes callback_sem, blocking everyone else.
 *
 * Calls to the kernel memory allocator can not be made while holding
 * callback_sem, as that would risk double tripping on callback_sem
 * from one of the callbacks into the cpuset code from within
 * __alloc_pages().
 *
 * If a task is only holding callback_sem, then it has read-only
 * access to cpusets.
 *
 * The task_struct fields mems_allowed and mems_generation may only
 * be accessed in the context of that task, so require no locks.
 *
 * Any task can increment and decrement the count field without lock.
 * So in general, code holding manage_sem or callback_sem can't rely
 * on the count field not changing.  However, if the count goes to
 * zero, then only attach_task(), which holds both semaphores, can
 * increment it again.  Because a count of zero means that no tasks
 * are currently attached, there is no way a task attached
 * to that cpuset can fork (the other way to increment the count).
 * So code holding manage_sem or callback_sem can safely assume that
 * if the count is zero, it will stay zero.  Similarly, if a task
 * holds manage_sem or callback_sem on a cpuset with zero count, it
 * knows that the cpuset won't be removed, as cpuset_rmdir() needs
 * both of those semaphores.
 *
 * A possible optimization to improve parallelism would be to make
 * callback_sem a R/W semaphore (rwsem), allowing the callback routines
 * to proceed in parallel, with read access, until the holder of
 * manage_sem needed to take this rwsem for exclusive write access
 * and modify some cpusets.
 *
 * The cpuset_common_file_write handler for operations that modify
 * the cpuset hierarchy holds manage_sem across the entire operation,
 * single threading all such cpuset modifications across the system.
 *
 * The cpuset_common_file_read() handlers only hold callback_sem across
 * small pieces of code, such as when reading out possibly multi-word
 * cpumasks and nodemasks.
 *
 * The fork and exit callbacks, cpuset_fork() and cpuset_exit(), don't
 * (usually) take either semaphore.  These are the two most performance
 * critical pieces of code here.  The exception occurs on cpuset_exit(),
 * when a task in a notify_on_release cpuset exits.  Then manage_sem
 * is taken, and if the cpuset count is zero, a usermode call made
 * to /sbin/cpuset_release_agent with the name of the cpuset (path
 * relative to the root of cpuset file system) as the argument.
 *
 * A cpuset can only be deleted if both its 'count' of using tasks
 * is zero, and its list of 'children' cpusets is empty.  Since all
 * tasks in the system use _some_ cpuset, and since there is always at
 * least one task in the system (init, pid == 1), therefore, top_cpuset
 * always has children cpusets and/or using tasks.  So we don't
 * need a special hack to ensure that top_cpuset cannot be deleted.
 *
 * The above "Tale of Two Semaphores" would be complete, but for:
 *
 *	The task_lock() exception
 *
 * The need for this exception arises from the action of attach_task(),
 * which overwrites one task's cpuset pointer with another.  It does
 * so using both semaphores, however there are several performance
 * critical places that need to reference task->cpuset without the
 * expense of grabbing a system global semaphore.  Therefore except as
 * noted below, when dereferencing or, as in attach_task(), modifying
 * a task's cpuset pointer we use task_lock(), which acts on a spinlock
 * (task->alloc_lock) already in the task_struct routinely used for
 * such matters.
 *
 * P.S.  One more locking exception.  RCU is used to guard the
 * update of a task's cpuset pointer by attach_task() and the
 * access of task->cpuset->mems_generation via that pointer in
 * the routine cpuset_update_task_memory_state().
 */

static DECLARE_MUTEX(manage_sem);
static DECLARE_MUTEX(callback_sem);
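
/*
 * A minimal sketch (illustrative only) of the modify-side pattern
 * described above; real writers also validate the request and
 * handle any errors:
 *
 *	down(&manage_sem);
 *	... perform checks, allocate memory ...
 *	down(&callback_sem);
 *	... update cpus_allowed, mems_allowed or flags ...
 *	up(&callback_sem);
 *	... propagate side effects, e.g. rebind mempolicies ...
 *	up(&manage_sem);
 */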

/*
 * A couple of forward declarations are required, due to a cyclic reference loop:
 *  cpuset_mkdir -> cpuset_create -> cpuset_populate_dir -> cpuset_add_file
 *  -> cpuset_create_file -> cpuset_dir_inode_operations -> cpuset_mkdir.
 */

static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode);
static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry);

static struct backing_dev_info cpuset_backing_dev_info = {
	.ra_pages = 0,		/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
};

static struct inode *cpuset_new_inode(mode_t mode)
{
	struct inode *inode = new_inode(cpuset_sb);

	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current->fsuid;
		inode->i_gid = current->fsgid;
		inode->i_blksize = PAGE_CACHE_SIZE;
		inode->i_blocks = 0;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_mapping->backing_dev_info = &cpuset_backing_dev_info;
	}
	return inode;
}

static void cpuset_diput(struct dentry *dentry, struct inode *inode)
{
	/* is dentry a directory ? if so, kfree() associated cpuset */
	if (S_ISDIR(inode->i_mode)) {
		struct cpuset *cs = dentry->d_fsdata;
		BUG_ON(!(is_removed(cs)));
		kfree(cs);
	}
	iput(inode);
}

static struct dentry_operations cpuset_dops = {
	.d_iput = cpuset_diput,
};

static struct dentry *cpuset_get_dentry(struct dentry *parent, const char *name)
{
	struct dentry *d = lookup_one_len(name, parent, strlen(name));
	if (!IS_ERR(d))
		d->d_op = &cpuset_dops;
	return d;
}

static void remove_dir(struct dentry *d)
{
	struct dentry *parent = dget(d->d_parent);

	d_delete(d);
	simple_rmdir(parent->d_inode, d);
	dput(parent);
}

/*
 * NOTE: the dentry must have been dget()'ed
 */
static void cpuset_d_remove_dir(struct dentry *dentry)
{
	struct list_head *node;

	spin_lock(&dcache_lock);
	node = dentry->d_subdirs.next;
	while (node != &dentry->d_subdirs) {
		struct dentry *d = list_entry(node, struct dentry, d_child);
		list_del_init(node);
		if (d->d_inode) {
			d = dget_locked(d);
			spin_unlock(&dcache_lock);
			d_delete(d);
			simple_unlink(dentry->d_inode, d);
			dput(d);
			spin_lock(&dcache_lock);
		}
		node = dentry->d_subdirs.next;
	}
	list_del_init(&dentry->d_child);
	spin_unlock(&dcache_lock);
	remove_dir(dentry);
}

static struct super_operations cpuset_ops = {
	.statfs = simple_statfs,
	.drop_inode = generic_delete_inode,
};

static int cpuset_fill_super(struct super_block *sb, void *unused_data,
							int unused_silent)
{
	struct inode *inode;
	struct dentry *root;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = CPUSET_SUPER_MAGIC;
	sb->s_op = &cpuset_ops;
	cpuset_sb = sb;

	inode = cpuset_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR);
	if (inode) {
		inode->i_op = &simple_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directories start off with i_nlink == 2 (for "." entry) */
		inode->i_nlink++;
	} else {
		return -ENOMEM;
	}

	root = d_alloc_root(inode);
	if (!root) {
		iput(inode);
		return -ENOMEM;
	}
	sb->s_root = root;
	return 0;
}

static struct super_block *cpuset_get_sb(struct file_system_type *fs_type,
					int flags, const char *unused_dev_name,
					void *data)
{
	return get_sb_single(fs_type, flags, data, cpuset_fill_super);
}

static struct file_system_type cpuset_fs_type = {
	.name = "cpuset",
	.get_sb = cpuset_get_sb,
	.kill_sb = kill_litter_super,
};

/* struct cftype:
 *
 * The files in the cpuset filesystem mostly have very simple read/write
 * handling; a common function takes care of them.  Nevertheless, some
 * cases (such as reading the 'tasks' file) are special, so this structure
 * is defined for every kind of file.
 *
 * When reading/writing to a file:
 *	- the cpuset to use is in file->f_dentry->d_parent->d_fsdata
 *	- the 'cftype' of the file is in file->f_dentry->d_fsdata
 */

struct cftype {
	char *name;
	int private;
	int (*open) (struct inode *inode, struct file *file);
	ssize_t (*read) (struct file *file, char __user *buf, size_t nbytes,
							loff_t *ppos);
	int (*write) (struct file *file, const char __user *buf, size_t nbytes,
							loff_t *ppos);
	int (*release) (struct inode *inode, struct file *file);
};

static inline struct cpuset *__d_cs(struct dentry *dentry)
{
	return dentry->d_fsdata;
}

static inline struct cftype *__d_cft(struct dentry *dentry)
{
	return dentry->d_fsdata;
}

/*
 * Call with manage_sem held.  Writes path of cpuset into buf.
 * Returns 0 on success, -errno on error.
 */

static int cpuset_path(const struct cpuset *cs, char *buf, int buflen)
{
	char *start;

	start = buf + buflen;

	*--start = '\0';
	for (;;) {
		int len = cs->dentry->d_name.len;
		if ((start -= len) < buf)
			return -ENAMETOOLONG;
		memcpy(start, cs->dentry->d_name.name, len);
		cs = cs->parent;
		if (!cs)
			break;
		if (!cs->parent)
			continue;
		if (--start < buf)
			return -ENAMETOOLONG;
		*start = '/';
	}
	memmove(buf, start, buf + buflen - start);
	return 0;
}
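
/*
 * For example (illustrative): for a cpuset "b" created inside a
 * top-level cpuset "a", cpuset_path() fills buf with "/a/b" (the
 * leading "/" is the name of the root dentry).
 */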

/*
 * Notify userspace when a cpuset is released, by running
 * /sbin/cpuset_release_agent with the name of the cpuset (path
 * relative to the root of cpuset file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cpuset.
 *
 * This races with the possibility that some other task will be
 * attached to this cpuset before it is removed, or that some other
 * user task will 'mkdir' a child cpuset of this cpuset.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cpuset is no longer
 * unused, and this cpuset will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is 0, which means don't
 * wait.  The separate /sbin/cpuset_release_agent task is forked by
 * call_usermodehelper(), then control in this thread returns here,
 * without waiting for the release agent task.  We don't bother to
 * wait because the caller of this routine has no use for the exit
 * status of the /sbin/cpuset_release_agent task, so no sense holding
 * our caller up for that.
 *
 * When we had only one cpuset semaphore, we had to call this
 * without holding it, to avoid deadlock when call_usermodehelper()
 * allocated memory.  With two locks, we could now call this while
 * holding manage_sem, but we still don't, so as to minimize
 * the time manage_sem is held.
 */

static void cpuset_release_agent(const char *pathbuf)
{
	char *argv[3], *envp[3];
	int i;

	if (!pathbuf)
		return;

	i = 0;
	argv[i++] = "/sbin/cpuset_release_agent";
	argv[i++] = (char *)pathbuf;
	argv[i] = NULL;

	i = 0;
	/* minimal command environment */
	envp[i++] = "HOME=/";
	envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
	envp[i] = NULL;

	call_usermodehelper(argv[0], argv, envp, 0);
	kfree(pathbuf);
}

/*
 * Either cs->count of using tasks transitioned to zero, or the
 * cs->children list of child cpusets just became empty.  If this
 * cs is notify_on_release() and now both the user count is zero and
 * the list of children is empty, prepare cpuset path in a kmalloc'd
 * buffer, to be returned via ppathbuf, so that the caller can invoke
 * cpuset_release_agent() with it later on, once manage_sem is dropped.
 * Call here with manage_sem held.
 *
 * This check_for_release() routine is responsible for kmalloc'ing
 * pathbuf.  The above cpuset_release_agent() is responsible for
 * kfree'ing pathbuf.  The caller of these routines is responsible
 * for providing a pathbuf pointer, initialized to NULL, then
 * calling check_for_release() with manage_sem held and the address
 * of the pathbuf pointer, then dropping manage_sem, then calling
 * cpuset_release_agent() with pathbuf, as set by check_for_release().
 */
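
/*
 * A minimal sketch (illustrative only) of that caller protocol:
 *
 *	char *pathbuf = NULL;
 *
 *	down(&manage_sem);
 *	... detach a task or remove a child cpuset ...
 *	check_for_release(cs, &pathbuf);
 *	up(&manage_sem);
 *	cpuset_release_agent(pathbuf);
 */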

static void check_for_release(struct cpuset *cs, char **ppathbuf)
{
	if (notify_on_release(cs) && atomic_read(&cs->count) == 0 &&
	    list_empty(&cs->children)) {
		char *buf;

		buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!buf)
			return;
		if (cpuset_path(cs, buf, PAGE_SIZE) < 0)
			kfree(buf);
		else
			*ppathbuf = buf;
	}
}

/*
 * Return in *pmask the portion of a cpuset's cpus_allowed that
 * are online.  If none are online, walk up the cpuset hierarchy
 * until we find one that does have some online cpus.  If we get
 * all the way to the top and still haven't found any online cpus,
 * return cpu_online_map.  Or if passed a NULL cs from an exit'ing
 * task, return cpu_online_map.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of cpu_online_map.
 *
 * Call with callback_sem held.
 */

static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask)
{
	while (cs && !cpus_intersects(cs->cpus_allowed, cpu_online_map))
		cs = cs->parent;
	if (cs)
		cpus_and(*pmask, cs->cpus_allowed, cpu_online_map);
	else
		*pmask = cpu_online_map;
	BUG_ON(!cpus_intersects(*pmask, cpu_online_map));
}

/*
 * Return in *pmask the portion of a cpuset's mems_allowed that
 * are online.  If none are online, walk up the cpuset hierarchy
 * until we find one that does have some online mems.  If we get
 * all the way to the top and still haven't found any online mems,
 * return node_online_map.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of node_online_map.
 *
 * Call with callback_sem held.
 */

static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
{
	while (cs && !nodes_intersects(cs->mems_allowed, node_online_map))
		cs = cs->parent;
	if (cs)
		nodes_and(*pmask, cs->mems_allowed, node_online_map);
	else
		*pmask = node_online_map;
	BUG_ON(!nodes_intersects(*pmask, node_online_map));
}

/**
 * cpuset_update_task_memory_state - update task memory placement
 *
 * If the current task's cpuset's mems_allowed changed behind our
 * backs, update current->mems_allowed, mems_generation and task NUMA
 * mempolicy to the new value.
 *
 * Task mempolicy is updated by rebinding it relative to the
 * current->cpuset if a task has its memory placement changed.
 * Do not call this routine if in_interrupt().
 *
 * Call without callback_sem or task_lock() held.  May be called
 * with or without manage_sem held.  Doesn't need task_lock to guard
 * against another task changing a non-NULL cpuset pointer to NULL,
 * as that is only done by a task on itself, and if the current task
 * is here, it is not simultaneously in the exit code NULL'ing its
 * cpuset pointer.  This routine also might acquire callback_sem and
 * current->mm->mmap_sem during call.
 *
 * Reading current->cpuset->mems_generation doesn't need task_lock
 * to guard the current->cpuset dereference, because it is guarded
 * from concurrent freeing of current->cpuset by attach_task(),
 * using RCU.
 *
 * The rcu_dereference() is technically probably not needed,
 * as I don't actually mind if I see a new cpuset pointer but
 * an old value of mems_generation.  However this really only
 * matters on alpha systems using cpusets heavily.  If I dropped
 * that rcu_dereference(), it would save them a memory barrier.
 * For all other arch's, rcu_dereference is a no-op anyway, and for
 * alpha systems not using cpusets, another planned optimization,
 * avoiding the rcu critical section for tasks in the root cpuset
 * which is statically allocated, so can't vanish, will make this
 * irrelevant.  Better to use RCU as intended, than to engage in
 * some cute trick to save a memory barrier that is impossible to
 * test, for alpha systems using cpusets heavily, which might not
 * even exist.
 *
 * This routine is needed to update the per-task mems_allowed data,
 * within the task's context, when it is trying to allocate memory
 * (in various mm/mempolicy.c routines) and notices that some other
 * task has been modifying its cpuset.
 */

void cpuset_update_task_memory_state(void)
{
	int my_cpusets_mem_gen;
	struct task_struct *tsk = current;
	struct cpuset *cs;

	rcu_read_lock();
	cs = rcu_dereference(tsk->cpuset);
	my_cpusets_mem_gen = cs->mems_generation;
	rcu_read_unlock();

	if (my_cpusets_mem_gen != tsk->cpuset_mems_generation) {
		down(&callback_sem);
		task_lock(tsk);
		cs = tsk->cpuset;	/* Maybe changed when task not locked */
		guarantee_online_mems(cs, &tsk->mems_allowed);
		tsk->cpuset_mems_generation = cs->mems_generation;
		task_unlock(tsk);
		up(&callback_sem);
		mpol_rebind_task(tsk, &tsk->mems_allowed);
	}
}

/*
 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
 *
 * One cpuset is a subset of another if all its allowed CPUs and
 * Memory Nodes are a subset of the other, and its exclusive flags
 * are only set if the other's are set.  Call holding manage_sem.
 */

static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
{
	return	cpus_subset(p->cpus_allowed, q->cpus_allowed) &&
		nodes_subset(p->mems_allowed, q->mems_allowed) &&
		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
		is_mem_exclusive(p) <= is_mem_exclusive(q);
}

/*
 * validate_change() - Used to validate that any proposed cpuset change
 *		       follows the structural rules for cpusets.
 *
 * If we replaced the flag and mask values of the current cpuset
 * (cur) with those values in the trial cpuset (trial), would
 * our various subset and exclusive rules still be valid?  Presumes
 * manage_sem held.
 *
 * 'cur' is the address of an actual, in-use cpuset.  Operations
 * such as list traversal that depend on the actual address of the
 * cpuset in the list must use cur below, not trial.
 *
 * 'trial' is the address of bulk structure copy of cur, with
 * perhaps one or more of the fields cpus_allowed, mems_allowed,
 * or flags changed to new, trial values.
 *
 * Return 0 if valid, -errno if not.
 */

static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
{
	struct cpuset *c, *par;

	/* Each of our child cpusets must be a subset of us */
	list_for_each_entry(c, &cur->children, sibling) {
		if (!is_cpuset_subset(c, trial))
			return -EBUSY;
	}

	/* Remaining checks don't apply to root cpuset */
	if ((par = cur->parent) == NULL)
		return 0;

	/* We must be a subset of our parent cpuset */
	if (!is_cpuset_subset(trial, par))
		return -EACCES;

	/* If either I or some sibling (!= me) is exclusive, we can't overlap */
	list_for_each_entry(c, &par->children, sibling) {
		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
		    c != cur &&
		    cpus_intersects(trial->cpus_allowed, c->cpus_allowed))
			return -EINVAL;
		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
		    c != cur &&
		    nodes_intersects(trial->mems_allowed, c->mems_allowed))
			return -EINVAL;
	}

	return 0;
}
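
/*
 * Example of the exclusive-sibling rule above (illustrative): if
 * sibling cpuset "a" is cpu_exclusive with cpus 0-3, a request to
 * give sibling "b" cpus 2-5 fails validate_change() with -EINVAL,
 * since the two cpus_allowed masks would intersect.
 */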

/*
 * For a given cpuset cur, partition the system as follows
 * a. All cpus in the parent cpuset's cpus_allowed that are not part of any
 *    exclusive child cpusets
 * b. All cpus in the current cpuset's cpus_allowed that are not part of any
 *    exclusive child cpusets
 * Build these two partitions by calling partition_sched_domains
 *
 * Call with manage_sem held.  May nest a call to the
 * lock_cpu_hotplug()/unlock_cpu_hotplug() pair.
 */

static void update_cpu_domains(struct cpuset *cur)
{
	struct cpuset *c, *par = cur->parent;
	cpumask_t pspan, cspan;

	if (par == NULL || cpus_empty(cur->cpus_allowed))
		return;

	/*
	 * Get all cpus from parent's cpus_allowed not part of exclusive
	 * children
	 */
	pspan = par->cpus_allowed;
	list_for_each_entry(c, &par->children, sibling) {
		if (is_cpu_exclusive(c))
			cpus_andnot(pspan, pspan, c->cpus_allowed);
	}
	if (is_removed(cur) || !is_cpu_exclusive(cur)) {
		cpus_or(pspan, pspan, cur->cpus_allowed);
		if (cpus_equal(pspan, cur->cpus_allowed))
			return;
		cspan = CPU_MASK_NONE;
	} else {
		if (cpus_empty(pspan))
			return;
		cspan = cur->cpus_allowed;
		/*
		 * Get all cpus from current cpuset's cpus_allowed not part
		 * of exclusive children
		 */
		list_for_each_entry(c, &cur->children, sibling) {
			if (is_cpu_exclusive(c))
				cpus_andnot(cspan, cspan, c->cpus_allowed);
		}
	}

	lock_cpu_hotplug();
	partition_sched_domains(&pspan, &cspan);
	unlock_cpu_hotplug();
}

/*
 * Call with manage_sem held.  May take callback_sem during call.
 */

static int update_cpumask(struct cpuset *cs, char *buf)
{
	struct cpuset trialcs;
	int retval, cpus_unchanged;

	trialcs = *cs;
	retval = cpulist_parse(buf, trialcs.cpus_allowed);
	if (retval < 0)
		return retval;
	cpus_and(trialcs.cpus_allowed, trialcs.cpus_allowed, cpu_online_map);
	if (cpus_empty(trialcs.cpus_allowed))
		return -ENOSPC;
	retval = validate_change(cs, &trialcs);
	if (retval < 0)
		return retval;
	cpus_unchanged = cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed);
	down(&callback_sem);
	cs->cpus_allowed = trialcs.cpus_allowed;
	up(&callback_sem);
	if (is_cpu_exclusive(cs) && !cpus_unchanged)
		update_cpu_domains(cs);
	return 0;
}

/*
 * Handle user request to change the 'mems' memory placement
 * of a cpuset.  Needs to validate the request, update the
 * cpuset's mems_allowed and mems_generation, and for each
 * task in the cpuset, rebind any vma mempolicies and if
 * the cpuset is marked 'memory_migrate', migrate the tasks
 * pages to the new memory.
 *
 * Call with manage_sem held.  May take callback_sem during call.
 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
 * lock each such task's mm->mmap_sem, scan its vma's and rebind
 * their mempolicies to the cpuset's new mems_allowed.
 */

static int update_nodemask(struct cpuset *cs, char *buf)
{
	struct cpuset trialcs;
	nodemask_t oldmem;
	struct task_struct *g, *p;
	struct mm_struct **mmarray;
	int i, n, ntasks;
	int migrate;
	int fudge;
	int retval;

	trialcs = *cs;
	retval = nodelist_parse(buf, trialcs.mems_allowed);
	if (retval < 0)
		goto done;
	nodes_and(trialcs.mems_allowed, trialcs.mems_allowed, node_online_map);
	oldmem = cs->mems_allowed;
	if (nodes_equal(oldmem, trialcs.mems_allowed)) {
		retval = 0;		/* Too easy - nothing to do */
		goto done;
	}
	if (nodes_empty(trialcs.mems_allowed)) {
		retval = -ENOSPC;
		goto done;
	}
	retval = validate_change(cs, &trialcs);
	if (retval < 0)
		goto done;

	down(&callback_sem);
	cs->mems_allowed = trialcs.mems_allowed;
	atomic_inc(&cpuset_mems_generation);
	cs->mems_generation = atomic_read(&cpuset_mems_generation);
	up(&callback_sem);

	set_cpuset_being_rebound(cs);		/* causes mpol_copy() rebind */

	fudge = 10;				/* spare mmarray[] slots */
	fudge += cpus_weight(cs->cpus_allowed);	/* imagine one fork-bomb/cpu */
	retval = -ENOMEM;

	/*
	 * Allocate mmarray[] to hold mm reference for each task
	 * in cpuset cs.  Can't kmalloc GFP_KERNEL while holding
	 * tasklist_lock.  We could use GFP_ATOMIC, but with a
	 * few more lines of code, we can retry until we get a big
	 * enough mmarray[] w/o using GFP_ATOMIC.
	 */
	while (1) {
		ntasks = atomic_read(&cs->count);	/* guess */
		ntasks += fudge;
		mmarray = kmalloc(ntasks * sizeof(*mmarray), GFP_KERNEL);
		if (!mmarray)
			goto done;
		write_lock_irq(&tasklist_lock);		/* block fork */
		if (atomic_read(&cs->count) <= ntasks)
			break;				/* got enough */
		write_unlock_irq(&tasklist_lock);	/* try again */
		kfree(mmarray);
	}

	n = 0;

	/* Load up mmarray[] with mm reference for each task in cpuset. */
	do_each_thread(g, p) {
		struct mm_struct *mm;

		if (n >= ntasks) {
			printk(KERN_WARNING
				"Cpuset mempolicy rebind incomplete.\n");
			continue;
		}
		if (p->cpuset != cs)
			continue;
		mm = get_task_mm(p);
		if (!mm)
			continue;
		mmarray[n++] = mm;
	} while_each_thread(g, p);
	write_unlock_irq(&tasklist_lock);

	/*
	 * Now that we've dropped the tasklist spinlock, we can
	 * rebind the vma mempolicies of each mm in mmarray[] to their
	 * new cpuset, and release that mm.  The mpol_rebind_mm()
	 * call takes mmap_sem, which we couldn't take while holding
	 * tasklist_lock.  Forks can happen again now - the mpol_copy()
	 * cpuset_being_rebound check will catch such forks, and rebind
	 * their vma mempolicies too.  Because we still hold the global
	 * cpuset manage_sem, we know that no other rebind effort will
	 * be contending for the global variable cpuset_being_rebound.
	 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
	 * is idempotent.  Also migrate pages in each mm to new nodes.
	 */
	migrate = is_memory_migrate(cs);
	for (i = 0; i < n; i++) {
		struct mm_struct *mm = mmarray[i];

		mpol_rebind_mm(mm, &cs->mems_allowed);
		if (migrate) {
			do_migrate_pages(mm, &oldmem, &cs->mems_allowed,
							MPOL_MF_MOVE_ALL);
		}
		mmput(mm);
	}

	/* We're done rebinding vma's to this cpusets new mems_allowed. */
	kfree(mmarray);
	set_cpuset_being_rebound(NULL);
	retval = 0;
done:
	return retval;
}

/*
 * Call with manage_sem held.
 */

static int update_memory_pressure_enabled(struct cpuset *cs, char *buf)
{
	if (simple_strtoul(buf, NULL, 10) != 0)
		cpuset_memory_pressure_enabled = 1;
	else
		cpuset_memory_pressure_enabled = 0;
	return 0;
}

/*
 * update_flag - read a 0 or a 1 in a file and update associated flag
 * bit:	the bit to update (CS_CPU_EXCLUSIVE, CS_MEM_EXCLUSIVE,
 *				CS_NOTIFY_ON_RELEASE, CS_MEMORY_MIGRATE)
 * cs:	the cpuset to update
 * buf:	the buffer where we read the 0 or 1
 *
 * Call with manage_sem held.
 */
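
/*
 * For example (illustrative, assuming the conventional /dev/cpuset
 * mount point): "echo 1 > /dev/cpuset/foo/cpu_exclusive" reaches
 * this routine as update_flag(CS_CPU_EXCLUSIVE, cs, "1").
 */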

static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
{
	int turning_on;
	struct cpuset trialcs;
	int err, cpu_exclusive_changed;

	turning_on = (simple_strtoul(buf, NULL, 10) != 0);

	trialcs = *cs;
	if (turning_on)
		set_bit(bit, &trialcs.flags);
	else
		clear_bit(bit, &trialcs.flags);

	err = validate_change(cs, &trialcs);
	if (err < 0)
		return err;
	cpu_exclusive_changed =
		(is_cpu_exclusive(cs) != is_cpu_exclusive(&trialcs));
	down(&callback_sem);
	if (turning_on)
		set_bit(bit, &cs->flags);
	else
		clear_bit(bit, &cs->flags);
	up(&callback_sem);

	if (cpu_exclusive_changed)
		update_cpu_domains(cs);
	return 0;
}

/*
 * Frequency meter - How fast is some event occurring?