Commit 7d3b56ba authored by Linus Torvalds


Merge branch 'cpus4096-for-linus-3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'cpus4096-for-linus-3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (77 commits)
  x86: setup_per_cpu_areas() cleanup
  cpumask: fix compile error when CONFIG_NR_CPUS is not defined
  cpumask: use alloc_cpumask_var_node where appropriate
  cpumask: convert shared_cpu_map in acpi_processor* structs to cpumask_var_t
  x86: use cpumask_var_t in acpi/boot.c
  x86: cleanup some remaining usages of NR_CPUS where s/b nr_cpu_ids
  sched: put back some stack hog changes that were undone in kernel/sched.c
  x86: enable cpus display of kernel_max and offlined cpus
  ia64: cpumask fix for is_affinity_mask_valid()
  cpumask: convert RCU implementations, fix
  xtensa: define __fls
  mn10300: define __fls
  m32r: define __fls
  h8300: define __fls
  frv: define __fls
  cris: define __fls
  cpumask: CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
  cpumask: zero extra bits in alloc_cpumask_var_node
  cpumask: replace for_each_cpu_mask_nr with for_each_cpu in kernel/time/
  cpumask: convert mm/
  ...
parents 269b0123 ab14398a
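The recurring pattern across the series above is replacing fixed-size on-stack cpumask_t variables (512 bytes each when CONFIG_NR_CPUS=4096) with dynamically allocated cpumask_var_t, and moving from the old cpus_*/NR_CPUS helpers to the cpumask_*/nr_cpu_ids API. A minimal sketch of that conversion follows; the function name and logic are illustrative only and are not taken from any of the patches in this merge.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/slab.h>

/* Illustrative only: count the online cpus inside a given domain mask,
 * using a heap-allocated scratch mask instead of an on-stack cpumask_t. */
static int example_count_online_in(const struct cpumask *domain)
{
	cpumask_var_t tmp;
	int n;

	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))	/* was: cpumask_t tmp; */
		return -ENOMEM;

	cpumask_and(tmp, domain, cpu_online_mask);	/* was: cpus_and(tmp, domain, cpu_online_map); */
	n = cpumask_weight(tmp);			/* was: cpus_weight(tmp); */

	free_cpumask_var(tmp);
	return n;
}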
......@@ -31,3 +31,51 @@ not defined by include/asm-XXX/topology.h:
2) core_id: 0
3) thread_siblings: just the given CPU
4) core_siblings: just the given CPU
Additionally, cpu topology information is provided under
/sys/devices/system/cpu and includes these files. The internal
source for the output is in brackets ("[]").

    kernel_max: the maximum cpu index allowed by the kernel configuration.
                [NR_CPUS-1]

    offline:    cpus that are not online because they have been
                HOTPLUGGED off (see cpu-hotplug.txt) or exceed the limit
                of cpus allowed by the kernel configuration (kernel_max
                above). [~cpu_online_mask + cpus >= NR_CPUS]

    online:     cpus that are online and being scheduled [cpu_online_mask]

    possible:   cpus that have been allocated resources and can be
                brought online if they are present. [cpu_possible_mask]

    present:    cpus that have been identified as being present in the
                system. [cpu_present_mask]

The format for the above output is compatible with cpulist_parse()
[see <linux/cpumask.h>]. Some examples follow.

In this example, there are 64 cpus in the system but cpus 32-63 exceed
the kernel max which is limited to 0..31 by the NR_CPUS config option
being 32. Note also that cpus 2 and 4-31 are not online but could be
brought online as they are both present and possible.

    kernel_max: 31
       offline: 2,4-31,32-63
        online: 0-1,3
      possible: 0-31
       present: 0-31

In this example, the NR_CPUS config option is 128, but the kernel was
started with possible_cpus=144. There are 4 cpus in the system and cpu2
was manually taken offline (and is the only cpu that can be brought
online.)

    kernel_max: 127
       offline: 2,4-127,128-143
        online: 0-1,3
      possible: 0-127
       present: 0-3

See cpu-hotplug.txt for the possible_cpus=NUM kernel start parameter
as well as more information on the various cpumasks.
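The cpulist strings documented above ("0-1,3", "2,4-31,32-63") are the format cpulist_parse() accepts on the kernel side. Below is a minimal sketch of parsing such a string into a cpumask_var_t and checking it against cpu_possible_mask; the function name and error handling are illustrative assumptions, not part of this commit.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/slab.h>

/* Illustrative only: parse a "0-1,3"-style cpulist and reject masks
 * that name cpus outside cpu_possible_mask. */
static int example_parse_cpulist(const char *buf)
{
	cpumask_var_t new_mask;
	int err;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	err = cpulist_parse(buf, new_mask);
	if (!err && !cpumask_subset(new_mask, cpu_possible_mask))
		err = -EINVAL;

	free_cpumask_var(new_mask);
	return err;
}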
......@@ -39,7 +39,24 @@ static inline cpumask_t node_to_cpumask(int node)
return node_cpu_mask;
}
extern struct cpumask node_to_cpumask_map[];
/* FIXME: This is dumb, recalculating every time. But simple. */
static const struct cpumask *cpumask_of_node(int node)
{
int cpu;
cpumask_clear(&node_to_cpumask_map[node]);
for_each_online_cpu(cpu) {
if (cpu_to_node(cpu) == node)
cpumask_set_cpu(cpu, &node_to_cpumask_map[node]);
}
return &node_to_cpumask_map[node];
}
#define pcibus_to_cpumask(bus) (cpu_online_map)
#define cpumask_of_pcibus(bus) (cpu_online_mask)
#endif /* !CONFIG_NUMA */
# include <asm-generic/topology.h>
......
......@@ -50,7 +50,8 @@ int irq_select_affinity(unsigned int irq)
if (!irq_desc[irq].chip->set_affinity || irq_user_affinity[irq])
return 1;
while (!cpu_possible(cpu) || !cpu_isset(cpu, irq_default_affinity))
while (!cpu_possible(cpu) ||
!cpumask_test_cpu(cpu, irq_default_affinity))
cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
last_cpu = cpu;
......
......@@ -79,6 +79,11 @@ int alpha_l3_cacheshape;
unsigned long alpha_verbose_mcheck = CONFIG_VERBOSE_MCHECK_ON;
#endif
#ifdef CONFIG_NUMA
struct cpumask node_to_cpumask_map[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_to_cpumask_map);
#endif
/* Which processor we booted from. */
int boot_cpuid;
......
......@@ -263,6 +263,11 @@ static inline int fls(unsigned long word)
return 32 - result;
}
static inline int __fls(unsigned long word)
{
return fls(word) - 1;
}
unsigned long find_first_zero_bit(const unsigned long *addr,
unsigned long size);
unsigned long find_next_zero_bit(const unsigned long *addr,
......
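This hunk, like the xtensa/mn10300/m32r/h8300/frv/cris commits in the list above, adds a trivial __fls() defined in terms of fls(): __fls(word) is the 0-based index of the most significant set bit, i.e. fls(word) - 1, and is undefined for word == 0. A tiny stand-alone C model of that relationship (purely illustrative, not kernel code):

#include <assert.h>

/* model_fls(): 1-based index of the highest set bit, 0 for 0 --
 * the same contract as the kernel's fls(). */
static int model_fls(unsigned long w)
{
	int n = 0;

	while (w) {
		n++;
		w >>= 1;
	}
	return n;
}

/* model___fls(): 0-based index of the highest set bit; as with the
 * kernel's __fls(), the result is undefined for w == 0. */
static unsigned long model___fls(unsigned long w)
{
	return model_fls(w) - 1;
}

int main(void)
{
	assert(model_fls(1UL) == 1);
	assert(model___fls(1UL) == 0);
	assert(model___fls(0x80UL) == 7);	/* bit 7 is the highest set bit */
	return 0;
}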
......@@ -213,6 +213,7 @@ static __inline__ int __test_bit(int nr, const void *addr)
#endif /* __KERNEL__ */
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#endif /* _BLACKFIN_BITOPS_H */
......@@ -148,6 +148,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
#define ffs kernel_ffs
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/find.h>
......
......@@ -207,6 +207,7 @@ static __inline__ unsigned long __ffs(unsigned long word)
#endif /* __KERNEL__ */
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#endif /* _H8300_BITOPS_H */
......@@ -27,7 +27,7 @@ irq_canonicalize (int irq)
}
extern void set_irq_affinity_info (unsigned int irq, int dest, int redir);
bool is_affinity_mask_valid(cpumask_t cpumask);
bool is_affinity_mask_valid(cpumask_var_t cpumask);
#define is_affinity_mask_valid is_affinity_mask_valid
......
......@@ -34,6 +34,7 @@
* Returns a bitmask of CPUs on Node 'node'.
*/
#define node_to_cpumask(node) (node_to_cpu_mask[node])
#define cpumask_of_node(node) (&node_to_cpu_mask[node])
/*
* Returns the number of the node containing Node 'nid'.
......@@ -45,7 +46,7 @@
/*
* Returns the number of the first CPU on Node 'node'.
*/
#define node_to_first_cpu(node) (first_cpu(node_to_cpumask(node)))
#define node_to_first_cpu(node) (cpumask_first(cpumask_of_node(node)))
/*
* Determines the node for a given pci bus
......@@ -109,6 +110,8 @@ void build_cpu_to_node_map(void);
#define topology_core_id(cpu) (cpu_data(cpu)->core_id)
#define topology_core_siblings(cpu) (cpu_core_map[cpu])
#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
#define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
#define smt_capable() (smp_num_siblings > 1)
#endif
......@@ -119,6 +122,10 @@ extern void arch_fix_phys_package_id(int num, u32 slot);
node_to_cpumask(pcibus_to_node(bus)) \
)
#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \
cpu_all_mask : \
cpumask_from_node(pcibus_to_node(bus)))
#include <asm-generic/topology.h>
#endif /* _ASM_IA64_TOPOLOGY_H */
......@@ -202,7 +202,6 @@ char *__init __acpi_map_table(unsigned long phys_addr, unsigned long size)
Boot-time Table Parsing
-------------------------------------------------------------------------- */
static int total_cpus __initdata;
static int available_cpus __initdata;
struct acpi_table_madt *acpi_madt __initdata;
static u8 has_8259;
......@@ -1001,7 +1000,7 @@ acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
node = pxm_to_node(pxm);
if (node >= MAX_NUMNODES || !node_online(node) ||
cpus_empty(node_to_cpumask(node)))
cpumask_empty(cpumask_of_node(node)))
return AE_OK;
/* We know a gsi to node mapping! */
......
......@@ -695,32 +695,31 @@ get_target_cpu (unsigned int gsi, int irq)
#ifdef CONFIG_NUMA
{
int num_cpus, cpu_index, iosapic_index, numa_cpu, i = 0;
cpumask_t cpu_mask;
const struct cpumask *cpu_mask;
iosapic_index = find_iosapic(gsi);
if (iosapic_index < 0 ||
iosapic_lists[iosapic_index].node == MAX_NUMNODES)
goto skip_numa_setup;
cpu_mask = node_to_cpumask(iosapic_lists[iosapic_index].node);
cpus_and(cpu_mask, cpu_mask, domain);
for_each_cpu_mask(numa_cpu, cpu_mask) {
if (!cpu_online(numa_cpu))
cpu_clear(numa_cpu, cpu_mask);
cpu_mask = cpumask_of_node(iosapic_lists[iosapic_index].node);
num_cpus = 0;
for_each_cpu_and(numa_cpu, cpu_mask, &domain) {
if (cpu_online(numa_cpu))
num_cpus++;
}
num_cpus = cpus_weight(cpu_mask);
if (!num_cpus)
goto skip_numa_setup;
/* Use irq assignment to distribute across cpus in node */
cpu_index = irq % num_cpus;
for (numa_cpu = first_cpu(cpu_mask) ; i < cpu_index ; i++)
numa_cpu = next_cpu(numa_cpu, cpu_mask);
for_each_cpu_and(numa_cpu, cpu_mask, &domain)
if (cpu_online(numa_cpu) && i++ >= cpu_index)
break;
if (numa_cpu != NR_CPUS)
if (numa_cpu < nr_cpu_ids)
return cpu_physical_id(numa_cpu);
}
skip_numa_setup:
......@@ -731,7 +730,7 @@ skip_numa_setup:
* case of NUMA.)
*/
do {
if (++cpu >= NR_CPUS)
if (++cpu >= nr_cpu_ids)
cpu = 0;
} while (!cpu_online(cpu) || !cpu_isset(cpu, domain));
......
......@@ -112,11 +112,11 @@ void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
}
}
bool is_affinity_mask_valid(cpumask_t cpumask)
bool is_affinity_mask_valid(cpumask_var_t cpumask)
{
if (ia64_platform_is("sn2")) {
/* Only allow one CPU to be specified in the smp_affinity mask */
if (cpus_weight(cpumask) != 1)
if (cpumask_weight(cpumask) != 1)
return false;
}
return true;
......
......@@ -385,7 +385,6 @@ static int sn_topology_show(struct seq_file *s, void *d)
int j;
const char *slabname;
int ordinal;
cpumask_t cpumask;
char slice;
struct cpuinfo_ia64 *c;
struct sn_hwperf_port_info *ptdata;
......@@ -473,23 +472,21 @@ static int sn_topology_show(struct seq_file *s, void *d)
* CPUs on this node, if any
*/
if (!SN_HWPERF_IS_IONODE(obj)) {
cpumask = node_to_cpumask(ordinal);
for_each_online_cpu(i) {
if (cpu_isset(i, cpumask)) {
slice = 'a' + cpuid_to_slice(i);
c = cpu_data(i);
seq_printf(s, "cpu %d %s%c local"
" freq %luMHz, arch ia64",
i, obj->location, slice,
c->proc_freq / 1000000);
for_each_online_cpu(j) {
seq_printf(s, j ? ":%d" : ", dist %d",
node_distance(
for_each_cpu_and(i, cpu_online_mask,
cpumask_of_node(ordinal)) {
slice = 'a' + cpuid_to_slice(i);
c = cpu_data(i);
seq_printf(s, "cpu %d %s%c local"
" freq %luMHz, arch ia64",
i, obj->location, slice,
c->proc_freq / 1000000);
for_each_online_cpu(j) {
seq_printf(s, j ? ":%d" : ", dist %d",
node_distance(
cpu_to_node(i),
cpu_to_node(j)));
}
seq_putc(s, '\n');
}
seq_putc(s, '\n');
}
}
}
......
......@@ -592,7 +592,7 @@ int setup_profiling_timer(unsigned int multiplier)
* accounting. At that time they also adjust their APIC timers
* accordingly.
*/
for (i = 0; i < NR_CPUS; ++i)
for_each_possible_cpu(i)
per_cpu(prof_multiplier, i) = multiplier;
return 0;
......
......@@ -331,6 +331,7 @@ found_middle:
#endif /* __KERNEL__ */
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#endif /* _M68KNOMMU_BITOPS_H */
......@@ -25,11 +25,13 @@ extern struct cpuinfo_ip27 sn_cpu_info[NR_CPUS];
#define cpu_to_node(cpu) (sn_cpu_info[(cpu)].p_nodeid)
#define parent_node(node) (node)
#define node_to_cpumask(node) (hub_data(node)->h_cpus)
#define node_to_first_cpu(node) (first_cpu(node_to_cpumask(node)))
#define cpumask_of_node(node) (&hub_data(node)->h_cpus)
#define node_to_first_cpu(node) (cpumask_first(cpumask_of_node(node)))
struct pci_bus;
extern int pcibus_to_node(struct pci_bus *);
#define pcibus_to_cpumask(bus) (cpu_online_map)
#define cpumask_of_pcibus(bus) (cpu_online_mask)
extern unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];
......
......@@ -16,8 +16,6 @@
#include <linux/cpumask.h>
typedef unsigned long address_t;
extern cpumask_t cpu_online_map;
/*
* Private routines/data
......
......@@ -22,11 +22,11 @@ static inline cpumask_t node_to_cpumask(int node)
return numa_cpumask_lookup_table[node];
}
#define cpumask_of_node(node) (&numa_cpumask_lookup_table[node])
static inline int node_to_first_cpu(int node)
{
cpumask_t tmp;
tmp = node_to_cpumask(node);
return first_cpu(tmp);
return cpumask_first(cpumask_of_node(node));
}
int of_node_to_nid(struct device_node *device);
......@@ -46,6 +46,10 @@ static inline int pcibus_to_node(struct pci_bus *bus)
node_to_cpumask(pcibus_to_node(bus)) \
)
#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \
cpu_all_mask : \
cpumask_of_node(pcibus_to_node(bus)))
/* sched_domains SD_NODE_INIT for PPC64 machines */
#define SD_NODE_INIT (struct sched_domain) { \
.parent = NULL, \
......@@ -108,6 +112,8 @@ static inline void sysfs_remove_device_from_node(struct sys_device *dev,
#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
#define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
#define topology_core_cpumask(cpu) (&per_cpu(cpu_core_map, cpu))
#define topology_core_id(cpu) (cpu_to_core_id(cpu))
#endif
#endif
......
......@@ -80,10 +80,10 @@ static void cpu_affinity_set(struct spu *spu, int cpu)
u64 route;
if (nr_cpus_node(spu->node)) {
cpumask_t spumask = node_to_cpumask(spu->node);
cpumask_t cpumask = node_to_cpumask(cpu_to_node(cpu));
const struct cpumask *spumask = cpumask_of_node(spu->node),
*cpumask = cpumask_of_node(cpu_to_node(cpu));
if (!cpus_intersects(spumask, cpumask))
if (!cpumask_intersects(spumask, cpumask))
return;
}
......