Commit 1deab8ce authored by Linus Torvalds
Browse files

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc

Pull sparc updates from David Miller:

 1) Add missing cmpxchg64() for 32-bit sparc.

 2) Timer conversions from Allen Pais and Kees Cook.

 3) vDSO support, from Nagarathnam Muthusamy.

 4) Fix sparc64 huge page table walks based upon bug report by Al Viro,
    from Nitin Gupta.

 5) Optimized fls() for T4 and above, from Vijay Kumar.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc:
  sparc64: Fix page table walk for PUD hugepages
  sparc64: Convert timers to use timer_setup()
  sparc64: convert mdesc_handle.refcnt from atomic_t to refcount_t
  sparc/led: Convert timers to use timer_setup()
  sparc64: Use sparc optimized fls and __fls for T4 and above
  sparc64: SPARC optimized __fls function
  sparc64: SPARC optimized fls function
  sparc64: Define SPARC default __fls function
  sparc64: Define SPARC default fls function
  vDSO for sparc
  sparc32: Add cmpxchg64().
  sbus: char: Move D7S_MINOR to include/linux/miscdevice.h
  s...
parents 81700247 70f3c8b7
......@@ -7,3 +7,4 @@ obj-y += mm/
obj-y += math-emu/
obj-y += net/
obj-y += crypto/
obj-$(CONFIG_SPARC64) += vdso/
......@@ -84,6 +84,8 @@ config SPARC64
select HAVE_REGS_AND_STACK_ACCESS_API
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
select GENERIC_TIME_VSYSCALL
select ARCH_CLOCKSOURCE_DATA
config ARCH_DEFCONFIG
string
......
......@@ -81,6 +81,10 @@ install:
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
PHONY += vdso_install
vdso_install:
$(Q)$(MAKE) $(build)=arch/sparc/vdso $@
# This is the image used for packaging
KBUILD_IMAGE := $(boot)/zImage
......
......@@ -23,10 +23,11 @@ void set_bit(unsigned long nr, volatile unsigned long *addr);
void clear_bit(unsigned long nr, volatile unsigned long *addr);
void change_bit(unsigned long nr, volatile unsigned long *addr);
int fls(unsigned int word);
int __fls(unsigned long word);
#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#ifdef __KERNEL__
......
/*
 * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
 */

#ifndef _ASM_SPARC_CLOCKSOURCE_H
#define _ASM_SPARC_CLOCKSOURCE_H

/* VDSO clocksources */
#define VCLOCK_NONE 0 /* Nothing userspace can do. */
#define VCLOCK_TICK 1 /* Use %tick. */
#define VCLOCK_STICK 2 /* Use %stick. */

/* Per-clocksource architecture data consulted by the vDSO time reads. */
struct arch_clocksource_data {
	int vclock_mode; /* One of the VCLOCK_* constants above. */
};

#endif /* _ASM_SPARC_CLOCKSOURCE_H */
......@@ -63,6 +63,9 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
(unsigned long)_n_, sizeof(*(ptr))); \
})
u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new);
#define cmpxchg64(ptr, old, new) __cmpxchg_u64(ptr, old, new)
#include <asm-generic/cmpxchg-local.h>
/*
......
......@@ -211,4 +211,18 @@ do { if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
(current->personality & (~PER_MASK))); \
} while (0)
extern unsigned int vdso_enabled;
#define ARCH_DLINFO \
do { \
if (vdso_enabled) \
NEW_AUX_ENT(AT_SYSINFO_EHDR, \
(unsigned long)current->mm->context.vdso); \
} while (0)
struct linux_binprm;
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp);
#endif /* !(__ASM_SPARC64_ELF_H) */
......@@ -97,6 +97,7 @@ typedef struct {
unsigned long thp_pte_count;
struct tsb_config tsb_block[MM_NUM_TSBS];
struct hv_tsb_descr tsb_descr[MM_NUM_TSBS];
void *vdso;
} mm_context_t;
#endif /* !__ASSEMBLY__ */
......
......@@ -8,9 +8,11 @@
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <linux/smp.h>
#include <asm/spitfire.h>
#include <asm-generic/mm_hooks.h>
#include <asm/percpu.h>
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
......
......@@ -200,6 +200,13 @@ unsigned long get_wchan(struct task_struct *task);
* To make a long story short, we are trying to yield the current cpu
* strand during busy loops.
*/
#ifdef BUILD_VDSO
#define cpu_relax() asm volatile("\n99:\n\t" \
"rd %%ccr, %%g0\n\t" \
"rd %%ccr, %%g0\n\t" \
"rd %%ccr, %%g0\n\t" \
::: "memory")
#else /* ! BUILD_VDSO */
#define cpu_relax() asm volatile("\n99:\n\t" \
"rd %%ccr, %%g0\n\t" \
"rd %%ccr, %%g0\n\t" \
......@@ -211,6 +218,7 @@ unsigned long get_wchan(struct task_struct *task);
"nop\n\t" \
".previous" \
::: "memory")
#endif
/* Prefetch support. This is tuned for UltraSPARC-III and later.
* UltraSPARC-I will treat these as nops, and UltraSPARC-II has
......
......@@ -217,7 +217,7 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
sllx REG2, 32, REG2; \
andcc REG1, REG2, %g0; \
be,pt %xcc, 700f; \
sethi %hi(0x1ffc0000), REG2; \
sethi %hi(0xffe00000), REG2; \
sllx REG2, 1, REG2; \
brgez,pn REG1, FAIL_LABEL; \
andn REG1, REG2, REG1; \
......
/*
 * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
 */

#ifndef _ASM_SPARC_VDSO_H
#define _ASM_SPARC_VDSO_H

/*
 * Descriptor for one built-in vDSO image: the raw image bytes plus the
 * offsets of the symbols the kernel needs to patch/map at setup time.
 */
struct vdso_image {
	void *data;                      /* Raw vDSO image bytes. */
	unsigned long size;              /* Always a multiple of PAGE_SIZE */

	long sym_vvar_start;             /* Negative offset to the vvar area */

	long sym_vread_tick;             /* Start of vread_tick section */
	long sym_vread_tick_patch_start; /* Start of tick read */
	long sym_vread_tick_patch_end;   /* End of tick read */
};

#ifdef CONFIG_SPARC64
extern const struct vdso_image vdso_image_64_builtin;
#endif
#ifdef CONFIG_COMPAT
extern const struct vdso_image vdso_image_32_builtin;
#endif

#endif /* _ASM_SPARC_VDSO_H */
/*
 * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
 */

#ifndef _ASM_SPARC_VVAR_DATA_H
#define _ASM_SPARC_VVAR_DATA_H

#include <asm/clocksource.h>
#include <linux/seqlock.h>
#include <linux/time.h>
#include <linux/types.h>

/*
 * Kernel/userspace shared timekeeping data, published to the vDSO.
 * Readers and the (single) writer are synchronized via the hand-rolled
 * sequence counter 'seq' below: odd value means a write is in progress.
 */
struct vvar_data {
	unsigned int seq;       /* Sequence counter; odd while being updated. */
	int vclock_mode;        /* VCLOCK_* mode of the current clocksource. */
	struct { /* extract of a clocksource struct */
		u64	cycle_last;
		u64	mask;
		int	mult;
		int	shift;
	} clock;
	/* open coded 'struct timespec' */
	/* *_snsec fields hold shifted nanoseconds (ns << clock.shift). */
	u64		wall_time_sec;
	u64		wall_time_snsec;
	u64		monotonic_time_snsec;
	u64		monotonic_time_sec;
	u64		monotonic_time_coarse_sec;
	u64		monotonic_time_coarse_nsec;
	u64		wall_time_coarse_sec;
	u64		wall_time_coarse_nsec;

	int		tz_minuteswest;
	int		tz_dsttime;
};

extern struct vvar_data *vvar_data;
extern int vdso_fix_stick;

/*
 * Reader side: spin until no write is in progress (seq is even) and
 * return the observed sequence value for a later vvar_read_retry().
 */
static inline unsigned int vvar_read_begin(const struct vvar_data *s)
{
	unsigned int ret;

repeat:
	ret = READ_ONCE(s->seq);
	if (unlikely(ret & 1)) {
		cpu_relax();
		goto repeat;
	}
	smp_rmb(); /* Finish all reads before we return seq */
	return ret;
}

/*
 * Reader side: returns non-zero if the sequence changed since
 * vvar_read_begin(), i.e. the data read in between may be torn and
 * the read critical section must be retried.
 */
static inline int vvar_read_retry(const struct vvar_data *s,
					unsigned int start)
{
	smp_rmb(); /* Finish all reads before checking the value of seq */
	return unlikely(s->seq != start);
}

/* Writer side: bump seq to odd to signal readers an update has begun. */
static inline void vvar_write_begin(struct vvar_data *s)
{
	++s->seq;
	smp_wmb(); /* Makes sure that increment of seq is reflected */
}

/* Writer side: bump seq back to even, publishing the updated data. */
static inline void vvar_write_end(struct vvar_data *s)
{
	smp_wmb(); /* Makes the value of seq current before we increment */
	++s->seq;
}

#endif /* _ASM_SPARC_VVAR_DATA_H */
#ifndef __ASMSPARC_AUXVEC_H
#define __ASMSPARC_AUXVEC_H

/* Aux vector tag carrying the vDSO base address (see ARCH_DLINFO). */
#define AT_SYSINFO_EHDR		33

/* One architecture-specific AT_* entry (the one above). */
#define AT_VECTOR_SIZE_ARCH	1

#endif /* !(__ASMSPARC_AUXVEC_H) */
......@@ -43,6 +43,7 @@ obj-$(CONFIG_SPARC32) += systbls_32.o
obj-y += time_$(BITS).o
obj-$(CONFIG_SPARC32) += windows.o
obj-y += cpu.o
obj-$(CONFIG_SPARC64) += vdso.o
obj-$(CONFIG_SPARC32) += devices.o
obj-y += ptrace_$(BITS).o
obj-y += unaligned_$(BITS).o
......
......@@ -641,6 +641,8 @@ niagara4_patch:
nop
call niagara4_patch_pageops
nop
call niagara4_patch_fls
nop
ba,a,pt %xcc, 80f
nop
......
......@@ -13,6 +13,7 @@
#include <linux/miscdevice.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/refcount.h>
#include <asm/cpudata.h>
#include <asm/hypervisor.h>
......@@ -71,7 +72,7 @@ struct mdesc_handle {
struct list_head list;
struct mdesc_mem_ops *mops;
void *self_base;
atomic_t refcnt;
refcount_t refcnt;
unsigned int handle_size;
struct mdesc_hdr mdesc;
};
......@@ -153,7 +154,7 @@ static void mdesc_handle_init(struct mdesc_handle *hp,
memset(hp, 0, handle_size);
INIT_LIST_HEAD(&hp->list);
hp->self_base = base;
atomic_set(&hp->refcnt, 1);
refcount_set(&hp->refcnt, 1);
hp->handle_size = handle_size;
}
......@@ -183,7 +184,7 @@ static void __init mdesc_memblock_free(struct mdesc_handle *hp)
unsigned int alloc_size;
unsigned long start;
BUG_ON(atomic_read(&hp->refcnt) != 0);
BUG_ON(refcount_read(&hp->refcnt) != 0);
BUG_ON(!list_empty(&hp->list));
alloc_size = PAGE_ALIGN(hp->handle_size);
......@@ -221,7 +222,7 @@ static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size)
static void mdesc_kfree(struct mdesc_handle *hp)
{
BUG_ON(atomic_read(&hp->refcnt) != 0);
BUG_ON(refcount_read(&hp->refcnt) != 0);
BUG_ON(!list_empty(&hp->list));
kfree(hp->self_base);
......@@ -260,7 +261,7 @@ struct mdesc_handle *mdesc_grab(void)
spin_lock_irqsave(&mdesc_lock, flags);
hp = cur_mdesc;
if (hp)
atomic_inc(&hp->refcnt);
refcount_inc(&hp->refcnt);
spin_unlock_irqrestore(&mdesc_lock, flags);
return hp;
......@@ -272,7 +273,7 @@ void mdesc_release(struct mdesc_handle *hp)
unsigned long flags;
spin_lock_irqsave(&mdesc_lock, flags);
if (atomic_dec_and_test(&hp->refcnt)) {
if (refcount_dec_and_test(&hp->refcnt)) {
list_del_init(&hp->list);
hp->mops->free(hp);
}
......@@ -514,7 +515,7 @@ void mdesc_update(void)
if (status != HV_EOK || real_len > len) {
printk(KERN_ERR "MD: mdesc reread fails with %lu\n",
status);
atomic_dec(&hp->refcnt);
refcount_dec(&hp->refcnt);
mdesc_free(hp);
goto out;
}
......@@ -527,7 +528,7 @@ void mdesc_update(void)
mdesc_notify_clients(orig_hp, hp);
spin_lock_irqsave(&mdesc_lock, flags);
if (atomic_dec_and_test(&orig_hp->refcnt))
if (refcount_dec_and_test(&orig_hp->refcnt))
mdesc_free(orig_hp);
else
list_add(&orig_hp->list, &mdesc_zombie_list);
......
......@@ -28,7 +28,6 @@
#include <linux/jiffies.h>
#include <linux/cpufreq.h>
#include <linux/percpu.h>
#include <linux/miscdevice.h>
#include <linux/rtc/m48t59.h>
#include <linux/kernel_stat.h>
#include <linux/clockchips.h>
......@@ -54,6 +53,8 @@
DEFINE_SPINLOCK(rtc_lock);
unsigned int __read_mostly vdso_fix_stick;
#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
......@@ -831,12 +832,17 @@ static void init_tick_ops(struct sparc64_tick_ops *ops)
void __init time_init_early(void)
{
if (tlb_type == spitfire) {
if (is_hummingbird())
if (is_hummingbird()) {
init_tick_ops(&hbtick_operations);
else
clocksource_tick.archdata.vclock_mode = VCLOCK_NONE;
} else {
init_tick_ops(&tick_operations);
clocksource_tick.archdata.vclock_mode = VCLOCK_TICK;
vdso_fix_stick = 1;
}
} else {
init_tick_ops(&stick_operations);
clocksource_tick.archdata.vclock_mode = VCLOCK_STICK;
}
}
......
/*
* Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
* Copyright 2003 Andi Kleen, SuSE Labs.
*
* Thanks to hpa@transmeta.com for some useful hint.
* Special thanks to Ingo Molnar for his early experience with
* a different vsyscall implementation for Linux/IA32 and for the name.
*/
#include <linux/seqlock.h>
#include <linux/time.h>
#include <linux/timekeeper_internal.h>
#include <asm/vvar.h>
void update_vsyscall_tz(void)
{
if (unlikely(vvar_data == NULL))
return;
vvar_data->tz_minuteswest = sys_tz.tz_minuteswest;
vvar_data->tz_dsttime = sys_tz.tz_dsttime;
}
/*
 * Publish the timekeeper state into the vvar page for the vDSO.
 *
 * Runs under the core timekeeping update; the vvar_write_begin()/
 * vvar_write_end() pair brackets all stores so concurrent vDSO readers
 * either see the complete old state or the complete new one.
 * No-op until the vvar page has been set up.
 */
void update_vsyscall(struct timekeeper *tk)
{
	struct vvar_data *vdata = vvar_data;

	if (unlikely(vdata == NULL))
		return;

	vvar_write_begin(vdata);

	/* Clocksource parameters needed to convert cycles to ns. */
	vdata->vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode;
	vdata->clock.cycle_last = tk->tkr_mono.cycle_last;
	vdata->clock.mask = tk->tkr_mono.mask;
	vdata->clock.mult = tk->tkr_mono.mult;
	vdata->clock.shift = tk->tkr_mono.shift;

	/* *_snsec fields are shifted nanoseconds (ns << shift). */
	vdata->wall_time_sec = tk->xtime_sec;
	vdata->wall_time_snsec = tk->tkr_mono.xtime_nsec;

	vdata->monotonic_time_sec = tk->xtime_sec +
		tk->wall_to_monotonic.tv_sec;
	vdata->monotonic_time_snsec = tk->tkr_mono.xtime_nsec +
		(tk->wall_to_monotonic.tv_nsec <<
		 tk->tkr_mono.shift);

	/* Normalize: carry whole (shifted) seconds into the sec field. */
	while (vdata->monotonic_time_snsec >=
	       (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
		vdata->monotonic_time_snsec -=
			((u64)NSEC_PER_SEC) << tk->tkr_mono.shift;
		vdata->monotonic_time_sec++;
	}

	/* Coarse clocks: plain (unshifted) nanoseconds. */
	vdata->wall_time_coarse_sec = tk->xtime_sec;
	vdata->wall_time_coarse_nsec =
		(long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);

	vdata->monotonic_time_coarse_sec =
		vdata->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec;
	vdata->monotonic_time_coarse_nsec =
		vdata->wall_time_coarse_nsec + tk->wall_to_monotonic.tv_nsec;

	/* Normalize the coarse monotonic nanoseconds as well. */
	while (vdata->monotonic_time_coarse_nsec >= NSEC_PER_SEC) {
		vdata->monotonic_time_coarse_nsec -= NSEC_PER_SEC;
		vdata->monotonic_time_coarse_sec++;
	}

	vvar_write_end(vdata);
}
......@@ -798,9 +798,9 @@ void vio_port_up(struct vio_driver_state *vio)
}
EXPORT_SYMBOL(vio_port_up);
static void vio_port_timer(unsigned long _arg)
static void vio_port_timer(struct timer_list *t)
{
struct vio_driver_state *vio = (struct vio_driver_state *) _arg;
struct vio_driver_state *vio = from_timer(vio, t, timer);
vio_port_up(vio);
}
......@@ -849,7 +849,7 @@ int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev,
vio->ops = ops;
setup_timer(&vio->timer, vio_port_timer, (unsigned long) vio);
timer_setup(&vio->timer, vio_port_timer, 0);
return 0;
}
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment