Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Kconfig              |  13
-rw-r--r--  arch/ia64/Makefile             |   2
-rw-r--r--  arch/ia64/ia32/ia32_support.c  |  10
-rw-r--r--  arch/ia64/kernel/acpi.c        |  10
-rw-r--r--  arch/ia64/kernel/entry.S       |  26
-rw-r--r--  arch/ia64/kernel/palinfo.c     |   6
-rw-r--r--  arch/ia64/kernel/perfmon.c     |  16
-rw-r--r--  arch/ia64/kernel/process.c     |  25
-rw-r--r--  arch/ia64/kernel/sal.c         |  11
-rw-r--r--  arch/ia64/kernel/setup.c       |  29
-rw-r--r--  arch/ia64/mm/init.c            |  25
-rw-r--r--  arch/ia64/uv/Makefile          |  12
-rw-r--r--  arch/ia64/uv/kernel/Makefile   |  13
-rw-r--r--  arch/ia64/uv/kernel/machvec.c  |  11
-rw-r--r--  arch/ia64/uv/kernel/setup.c    |  98
15 files changed, 269 insertions(+), 38 deletions(-)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 0df5f6f75edf..16be41446b5b 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -135,6 +135,7 @@ config IA64_GENERIC
           HP-zx1/sx1000           For HP systems
           HP-zx1/sx1000+swiotlb   For HP systems with (broken) DMA-constrained devices.
           SGI-SN2                 For SGI Altix systems
+          SGI-UV                  For SGI UV systems
           Ski-simulator           For the HP simulator <http://www.hpl.hp.com/research/linux/ski/>
 
           If you don't know what to do, choose "generic".
@@ -170,6 +171,18 @@ config IA64_SGI_SN2
           to select this option.  If in doubt, select ia64 generic support
           instead.
 
+config IA64_SGI_UV
+        bool "SGI-UV"
+        select NUMA
+        select ACPI_NUMA
+        select SWIOTLB
+        help
+          Selecting this option will optimize the kernel for use on UV based
+          systems, but the resulting kernel binary will not run on other
+          types of ia64 systems.  If you have an SGI UV system, it's safe
+          to select this option.  If in doubt, select ia64 generic support
+          instead.
+
 config IA64_HP_SIM
         bool "Ski-simulator"
         select SWIOTLB
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index ec4cca477f49..88f1a55c6c94 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -63,7 +63,7 @@ drivers-$(CONFIG_PCI)           += arch/ia64/pci/
 drivers-$(CONFIG_IA64_HP_SIM)          += arch/ia64/hp/sim/
 drivers-$(CONFIG_IA64_HP_ZX1)          += arch/ia64/hp/common/ arch/ia64/hp/zx1/
 drivers-$(CONFIG_IA64_HP_ZX1_SWIOTLB)  += arch/ia64/hp/common/ arch/ia64/hp/zx1/
-drivers-$(CONFIG_IA64_GENERIC)         += arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/ arch/ia64/sn/
+drivers-$(CONFIG_IA64_GENERIC)         += arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/ arch/ia64/sn/ arch/ia64/uv/
 drivers-$(CONFIG_OPROFILE)             += arch/ia64/oprofile/
 
 boot := arch/ia64/hp/sim/boot
diff --git a/arch/ia64/ia32/ia32_support.c b/arch/ia64/ia32/ia32_support.c
index 896b1ebbfb26..a6965ddafc46 100644
--- a/arch/ia64/ia32/ia32_support.c
+++ b/arch/ia64/ia32/ia32_support.c
@@ -15,7 +15,6 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/mm.h>
-#include <linux/personality.h>
 #include <linux/sched.h>
 
 #include <asm/intrinsics.h>
@@ -29,7 +28,6 @@
 extern int die_if_kernel (char *str, struct pt_regs *regs, long err);
 
-struct exec_domain ia32_exec_domain;
 struct page *ia32_shared_page[NR_CPUS];
 unsigned long *ia32_boot_gdt;
 unsigned long *cpu_gdt_table[NR_CPUS];
@@ -240,14 +238,6 @@ ia32_cpu_init (void)
 static int __init
 ia32_init (void)
 {
-        ia32_exec_domain.name = "Linux/x86";
-        ia32_exec_domain.handler = NULL;
-        ia32_exec_domain.pers_low = PER_LINUX32;
-        ia32_exec_domain.pers_high = PER_LINUX32;
-        ia32_exec_domain.signal_map = default_exec_domain.signal_map;
-        ia32_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
-        register_exec_domain(&ia32_exec_domain);
-
 #if PAGE_SHIFT > IA32_PAGE_SHIFT
         {
                 extern struct kmem_cache *ia64_partial_page_cachep;
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 19709a079635..853d1f11be00 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -117,7 +117,10 @@ acpi_get_sysname(void)
         if (!strcmp(hdr->oem_id, "HP")) {
                 return "hpzx1";
         } else if (!strcmp(hdr->oem_id, "SGI")) {
-                return "sn2";
+                if (!strcmp(hdr->oem_table_id + 4, "UV"))
+                        return "uv";
+                else
+                        return "sn2";
         }
 
         return "dig";
@@ -130,6 +133,8 @@ acpi_get_sysname(void)
         return "hpzx1_swiotlb";
 # elif defined (CONFIG_IA64_SGI_SN2)
         return "sn2";
+# elif defined (CONFIG_IA64_SGI_UV)
+        return "uv";
 # elif defined (CONFIG_IA64_DIG)
         return "dig";
 # else
@@ -622,6 +627,9 @@ void acpi_unregister_gsi(u32 gsi)
         if (acpi_irq_model == ACPI_IRQ_MODEL_PLATFORM)
                 return;
 
+        if (has_8259 && gsi < 16)
+                return;
+
         iosapic_unregister_intr(gsi);
 }
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index e49ad8c5dc69..ca2bb95726de 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1156,6 +1156,9 @@ skip_rbs_switch:
  *	r31 = current->thread_info->flags
  * On exit:
  *	p6 = TRUE if work-pending-check needs to be redone
+ *
+ * Interrupts are disabled on entry, reenabled depend on work, and
+ * disabled on exit.
  */
 .work_pending_syscall:
         add r2=-8,r2
@@ -1164,16 +1167,16 @@ skip_rbs_switch:
         st8 [r2]=r8
         st8 [r3]=r10
 .work_pending:
-        tbit.z p6,p0=r31,TIF_NEED_RESCHED   // current_thread_info()->need_resched==0?
+        tbit.z p6,p0=r31,TIF_NEED_RESCHED   // is resched not needed?
 (p6)    br.cond.sptk.few .notify
 #ifdef CONFIG_PREEMPT
 (pKStk) dep r21=-1,r0,PREEMPT_ACTIVE_BIT,1
         ;;
 (pKStk) st4 [r20]=r21
-        ssm psr.i                           // enable interrupts
 #endif
+        ssm psr.i                           // enable interrupts
         br.call.spnt.many rp=schedule
-.ret9:  cmp.eq p6,p0=r0,r0                  // p6 <- 1
+.ret9:  cmp.eq p6,p0=r0,r0                  // p6 <- 1 (re-check)
         rsm psr.i                           // disable interrupts
         ;;
 #ifdef CONFIG_PREEMPT
@@ -1182,13 +1185,13 @@ skip_rbs_switch:
 (pKStk) st4 [r20]=r0                        // preempt_count() <- 0
 #endif
 (pLvSys)br.cond.sptk.few  .work_pending_syscall_end
-        br.cond.sptk.many .work_processed_kernel    // re-check
+        br.cond.sptk.many .work_processed_kernel
 
 .notify:
 (pUStk) br.call.spnt.many rp=notify_resume_user
-.ret10: cmp.ne p6,p0=r0,r0                  // p6 <- 0
+.ret10: cmp.ne p6,p0=r0,r0                  // p6 <- 0 (don't re-check)
 (pLvSys)br.cond.sptk.few  .work_pending_syscall_end
-        br.cond.sptk.many .work_processed_kernel    // don't re-check
+        br.cond.sptk.many .work_processed_kernel
 
 .work_pending_syscall_end:
         adds r2=PT(R8)+16,r12
@@ -1196,7 +1199,7 @@ skip_rbs_switch:
         ;;
         ld8 r8=[r2]
         ld8 r10=[r3]
-        br.cond.sptk.many .work_processed_syscall   // re-check
+        br.cond.sptk.many .work_processed_syscall
 
 END(ia64_leave_kernel)
@@ -1234,9 +1237,12 @@ GLOBAL_ENTRY(ia64_invoke_schedule_tail)
 END(ia64_invoke_schedule_tail)
 
 /*
- * Setup stack and call do_notify_resume_user().  Note that pSys and pNonSys need to
- * be set up by the caller.  We declare 8 input registers so the system call
- * args get preserved, in case we need to restart a system call.
+ * Setup stack and call do_notify_resume_user(), keeping interrupts
+ * disabled.
+ *
+ * Note that pSys and pNonSys need to be set up by the caller.
+ * We declare 8 input registers so the system call args get preserved,
+ * in case we need to restart a system call.
  */
 ENTRY(notify_resume_user)
         .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 4547a2092af9..9dc00f7fe10e 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -900,12 +900,6 @@ static void
 palinfo_smp_call(void *info)
 {
         palinfo_smp_data_t *data = (palinfo_smp_data_t *)info;
-        if (data == NULL) {
-                printk(KERN_ERR "palinfo: data pointer is NULL\n");
-                data->ret = 0;  /* no output */
-                return;
-        }
-        /* does this actual call */
         data->ret = (*data->func)(data->page);
 }
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index c1ad27de2dd2..71d05133f556 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -5013,12 +5013,13 @@ pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
 }
 
 static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);
+
 /*
  * pfm_handle_work() can be called with interrupts enabled
  * (TIF_NEED_RESCHED) or disabled. The down_interruptible
  * call may sleep, therefore we must re-enable interrupts
  * to avoid deadlocks. It is safe to do so because this function
- * is called ONLY when returning to user level (PUStk=1), in which case
+ * is called ONLY when returning to user level (pUStk=1), in which case
  * there is no risk of kernel stack overflow due to deep
  * interrupt nesting.
  */
@@ -5034,7 +5035,8 @@ pfm_handle_work(void)
 
         ctx = PFM_GET_CTX(current);
         if (ctx == NULL) {
-                printk(KERN_ERR "perfmon: [%d] has no PFM context\n", task_pid_nr(current));
+                printk(KERN_ERR "perfmon: [%d] has no PFM context\n",
+                        task_pid_nr(current));
                 return;
         }
 
@@ -5058,11 +5060,12 @@ pfm_handle_work(void)
         /*
          * must be done before we check for simple-reset mode
          */
-        if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE) goto do_zombie;
-
+        if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE)
+                goto do_zombie;
 
         //if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking;
-        if (reason == PFM_TRAP_REASON_RESET) goto skip_blocking;
+        if (reason == PFM_TRAP_REASON_RESET)
+                goto skip_blocking;
 
         /*
          * restore interrupt mask to what it was on entry.
@@ -5110,7 +5113,8 @@ do_zombie:
         /*
          * in case of interruption of down() we don't restart anything
          */
-        if (ret < 0) goto nothing_to_do;
+        if (ret < 0)
+                goto nothing_to_do;
 
 skip_blocking:
         pfm_resume_after_ovfl(ctx, ovfl_regs, regs);
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 58dcfac5ea88..a3a34b4eb038 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -167,11 +167,18 @@ void tsk_clear_notify_resume(struct task_struct *tsk)
         clear_ti_thread_flag(task_thread_info(tsk), TIF_NOTIFY_RESUME);
 }
 
+/*
+ * do_notify_resume_user():
+ *	Called from notify_resume_user at entry.S, with interrupts disabled.
+ */
 void
-do_notify_resume_user (sigset_t *unused, struct sigscratch *scr, long in_syscall)
+do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall)
 {
         if (fsys_mode(current, &scr->pt)) {
-                /* defer signal-handling etc. until we return to privilege-level 0.  */
+                /*
+                 * defer signal-handling etc. until we return to
+                 * privilege-level 0.
+                 */
                 if (!ia64_psr(&scr->pt)->lp)
                         ia64_psr(&scr->pt)->lp = 1;
                 return;
@@ -179,16 +186,26 @@ do_notify_resume_user (sigset_t *unused, struct sigscratch *scr, long in_syscall
 
 #ifdef CONFIG_PERFMON
         if (current->thread.pfm_needs_checking)
+                /*
+                 * Note: pfm_handle_work() allow us to call it with interrupts
+                 * disabled, and may enable interrupts within the function.
+                 */
                 pfm_handle_work();
 #endif
 
         /* deal with pending signal delivery */
-        if (test_thread_flag(TIF_SIGPENDING))
+        if (test_thread_flag(TIF_SIGPENDING)) {
+                local_irq_enable();     /* force interrupt enable */
                 ia64_do_signal(scr, in_syscall);
+        }
 
         /* copy user rbs to kernel rbs */
-        if (unlikely(test_thread_flag(TIF_RESTORE_RSE)))
+        if (unlikely(test_thread_flag(TIF_RESTORE_RSE))) {
+                local_irq_enable();     /* force interrupt enable */
                 ia64_sync_krbs();
+        }
+
+        local_irq_disable();    /* force interrupt disable */
 }
 
 static int pal_halt = 1;
diff --git a/arch/ia64/kernel/sal.c b/arch/ia64/kernel/sal.c
index a3022dc48ef8..7e0259709c04 100644
--- a/arch/ia64/kernel/sal.c
+++ b/arch/ia64/kernel/sal.c
@@ -229,6 +229,14 @@ static void __init sal_desc_ap_wakeup(void *p) { }
  */
 static int sal_cache_flush_drops_interrupts;
 
+static int __init
+force_pal_cache_flush(char *str)
+{
+        sal_cache_flush_drops_interrupts = 1;
+        return 0;
+}
+early_param("force_pal_cache_flush", force_pal_cache_flush);
+
 void __init
 check_sal_cache_flush (void)
 {
@@ -237,6 +245,9 @@ check_sal_cache_flush (void)
         u64 vector, cache_type = 3;
         struct ia64_sal_retval isrv;
 
+        if (sal_cache_flush_drops_interrupts)
+                return;
+
         cpu = get_cpu();
         local_irq_save(flags);
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 5015ca1275ca..e9596cd0cdab 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -239,6 +239,25 @@ __initcall(register_memory);
 
 #ifdef CONFIG_KEXEC
+
+/*
+ * This function checks if the reserved crashkernel is allowed on the specific
+ * IA64 machine flavour. Machines without an IO TLB use swiotlb and require
+ * some memory below 4 GB (i.e. in 32 bit area), see the implementation of
+ * lib/swiotlb.c. The hpzx1 architecture has an IO TLB but cannot use that
+ * in kdump case. See the comment in sba_init() in sba_iommu.c.
+ *
+ * So, the only machvec that really supports loading the kdump kernel
+ * over 4 GB is "sn2".
+ */
+static int __init check_crashkernel_memory(unsigned long pbase, size_t size)
+{
+        if (ia64_platform_is("sn2") || ia64_platform_is("uv"))
+                return 1;
+        else
+                return pbase < (1UL << 32);
+}
+
 static void __init setup_crashkernel(unsigned long total, int *n)
 {
         unsigned long long base = 0, size = 0;
@@ -252,6 +271,16 @@ static void __init setup_crashkernel(unsigned long total, int *n)
                         base = kdump_find_rsvd_region(size, rsvd_region, *n);
                 }
+
+                if (!check_crashkernel_memory(base, size)) {
+                        pr_warning("crashkernel: There would be kdump memory "
+                                "at %ld GB but this is unusable because it "
+                                "must\nbe below 4 GB. Change the memory "
+                                "configuration of the machine.\n",
+                                (unsigned long)(base >> 30));
+                        return;
+                }
+
                 if (base != ~0UL) {
                         printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
                                         "for crashkernel (System RAM: %ldMB)\n",
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index fc6c6636ffda..200100ea7610 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -719,3 +719,28 @@ out:
 EXPORT_SYMBOL_GPL(remove_memory);
 #endif /* CONFIG_MEMORY_HOTREMOVE */
 #endif
+
+/*
+ * Even when CONFIG_IA32_SUPPORT is not enabled it is
+ * useful to have the Linux/x86 domain registered to
+ * avoid an attempted module load when emulators call
+ * personality(PER_LINUX32). This saves several milliseconds
+ * on each such call.
+ */
+static struct exec_domain ia32_exec_domain;
+
+static int __init
+per_linux32_init(void)
+{
+        ia32_exec_domain.name = "Linux/x86";
+        ia32_exec_domain.handler = NULL;
+        ia32_exec_domain.pers_low = PER_LINUX32;
+        ia32_exec_domain.pers_high = PER_LINUX32;
+        ia32_exec_domain.signal_map = default_exec_domain.signal_map;
+        ia32_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
+        register_exec_domain(&ia32_exec_domain);
+
+        return 0;
+}
+
+__initcall(per_linux32_init);
diff --git a/arch/ia64/uv/Makefile b/arch/ia64/uv/Makefile
new file mode 100644
index 000000000000..aa9f91947c49
--- /dev/null
+++ b/arch/ia64/uv/Makefile
@@ -0,0 +1,12 @@
+# arch/ia64/uv/Makefile
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2008 Silicon Graphics, Inc.  All Rights Reserved.
+#
+# Makefile for the sn uv subplatform
+#
+
+obj-y += kernel/
diff --git a/arch/ia64/uv/kernel/Makefile b/arch/ia64/uv/kernel/Makefile
new file mode 100644
index 000000000000..8d92b4684d8e
--- /dev/null
+++ b/arch/ia64/uv/kernel/Makefile
@@ -0,0 +1,13 @@
+# arch/ia64/uv/kernel/Makefile
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2008 Silicon Graphics, Inc.  All Rights Reserved.
+#
+
+EXTRA_CFLAGS += -Iarch/ia64/sn/include
+
+obj-y                      += setup.o
+obj-$(CONFIG_IA64_GENERIC) += machvec.o
diff --git a/arch/ia64/uv/kernel/machvec.c b/arch/ia64/uv/kernel/machvec.c
new file mode 100644
index 000000000000..50737a9dca74
--- /dev/null
+++ b/arch/ia64/uv/kernel/machvec.c
@@ -0,0 +1,11 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
+ */
+
+#define MACHVEC_PLATFORM_NAME   uv
+#define MACHVEC_PLATFORM_HEADER <asm/machvec_uv.h>
+#include <asm/machvec_init.h>
diff --git a/arch/ia64/uv/kernel/setup.c b/arch/ia64/uv/kernel/setup.c
new file mode 100644
index 000000000000..9aa743203c3c
--- /dev/null
+++ b/arch/ia64/uv/kernel/setup.c
@@ -0,0 +1,98 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * SGI UV Core Functions
+ *
+ * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <asm/sn/simulator.h>
+#include <asm/uv/uv_mmrs.h>
+#include <asm/uv/uv_hub.h>
+
+DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
+EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info);
+
+
+struct redir_addr {
+        unsigned long redirect;
+        unsigned long alias;
+};
+
+#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT
+
+static __initdata struct redir_addr redir_addrs[] = {
+        {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_SI_ALIAS0_OVERLAY_CONFIG},
+        {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_SI_ALIAS1_OVERLAY_CONFIG},
+        {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_SI_ALIAS2_OVERLAY_CONFIG},
+};
+
+static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
+{
+        union uvh_si_alias0_overlay_config_u alias;
+        union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
+        int i;
+
+        for (i = 0; i < ARRAY_SIZE(redir_addrs); i++) {
+                alias.v = uv_read_local_mmr(redir_addrs[i].alias);
+                if (alias.s.base == 0) {
+                        *size = (1UL << alias.s.m_alias);
+                        redirect.v = uv_read_local_mmr(redir_addrs[i].redirect);
+                        *base = (unsigned long)redirect.s.dest_base << DEST_SHIFT;
+                        return;
+                }
+        }
+        BUG();
+}
+
+void __init uv_setup(char **cmdline_p)
+{
+        union uvh_si_addr_map_config_u m_n_config;
+        union uvh_node_id_u node_id;
+        unsigned long gnode_upper;
+        int nid, cpu, m_val, n_val;
+        unsigned long mmr_base, lowmem_redir_base, lowmem_redir_size;
+
+        if (IS_MEDUSA()) {
+                lowmem_redir_base = 0;
+                lowmem_redir_size = 0;
+                node_id.v = 0;
+                m_n_config.s.m_skt = 37;
+                m_n_config.s.n_skt = 0;
+                mmr_base = 0;
+        } else {
+                get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);
+                node_id.v = uv_read_local_mmr(UVH_NODE_ID);
+                m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG);
+                mmr_base =
+                        uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
+                        ~UV_MMR_ENABLE;
+        }
+
+        m_val = m_n_config.s.m_skt;
+        n_val = m_n_config.s.n_skt;
+        printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base);
+
+        gnode_upper = (((unsigned long)node_id.s.node_id) &
+                       ~((1 << n_val) - 1)) << m_val;
+
+        for_each_present_cpu(cpu) {
+                nid = cpu_to_node(cpu);
+                uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base;
+                uv_cpu_hub_info(cpu)->lowmem_remap_top =
+                        lowmem_redir_base + lowmem_redir_size;
+                uv_cpu_hub_info(cpu)->m_val = m_val;
+                uv_cpu_hub_info(cpu)->n_val = m_val;
+                uv_cpu_hub_info(cpu)->pnode_mask = (1 << n_val) - 1;
+                uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1;
+                uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
+                uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
+                uv_cpu_hub_info(cpu)->coherency_domain_number = 0;/* ZZZ */
+                printk(KERN_DEBUG "UV cpu %d, nid %d\n", cpu, nid);
+        }
+}
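
For reference, the machvec selection that the acpi.c hunk adds to acpi_get_sysname() reduces to a string match on the ACPI OEM fields: an OEM id of "SGI" whose OEM table id carries the suffix "UV" at offset 4 selects the new "uv" machine vector, any other SGI table id still selects "sn2", and "HP" keeps selecting "hpzx1". The following is a minimal, standalone user-space sketch of that decision; the oem_id/oem_table_id strings passed in main() are hypothetical stand-ins, not values taken from real firmware.

/*
 * Sketch of the machvec-name selection logic from the acpi_get_sysname()
 * hunk above.  The example OEM strings below are illustrative only.
 */
#include <stdio.h>
#include <string.h>

static const char *machvec_name(const char *oem_id, const char *oem_table_id)
{
        if (!strcmp(oem_id, "HP"))
                return "hpzx1";
        if (!strcmp(oem_id, "SGI")) {
                /* the patch compares the table-id suffix starting at offset 4 */
                if (!strcmp(oem_table_id + 4, "UV"))
                        return "uv";
                return "sn2";
        }
        return "dig";
}

int main(void)
{
        printf("%s\n", machvec_name("SGI", "SN2-UV"));   /* prints "uv"    */
        printf("%s\n", machvec_name("SGI", "SN2-XXX"));  /* prints "sn2"   */
        printf("%s\n", machvec_name("HP",  "zx1-sba"));  /* prints "hpzx1" */
        return 0;
}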