Diffstat (limited to 'arch/ia64')
35 files changed, 684 insertions, 1200 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 01b78e7f992e..2e08942339ad 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -423,6 +423,8 @@ endmenu endif +source "net/Kconfig" + source "drivers/Kconfig" source "fs/Kconfig" diff --git a/arch/ia64/hp/sim/simeth.c b/arch/ia64/hp/sim/simeth.c index ae84a1018a89..0639ec0ed015 100644 --- a/arch/ia64/hp/sim/simeth.c +++ b/arch/ia64/hp/sim/simeth.c @@ -191,7 +191,7 @@ simeth_probe1(void) unsigned char mac_addr[ETH_ALEN]; struct simeth_local *local; struct net_device *dev; - int fd, i, err; + int fd, i, err, rc; /* * XXX Fix me @@ -228,7 +228,9 @@ simeth_probe1(void) return err; } - dev->irq = assign_irq_vector(AUTO_ASSIGN); + if ((rc = assign_irq_vector(AUTO_ASSIGN)) < 0) + panic("%s: out of interrupt vectors!\n", __FUNCTION__); + dev->irq = rc; /* * attach the interrupt in the simulator, this does enable interrupts diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c index 7a8ae0f4b387..7dcb8582ae0d 100644 --- a/arch/ia64/hp/sim/simserial.c +++ b/arch/ia64/hp/sim/simserial.c @@ -982,7 +982,7 @@ static struct tty_operations hp_ops = { static int __init simrs_init (void) { - int i; + int i, rc; struct serial_state *state; if (!ia64_platform_is("hpsim")) @@ -1017,7 +1017,10 @@ simrs_init (void) if (state->type == PORT_UNKNOWN) continue; if (!state->irq) { - state->irq = assign_irq_vector(AUTO_ASSIGN); + if ((rc = assign_irq_vector(AUTO_ASSIGN)) < 0) + panic("%s: out of interrupt vectors!\n", + __FUNCTION__); + state->irq = rc; ia64_ssc_connect_irq(KEYBOARD_INTR, state->irq); } diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile index b2e2f6509eb0..e1fb68ddec26 100644 --- a/arch/ia64/kernel/Makefile +++ b/arch/ia64/kernel/Makefile @@ -17,6 +17,7 @@ obj-$(CONFIG_IA64_PALINFO) += palinfo.o obj-$(CONFIG_IOSAPIC) += iosapic.o obj-$(CONFIG_MODULES) += module.o obj-$(CONFIG_SMP) += smp.o smpboot.o domain.o +obj-$(CONFIG_NUMA) += numa.o obj-$(CONFIG_PERFMON) += perfmon_default_smpl.o obj-$(CONFIG_IA64_CYCLONE) += cyclone.o obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index cda06f88c66e..9609f243e5d0 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c @@ -11,6 +11,7 @@ * Copyright (C) 2001 Jenna Hall <jenna.s.hall@intel.com> * Copyright (C) 2001 Takayoshi Kochi <t-kochi@bq.jp.nec.com> * Copyright (C) 2002 Erich Focht <efocht@ess.nec.de> + * Copyright (C) 2004 Ashok Raj <ashok.raj@intel.com> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * @@ -67,6 +68,11 @@ EXPORT_SYMBOL(pm_power_off); unsigned char acpi_kbd_controller_present = 1; unsigned char acpi_legacy_devices; +static unsigned int __initdata acpi_madt_rev; + +unsigned int acpi_cpei_override; +unsigned int acpi_cpei_phys_cpuid; + #define MAX_SAPICS 256 u16 ia64_acpiid_to_sapicid[MAX_SAPICS] = { [0 ... MAX_SAPICS - 1] = -1 }; @@ -265,10 +271,56 @@ acpi_parse_plat_int_src ( (plintsrc->flags.trigger == 1) ? 
IOSAPIC_EDGE : IOSAPIC_LEVEL); platform_intr_list[plintsrc->type] = vector; + if (acpi_madt_rev > 1) { + acpi_cpei_override = plintsrc->plint_flags.cpei_override_flag; + } + + /* + * Save the physical id, so we can check when its being removed + */ + acpi_cpei_phys_cpuid = ((plintsrc->id << 8) | (plintsrc->eid)) & 0xffff; + return 0; } +unsigned int can_cpei_retarget(void) +{ + extern int cpe_vector; + + /* + * Only if CPEI is supported and the override flag + * is present, otherwise return that its re-targettable + * if we are in polling mode. + */ + if (cpe_vector > 0 && !acpi_cpei_override) + return 0; + else + return 1; +} + +unsigned int is_cpu_cpei_target(unsigned int cpu) +{ + unsigned int logical_id; + + logical_id = cpu_logical_id(acpi_cpei_phys_cpuid); + + if (logical_id == cpu) + return 1; + else + return 0; +} + +void set_cpei_target_cpu(unsigned int cpu) +{ + acpi_cpei_phys_cpuid = cpu_physical_id(cpu); +} + +unsigned int get_cpei_target_cpu(void) +{ + return acpi_cpei_phys_cpuid; +} + static int __init acpi_parse_int_src_ovr ( acpi_table_entry_header *header, const unsigned long end) @@ -326,6 +378,8 @@ acpi_parse_madt (unsigned long phys_addr, unsigned long size) acpi_madt = (struct acpi_table_madt *) __va(phys_addr); + acpi_madt_rev = acpi_madt->header.revision; + /* remember the value for reference after free_initmem() */ #ifdef CONFIG_ITANIUM has_8259 = 1; /* Firmware on old Itanium systems is broken */ @@ -640,9 +694,11 @@ acpi_boot_init (void) if (smp_boot_data.cpu_phys_id[cpu] != hard_smp_processor_id()) node_cpuid[i++].phys_id = smp_boot_data.cpu_phys_id[cpu]; } - build_cpu_to_node_map(); # endif #endif +#ifdef CONFIG_ACPI_NUMA + build_cpu_to_node_map(); +#endif /* Make boot-up look pretty */ printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus, total_cpus); return 0; diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index 69f88d561d62..bb9a506deb78 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S @@ -1249,7 +1249,7 @@ ENTRY(sys_rt_sigreturn) stf.spill [r17]=f11 adds out0=16,sp // out0 = &sigscratch br.call.sptk.many rp=ia64_rt_sigreturn -.ret19: .restore sp 0 +.ret19: .restore sp,0 adds sp=16,sp ;; ld8 r9=[sp] // load new ar.unat diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c index c170be095ccd..7936b62f7a2e 100644 --- a/arch/ia64/kernel/iosapic.c +++ b/arch/ia64/kernel/iosapic.c @@ -489,8 +489,6 @@ static int iosapic_find_sharable_vector (unsigned long trigger, unsigned long po } } } - if (vector < 0) - panic("%s: out of interrupt vectors!\n", __FUNCTION__); return vector; } @@ -506,6 +504,8 @@ iosapic_reassign_vector (int vector) if (!list_empty(&iosapic_intr_info[vector].rtes)) { new_vector = assign_irq_vector(AUTO_ASSIGN); + if (new_vector < 0) + panic("%s: out of interrupt vectors!\n", __FUNCTION__); printk(KERN_INFO "Reassigning vector %d to %d\n", vector, new_vector); memcpy(&iosapic_intr_info[new_vector], &iosapic_intr_info[vector], sizeof(struct iosapic_intr_info)); @@ -734,9 +734,12 @@ again: spin_unlock_irqrestore(&iosapic_lock, flags); /* If vector is running out, we try to find a sharable vector */ - vector = assign_irq_vector_nopanic(AUTO_ASSIGN); - if (vector < 0) + vector = assign_irq_vector(AUTO_ASSIGN); + if (vector < 0) { vector = iosapic_find_sharable_vector(trigger, polarity); + if (vector < 0) + panic("%s: out of interrupt vectors!\n", __FUNCTION__); + } spin_lock_irqsave(&irq_descp(vector)->lock, flags); spin_lock(&iosapic_lock); @@ -884,6 +887,8 @@ 
iosapic_register_platform_intr (u32 int_type, unsigned int gsi, break; case ACPI_INTERRUPT_INIT: vector = assign_irq_vector(AUTO_ASSIGN); + if (vector < 0) + panic("%s: out of interrupt vectors!\n", __FUNCTION__); delivery = IOSAPIC_INIT; break; case ACPI_INTERRUPT_CPEI: diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c index 4fe60c7a2e90..6c4d59fd0364 100644 --- a/arch/ia64/kernel/irq_ia64.c +++ b/arch/ia64/kernel/irq_ia64.c @@ -63,30 +63,19 @@ EXPORT_SYMBOL(isa_irq_to_vector_map); static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_NUM_DEVICE_VECTORS)]; int -assign_irq_vector_nopanic (int irq) +assign_irq_vector (int irq) { int pos, vector; again: pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS); vector = IA64_FIRST_DEVICE_VECTOR + pos; if (vector > IA64_LAST_DEVICE_VECTOR) - return -1; + return -ENOSPC; if (test_and_set_bit(pos, ia64_vector_mask)) goto again; return vector; } -int -assign_irq_vector (int irq) -{ - int vector = assign_irq_vector_nopanic(irq); - - if (vector < 0) - panic("assign_irq_vector: out of interrupt vectors!"); - - return vector; -} - void free_irq_vector (int vector) { diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c index 3aa3167edbec..884f5cd27d8a 100644 --- a/arch/ia64/kernel/kprobes.c +++ b/arch/ia64/kernel/kprobes.c @@ -713,7 +713,7 @@ static struct kprobe trampoline_p = { .pre_handler = trampoline_probe_handler }; -int __init arch_init(void) +int __init arch_init_kprobes(void) { trampoline_p.addr = (kprobe_opcode_t *)((struct fnptr *)kretprobe_trampoline)->ip; diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 736e328b5e61..4ebbf3974381 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -271,7 +271,7 @@ ia64_mca_log_sal_error_record(int sal_info_type) #ifdef CONFIG_ACPI -static int cpe_vector = -1; +int cpe_vector = -1; static irqreturn_t ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs) diff --git a/arch/ia64/kernel/numa.c b/arch/ia64/kernel/numa.c new file mode 100644 index 000000000000..a68ce6678092 --- /dev/null +++ b/arch/ia64/kernel/numa.c @@ -0,0 +1,57 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * ia64 kernel NUMA specific stuff + * + * Copyright (C) 2002 Erich Focht <efocht@ess.nec.de> + * Copyright (C) 2004 Silicon Graphics, Inc. 
+ * Jesse Barnes <jbarnes@sgi.com> + */ +#include <linux/config.h> +#include <linux/topology.h> +#include <linux/module.h> +#include <asm/processor.h> +#include <asm/smp.h> + +u8 cpu_to_node_map[NR_CPUS] __cacheline_aligned; +EXPORT_SYMBOL(cpu_to_node_map); + +cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned; + +/** + * build_cpu_to_node_map - setup cpu to node and node to cpumask arrays + * + * Build cpu to node mapping and initialize the per node cpu masks using + * info from the node_cpuid array handed to us by ACPI. + */ +void __init build_cpu_to_node_map(void) +{ + int cpu, i, node; + + for(node=0; node < MAX_NUMNODES; node++) + cpus_clear(node_to_cpu_mask[node]); + + for(cpu = 0; cpu < NR_CPUS; ++cpu) { + node = -1; + for (i = 0; i < NR_CPUS; ++i) + if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) { + node = node_cpuid[i].nid; + break; + } + cpu_to_node_map[cpu] = (node >= 0) ? node : 0; + if (node >= 0) + cpu_set(cpu, node_to_cpu_mask[node]); + } +} diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 6407bff6bfd7..b8ebb8e427ef 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -37,7 +37,6 @@ #include <linux/vfs.h> #include <linux/pagemap.h> #include <linux/mount.h> -#include <linux/version.h> #include <linux/bitops.h> #include <asm/errno.h> diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 6e35bff05d59..e484910246ad 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -196,6 +196,7 @@ update_pal_halt_status(int status) void default_idle (void) { + local_irq_enable(); while (!need_resched()) if (can_do_pal_halt) safe_halt(); diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index 2693e1522d7c..5c7c95737bbf 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c @@ -40,6 +40,8 @@ #include <linux/serial_core.h> #include <linux/efi.h> #include <linux/initrd.h> +#include <linux/platform.h> +#include <linux/pm.h> #include <asm/ia32.h> #include <asm/machvec.h> @@ -783,6 +785,7 @@ cpu_init (void) /* size of physical stacked register partition plus 8 bytes: */ __get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8; platform_cpu_init(); + pm_idle = default_idle; } void diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c index edd9f07860b2..b8a0a7d257a9 100644 --- a/arch/ia64/kernel/signal.c +++ b/arch/ia64/kernel/signal.c @@ -143,6 +143,7 @@ restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr) __copy_from_user(current->thread.fph, &sc->sc_fr[32], 96*16); psr->mfh = 0; /* drop signal handler's fph contents... */ + preempt_disable(); if (psr->dfh) ia64_drop_fpu(current); else { @@ -150,6 +151,7 @@ restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr) __ia64_load_fpu(current->thread.fph); ia64_set_local_fpu_owner(current); } + preempt_enable(); } return err; } diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index 623b0a546709..7d72c0d872b3 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c @@ -525,47 +525,6 @@ smp_build_cpu_map (void) } } -#ifdef CONFIG_NUMA - -/* on which node is each logical CPU (one cacheline even for 64 CPUs) */ -u8 cpu_to_node_map[NR_CPUS] __cacheline_aligned; -EXPORT_SYMBOL(cpu_to_node_map); -/* which logical CPUs are on which nodes */ -cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned; - -/* - * Build cpu to node mapping and initialize the per node cpu masks. 
- */ -void __init -build_cpu_to_node_map (void) -{ - int cpu, i, node; - - for(node=0; node<MAX_NUMNODES; node++) - cpus_clear(node_to_cpu_mask[node]); - for(cpu = 0; cpu < NR_CPUS; ++cpu) { - /* - * All Itanium NUMA platforms I know use ACPI, so maybe we - * can drop this ifdef completely. [EF] - */ -#ifdef CONFIG_ACPI_NUMA - node = -1; - for (i = 0; i < NR_CPUS; ++i) - if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) { - node = node_cpuid[i].nid; - break; - } -#else -# error Fixme: Dunno how to build CPU-to-node map. -#endif - cpu_to_node_map[cpu] = (node >= 0) ? node : 0; - if (node >= 0) - cpu_set(cpu, node_to_cpu_mask[node]); - } -} - -#endif /* CONFIG_NUMA */ - /* * Cycle through the APs sending Wakeup IPIs to boot each. */ diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c index f1aafd4c05f9..d8030f3bd865 100644 --- a/arch/ia64/kernel/topology.c +++ b/arch/ia64/kernel/topology.c @@ -36,6 +36,13 @@ int arch_register_cpu(int num) parent = &sysfs_nodes[cpu_to_node(num)]; #endif /* CONFIG_NUMA */ + /* + * If CPEI cannot be re-targetted, and this is + * CPEI target, then dont create the control file + */ + if (!can_cpei_retarget() && is_cpu_cpei_target(num)) + sysfs_cpus[num].cpu.no_control = 1; + return register_cpu(&sysfs_cpus[num].cpu, num, parent); } diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c index e7e520d90f03..4440c8343fa4 100644 --- a/arch/ia64/kernel/traps.c +++ b/arch/ia64/kernel/traps.c @@ -90,14 +90,16 @@ die (const char *str, struct pt_regs *regs, long err) .lock_owner_depth = 0 }; static int die_counter; + int cpu = get_cpu(); - if (die.lock_owner != smp_processor_id()) { + if (die.lock_owner != cpu) { console_verbose(); spin_lock_irq(&die.lock); - die.lock_owner = smp_processor_id(); + die.lock_owner = cpu; die.lock_owner_depth = 0; bust_spinlocks(1); } + put_cpu(); if (++die.lock_owner_depth < 3) { printk("%s[%d]: %s %ld [%d]\n", diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index f3fd528ead3b..b5c90e548195 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c @@ -44,150 +44,7 @@ struct early_node_data { }; static struct early_node_data mem_data[MAX_NUMNODES] __initdata; - -/** - * reassign_cpu_only_nodes - called from find_memory to move CPU-only nodes to a memory node - * - * This function will move nodes with only CPUs (no memory) - * to a node with memory which is at the minimum numa_slit distance. - * Any reassigments will result in the compression of the nodes - * and renumbering the nid values where appropriate. - * The static declarations below are to avoid large stack size which - * makes the code not re-entrant. - */ -static void __init reassign_cpu_only_nodes(void) -{ - struct node_memblk_s *p; - int i, j, k, nnode, nid, cpu, cpunid, pxm; - u8 cslit, slit; - static DECLARE_BITMAP(nodes_with_mem, MAX_NUMNODES) __initdata; - static u8 numa_slit_fix[MAX_NUMNODES * MAX_NUMNODES] __initdata; - static int node_flip[MAX_NUMNODES] __initdata; - static int old_nid_map[NR_CPUS] __initdata; - - for (nnode = 0, p = &node_memblk[0]; p < &node_memblk[num_node_memblks]; p++) - if (!test_bit(p->nid, (void *) nodes_with_mem)) { - set_bit(p->nid, (void *) nodes_with_mem); - nnode++; - } - - /* - * All nids with memory. - */ - if (nnode == num_online_nodes()) - return; - - /* - * Change nids and attempt to migrate CPU-only nodes - * to the best numa_slit (closest neighbor) possible. 
- * For reassigned CPU nodes a nid can't be arrived at - * until after this loop because the target nid's new - * identity might not have been established yet. So - * new nid values are fabricated above num_online_nodes() and - * mapped back later to their true value. - */ - /* MCD - This code is a bit complicated, but may be unnecessary now. - * We can now handle much more interesting node-numbering. - * The old requirement that 0 <= nid <= numnodes <= MAX_NUMNODES - * and that there be no holes in the numbering 0..numnodes - * has become simply 0 <= nid <= MAX_NUMNODES. - */ - nid = 0; - for_each_online_node(i) { - if (test_bit(i, (void *) nodes_with_mem)) { - /* - * Save original nid value for numa_slit - * fixup and node_cpuid reassignments. - */ - node_flip[nid] = i; - - if (i == nid) { - nid++; - continue; - } - - for (p = &node_memblk[0]; p < &node_memblk[num_node_memblks]; p++) - if (p->nid == i) - p->nid = nid; - - cpunid = nid; - nid++; - } else - cpunid = MAX_NUMNODES; - - for (cpu = 0; cpu < NR_CPUS; cpu++) - if (node_cpuid[cpu].nid == i) { - /* - * For nodes not being reassigned just - * fix the cpu's nid and reverse pxm map - */ - if (cpunid < MAX_NUMNODES) { - pxm = nid_to_pxm_map[i]; - pxm_to_nid_map[pxm] = - node_cpuid[cpu].nid = cpunid; - continue; - } - - /* - * For nodes being reassigned, find best node by - * numa_slit information and then make a temporary - * nid value based on current nid and num_online_nodes(). - */ - slit = 0xff; - k = 2*num_online_nodes(); - for_each_online_node(j) { - if (i == j) - continue; - else if (test_bit(j, (void *) nodes_with_mem)) { - cslit = numa_slit[i * num_online_nodes() + j]; - if (cslit < slit) { - k = num_online_nodes() + j; - slit = cslit; - } - } - } - - /* save old nid map so we can update the pxm */ - old_nid_map[cpu] = node_cpuid[cpu].nid; - node_cpuid[cpu].nid = k; - } - } - - /* - * Fixup temporary nid values for CPU-only nodes. - */ - for (cpu = 0; cpu < NR_CPUS; cpu++) - if (node_cpuid[cpu].nid == (2*num_online_nodes())) { - pxm = nid_to_pxm_map[old_nid_map[cpu]]; - pxm_to_nid_map[pxm] = node_cpuid[cpu].nid = nnode - 1; - } else { - for (i = 0; i < nnode; i++) { - if (node_flip[i] != (node_cpuid[cpu].nid - num_online_nodes())) - continue; - - pxm = nid_to_pxm_map[old_nid_map[cpu]]; - pxm_to_nid_map[pxm] = node_cpuid[cpu].nid = i; - break; - } - } - - /* - * Fix numa_slit by compressing from larger - * nid array to reduced nid array. - */ - for (i = 0; i < nnode; i++) - for (j = 0; j < nnode; j++) - numa_slit_fix[i * nnode + j] = - numa_slit[node_flip[i] * num_online_nodes() + node_flip[j]]; - - memcpy(numa_slit, numa_slit_fix, sizeof (numa_slit)); - - nodes_clear(node_online_map); - for (i = 0; i < nnode; i++) - node_set_online(i); - - return; -} +static nodemask_t memory_less_mask __initdata; /* * To prevent cache aliasing effects, align per-node structures so that they @@ -233,44 +90,101 @@ static int __init build_node_maps(unsigned long start, unsigned long len, } /** - * early_nr_phys_cpus_node - return number of physical cpus on a given node + * early_nr_cpus_node - return number of cpus on a given node * @node: node to check * - * Count the number of physical cpus on @node. These are cpus that actually - * exist. We can't use nr_cpus_node() yet because + * Count the number of cpus on @node. We can't use nr_cpus_node() yet because * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been - * called yet. + * called yet. Note that node 0 will also count all non-existent cpus. 
*/ -static int early_nr_phys_cpus_node(int node) +static int __init early_nr_cpus_node(int node) { int cpu, n = 0; for (cpu = 0; cpu < NR_CPUS; cpu++) if (node == node_cpuid[cpu].nid) - if ((cpu == 0) || node_cpuid[cpu].phys_id) - n++; + n++; return n; } +/** + * compute_pernodesize - compute size of pernode data + * @node: the node id. + */ +static unsigned long __init compute_pernodesize(int node) +{ + unsigned long pernodesize = 0, cpus; + + cpus = early_nr_cpus_node(node); + pernodesize += PERCPU_PAGE_SIZE * cpus; + pernodesize += node * L1_CACHE_BYTES; + pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t)); + pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data)); + pernodesize = PAGE_ALIGN(pernodesize); + return pernodesize; +} /** - * early_nr_cpus_node - return number of cpus on a given node - * @node: node to check + * per_cpu_node_setup - setup per-cpu areas on each node + * @cpu_data: per-cpu area on this node + * @node: node to setup * - * Count the number of cpus on @node. We can't use nr_cpus_node() yet because - * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been - * called yet. Note that node 0 will also count all non-existent cpus. + * Copy the static per-cpu data into the region we just set aside and then + * setup __per_cpu_offset for each CPU on this node. Return a pointer to + * the end of the area. */ -static int early_nr_cpus_node(int node) +static void *per_cpu_node_setup(void *cpu_data, int node) { - int cpu, n = 0; +#ifdef CONFIG_SMP + int cpu; - for (cpu = 0; cpu < NR_CPUS; cpu++) - if (node == node_cpuid[cpu].nid) - n++; + for (cpu = 0; cpu < NR_CPUS; cpu++) { + if (node == node_cpuid[cpu].nid) { + memcpy(__va(cpu_data), __phys_per_cpu_start, + __per_cpu_end - __per_cpu_start); + __per_cpu_offset[cpu] = (char*)__va(cpu_data) - + __per_cpu_start; + cpu_data += PERCPU_PAGE_SIZE; + } + } +#endif + return cpu_data; +} - return n; +/** + * fill_pernode - initialize pernode data. + * @node: the node id. + * @pernode: physical address of pernode data + * @pernodesize: size of the pernode data + */ +static void __init fill_pernode(int node, unsigned long pernode, + unsigned long pernodesize) +{ + void *cpu_data; + int cpus = early_nr_cpus_node(node); + struct bootmem_data *bdp = &mem_data[node].bootmem_data; + + mem_data[node].pernode_addr = pernode; + mem_data[node].pernode_size = pernodesize; + memset(__va(pernode), 0, pernodesize); + + cpu_data = (void *)pernode; + pernode += PERCPU_PAGE_SIZE * cpus; + pernode += node * L1_CACHE_BYTES; + + mem_data[node].pgdat = __va(pernode); + pernode += L1_CACHE_ALIGN(sizeof(pg_data_t)); + + mem_data[node].node_data = __va(pernode); + pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data)); + + mem_data[node].pgdat->bdata = bdp; + pernode += L1_CACHE_ALIGN(sizeof(pg_data_t)); + + cpu_data = per_cpu_node_setup(cpu_data, node); + + return; } /** @@ -304,9 +218,8 @@ static int early_nr_cpus_node(int node) static int __init find_pernode_space(unsigned long start, unsigned long len, int node) { - unsigned long epfn, cpu, cpus, phys_cpus; + unsigned long epfn; unsigned long pernodesize = 0, pernode, pages, mapsize; - void *cpu_data; struct bootmem_data *bdp = &mem_data[node].bootmem_data; epfn = (start + len) >> PAGE_SHIFT; @@ -329,49 +242,12 @@ static int __init find_pernode_space(unsigned long start, unsigned long len, * Calculate total size needed, incl. what's necessary * for good alignment and alias prevention. 
*/ - cpus = early_nr_cpus_node(node); - phys_cpus = early_nr_phys_cpus_node(node); - pernodesize += PERCPU_PAGE_SIZE * cpus; - pernodesize += node * L1_CACHE_BYTES; - pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t)); - pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data)); - pernodesize = PAGE_ALIGN(pernodesize); + pernodesize = compute_pernodesize(node); pernode = NODEDATA_ALIGN(start, node); /* Is this range big enough for what we want to store here? */ - if (start + len > (pernode + pernodesize + mapsize)) { - mem_data[node].pernode_addr = pernode; - mem_data[node].pernode_size = pernodesize; - memset(__va(pernode), 0, pernodesize); - - cpu_data = (void *)pernode; - pernode += PERCPU_PAGE_SIZE * cpus; - pernode += node * L1_CACHE_BYTES; - - mem_data[node].pgdat = __va(pernode); - pernode += L1_CACHE_ALIGN(sizeof(pg_data_t)); - - mem_data[node].node_data = __va(pernode); - pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data)); - - mem_data[node].pgdat->bdata = bdp; - pernode += L1_CACHE_ALIGN(sizeof(pg_data_t)); - - /* - * Copy the static per-cpu data into the region we - * just set aside and then setup __per_cpu_offset - * for each CPU on this node. - */ - for (cpu = 0; cpu < NR_CPUS; cpu++) { - if (node == node_cpuid[cpu].nid) { - memcpy(__va(cpu_data), __phys_per_cpu_start, - __per_cpu_end - __per_cpu_start); - __per_cpu_offset[cpu] = (char*)__va(cpu_data) - - __per_cpu_start; - cpu_data += PERCPU_PAGE_SIZE; - } - } - } + if (start + len > (pernode + pernodesize + mapsize)) + fill_pernode(node, pernode, pernodesize); return 0; } @@ -411,6 +287,9 @@ static void __init reserve_pernode_space(void) for_each_online_node(node) { pg_data_t *pdp = mem_data[node].pgdat; + if (node_isset(node, memory_less_mask)) + continue; + bdp = pdp->bdata; /* First the bootmem_map itself */ @@ -436,8 +315,8 @@ static void __init reserve_pernode_space(void) */ static void __init initialize_pernode_data(void) { - int cpu, node; pg_data_t *pgdat_list[MAX_NUMNODES]; + int cpu, node; for_each_online_node(node) pgdat_list[node] = mem_data[node].pgdat; @@ -447,12 +326,99 @@ static void __init initialize_pernode_data(void) memcpy(mem_data[node].node_data->pg_data_ptrs, pgdat_list, sizeof(pgdat_list)); } - +#ifdef CONFIG_SMP /* Set the node_data pointer for each per-cpu struct */ for (cpu = 0; cpu < NR_CPUS; cpu++) { node = node_cpuid[cpu].nid; per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data; } +#else + { + struct cpuinfo_ia64 *cpu0_cpu_info; + cpu = 0; + node = node_cpuid[cpu].nid; + cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start + + ((char *)&per_cpu__cpu_info - __per_cpu_start)); + cpu0_cpu_info->node_data = mem_data[node].node_data; + } +#endif /* CONFIG_SMP */ +} + +/** + * memory_less_node_alloc - * attempt to allocate memory on the best NUMA slit + * node but fall back to any other node when __alloc_bootmem_node fails + * for best. 
+ * @nid: node id + * @pernodesize: size of this node's pernode data + * @align: alignment to use for this node's pernode data + */ +static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize, + unsigned long align) +{ + void *ptr = NULL; + u8 best = 0xff; + int bestnode = -1, node; + + for_each_online_node(node) { + if (node_isset(node, memory_less_mask)) + continue; + else if (node_distance(nid, node) < best) { + best = node_distance(nid, node); + bestnode = node; + } + } + + ptr = __alloc_bootmem_node(mem_data[bestnode].pgdat, + pernodesize, align, __pa(MAX_DMA_ADDRESS)); + + if (!ptr) + panic("NO memory for memory less node\n"); + return ptr; +} + +/** + * pgdat_insert - insert the pgdat into global pgdat_list + * @pgdat: the pgdat for a node. + */ +static void __init pgdat_insert(pg_data_t *pgdat) +{ + pg_data_t *prev = NULL, *next; + + for_each_pgdat(next) + if (pgdat->node_id < next->node_id) + break; + else + prev = next; + + if (prev) { + prev->pgdat_next = pgdat; + pgdat->pgdat_next = next; + } else { + pgdat->pgdat_next = pgdat_list; + pgdat_list = pgdat; + } + + return; +} + +/** + * memory_less_nodes - allocate and initialize CPU only nodes pernode + * information. + */ +static void __init memory_less_nodes(void) +{ + unsigned long pernodesize; + void *pernode; + int node; + + for_each_node_mask(node, memory_less_mask) { + pernodesize = compute_pernodesize(node); + pernode = memory_less_node_alloc(node, pernodesize, + (node) ? (node * PERCPU_PAGE_SIZE) : (1024*1024)); + fill_pernode(node, __pa(pernode), pernodesize); + } + + return; } /** @@ -472,16 +438,19 @@ void __init find_memory(void) node_set_online(0); } + nodes_or(memory_less_mask, memory_less_mask, node_online_map); min_low_pfn = -1; max_low_pfn = 0; - if (num_online_nodes() > 1) - reassign_cpu_only_nodes(); - /* These actually end up getting called by call_pernode_memory() */ efi_memmap_walk(filter_rsvd_memory, build_node_maps); efi_memmap_walk(filter_rsvd_memory, find_pernode_space); + for_each_online_node(node) + if (mem_data[node].bootmem_data.node_low_pfn) { + node_clear(node, memory_less_mask); + mem_data[node].min_pfn = ~0UL; + } /* * Initialize the boot memory maps in reverse order since that's * what the bootmem allocator expects @@ -492,17 +461,14 @@ void __init find_memory(void) if (!node_online(node)) continue; + else if (node_isset(node, memory_less_mask)) + continue; bdp = &mem_data[node].bootmem_data; pernode = mem_data[node].pernode_addr; pernodesize = mem_data[node].pernode_size; map = pernode + pernodesize; - /* Sanity check... 
*/ - if (!pernode) - panic("pernode space for node %d " - "could not be allocated!", node); - init_bootmem_node(mem_data[node].pgdat, map>>PAGE_SHIFT, bdp->node_boot_start>>PAGE_SHIFT, @@ -512,6 +478,7 @@ void __init find_memory(void) efi_memmap_walk(filter_rsvd_memory, free_node_bootmem); reserve_pernode_space(); + memory_less_nodes(); initialize_pernode_data(); max_pfn = max_low_pfn; @@ -519,6 +486,7 @@ void __init find_memory(void) find_initrd(); } +#ifdef CONFIG_SMP /** * per_cpu_init - setup per-cpu variables * @@ -529,15 +497,15 @@ void *per_cpu_init(void) { int cpu; - if (smp_processor_id() == 0) { - for (cpu = 0; cpu < NR_CPUS; cpu++) { - per_cpu(local_per_cpu_offset, cpu) = - __per_cpu_offset[cpu]; - } - } + if (smp_processor_id() != 0) + return __per_cpu_start + __per_cpu_offset[smp_processor_id()]; + + for (cpu = 0; cpu < NR_CPUS; cpu++) + per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu]; return __per_cpu_start + __per_cpu_offset[smp_processor_id()]; } +#endif /* CONFIG_SMP */ /** * show_mem - give short summary of memory stats @@ -680,12 +648,13 @@ void __init paging_init(void) max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT; - /* so min() will work in count_node_pages */ - for_each_online_node(node) - mem_data[node].min_pfn = ~0UL; - efi_memmap_walk(filter_rsvd_memory, count_node_pages); + vmalloc_end -= PAGE_ALIGN(max_low_pfn * sizeof(struct page)); + vmem_map = (struct page *) vmalloc_end; + efi_memmap_walk(create_mem_map_page_table, NULL); + printk("Virtual mem_map starts at 0x%p\n", vmem_map); + for_each_online_node(node) { memset(zones_size, 0, sizeof(zones_size)); memset(zholes_size, 0, sizeof(zholes_size)); @@ -719,15 +688,6 @@ void __init paging_init(void) mem_data[node].num_dma_physpages); } - if (node == 0) { - vmalloc_end -= - PAGE_ALIGN(max_low_pfn * sizeof(struct page)); - vmem_map = (struct page *) vmalloc_end; - - efi_memmap_walk(create_mem_map_page_table, NULL); - printk("Virtual mem_map starts at 0x%p\n", vmem_map); - } - pfn_offset = mem_data[node].min_pfn; NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset; @@ -735,5 +695,11 @@ void __init paging_init(void) pfn_offset, zholes_size); } + /* + * Make memory less nodes become a member of the known nodes. + */ + for_each_node_mask(node, memory_less_mask) + pgdat_insert(mem_data[node].pgdat); + zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page)); } diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 4eb2f52b87a1..65f9958db9f0 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -597,7 +597,8 @@ mem_init (void) kclist_add(&kcore_kernel, _stext, _end - _stext); for_each_pgdat(pgdat) - totalram_pages += free_all_bootmem_node(pgdat); + if (pgdat->bdata->node_bootmem_map) + totalram_pages += free_all_bootmem_node(pgdat); reserved_pages = 0; efi_memmap_walk(count_reserved_pages, &reserved_pages); diff --git a/arch/ia64/sn/include/pci/pcibr_provider.h b/arch/ia64/sn/include/pci/pcibr_provider.h deleted file mode 100644 index 1cd291d8badd..000000000000 --- a/arch/ia64/sn/include/pci/pcibr_provider.h +++ /dev/null @@ -1,151 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1992-1997,2000-2004 Silicon Graphics, Inc. All rights reserved. 
- */ -#ifndef _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H -#define _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H - -/* Workarounds */ -#define PV907516 (1 << 1) /* TIOCP: Don't write the write buffer flush reg */ - -#define BUSTYPE_MASK 0x1 - -/* Macros given a pcibus structure */ -#define IS_PCIX(ps) ((ps)->pbi_bridge_mode & BUSTYPE_MASK) -#define IS_PCI_BRIDGE_ASIC(asic) (asic == PCIIO_ASIC_TYPE_PIC || \ - asic == PCIIO_ASIC_TYPE_TIOCP) -#define IS_PIC_SOFT(ps) (ps->pbi_bridge_type == PCIBR_BRIDGETYPE_PIC) - - -/* - * The different PCI Bridge types supported on the SGI Altix platforms - */ -#define PCIBR_BRIDGETYPE_UNKNOWN -1 -#define PCIBR_BRIDGETYPE_PIC 2 -#define PCIBR_BRIDGETYPE_TIOCP 3 - -/* - * Bridge 64bit Direct Map Attributes - */ -#define PCI64_ATTR_PREF (1ull << 59) -#define PCI64_ATTR_PREC (1ull << 58) -#define PCI64_ATTR_VIRTUAL (1ull << 57) -#define PCI64_ATTR_BAR (1ull << 56) -#define PCI64_ATTR_SWAP (1ull << 55) -#define PCI64_ATTR_VIRTUAL1 (1ull << 54) - -#define PCI32_LOCAL_BASE 0 -#define PCI32_MAPPED_BASE 0x40000000 -#define PCI32_DIRECT_BASE 0x80000000 - -#define IS_PCI32_MAPPED(x) ((uint64_t)(x) < PCI32_DIRECT_BASE && \ - (uint64_t)(x) >= PCI32_MAPPED_BASE) -#define IS_PCI32_DIRECT(x) ((uint64_t)(x) >= PCI32_MAPPED_BASE) - - -/* - * Bridge PMU Address Transaltion Entry Attibutes - */ -#define PCI32_ATE_V (0x1 << 0) -#define PCI32_ATE_CO (0x1 << 1) -#define PCI32_ATE_PREC (0x1 << 2) -#define PCI32_ATE_PREF (0x1 << 3) -#define PCI32_ATE_BAR (0x1 << 4) -#define PCI32_ATE_ADDR_SHFT 12 - -#define MINIMAL_ATES_REQUIRED(addr, size) \ - (IOPG(IOPGOFF(addr) + (size) - 1) == IOPG((size) - 1)) - -#define MINIMAL_ATE_FLAG(addr, size) \ - (MINIMAL_ATES_REQUIRED((uint64_t)addr, size) ? 1 : 0) - -/* bit 29 of the pci address is the SWAP bit */ -#define ATE_SWAPSHIFT 29 -#define ATE_SWAP_ON(x) ((x) |= (1 << ATE_SWAPSHIFT)) -#define ATE_SWAP_OFF(x) ((x) &= ~(1 << ATE_SWAPSHIFT)) - -/* - * I/O page size - */ -#if PAGE_SIZE < 16384 -#define IOPFNSHIFT 12 /* 4K per mapped page */ -#else -#define IOPFNSHIFT 14 /* 16K per mapped page */ -#endif - -#define IOPGSIZE (1 << IOPFNSHIFT) -#define IOPG(x) ((x) >> IOPFNSHIFT) -#define IOPGOFF(x) ((x) & (IOPGSIZE-1)) - -#define PCIBR_DEV_SWAP_DIR (1ull << 19) -#define PCIBR_CTRL_PAGE_SIZE (0x1 << 21) - -/* - * PMU resources. 
- */ -struct ate_resource{ - uint64_t *ate; - uint64_t num_ate; - uint64_t lowest_free_index; -}; - -struct pcibus_info { - struct pcibus_bussoft pbi_buscommon; /* common header */ - uint32_t pbi_moduleid; - short pbi_bridge_type; - short pbi_bridge_mode; - - struct ate_resource pbi_int_ate_resource; - uint64_t pbi_int_ate_size; - - uint64_t pbi_dir_xbase; - char pbi_hub_xid; - - uint64_t pbi_devreg[8]; - spinlock_t pbi_lock; - - uint32_t pbi_valid_devices; - uint32_t pbi_enabled_devices; -}; - -/* - * pcibus_info structure locking macros - */ -inline static unsigned long -pcibr_lock(struct pcibus_info *pcibus_info) -{ - unsigned long flag; - spin_lock_irqsave(&pcibus_info->pbi_lock, flag); - return(flag); -} -#define pcibr_unlock(pcibus_info, flag) spin_unlock_irqrestore(&pcibus_info->pbi_lock, flag) - -extern int pcibr_init_provider(void); -extern void *pcibr_bus_fixup(struct pcibus_bussoft *); -extern dma_addr_t pcibr_dma_map(struct pci_dev *, unsigned long, size_t); -extern dma_addr_t pcibr_dma_map_consistent(struct pci_dev *, unsigned long, size_t); -extern void pcibr_dma_unmap(struct pci_dev *, dma_addr_t, int); - -/* - * prototypes for the bridge asic register access routines in pcibr_reg.c - */ -extern void pcireg_control_bit_clr(struct pcibus_info *, uint64_t); -extern void pcireg_control_bit_set(struct pcibus_info *, uint64_t); -extern uint64_t pcireg_tflush_get(struct pcibus_info *); -extern uint64_t pcireg_intr_status_get(struct pcibus_info *); -extern void pcireg_intr_enable_bit_clr(struct pcibus_info *, uint64_t); -extern void pcireg_intr_enable_bit_set(struct pcibus_info *, uint64_t); -extern void pcireg_intr_addr_addr_set(struct pcibus_info *, int, uint64_t); -extern void pcireg_force_intr_set(struct pcibus_info *, int); -extern uint64_t pcireg_wrb_flush_get(struct pcibus_info *, int); -extern void pcireg_int_ate_set(struct pcibus_info *, int, uint64_t); -extern uint64_t * pcireg_int_ate_addr(struct pcibus_info *, int); -extern void pcibr_force_interrupt(struct sn_irq_info *sn_irq_info); -extern void pcibr_change_devices_irq(struct sn_irq_info *sn_irq_info); -extern int pcibr_ate_alloc(struct pcibus_info *, int); -extern void pcibr_ate_free(struct pcibus_info *, int); -extern void ate_write(struct pcibus_info *, int, int, uint64_t); -#endif diff --git a/arch/ia64/sn/include/pci/pic.h b/arch/ia64/sn/include/pci/pic.h deleted file mode 100644 index fd18acecb1e6..000000000000 --- a/arch/ia64/sn/include/pci/pic.h +++ /dev/null @@ -1,261 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved. - */ -#ifndef _ASM_IA64_SN_PCI_PIC_H -#define _ASM_IA64_SN_PCI_PIC_H - -/* - * PIC AS DEVICE ZERO - * ------------------ - * - * PIC handles PCI/X busses. PCI/X requires that the 'bridge' (i.e. PIC) - * be designated as 'device 0'. That is a departure from earlier SGI - * PCI bridges. Because of that we use config space 1 to access the - * config space of the first actual PCI device on the bus. - * Here's what the PIC manual says: - * - * The current PCI-X bus specification now defines that the parent - * hosts bus bridge (PIC for example) must be device 0 on bus 0. PIC - * reduced the total number of devices from 8 to 4 and removed the - * device registers and windows, now only supporting devices 0,1,2, and - * 3. PIC did leave all 8 configuration space windows. 
The reason was - * there was nothing to gain by removing them. Here in lies the problem. - * The device numbering we do using 0 through 3 is unrelated to the device - * numbering which PCI-X requires in configuration space. In the past we - * correlated Configs pace and our device space 0 <-> 0, 1 <-> 1, etc. - * PCI-X requires we start a 1, not 0 and currently the PX brick - * does associate our: - * - * device 0 with configuration space window 1, - * device 1 with configuration space window 2, - * device 2 with configuration space window 3, - * device 3 with configuration space window 4. - * - * The net effect is that all config space access are off-by-one with - * relation to other per-slot accesses on the PIC. - * Here is a table that shows some of that: - * - * Internal Slot# - * | - * | 0 1 2 3 - * ----------|--------------------------------------- - * config | 0x21000 0x22000 0x23000 0x24000 - * | - * even rrb | 0[0] n/a 1[0] n/a [] == implied even/odd - * | - * odd rrb | n/a 0[1] n/a 1[1] - * | - * int dev | 00 01 10 11 - * | - * ext slot# | 1 2 3 4 - * ----------|--------------------------------------- - */ - -#define PIC_ATE_TARGETID_SHFT 8 -#define PIC_HOST_INTR_ADDR 0x0000FFFFFFFFFFFFUL -#define PIC_PCI64_ATTR_TARG_SHFT 60 - - -/***************************************************************************** - *********************** PIC MMR structure mapping *************************** - *****************************************************************************/ - -/* NOTE: PIC WAR. PV#854697. PIC does not allow writes just to [31:0] - * of a 64-bit register. When writing PIC registers, always write the - * entire 64 bits. - */ - -struct pic { - - /* 0x000000-0x00FFFF -- Local Registers */ - - /* 0x000000-0x000057 -- Standard Widget Configuration */ - uint64_t p_wid_id; /* 0x000000 */ - uint64_t p_wid_stat; /* 0x000008 */ - uint64_t p_wid_err_upper; /* 0x000010 */ - uint64_t p_wid_err_lower; /* 0x000018 */ - #define p_wid_err p_wid_err_lower - uint64_t p_wid_control; /* 0x000020 */ - uint64_t p_wid_req_timeout; /* 0x000028 */ - uint64_t p_wid_int_upper; /* 0x000030 */ - uint64_t p_wid_int_lower; /* 0x000038 */ - #define p_wid_int p_wid_int_lower - uint64_t p_wid_err_cmdword; /* 0x000040 */ - uint64_t p_wid_llp; /* 0x000048 */ - uint64_t p_wid_tflush; /* 0x000050 */ - - /* 0x000058-0x00007F -- Bridge-specific Widget Configuration */ - uint64_t p_wid_aux_err; /* 0x000058 */ - uint64_t p_wid_resp_upper; /* 0x000060 */ - uint64_t p_wid_resp_lower; /* 0x000068 */ - #define p_wid_resp p_wid_resp_lower - uint64_t p_wid_tst_pin_ctrl; /* 0x000070 */ - uint64_t p_wid_addr_lkerr; /* 0x000078 */ - - /* 0x000080-0x00008F -- PMU & MAP */ - uint64_t p_dir_map; /* 0x000080 */ - uint64_t _pad_000088; /* 0x000088 */ - - /* 0x000090-0x00009F -- SSRAM */ - uint64_t p_map_fault; /* 0x000090 */ - uint64_t _pad_000098; /* 0x000098 */ - - /* 0x0000A0-0x0000AF -- Arbitration */ - uint64_t p_arb; /* 0x0000A0 */ - uint64_t _pad_0000A8; /* 0x0000A8 */ - - /* 0x0000B0-0x0000BF -- Number In A Can or ATE Parity Error */ - uint64_t p_ate_parity_err; /* 0x0000B0 */ - uint64_t _pad_0000B8; /* 0x0000B8 */ - - /* 0x0000C0-0x0000FF -- PCI/GIO */ - uint64_t p_bus_timeout; /* 0x0000C0 */ - uint64_t p_pci_cfg; /* 0x0000C8 */ - uint64_t p_pci_err_upper; /* 0x0000D0 */ - uint64_t p_pci_err_lower; /* 0x0000D8 */ - #define p_pci_err p_pci_err_lower - uint64_t _pad_0000E0[4]; /* 0x0000{E0..F8} */ - - /* 0x000100-0x0001FF -- Interrupt */ - uint64_t p_int_status; /* 0x000100 */ - uint64_t p_int_enable; /* 0x000108 */ - 
uint64_t p_int_rst_stat; /* 0x000110 */ - uint64_t p_int_mode; /* 0x000118 */ - uint64_t p_int_device; /* 0x000120 */ - uint64_t p_int_host_err; /* 0x000128 */ - uint64_t p_int_addr[8]; /* 0x0001{30,,,68} */ - uint64_t p_err_int_view; /* 0x000170 */ - uint64_t p_mult_int; /* 0x000178 */ - uint64_t p_force_always[8]; /* 0x0001{80,,,B8} */ - uint64_t p_force_pin[8]; /* 0x0001{C0,,,F8} */ - - /* 0x000200-0x000298 -- Device */ - uint64_t p_device[4]; /* 0x0002{00,,,18} */ - uint64_t _pad_000220[4]; /* 0x0002{20,,,38} */ - uint64_t p_wr_req_buf[4]; /* 0x0002{40,,,58} */ - uint64_t _pad_000260[4]; /* 0x0002{60,,,78} */ - uint64_t p_rrb_map[2]; /* 0x0002{80,,,88} */ - #define p_even_resp p_rrb_map[0] /* 0x000280 */ - #define p_odd_resp p_rrb_map[1] /* 0x000288 */ - uint64_t p_resp_status; /* 0x000290 */ - uint64_t p_resp_clear; /* 0x000298 */ - - uint64_t _pad_0002A0[12]; /* 0x0002{A0..F8} */ - - /* 0x000300-0x0003F8 -- Buffer Address Match Registers */ - struct { - uint64_t upper; /* 0x0003{00,,,F0} */ - uint64_t lower; /* 0x0003{08,,,F8} */ - } p_buf_addr_match[16]; - - /* 0x000400-0x0005FF -- Performance Monitor Registers (even only) */ - struct { - uint64_t flush_w_touch; /* 0x000{400,,,5C0} */ - uint64_t flush_wo_touch; /* 0x000{408,,,5C8} */ - uint64_t inflight; /* 0x000{410,,,5D0} */ - uint64_t prefetch; /* 0x000{418,,,5D8} */ - uint64_t total_pci_retry; /* 0x000{420,,,5E0} */ - uint64_t max_pci_retry; /* 0x000{428,,,5E8} */ - uint64_t max_latency; /* 0x000{430,,,5F0} */ - uint64_t clear_all; /* 0x000{438,,,5F8} */ - } p_buf_count[8]; - - - /* 0x000600-0x0009FF -- PCI/X registers */ - uint64_t p_pcix_bus_err_addr; /* 0x000600 */ - uint64_t p_pcix_bus_err_attr; /* 0x000608 */ - uint64_t p_pcix_bus_err_data; /* 0x000610 */ - uint64_t p_pcix_pio_split_addr; /* 0x000618 */ - uint64_t p_pcix_pio_split_attr; /* 0x000620 */ - uint64_t p_pcix_dma_req_err_attr; /* 0x000628 */ - uint64_t p_pcix_dma_req_err_addr; /* 0x000630 */ - uint64_t p_pcix_timeout; /* 0x000638 */ - - uint64_t _pad_000640[120]; /* 0x000{640,,,9F8} */ - - /* 0x000A00-0x000BFF -- PCI/X Read&Write Buffer */ - struct { - uint64_t p_buf_addr; /* 0x000{A00,,,AF0} */ - uint64_t p_buf_attr; /* 0X000{A08,,,AF8} */ - } p_pcix_read_buf_64[16]; - - struct { - uint64_t p_buf_addr; /* 0x000{B00,,,BE0} */ - uint64_t p_buf_attr; /* 0x000{B08,,,BE8} */ - uint64_t p_buf_valid; /* 0x000{B10,,,BF0} */ - uint64_t __pad1; /* 0x000{B18,,,BF8} */ - } p_pcix_write_buf_64[8]; - - /* End of Local Registers -- Start of Address Map space */ - - char _pad_000c00[0x010000 - 0x000c00]; - - /* 0x010000-0x011fff -- Internal ATE RAM (Auto Parity Generation) */ - uint64_t p_int_ate_ram[1024]; /* 0x010000-0x011fff */ - - /* 0x012000-0x013fff -- Internal ATE RAM (Manual Parity Generation) */ - uint64_t p_int_ate_ram_mp[1024]; /* 0x012000-0x013fff */ - - char _pad_014000[0x18000 - 0x014000]; - - /* 0x18000-0x197F8 -- PIC Write Request Ram */ - uint64_t p_wr_req_lower[256]; /* 0x18000 - 0x187F8 */ - uint64_t p_wr_req_upper[256]; /* 0x18800 - 0x18FF8 */ - uint64_t p_wr_req_parity[256]; /* 0x19000 - 0x197F8 */ - - char _pad_019800[0x20000 - 0x019800]; - - /* 0x020000-0x027FFF -- PCI Device Configuration Spaces */ - union { - uint8_t c[0x1000 / 1]; /* 0x02{0000,,,7FFF} */ - uint16_t s[0x1000 / 2]; /* 0x02{0000,,,7FFF} */ - uint32_t l[0x1000 / 4]; /* 0x02{0000,,,7FFF} */ - uint64_t d[0x1000 / 8]; /* 0x02{0000,,,7FFF} */ - union { - uint8_t c[0x100 / 1]; - uint16_t s[0x100 / 2]; - uint32_t l[0x100 / 4]; - uint64_t d[0x100 / 8]; - } f[8]; - } p_type0_cfg_dev[8]; /* 
0x02{0000,,,7FFF} */ - - /* 0x028000-0x028FFF -- PCI Type 1 Configuration Space */ - union { - uint8_t c[0x1000 / 1]; /* 0x028000-0x029000 */ - uint16_t s[0x1000 / 2]; /* 0x028000-0x029000 */ - uint32_t l[0x1000 / 4]; /* 0x028000-0x029000 */ - uint64_t d[0x1000 / 8]; /* 0x028000-0x029000 */ - union { - uint8_t c[0x100 / 1]; - uint16_t s[0x100 / 2]; - uint32_t l[0x100 / 4]; - uint64_t d[0x100 / 8]; - } f[8]; - } p_type1_cfg; /* 0x028000-0x029000 */ - - char _pad_029000[0x030000-0x029000]; - - /* 0x030000-0x030007 -- PCI Interrupt Acknowledge Cycle */ - union { - uint8_t c[8 / 1]; - uint16_t s[8 / 2]; - uint32_t l[8 / 4]; - uint64_t d[8 / 8]; - } p_pci_iack; /* 0x030000-0x030007 */ - - char _pad_030007[0x040000-0x030008]; - - /* 0x040000-0x030007 -- PCIX Special Cycle */ - union { - uint8_t c[8 / 1]; - uint16_t s[8 / 2]; - uint32_t l[8 / 4]; - uint64_t d[8 / 8]; - } p_pcix_cycle; /* 0x040000-0x040007 */ -}; - -#endif /* _ASM_IA64_SN_PCI_PIC_H */ diff --git a/arch/ia64/sn/include/pci/tiocp.h b/arch/ia64/sn/include/pci/tiocp.h deleted file mode 100644 index f07c83b2bf6e..000000000000 --- a/arch/ia64/sn/include/pci/tiocp.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2003-2004 Silicon Graphics, Inc. All rights reserved. - */ -#ifndef _ASM_IA64_SN_PCI_TIOCP_H -#define _ASM_IA64_SN_PCI_TIOCP_H - -#define TIOCP_HOST_INTR_ADDR 0x003FFFFFFFFFFFFFUL -#define TIOCP_PCI64_CMDTYPE_MEM (0x1ull << 60) - - -/***************************************************************************** - *********************** TIOCP MMR structure mapping *************************** - *****************************************************************************/ - -struct tiocp{ - - /* 0x000000-0x00FFFF -- Local Registers */ - - /* 0x000000-0x000057 -- (Legacy Widget Space) Configuration */ - uint64_t cp_id; /* 0x000000 */ - uint64_t cp_stat; /* 0x000008 */ - uint64_t cp_err_upper; /* 0x000010 */ - uint64_t cp_err_lower; /* 0x000018 */ - #define cp_err cp_err_lower - uint64_t cp_control; /* 0x000020 */ - uint64_t cp_req_timeout; /* 0x000028 */ - uint64_t cp_intr_upper; /* 0x000030 */ - uint64_t cp_intr_lower; /* 0x000038 */ - #define cp_intr cp_intr_lower - uint64_t cp_err_cmdword; /* 0x000040 */ - uint64_t _pad_000048; /* 0x000048 */ - uint64_t cp_tflush; /* 0x000050 */ - - /* 0x000058-0x00007F -- Bridge-specific Configuration */ - uint64_t cp_aux_err; /* 0x000058 */ - uint64_t cp_resp_upper; /* 0x000060 */ - uint64_t cp_resp_lower; /* 0x000068 */ - #define cp_resp cp_resp_lower - uint64_t cp_tst_pin_ctrl; /* 0x000070 */ - uint64_t cp_addr_lkerr; /* 0x000078 */ - - /* 0x000080-0x00008F -- PMU & MAP */ - uint64_t cp_dir_map; /* 0x000080 */ - uint64_t _pad_000088; /* 0x000088 */ - - /* 0x000090-0x00009F -- SSRAM */ - uint64_t cp_map_fault; /* 0x000090 */ - uint64_t _pad_000098; /* 0x000098 */ - - /* 0x0000A0-0x0000AF -- Arbitration */ - uint64_t cp_arb; /* 0x0000A0 */ - uint64_t _pad_0000A8; /* 0x0000A8 */ - - /* 0x0000B0-0x0000BF -- Number In A Can or ATE Parity Error */ - uint64_t cp_ate_parity_err; /* 0x0000B0 */ - uint64_t _pad_0000B8; /* 0x0000B8 */ - - /* 0x0000C0-0x0000FF -- PCI/GIO */ - uint64_t cp_bus_timeout; /* 0x0000C0 */ - uint64_t cp_pci_cfg; /* 0x0000C8 */ - uint64_t cp_pci_err_upper; /* 0x0000D0 */ - uint64_t cp_pci_err_lower; /* 0x0000D8 */ - #define cp_pci_err cp_pci_err_lower - uint64_t _pad_0000E0[4]; /* 0x0000{E0..F8} 
*/ - - /* 0x000100-0x0001FF -- Interrupt */ - uint64_t cp_int_status; /* 0x000100 */ - uint64_t cp_int_enable; /* 0x000108 */ - uint64_t cp_int_rst_stat; /* 0x000110 */ - uint64_t cp_int_mode; /* 0x000118 */ - uint64_t cp_int_device; /* 0x000120 */ - uint64_t cp_int_host_err; /* 0x000128 */ - uint64_t cp_int_addr[8]; /* 0x0001{30,,,68} */ - uint64_t cp_err_int_view; /* 0x000170 */ - uint64_t cp_mult_int; /* 0x000178 */ - uint64_t cp_force_always[8]; /* 0x0001{80,,,B8} */ - uint64_t cp_force_pin[8]; /* 0x0001{C0,,,F8} */ - - /* 0x000200-0x000298 -- Device */ - uint64_t cp_device[4]; /* 0x0002{00,,,18} */ - uint64_t _pad_000220[4]; /* 0x0002{20,,,38} */ - uint64_t cp_wr_req_buf[4]; /* 0x0002{40,,,58} */ - uint64_t _pad_000260[4]; /* 0x0002{60,,,78} */ - uint64_t cp_rrb_map[2]; /* 0x0002{80,,,88} */ - #define cp_even_resp cp_rrb_map[0] /* 0x000280 */ - #define cp_odd_resp cp_rrb_map[1] /* 0x000288 */ - uint64_t cp_resp_status; /* 0x000290 */ - uint64_t cp_resp_clear; /* 0x000298 */ - - uint64_t _pad_0002A0[12]; /* 0x0002{A0..F8} */ - - /* 0x000300-0x0003F8 -- Buffer Address Match Registers */ - struct { - uint64_t upper; /* 0x0003{00,,,F0} */ - uint64_t lower; /* 0x0003{08,,,F8} */ - } cp_buf_addr_match[16]; - - /* 0x000400-0x0005FF -- Performance Monitor Registers (even only) */ - struct { - uint64_t flush_w_touch; /* 0x000{400,,,5C0} */ - uint64_t flush_wo_touch; /* 0x000{408,,,5C8} */ - uint64_t inflight; /* 0x000{410,,,5D0} */ - uint64_t prefetch; /* 0x000{418,,,5D8} */ - uint64_t total_pci_retry; /* 0x000{420,,,5E0} */ - uint64_t max_pci_retry; /* 0x000{428,,,5E8} */ - uint64_t max_latency; /* 0x000{430,,,5F0} */ - uint64_t clear_all; /* 0x000{438,,,5F8} */ - } cp_buf_count[8]; - - - /* 0x000600-0x0009FF -- PCI/X registers */ - uint64_t cp_pcix_bus_err_addr; /* 0x000600 */ - uint64_t cp_pcix_bus_err_attr; /* 0x000608 */ - uint64_t cp_pcix_bus_err_data; /* 0x000610 */ - uint64_t cp_pcix_pio_split_addr; /* 0x000618 */ - uint64_t cp_pcix_pio_split_attr; /* 0x000620 */ - uint64_t cp_pcix_dma_req_err_attr; /* 0x000628 */ - uint64_t cp_pcix_dma_req_err_addr; /* 0x000630 */ - uint64_t cp_pcix_timeout; /* 0x000638 */ - - uint64_t _pad_000640[24]; /* 0x000{640,,,6F8} */ - - /* 0x000700-0x000737 -- Debug Registers */ - uint64_t cp_ct_debug_ctl; /* 0x000700 */ - uint64_t cp_br_debug_ctl; /* 0x000708 */ - uint64_t cp_mux3_debug_ctl; /* 0x000710 */ - uint64_t cp_mux4_debug_ctl; /* 0x000718 */ - uint64_t cp_mux5_debug_ctl; /* 0x000720 */ - uint64_t cp_mux6_debug_ctl; /* 0x000728 */ - uint64_t cp_mux7_debug_ctl; /* 0x000730 */ - - uint64_t _pad_000738[89]; /* 0x000{738,,,9F8} */ - - /* 0x000A00-0x000BFF -- PCI/X Read&Write Buffer */ - struct { - uint64_t cp_buf_addr; /* 0x000{A00,,,AF0} */ - uint64_t cp_buf_attr; /* 0X000{A08,,,AF8} */ - } cp_pcix_read_buf_64[16]; - - struct { - uint64_t cp_buf_addr; /* 0x000{B00,,,BE0} */ - uint64_t cp_buf_attr; /* 0x000{B08,,,BE8} */ - uint64_t cp_buf_valid; /* 0x000{B10,,,BF0} */ - uint64_t __pad1; /* 0x000{B18,,,BF8} */ - } cp_pcix_write_buf_64[8]; - - /* End of Local Registers -- Start of Address Map space */ - - char _pad_000c00[0x010000 - 0x000c00]; - - /* 0x010000-0x011FF8 -- Internal ATE RAM (Auto Parity Generation) */ - uint64_t cp_int_ate_ram[1024]; /* 0x010000-0x011FF8 */ - - char _pad_012000[0x14000 - 0x012000]; - - /* 0x014000-0x015FF8 -- Internal ATE RAM (Manual Parity Generation) */ - uint64_t cp_int_ate_ram_mp[1024]; /* 0x014000-0x015FF8 */ - - char _pad_016000[0x18000 - 0x016000]; - - /* 0x18000-0x197F8 -- TIOCP Write Request Ram */ - uint64_t 
cp_wr_req_lower[256]; /* 0x18000 - 0x187F8 */ - uint64_t cp_wr_req_upper[256]; /* 0x18800 - 0x18FF8 */ - uint64_t cp_wr_req_parity[256]; /* 0x19000 - 0x197F8 */ - - char _pad_019800[0x1C000 - 0x019800]; - - /* 0x1C000-0x1EFF8 -- TIOCP Read Response Ram */ - uint64_t cp_rd_resp_lower[512]; /* 0x1C000 - 0x1CFF8 */ - uint64_t cp_rd_resp_upper[512]; /* 0x1D000 - 0x1DFF8 */ - uint64_t cp_rd_resp_parity[512]; /* 0x1E000 - 0x1EFF8 */ - - char _pad_01F000[0x20000 - 0x01F000]; - - /* 0x020000-0x021FFF -- Host Device (CP) Configuration Space (not used) */ - char _pad_020000[0x021000 - 0x20000]; - - /* 0x021000-0x027FFF -- PCI Device Configuration Spaces */ - union { - uint8_t c[0x1000 / 1]; /* 0x02{0000,,,7FFF} */ - uint16_t s[0x1000 / 2]; /* 0x02{0000,,,7FFF} */ - uint32_t l[0x1000 / 4]; /* 0x02{0000,,,7FFF} */ - uint64_t d[0x1000 / 8]; /* 0x02{0000,,,7FFF} */ - union { - uint8_t c[0x100 / 1]; - uint16_t s[0x100 / 2]; - uint32_t l[0x100 / 4]; - uint64_t d[0x100 / 8]; - } f[8]; - } cp_type0_cfg_dev[7]; /* 0x02{1000,,,7FFF} */ - - /* 0x028000-0x028FFF -- PCI Type 1 Configuration Space */ - union { - uint8_t c[0x1000 / 1]; /* 0x028000-0x029000 */ - uint16_t s[0x1000 / 2]; /* 0x028000-0x029000 */ - uint32_t l[0x1000 / 4]; /* 0x028000-0x029000 */ - uint64_t d[0x1000 / 8]; /* 0x028000-0x029000 */ - union { - uint8_t c[0x100 / 1]; - uint16_t s[0x100 / 2]; - uint32_t l[0x100 / 4]; - uint64_t d[0x100 / 8]; - } f[8]; - } cp_type1_cfg; /* 0x028000-0x029000 */ - - char _pad_029000[0x030000-0x029000]; - - /* 0x030000-0x030007 -- PCI Interrupt Acknowledge Cycle */ - union { - uint8_t c[8 / 1]; - uint16_t s[8 / 2]; - uint32_t l[8 / 4]; - uint64_t d[8 / 8]; - } cp_pci_iack; /* 0x030000-0x030007 */ - - char _pad_030007[0x040000-0x030008]; - - /* 0x040000-0x040007 -- PCIX Special Cycle */ - union { - uint8_t c[8 / 1]; - uint16_t s[8 / 2]; - uint32_t l[8 / 4]; - uint64_t d[8 / 8]; - } cp_pcix_cycle; /* 0x040000-0x040007 */ - - char _pad_040007[0x200000-0x040008]; - - /* 0x200000-0x7FFFFF -- PCI/GIO Device Spaces */ - union { - uint8_t c[0x100000 / 1]; - uint16_t s[0x100000 / 2]; - uint32_t l[0x100000 / 4]; - uint64_t d[0x100000 / 8]; - } cp_devio_raw[6]; /* 0x200000-0x7FFFFF */ - - #define cp_devio(n) cp_devio_raw[((n)<2)?(n*2):(n+2)] - - char _pad_800000[0xA00000-0x800000]; - - /* 0xA00000-0xBFFFFF -- PCI/GIO Device Spaces w/flush */ - union { - uint8_t c[0x100000 / 1]; - uint16_t s[0x100000 / 2]; - uint32_t l[0x100000 / 4]; - uint64_t d[0x100000 / 8]; - } cp_devio_raw_flush[6]; /* 0xA00000-0xBFFFFF */ - - #define cp_devio_flush(n) cp_devio_raw_flush[((n)<2)?(n*2):(n+2)] - -}; - -#endif /* _ASM_IA64_SN_PCI_TIOCP_H */ diff --git a/arch/ia64/sn/include/xtalk/hubdev.h b/arch/ia64/sn/include/xtalk/hubdev.h index 868e7ecae84b..580a1c0403a7 100644 --- a/arch/ia64/sn/include/xtalk/hubdev.h +++ b/arch/ia64/sn/include/xtalk/hubdev.h @@ -8,6 +8,8 @@ #ifndef _ASM_IA64_SN_XTALK_HUBDEV_H #define _ASM_IA64_SN_XTALK_HUBDEV_H +#include "xtalk/xwidgetdev.h" + #define HUB_WIDGET_ID_MAX 0xf #define DEV_PER_WIDGET (2*2*8) #define IIO_ITTE_WIDGET_BITS 4 /* size of widget field */ diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c index 783eb4323847..a67f39e448cb 100644 --- a/arch/ia64/sn/kernel/io_init.c +++ b/arch/ia64/sn/kernel/io_init.c @@ -9,21 +9,28 @@ #include <linux/bootmem.h> #include <linux/nodemask.h> #include <asm/sn/types.h> -#include <asm/sn/sn_sal.h> #include <asm/sn/addrs.h> -#include <asm/sn/pcibus_provider_defs.h> -#include <asm/sn/pcidev.h> -#include "pci/pcibr_provider.h" -#include 
"xtalk/xwidgetdev.h" #include <asm/sn/geo.h> -#include "xtalk/hubdev.h" #include <asm/sn/io.h> +#include <asm/sn/pcibr_provider.h> +#include <asm/sn/pcibus_provider_defs.h> +#include <asm/sn/pcidev.h> #include <asm/sn/simulator.h> +#include <asm/sn/sn_sal.h> #include <asm/sn/tioca_provider.h> +#include "xtalk/hubdev.h" +#include "xtalk/xwidgetdev.h" -char master_baseio_wid; nasid_t master_nasid = INVALID_NASID; /* Partition Master */ +static struct list_head sn_sysdata_list; + +/* sysdata list struct */ +struct sysdata_el { + struct list_head entry; + void *sysdata; +}; + struct slab_info { struct hubdev_info hubdev; }; @@ -138,23 +145,6 @@ sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev, } /* - * sn_alloc_pci_sysdata() - This routine allocates a pci controller - * which is expected as the pci_dev and pci_bus sysdata by the Linux - * PCI infrastructure. - */ -static inline struct pci_controller *sn_alloc_pci_sysdata(void) -{ - struct pci_controller *pci_sysdata; - - pci_sysdata = kmalloc(sizeof(*pci_sysdata), GFP_KERNEL); - if (!pci_sysdata) - BUG(); - - memset(pci_sysdata, 0, sizeof(*pci_sysdata)); - return pci_sysdata; -} - -/* * sn_fixup_ionodes() - This routine initializes the HUB data strcuture for * each node in the system. */ @@ -221,22 +211,34 @@ static void sn_fixup_ionodes(void) } +void sn_pci_unfixup_slot(struct pci_dev *dev) +{ + struct pci_dev *host_pci_dev = SN_PCIDEV_INFO(dev)->host_pci_dev; + + sn_irq_unfixup(dev); + pci_dev_put(host_pci_dev); + pci_dev_put(dev); +} + /* * sn_pci_fixup_slot() - This routine sets up a slot's resources * consistent with the Linux PCI abstraction layer. Resources acquired * from our PCI provider include PIO maps to BAR space and interrupt * objects. */ -static void sn_pci_fixup_slot(struct pci_dev *dev) +void sn_pci_fixup_slot(struct pci_dev *dev) { int idx; int segment = 0; - uint64_t size; - struct sn_irq_info *sn_irq_info; - struct pci_dev *host_pci_dev; int status = 0; struct pcibus_bussoft *bs; + struct pci_bus *host_pci_bus; + struct pci_dev *host_pci_dev; + struct sn_irq_info *sn_irq_info; + unsigned long size; + unsigned int bus_no, devfn; + pci_dev_get(dev); /* for the sysdata pointer */ dev->sysdata = kmalloc(sizeof(struct pcidev_info), GFP_KERNEL); if (SN_PCIDEV_INFO(dev) <= 0) BUG(); /* Cannot afford to run out of memory */ @@ -253,7 +255,7 @@ static void sn_pci_fixup_slot(struct pci_dev *dev) (u64) __pa(SN_PCIDEV_INFO(dev)), (u64) __pa(sn_irq_info)); if (status) - BUG(); /* Cannot get platform pci device information information */ + BUG(); /* Cannot get platform pci device information */ /* Copy over PIO Mapped Addresses */ for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) { @@ -275,15 +277,21 @@ static void sn_pci_fixup_slot(struct pci_dev *dev) dev->resource[idx].parent = &iomem_resource; } - /* set up host bus linkages */ - bs = SN_PCIBUS_BUSSOFT(dev->bus); - host_pci_dev = - pci_find_slot(SN_PCIDEV_INFO(dev)->pdi_slot_host_handle >> 32, - SN_PCIDEV_INFO(dev)-> - pdi_slot_host_handle & 0xffffffff); + /* + * Using the PROMs values for the PCI host bus, get the Linux + * PCI host_pci_dev struct and set up host bus linkages + */ + + bus_no = SN_PCIDEV_INFO(dev)->pdi_slot_host_handle >> 32; + devfn = SN_PCIDEV_INFO(dev)->pdi_slot_host_handle & 0xffffffff; + host_pci_bus = pci_find_bus(pci_domain_nr(dev->bus), bus_no); + host_pci_dev = pci_get_slot(host_pci_bus, devfn); + + SN_PCIDEV_INFO(dev)->host_pci_dev = host_pci_dev; SN_PCIDEV_INFO(dev)->pdi_host_pcidev_info = - SN_PCIDEV_INFO(host_pci_dev); + 
SN_PCIDEV_INFO(host_pci_dev); SN_PCIDEV_INFO(dev)->pdi_linux_pcidev = dev; + bs = SN_PCIBUS_BUSSOFT(dev->bus); SN_PCIDEV_INFO(dev)->pdi_pcibus_info = bs; if (bs && bs->bs_asic_type < PCIIO_ASIC_MAX_TYPES) { @@ -297,6 +305,9 @@ static void sn_pci_fixup_slot(struct pci_dev *dev) SN_PCIDEV_INFO(dev)->pdi_sn_irq_info = sn_irq_info; dev->irq = SN_PCIDEV_INFO(dev)->pdi_sn_irq_info->irq_irq; sn_irq_fixup(dev, sn_irq_info); + } else { + SN_PCIDEV_INFO(dev)->pdi_sn_irq_info = NULL; + kfree(sn_irq_info); } } @@ -304,55 +315,57 @@ static void sn_pci_fixup_slot(struct pci_dev *dev) * sn_pci_controller_fixup() - This routine sets up a bus's resources * consistent with the Linux PCI abstraction layer. */ -static void sn_pci_controller_fixup(int segment, int busnum) +void sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus) { int status = 0; int nasid, cnode; - struct pci_bus *bus; struct pci_controller *controller; struct pcibus_bussoft *prom_bussoft_ptr; struct hubdev_info *hubdev_info; void *provider_soft; struct sn_pcibus_provider *provider; - status = - sal_get_pcibus_info((u64) segment, (u64) busnum, - (u64) ia64_tpa(&prom_bussoft_ptr)); - if (status > 0) { - return; /* bus # does not exist */ - } - + status = sal_get_pcibus_info((u64) segment, (u64) busnum, + (u64) ia64_tpa(&prom_bussoft_ptr)); + if (status > 0) + return; /*bus # does not exist */ prom_bussoft_ptr = __va(prom_bussoft_ptr); - controller = sn_alloc_pci_sysdata(); - /* controller non-zero is BUG'd in sn_alloc_pci_sysdata */ - bus = pci_scan_bus(busnum, &pci_root_ops, controller); + controller = kcalloc(1,sizeof(struct pci_controller), GFP_KERNEL); + if (!controller) + BUG(); + if (bus == NULL) { - return; /* error, or bus already scanned */ + bus = pci_scan_bus(busnum, &pci_root_ops, controller); + if (bus == NULL) + return; /* error, or bus already scanned */ + bus->sysdata = NULL; } + if (bus->sysdata) + goto error_return; /* sysdata already alloc'd */ + /* * Per-provider fixup. Copies the contents from prom to local * area and links SN_PCIBUS_BUSSOFT(). */ - if (prom_bussoft_ptr->bs_asic_type >= PCIIO_ASIC_MAX_TYPES) { + if (prom_bussoft_ptr->bs_asic_type >= PCIIO_ASIC_MAX_TYPES) return; /* unsupported asic type */ - } + + if (prom_bussoft_ptr->bs_asic_type == PCIIO_ASIC_TYPE_PPB) + goto error_return; /* no further fixup necessary */ provider = sn_pci_provider[prom_bussoft_ptr->bs_asic_type]; - if (provider == NULL) { + if (provider == NULL) return; /* no provider registerd for this asic */ - } provider_soft = NULL; - if (provider->bus_fixup) { + if (provider->bus_fixup) provider_soft = (*provider->bus_fixup) (prom_bussoft_ptr); - } - if (provider_soft == NULL) { + if (provider_soft == NULL) return; /* fixup failed or not applicable */ - } /* * Generic bus fixup goes here. 
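
The host-bus linkage rewrite just above stops using pci_find_slot(): pdi_slot_host_handle packs the host bus number into the upper 32 bits and the devfn into the lower 32, pci_find_bus() resolves the bus within the device's PCI domain, and pci_get_slot() returns the host device with a reference held (sn_pci_unfixup_slot() drops it again). A reduced sketch of that lookup, with 'handle' standing in for pdi_slot_host_handle:

    #include <linux/pci.h>

    /*
     * Resolve a packed "bus << 32 | devfn" handle to a referenced pci_dev
     * in the same PCI domain as 'dev'.  Returns NULL if the bus or slot
     * is unknown; a successful lookup must be balanced with pci_dev_put().
     */
    static struct pci_dev *lookup_host_dev(struct pci_dev *dev, u64 handle)
    {
            unsigned int bus_no = handle >> 32;
            unsigned int devfn  = handle & 0xffffffff;
            struct pci_bus *host_bus;

            host_bus = pci_find_bus(pci_domain_nr(dev->bus), bus_no);
            if (!host_bus)
                    return NULL;

            return pci_get_slot(host_bus, devfn);   /* takes a reference */
    }

Separately, the pci_controller allocated in sn_pci_controller_fixup() above uses kcalloc(1, sizeof(...), GFP_KERNEL), which is simply a zero-filled allocation; later kernels would write kzalloc().
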
Don't reference prom_bussoft_ptr @@ -361,12 +374,47 @@ static void sn_pci_controller_fixup(int segment, int busnum) bus->sysdata = controller; PCI_CONTROLLER(bus)->platform_data = provider_soft; - nasid = NASID_GET(SN_PCIBUS_BUSSOFT(bus)->bs_base); cnode = nasid_to_cnodeid(nasid); hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo); SN_PCIBUS_BUSSOFT(bus)->bs_xwidget_info = &(hubdev_info->hdi_xwidget_info[SN_PCIBUS_BUSSOFT(bus)->bs_xid]); + + return; + +error_return: + + kfree(controller); + return; +} + +void sn_bus_store_sysdata(struct pci_dev *dev) +{ + struct sysdata_el *element; + + element = kcalloc(1, sizeof(struct sysdata_el), GFP_KERNEL); + if (!element) { + dev_dbg(dev, "%s: out of memory!\n", __FUNCTION__); + return; + } + element->sysdata = dev->sysdata; + list_add(&element->entry, &sn_sysdata_list); +} + +void sn_bus_free_sysdata(void) +{ + struct sysdata_el *element; + struct list_head *list; + +sn_sysdata_free_start: + list_for_each(list, &sn_sysdata_list) { + element = list_entry(list, struct sysdata_el, entry); + list_del(&element->entry); + kfree(element->sysdata); + kfree(element); + goto sn_sysdata_free_start; + } + return; } /* @@ -403,20 +451,17 @@ static int __init sn_pci_init(void) */ ia64_max_iommu_merge_mask = ~PAGE_MASK; sn_fixup_ionodes(); - sn_irq = kmalloc(sizeof(struct sn_irq_info *) * NR_IRQS, GFP_KERNEL); - if (sn_irq <= 0) - BUG(); /* Canno afford to run out of memory. */ - memset(sn_irq, 0, sizeof(struct sn_irq_info *) * NR_IRQS); - + sn_irq_lh_init(); + INIT_LIST_HEAD(&sn_sysdata_list); sn_init_cpei_timer(); #ifdef CONFIG_PROC_FS register_sn_procfs(); #endif - for (i = 0; i < PCI_BUSES_TO_SCAN; i++) { - sn_pci_controller_fixup(0, i); - } + /* busses are not known yet ... */ + for (i = 0; i < PCI_BUSES_TO_SCAN; i++) + sn_pci_controller_fixup(0, i, NULL); /* * Generic Linux PCI Layer has created the pci_bus and pci_dev @@ -425,9 +470,8 @@ static int __init sn_pci_init(void) */ while ((pci_dev = - pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pci_dev)) != NULL) { + pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pci_dev)) != NULL) sn_pci_fixup_slot(pci_dev); - } sn_ioif_inited = 1; /* sn I/O infrastructure now initialized */ @@ -469,3 +513,8 @@ cnodeid_get_geoid(cnodeid_t cnode) } subsys_initcall(sn_pci_init); +EXPORT_SYMBOL(sn_pci_fixup_slot); +EXPORT_SYMBOL(sn_pci_unfixup_slot); +EXPORT_SYMBOL(sn_pci_controller_fixup); +EXPORT_SYMBOL(sn_bus_store_sysdata); +EXPORT_SYMBOL(sn_bus_free_sysdata); diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c index 0f4e8138658f..84d276a14ecb 100644 --- a/arch/ia64/sn/kernel/irq.c +++ b/arch/ia64/sn/kernel/irq.c @@ -9,13 +9,13 @@ */ #include <linux/irq.h> -#include <asm/sn/intr.h> +#include <linux/spinlock.h> #include <asm/sn/addrs.h> #include <asm/sn/arch.h> -#include "xtalk/xwidgetdev.h" +#include <asm/sn/intr.h> +#include <asm/sn/pcibr_provider.h> #include <asm/sn/pcibus_provider_defs.h> #include <asm/sn/pcidev.h> -#include "pci/pcibr_provider.h" #include <asm/sn/shub_mmr.h> #include <asm/sn/sn_sal.h> @@ -25,7 +25,8 @@ static void unregister_intr_pda(struct sn_irq_info *sn_irq_info); extern int sn_force_interrupt_flag; extern int sn_ioif_inited; -struct sn_irq_info **sn_irq; +static struct list_head **sn_irq_lh; +static spinlock_t sn_irq_info_lock = SPIN_LOCK_UNLOCKED; /* non-IRQ lock */ static inline uint64_t sn_intr_alloc(nasid_t local_nasid, int local_widget, u64 sn_irq_info, @@ -101,7 +102,7 @@ static void sn_end_irq(unsigned int irq) nasid = get_nasid(); event_occurred = HUB_L((uint64_t *) GLOBAL_MMR_ADDR 
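
sn_bus_store_sysdata()/sn_bus_free_sysdata() above keep every sysdata pointer on a private list so it can be released later (the exports at the end of io_init.c suggest a hotplug-style consumer). The free routine restarts its walk from a label after each deletion; list_for_each_entry_safe(), which caches the next node before the current one is freed, gives the same result in a single pass. A behaviourally equivalent sketch using the same structure layout:

    #include <linux/list.h>
    #include <linux/slab.h>

    /* Same layout as the sysdata_el introduced above. */
    struct sysdata_el {
            struct list_head entry;
            void *sysdata;
    };

    static LIST_HEAD(sn_sysdata_list);      /* stands in for the list in io_init.c */

    void sn_bus_free_sysdata_alt(void)
    {
            struct sysdata_el *el, *tmp;

            list_for_each_entry_safe(el, tmp, &sn_sysdata_list, entry) {
                    list_del(&el->entry);
                    kfree(el->sysdata);
                    kfree(el);
            }
    }
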
(nasid, SH_EVENT_OCCURRED)); - /* If the UART bit is set here, we may have received an + /* If the UART bit is set here, we may have received an * interrupt from the UART that the driver missed. To * make sure, we IPI ourselves to force us to look again. */ @@ -115,82 +116,84 @@ static void sn_end_irq(unsigned int irq) force_interrupt(irq); } +static void sn_irq_info_free(struct rcu_head *head); + static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask) { - struct sn_irq_info *sn_irq_info = sn_irq[irq]; - struct sn_irq_info *tmp_sn_irq_info; + struct sn_irq_info *sn_irq_info, *sn_irq_info_safe; int cpuid, cpuphys; - nasid_t t_nasid; /* nasid to target */ - int t_slice; /* slice to target */ - - /* allocate a temp sn_irq_info struct to get new target info */ - tmp_sn_irq_info = kmalloc(sizeof(*tmp_sn_irq_info), GFP_KERNEL); - if (!tmp_sn_irq_info) - return; cpuid = first_cpu(mask); cpuphys = cpu_physical_id(cpuid); - t_nasid = cpuid_to_nasid(cpuid); - t_slice = cpuid_to_slice(cpuid); - while (sn_irq_info) { - int status; - int local_widget; - uint64_t bridge = (uint64_t) sn_irq_info->irq_bridge; - nasid_t local_nasid = NASID_GET(bridge); + list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe, + sn_irq_lh[irq], list) { + uint64_t bridge; + int local_widget, status; + nasid_t local_nasid; + struct sn_irq_info *new_irq_info; + + new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC); + if (new_irq_info == NULL) + break; + memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info)); + + bridge = (uint64_t) new_irq_info->irq_bridge; + if (!bridge) { + kfree(new_irq_info); + break; /* irq is not a device interrupt */ + } - if (!bridge) - break; /* irq is not a device interrupt */ + local_nasid = NASID_GET(bridge); if (local_nasid & 1) local_widget = TIO_SWIN_WIDGETNUM(bridge); else local_widget = SWIN_WIDGETNUM(bridge); - /* Free the old PROM sn_irq_info structure */ - sn_intr_free(local_nasid, local_widget, sn_irq_info); + /* Free the old PROM new_irq_info structure */ + sn_intr_free(local_nasid, local_widget, new_irq_info); + /* Update kernels new_irq_info with new target info */ + unregister_intr_pda(new_irq_info); - /* allocate a new PROM sn_irq_info struct */ + /* allocate a new PROM new_irq_info struct */ status = sn_intr_alloc(local_nasid, local_widget, - __pa(tmp_sn_irq_info), irq, t_nasid, - t_slice); - - if (status == 0) { - /* Update kernels sn_irq_info with new target info */ - unregister_intr_pda(sn_irq_info); - sn_irq_info->irq_cpuid = cpuid; - sn_irq_info->irq_nasid = t_nasid; - sn_irq_info->irq_slice = t_slice; - sn_irq_info->irq_xtalkaddr = - tmp_sn_irq_info->irq_xtalkaddr; - sn_irq_info->irq_cookie = tmp_sn_irq_info->irq_cookie; - register_intr_pda(sn_irq_info); - - if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type)) { - pcibr_change_devices_irq(sn_irq_info); - } + __pa(new_irq_info), irq, + cpuid_to_nasid(cpuid), + cpuid_to_slice(cpuid)); + + /* SAL call failed */ + if (status) { + kfree(new_irq_info); + break; + } + + new_irq_info->irq_cpuid = cpuid; + register_intr_pda(new_irq_info); + + if (IS_PCI_BRIDGE_ASIC(new_irq_info->irq_bridge_type)) + pcibr_change_devices_irq(new_irq_info); - sn_irq_info = sn_irq_info->irq_next; + spin_lock(&sn_irq_info_lock); + list_replace_rcu(&sn_irq_info->list, &new_irq_info->list); + spin_unlock(&sn_irq_info_lock); + call_rcu(&sn_irq_info->rcu, sn_irq_info_free); #ifdef CONFIG_SMP - set_irq_affinity_info((irq & 0xff), cpuphys, 0); + set_irq_affinity_info((irq & 0xff), cpuphys, 0); #endif - } else { - break; /* snp_affinity 
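
The sn_set_affinity_irq() rewrite above is the standard RCU update sequence: allocate a replacement node with GFP_ATOMIC (affinity changes can arrive in contexts where sleeping is not safe), copy the old contents, adjust the fields that change, publish the copy with list_replace_rcu() under the writer spinlock, and hand the old node to call_rcu() so lockless readers never see it freed under them. A generic sketch of the pattern with an illustrative node type:

    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/list.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/string.h>

    /* Illustrative element; 'list' is walked by RCU readers. */
    struct node {
            struct list_head list;
            int target_cpu;
            struct rcu_head rcu;
    };

    static DEFINE_SPINLOCK(writer_lock);    /* serializes writers only */

    static void node_free_rcu(struct rcu_head *head)
    {
            kfree(container_of(head, struct node, rcu));
    }

    /* Swap 'old' for an updated copy without disturbing RCU readers. */
    static int retarget_node(struct node *old, int new_cpu)
    {
            struct node *new = kmalloc(sizeof(*new), GFP_ATOMIC);

            if (!new)
                    return -ENOMEM;
            memcpy(new, old, sizeof(*new));
            new->target_cpu = new_cpu;

            spin_lock(&writer_lock);
            list_replace_rcu(&old->list, &new->list);
            spin_unlock(&writer_lock);

            call_rcu(&old->rcu, node_free_rcu);     /* freed after a grace period */
            return 0;
    }
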
failed the intr_alloc */ - } } - kfree(tmp_sn_irq_info); } struct hw_interrupt_type irq_type_sn = { - "SN hub", - sn_startup_irq, - sn_shutdown_irq, - sn_enable_irq, - sn_disable_irq, - sn_ack_irq, - sn_end_irq, - sn_set_affinity_irq + .typename = "SN hub", + .startup = sn_startup_irq, + .shutdown = sn_shutdown_irq, + .enable = sn_enable_irq, + .disable = sn_disable_irq, + .ack = sn_ack_irq, + .end = sn_end_irq, + .set_affinity = sn_set_affinity_irq }; unsigned int sn_local_vector_to_irq(u8 vector) @@ -231,19 +234,18 @@ static void unregister_intr_pda(struct sn_irq_info *sn_irq_info) struct sn_irq_info *tmp_irq_info; int i, foundmatch; + rcu_read_lock(); if (pdacpu(cpu)->sn_last_irq == irq) { foundmatch = 0; - for (i = pdacpu(cpu)->sn_last_irq - 1; i; i--) { - tmp_irq_info = sn_irq[i]; - while (tmp_irq_info) { + for (i = pdacpu(cpu)->sn_last_irq - 1; + i && !foundmatch; i--) { + list_for_each_entry_rcu(tmp_irq_info, + sn_irq_lh[i], + list) { if (tmp_irq_info->irq_cpuid == cpu) { - foundmatch++; + foundmatch = 1; break; } - tmp_irq_info = tmp_irq_info->irq_next; - } - if (foundmatch) { - break; } } pdacpu(cpu)->sn_last_irq = i; @@ -251,60 +253,27 @@ static void unregister_intr_pda(struct sn_irq_info *sn_irq_info) if (pdacpu(cpu)->sn_first_irq == irq) { foundmatch = 0; - for (i = pdacpu(cpu)->sn_first_irq + 1; i < NR_IRQS; i++) { - tmp_irq_info = sn_irq[i]; - while (tmp_irq_info) { + for (i = pdacpu(cpu)->sn_first_irq + 1; + i < NR_IRQS && !foundmatch; i++) { + list_for_each_entry_rcu(tmp_irq_info, + sn_irq_lh[i], + list) { if (tmp_irq_info->irq_cpuid == cpu) { - foundmatch++; + foundmatch = 1; break; } - tmp_irq_info = tmp_irq_info->irq_next; - } - if (foundmatch) { - break; } } pdacpu(cpu)->sn_first_irq = ((i == NR_IRQS) ? 0 : i); } + rcu_read_unlock(); } -struct sn_irq_info *sn_irq_alloc(nasid_t local_nasid, int local_widget, int irq, - nasid_t nasid, int slice) +static void sn_irq_info_free(struct rcu_head *head) { struct sn_irq_info *sn_irq_info; - int status; - - sn_irq_info = kmalloc(sizeof(*sn_irq_info), GFP_KERNEL); - if (sn_irq_info == NULL) - return NULL; - - memset(sn_irq_info, 0x0, sizeof(*sn_irq_info)); - - status = - sn_intr_alloc(local_nasid, local_widget, __pa(sn_irq_info), irq, - nasid, slice); - - if (status) { - kfree(sn_irq_info); - return NULL; - } else { - return sn_irq_info; - } -} - -void sn_irq_free(struct sn_irq_info *sn_irq_info) -{ - uint64_t bridge = (uint64_t) sn_irq_info->irq_bridge; - nasid_t local_nasid = NASID_GET(bridge); - int local_widget; - - if (local_nasid & 1) /* tio check */ - local_widget = TIO_SWIN_WIDGETNUM(bridge); - else - local_widget = SWIN_WIDGETNUM(bridge); - - sn_intr_free(local_nasid, local_widget, sn_irq_info); + sn_irq_info = container_of(head, struct sn_irq_info, rcu); kfree(sn_irq_info); } @@ -314,30 +283,54 @@ void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info) int slice = sn_irq_info->irq_slice; int cpu = nasid_slice_to_cpuid(nasid, slice); + pci_dev_get(pci_dev); sn_irq_info->irq_cpuid = cpu; sn_irq_info->irq_pciioinfo = SN_PCIDEV_INFO(pci_dev); /* link it into the sn_irq[irq] list */ - sn_irq_info->irq_next = sn_irq[sn_irq_info->irq_irq]; - sn_irq[sn_irq_info->irq_irq] = sn_irq_info; + spin_lock(&sn_irq_info_lock); + list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]); + spin_unlock(&sn_irq_info_lock); (void)register_intr_pda(sn_irq_info); } +void sn_irq_unfixup(struct pci_dev *pci_dev) +{ + struct sn_irq_info *sn_irq_info; + + /* Only cleanup IRQ stuff if this device has a host bus context */ 
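
The read side of the same conversion is visible in unregister_intr_pda() above (and in force_interrupt() and sn_lb_int_war_check() in the hunks that follow): the per-IRQ lists are walked with list_for_each_entry_rcu() inside rcu_read_lock()/rcu_read_unlock(), while insertions and removals go through the _rcu list helpers under sn_irq_info_lock, which only writers take. A minimal sketch of that reader/writer pairing:

    #include <linux/list.h>
    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>

    struct item {
            struct list_head list;
            int cpu;
    };

    static LIST_HEAD(item_list);
    static DEFINE_SPINLOCK(item_lock);      /* taken by writers only */

    static void add_item(struct item *it)
    {
            spin_lock(&item_lock);
            list_add_rcu(&it->list, &item_list);
            spin_unlock(&item_lock);
    }

    /* Lockless reader: an RCU read-side critical section, no spinlock. */
    static int cpu_has_item(int cpu)
    {
            struct item *it;
            int found = 0;

            rcu_read_lock();
            list_for_each_entry_rcu(it, &item_list, list) {
                    if (it->cpu == cpu) {
                            found = 1;
                            break;
                    }
            }
            rcu_read_unlock();
            return found;
    }
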
+ if (!SN_PCIDEV_BUSSOFT(pci_dev)) + return; + + sn_irq_info = SN_PCIDEV_INFO(pci_dev)->pdi_sn_irq_info; + if (!sn_irq_info || !sn_irq_info->irq_irq) { + kfree(sn_irq_info); + return; + } + + unregister_intr_pda(sn_irq_info); + spin_lock(&sn_irq_info_lock); + list_del_rcu(&sn_irq_info->list); + spin_unlock(&sn_irq_info_lock); + call_rcu(&sn_irq_info->rcu, sn_irq_info_free); + pci_dev_put(pci_dev); +} + static void force_interrupt(int irq) { struct sn_irq_info *sn_irq_info; if (!sn_ioif_inited) return; - sn_irq_info = sn_irq[irq]; - while (sn_irq_info) { + + rcu_read_lock(); + list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[irq], list) { if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) && - (sn_irq_info->irq_bridge != NULL)) { + (sn_irq_info->irq_bridge != NULL)) pcibr_force_interrupt(sn_irq_info); - } - sn_irq_info = sn_irq_info->irq_next; } + rcu_read_unlock(); } /* @@ -402,19 +395,41 @@ static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info) void sn_lb_int_war_check(void) { + struct sn_irq_info *sn_irq_info; int i; if (!sn_ioif_inited || pda->sn_first_irq == 0) return; + + rcu_read_lock(); for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) { - struct sn_irq_info *sn_irq_info = sn_irq[i]; - while (sn_irq_info) { - /* Only call for PCI bridges that are fully initialized. */ + list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[i], list) { + /* + * Only call for PCI bridges that are fully + * initialized. + */ if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) && - (sn_irq_info->irq_bridge != NULL)) { + (sn_irq_info->irq_bridge != NULL)) sn_check_intr(i, sn_irq_info); - } - sn_irq_info = sn_irq_info->irq_next; } } + rcu_read_unlock(); +} + +void sn_irq_lh_init(void) +{ + int i; + + sn_irq_lh = kmalloc(sizeof(struct list_head *) * NR_IRQS, GFP_KERNEL); + if (!sn_irq_lh) + panic("SN PCI INIT: Failed to allocate memory for PCI init\n"); + + for (i = 0; i < NR_IRQS; i++) { + sn_irq_lh[i] = kmalloc(sizeof(struct list_head), GFP_KERNEL); + if (!sn_irq_lh[i]) + panic("SN PCI INIT: Failed IRQ memory allocation\n"); + + INIT_LIST_HEAD(sn_irq_lh[i]); + } + } diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c index 22e10d282c7f..7c7fe441d623 100644 --- a/arch/ia64/sn/kernel/setup.c +++ b/arch/ia64/sn/kernel/setup.c @@ -270,7 +270,7 @@ void __init sn_setup(char **cmdline_p) { long status, ticks_per_sec, drift; int pxm; - int major = sn_sal_rev_major(), minor = sn_sal_rev_minor(); + u32 version = sn_sal_rev(); extern void sn_cpu_init(void); ia64_sn_plat_set_error_handling_features(); @@ -308,22 +308,21 @@ void __init sn_setup(char **cmdline_p) * support here so we don't have to listen to failed keyboard probe * messages. */ - if ((major < 2 || (major == 2 && minor <= 9)) && - acpi_kbd_controller_present) { + if (version <= 0x0209 && acpi_kbd_controller_present) { printk(KERN_INFO "Disabling legacy keyboard support as prom " "is too old and doesn't provide FADT\n"); acpi_kbd_controller_present = 0; } - printk("SGI SAL version %x.%02x\n", major, minor); + printk("SGI SAL version %x.%02x\n", version >> 8, version & 0x00FF); /* * Confirm the SAL we're running on is recent enough... 
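
setup.c above switches from a separate (major, minor) pair to the packed sn_sal_rev() value, major in the high byte and minor in the low byte, so each revision gate (the keyboard check here, the SN_SAL_MIN_VERSION test that follows, and the tioca prom check later in this diff) becomes one unsigned comparison: SAL 2.09 encodes as 0x0209, and version <= 0x0209 reproduces the old 'major < 2 || (major == 2 && minor <= 9)'. A small worked example of the encoding (pack_rev() is illustrative):

    #include <stdio.h>

    /* Pack major.minor the way sn_sal_rev() is used above:
     * major in bits 15..8, minor in bits 7..0. */
    static unsigned int pack_rev(unsigned int major, unsigned int minor)
    {
            return (major << 8) | (minor & 0xff);
    }

    int main(void)
    {
            unsigned int v = pack_rev(2, 0x09);     /* SAL 2.09 */

            printf("0x%04x\n", v);                  /* 0x0209 */
            printf("%x.%02x\n", v >> 8, v & 0xff);  /* 2.09, same format as the printk */
            printf("%d\n", v <= 0x0209);            /* 1: one compare, no major/minor cascade */
            return 0;
    }
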
*/ - if ((major < SN_SAL_MIN_MAJOR) || (major == SN_SAL_MIN_MAJOR && - minor < SN_SAL_MIN_MINOR)) { + if (version < SN_SAL_MIN_VERSION) { printk(KERN_ERR "This kernel needs SGI SAL version >= " - "%x.%02x\n", SN_SAL_MIN_MAJOR, SN_SAL_MIN_MINOR); + "%x.%02x\n", SN_SAL_MIN_VERSION >> 8, + SN_SAL_MIN_VERSION & 0x00FF); panic("PROM version too old\n"); } diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c index 8716f4d5314b..254fe15c064b 100644 --- a/arch/ia64/sn/kernel/tiocx.c +++ b/arch/ia64/sn/kernel/tiocx.c @@ -8,12 +8,12 @@ #include <linux/module.h> #include <linux/kernel.h> -#include <linux/version.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/proc_fs.h> #include <linux/device.h> #include <linux/delay.h> +#include <asm/system.h> #include <asm/uaccess.h> #include <asm/sn/sn_sal.h> #include <asm/sn/addrs.h> @@ -481,6 +481,9 @@ static int __init tiocx_init(void) cnodeid_t cnodeid; int found_tiocx_device = 0; + if (!ia64_platform_is("sn2")) + return -ENODEV; + bus_register(&tiocx_bus_type); for (cnodeid = 0; cnodeid < MAX_COMPACT_NODES; cnodeid++) { diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c index 177ddb748ebe..d580adcad927 100644 --- a/arch/ia64/sn/kernel/xpc_main.c +++ b/arch/ia64/sn/kernel/xpc_main.c @@ -53,6 +53,7 @@ #include <linux/cache.h> #include <linux/interrupt.h> #include <linux/slab.h> +#include <linux/delay.h> #include <asm/sn/intr.h> #include <asm/sn/sn_sal.h> #include <asm/uaccess.h> @@ -308,8 +309,7 @@ xpc_make_first_contact(struct xpc_partition *part) "partition %d\n", XPC_PARTID(part)); /* wait a 1/4 of a second or so */ - set_current_state(TASK_INTERRUPTIBLE); - (void) schedule_timeout(0.25 * HZ); + msleep_interruptible(250); if (part->act_state == XPC_P_DEACTIVATING) { return part->reason; @@ -841,9 +841,7 @@ xpc_do_exit(void) down(&xpc_discovery_exited); - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(0.3 * HZ); - set_current_state(TASK_RUNNING); + msleep_interruptible(300); /* wait for all partitions to become inactive */ @@ -860,12 +858,8 @@ xpc_do_exit(void) } } - if (active_part_count) { - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(0.3 * HZ); - set_current_state(TASK_RUNNING); - } - + if (active_part_count) + msleep_interruptible(300); } while (active_part_count > 0); diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c index 5da9bdbde7cb..a2f7a88aefbb 100644 --- a/arch/ia64/sn/pci/pci_dma.c +++ b/arch/ia64/sn/pci/pci_dma.c @@ -11,9 +11,10 @@ #include <linux/module.h> #include <asm/dma.h> -#include <asm/sn/sn_sal.h> +#include <asm/sn/pcibr_provider.h> #include <asm/sn/pcibus_provider_defs.h> #include <asm/sn/pcidev.h> +#include <asm/sn/sn_sal.h> #define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset) #define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG)) diff --git a/arch/ia64/sn/pci/pcibr/pcibr_ate.c b/arch/ia64/sn/pci/pcibr/pcibr_ate.c index 0e47bce85f2d..d1647b863e61 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_ate.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_ate.c @@ -8,9 +8,9 @@ #include <linux/types.h> #include <asm/sn/sn_sal.h> +#include <asm/sn/pcibr_provider.h> #include <asm/sn/pcibus_provider_defs.h> #include <asm/sn/pcidev.h> -#include "pci/pcibr_provider.h" int pcibr_invalidate_ate = 0; /* by default don't invalidate ATE on free */ diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c index 64af2b2c1787..b058dc2a0b9d 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_dma.c +++ 
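
The xpc_main.c hunks above replace the open-coded set_current_state(TASK_INTERRUPTIBLE) + schedule_timeout(0.25 * HZ) sequences with msleep_interruptible(250): the millisecond interface is independent of HZ, handles the task-state bookkeeping itself, and drops the odd floating-point constant (harmless only because it is folded at compile time). A sketch of the two spellings side by side:

    #include <linux/delay.h>
    #include <linux/sched.h>

    /* Old idiom, as removed above (the removed code wrote HZ / 4 as 0.25 * HZ). */
    static void quarter_second_old(void)
    {
            set_current_state(TASK_INTERRUPTIBLE);
            schedule_timeout(HZ / 4);
    }

    /* New idiom: same interruptible ~250 ms sleep, no HZ arithmetic. */
    static void quarter_second_new(void)
    {
            msleep_interruptible(250);
    }
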
b/arch/ia64/sn/pci/pcibr/pcibr_dma.c @@ -8,18 +8,17 @@ #include <linux/types.h> #include <linux/pci.h> -#include <asm/sn/sn_sal.h> +#include <asm/sn/addrs.h> #include <asm/sn/geo.h> -#include "xtalk/xwidgetdev.h" -#include "xtalk/hubdev.h" +#include <asm/sn/pcibr_provider.h> #include <asm/sn/pcibus_provider_defs.h> #include <asm/sn/pcidev.h> -#include "pci/tiocp.h" -#include "pci/pic.h" -#include "pci/pcibr_provider.h" -#include "pci/tiocp.h" +#include <asm/sn/pic.h> +#include <asm/sn/sn_sal.h> +#include <asm/sn/tiocp.h> #include "tio.h" -#include <asm/sn/addrs.h> +#include "xtalk/xwidgetdev.h" +#include "xtalk/hubdev.h" extern int sn_ioif_inited; diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c index 3893999d23d8..9813da56d311 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c @@ -6,18 +6,51 @@ * Copyright (C) 2001-2004 Silicon Graphics, Inc. All rights reserved. */ -#include <linux/types.h> #include <linux/interrupt.h> +#include <linux/types.h> #include <linux/pci.h> -#include <asm/sn/sn_sal.h> -#include "xtalk/xwidgetdev.h" +#include <asm/sn/addrs.h> #include <asm/sn/geo.h> -#include "xtalk/hubdev.h" +#include <asm/sn/pcibr_provider.h> #include <asm/sn/pcibus_provider_defs.h> #include <asm/sn/pcidev.h> -#include "pci/pcibr_provider.h" -#include <asm/sn/addrs.h> +#include <asm/sn/sn_sal.h> +#include "xtalk/xwidgetdev.h" +#include "xtalk/hubdev.h" + +int +sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp) +{ + struct ia64_sal_retval ret_stuff; + uint64_t busnum; + + ret_stuff.status = 0; + ret_stuff.v0 = 0; + busnum = soft->pbi_buscommon.bs_persist_busnum; + SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_ENABLE, (u64) busnum, + (u64) device, (u64) resp, 0, 0, 0, 0); + + return (int)ret_stuff.v0; +} + +int +sal_pcibr_slot_disable(struct pcibus_info *soft, int device, int action, + void *resp) +{ + struct ia64_sal_retval ret_stuff; + uint64_t busnum; + + ret_stuff.status = 0; + ret_stuff.v0 = 0; + + busnum = soft->pbi_buscommon.bs_persist_busnum; + SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_DISABLE, + (u64) busnum, (u64) device, (u64) action, + (u64) resp, 0, 0, 0); + + return (int)ret_stuff.v0; +} static int sal_pcibr_error_interrupt(struct pcibus_info *soft) { @@ -188,3 +221,6 @@ pcibr_init_provider(void) return 0; } + +EXPORT_SYMBOL_GPL(sal_pcibr_slot_enable); +EXPORT_SYMBOL_GPL(sal_pcibr_slot_disable); diff --git a/arch/ia64/sn/pci/pcibr/pcibr_reg.c b/arch/ia64/sn/pci/pcibr/pcibr_reg.c index 865c11c3b50a..21426d02fbe6 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_reg.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_reg.c @@ -6,13 +6,13 @@ * Copyright (C) 2004 Silicon Graphics, Inc. All rights reserved. 
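
sal_pcibr_slot_enable()/sal_pcibr_slot_disable() above marshal a persistent bus number, device number and (for disable) an action code into SN firmware calls via SAL_CALL_NOLOCK, and the EXPORT_SYMBOL_GPL lines make them available to a separately built PCI hotplug module. The patch itself contains no caller, so the following is only a hypothetical sketch of how such a module might use them; the response-buffer size and error handling are assumptions, not taken from this patch:

    #include <linux/kernel.h>
    #include <linux/types.h>

    struct pcibus_info;                     /* defined in <asm/sn/pcibr_provider.h> */
    int sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp);
    int sal_pcibr_slot_disable(struct pcibus_info *soft, int device, int action,
                               void *resp);

    /* Hypothetical hotplug-side caller; 'resp' layout is not defined by this patch. */
    static int example_enable_slot(struct pcibus_info *soft, int device)
    {
            u64 resp[2] = { 0, 0 };         /* assumed opaque response area */
            int rc;

            rc = sal_pcibr_slot_enable(soft, device, resp);
            if (rc)
                    printk(KERN_ERR "slot %d enable failed, SAL status %d\n",
                           device, rc);
            return rc;
    }
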
*/ -#include <linux/types.h> #include <linux/interrupt.h> +#include <linux/types.h> +#include <asm/sn/pcibr_provider.h> #include <asm/sn/pcibus_provider_defs.h> #include <asm/sn/pcidev.h> -#include "pci/tiocp.h" -#include "pci/pic.h" -#include "pci/pcibr_provider.h" +#include <asm/sn/pic.h> +#include <asm/sn/tiocp.h> union br_ptr { struct tiocp tio; diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c index 05aa8c2fe9bb..51cc4e63092c 100644 --- a/arch/ia64/sn/pci/tioca_provider.c +++ b/arch/ia64/sn/pci/tioca_provider.c @@ -589,8 +589,7 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft) /* sanity check prom rev */ - if (sn_sal_rev_major() < 4 || - (sn_sal_rev_major() == 4 && sn_sal_rev_minor() < 6)) { + if (sn_sal_rev() < 0x0406) { printk (KERN_ERR "%s: SGI prom rev 4.06 or greater required " "for tioca support\n", __FUNCTION__); |
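
pcibr_reg.c above keeps a single set of register accessors for two bridge ASICs by declaring union br_ptr over both register layouts (struct tiocp in the visible context; the mainline header pairs it with struct pic) and selecting the member from the bus's asic type. A minimal illustrative sketch of the idiom, with made-up layouts and field names:

    #include <stdint.h>

    /* Made-up register layouts standing in for struct tiocp / struct pic. */
    struct tiocp_like { uint64_t int_enable; /* ... */ };
    struct pic_like   { uint64_t pad; uint64_t int_enable; /* ... */ };

    union br_like {
            struct tiocp_like tio;
            struct pic_like   pic;
    };

    enum asic { ASIC_TIOCP, ASIC_PIC };

    /* One accessor serves both layouts; the asic type picks the member. */
    static uint64_t *br_int_enable(union br_like *ptr, enum asic type)
    {
            return (type == ASIC_TIOCP) ? &ptr->tio.int_enable
                                        : &ptr->pic.int_enable;
    }
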