 29 files changed, 334 insertions(+), 104 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 6c0e44bbf601..1c7c229a0926 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2549,7 +2549,7 @@ S:	Supported
 
 MISCELLANEOUS MCA-SUPPORT
 P:	James Bottomley
-M:	jejb@steeleye.com
+M:	James.Bottomley@HansenPartnership.com
 L:	linux-kernel@vger.kernel.org
 S:	Maintained
 
@@ -3301,9 +3301,11 @@ S:	Maintained
 
 SCSI SUBSYSTEM
 P:	James E.J. Bottomley
-M:	James.Bottomley@SteelEye.com
+M:	James.Bottomley@HansenPartnership.com
 L:	linux-scsi@vger.kernel.org
 T:	git kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6.git
+T:	git kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6.git
+T:	git kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-pending-2.6.git
 S:	Maintained
 
 SCSI TAPE DRIVER
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -528,9 +528,22 @@ KBUILD_CFLAGS += $(call cc-option,-Wdeclaration-after-statement,)
 KBUILD_CFLAGS += $(call cc-option,-Wno-pointer-sign,)
 
 # Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments
-KBUILD_CPPFLAGS += $(CPPFLAGS)
-KBUILD_AFLAGS += $(AFLAGS)
-KBUILD_CFLAGS += $(CFLAGS)
+# But warn user when we do so
+warn-assign = \
+$(warning "WARNING: Appending $$K$(1) ($(K$(1))) from $(origin K$(1)) to kernel $$$(1)")
+
+ifneq ($(KCPPFLAGS),)
+  $(call warn-assign,CPPFLAGS)
+  KBUILD_CPPFLAGS += $(KCPPFLAGS)
+endif
+ifneq ($(KAFLAGS),)
+  $(call warn-assign,AFLAGS)
+  KBUILD_AFLAGS += $(KAFLAGS)
+endif
+ifneq ($(KCFLAGS),)
+  $(call warn-assign,CFLAGS)
+  KBUILD_CFLAGS += $(KCFLAGS)
+endif
 
 # Use --build-id when available.
 LDFLAGS_BUILD_ID = $(patsubst -Wl$(comma)%,%,\
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index e6289ee74ecd..8bf4ae1150be 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -200,11 +200,13 @@ static noinline __init void find_memory_chunks(unsigned long memsize)
 		cc = __tprot(addr);
 		while (cc == old_cc) {
 			addr += CHUNK_INCR;
-			cc = __tprot(addr);
+			if (addr >= memsize)
+				break;
 #ifndef CONFIG_64BIT
 			if (addr == ADDR2G)
 				break;
 #endif
+			cc = __tprot(addr);
 		}
 
 		if (old_addr != addr &&
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 96492cf2d491..29f7884b4ffa 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -92,6 +92,7 @@ EXPORT_SYMBOL(unregister_idle_notifier);
 
 void do_monitor_call(struct pt_regs *regs, long interruption_code)
 {
+#ifdef CONFIG_SMP
 	struct s390_idle_data *idle;
 
 	idle = &__get_cpu_var(s390_idle);
@@ -99,7 +100,7 @@ void do_monitor_call(struct pt_regs *regs, long interruption_code)
 	idle->idle_time += get_clock() - idle->idle_enter;
 	idle->in_idle = 0;
 	spin_unlock(&idle->lock);
-
+#endif
 	/* disable monitor call class 0 */
 	__ctl_clear_bit(8, 15);
 
@@ -114,7 +115,9 @@ extern void s390_handle_mcck(void);
 static void default_idle(void)
 {
 	int cpu, rc;
+#ifdef CONFIG_SMP
 	struct s390_idle_data *idle;
+#endif
 
 	/* CPU is going idle. */
 	cpu = smp_processor_id();
@@ -151,13 +154,14 @@ static void default_idle(void)
 		s390_handle_mcck();
 		return;
 	}
-
+#ifdef CONFIG_SMP
 	idle = &__get_cpu_var(s390_idle);
 	spin_lock(&idle->lock);
 	idle->idle_count++;
 	idle->in_idle = 1;
 	idle->idle_enter = get_clock();
 	spin_unlock(&idle->lock);
+#endif
 	trace_hardirqs_on();
 	/* Wait for external, I/O or machine check interrupt. */
 	__load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
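
A note on the Makefile hunk above (whose "diff --git a/Makefile b/Makefile" header was lost in this capture and has been restored; the index line is gone for good): environment CPPFLAGS/AFLAGS/CFLAGS used to be appended to the kernel's own flags silently, so a stray CFLAGS in the shell could quietly change the build. Extra flags are now taken only from the dedicated KCPPFLAGS/KAFLAGS/KCFLAGS variables, with a warning whenever that happens. Typical use, where the flag itself is only an example:

	make KCFLAGS=-fno-inline-functions vmlinux

which, via warn-assign above, prints something like:

	Makefile: "WARNING: Appending $KCFLAGS (-fno-inline-functions) from command line to kernel $CFLAGS"

The s390 early.c hunk, for its part, reorders the probe loop so the address is bounds-checked against memsize (and the 31-bit 2G limit) before __tprot() touches it, rather than after.
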
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 1d97fe1c0e53..b05ae8584258 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -788,14 +788,14 @@ static ssize_t show_idle_time(struct sys_device *dev, char *buf)
 	}
 	new_time = idle->idle_time;
 	spin_unlock_irq(&idle->lock);
-	return sprintf(buf, "%llu us\n", new_time >> 12);
+	return sprintf(buf, "%llu\n", new_time >> 12);
 }
-static SYSDEV_ATTR(idle_time, 0444, show_idle_time, NULL);
+static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);
 
 static struct attribute *cpu_attrs[] = {
 	&attr_capability.attr,
 	&attr_idle_count.attr,
-	&attr_idle_time.attr,
+	&attr_idle_time_us.attr,
 	NULL,
 };
 
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 48dae49bc1ec..a963fe81359e 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -307,7 +307,7 @@ static cycle_t read_tod_clock(void)
 
 static struct clocksource clocksource_tod = {
 	.name		= "tod",
-	.rating		= 100,
+	.rating		= 400,
 	.read		= read_tod_clock,
 	.mask		= -1ULL,
 	.mult		= 1000,
diff --git a/arch/x86/boot/pmjump.S b/arch/x86/boot/pmjump.S
index 2e559233725a..fa6bed1fac14 100644
--- a/arch/x86/boot/pmjump.S
+++ b/arch/x86/boot/pmjump.S
@@ -28,17 +28,19 @@
  * void protected_mode_jump(u32 entrypoint, u32 bootparams);
  */
protected_mode_jump:
-	xorl	%ebx, %ebx		# Flag to indicate this is a boot
 	movl	%edx, %esi		# Pointer to boot_params table
 	movl	%eax, 2f		# Patch ljmpl instruction
-	jmp	1f			# Short jump to flush instruction q.
 
-1:	movw	$__BOOT_DS, %cx
+	movw	$__BOOT_DS, %cx
+	xorl	%ebx, %ebx		# Per the 32-bit boot protocol
+	xorl	%ebp, %ebp		# Per the 32-bit boot protocol
+	xorl	%edi, %edi		# Per the 32-bit boot protocol
 
 	movl	%cr0, %edx
 	orb	$1, %dl			# Protected mode (PE) bit
 	movl	%edx, %cr0
+	jmp	1f			# Short jump to serialize on 386/486
+1:
 
 	movw	%cx, %ds
 	movw	%cx, %es
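
Two notes on the hunks above. Renaming the s390 attribute to idle_time_us while dropping the " us" from the value follows the sysfs convention that a file holds exactly one value and the unit belongs in the file name, not the payload. Raising the TOD clocksource rating from 100 to 400 promotes it from the "base level usability" band to "perfect" in the scale described in include/linux/clocksource.h, which fits: the TOD clock is the architecturally defined time source on s390. Finally, the pmjump.S change zeroes %ebx, %ebp and %edi as the 32-bit boot protocol requires, and moves the serializing short jump to directly after setting CR0.PE, the one spot where 386/486-class CPUs actually need it.
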
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index a55b0902f9d3..92c56117eae5 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -93,38 +93,7 @@ struct lguest_data lguest_data = {
 };
 static cycle_t clock_base;
 
-/*G:035 Notice the lazy_hcall() above, rather than hcall().  This is our first
- * real optimization trick!
- *
- * When lazy_mode is set, it means we're allowed to defer all hypercalls and do
- * them as a batch when lazy_mode is eventually turned off.  Because hypercalls
- * are reasonably expensive, batching them up makes sense.  For example, a
- * large munmap might update dozens of page table entries: that code calls
- * paravirt_enter_lazy_mmu(), does the dozen updates, then calls
- * lguest_leave_lazy_mode().
- *
- * So, when we're in lazy mode, we call async_hcall() to store the call for
- * future processing.  When lazy mode is turned off we issue a hypercall to
- * flush the stored calls.
- */
-static void lguest_leave_lazy_mode(void)
-{
-	paravirt_leave_lazy(paravirt_get_lazy_mode());
-	hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
-}
-
-static void lazy_hcall(unsigned long call,
-		       unsigned long arg1,
-		       unsigned long arg2,
-		       unsigned long arg3)
-{
-	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
-		hcall(call, arg1, arg2, arg3);
-	else
-		async_hcall(call, arg1, arg2, arg3);
-}
-
-/* async_hcall() is pretty simple: I'm quite proud of it really.  We have a
+/*G:037 async_hcall() is pretty simple: I'm quite proud of it really.  We have a
  * ring buffer of stored hypercalls which the Host will run though next time we
  * do a normal hypercall.  Each entry in the ring has 4 slots for the hypercall
  * arguments, and a "hcall_status" word which is 0 if the call is ready to go,
@@ -134,8 +103,8 @@ static void lazy_hcall(unsigned long call,
  * full and we just make the hypercall directly.  This has the nice side
  * effect of causing the Host to run all the stored calls in the ring buffer
  * which empties it for next time! */
-void async_hcall(unsigned long call,
-		 unsigned long arg1, unsigned long arg2, unsigned long arg3)
+static void async_hcall(unsigned long call, unsigned long arg1,
+			unsigned long arg2, unsigned long arg3)
 {
 	/* Note: This code assumes we're uniprocessor. */
 	static unsigned int next_call;
@@ -161,7 +130,37 @@ void async_hcall(unsigned long call,
 	}
 	local_irq_restore(flags);
 }
-/*:*/
+
+/*G:035 Notice the lazy_hcall() above, rather than hcall().  This is our first
+ * real optimization trick!
+ *
+ * When lazy_mode is set, it means we're allowed to defer all hypercalls and do
+ * them as a batch when lazy_mode is eventually turned off.  Because hypercalls
+ * are reasonably expensive, batching them up makes sense.  For example, a
+ * large munmap might update dozens of page table entries: that code calls
+ * paravirt_enter_lazy_mmu(), does the dozen updates, then calls
+ * lguest_leave_lazy_mode().
+ *
+ * So, when we're in lazy mode, we call async_hcall() to store the call for
+ * future processing. */
+static void lazy_hcall(unsigned long call,
+		       unsigned long arg1,
+		       unsigned long arg2,
+		       unsigned long arg3)
+{
+	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
+		hcall(call, arg1, arg2, arg3);
+	else
+		async_hcall(call, arg1, arg2, arg3);
+}
+
+/* When lazy mode is turned off reset the per-cpu lazy mode variable and then
+ * issue a hypercall to flush any stored calls. */
+static void lguest_leave_lazy_mode(void)
+{
+	paravirt_leave_lazy(paravirt_get_lazy_mode());
+	hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
+}
 
 /*G:033
  * After that diversion we return to our first native-instruction
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 245057df69d6..94144ed50a6b 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -841,6 +841,9 @@ static void ata_scsi_dev_config(struct scsi_device *sdev,
 		blk_queue_max_hw_segments(q, q->max_hw_segments - 1);
 	}
 
+	if (dev->flags & ATA_DFLAG_AN)
+		set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);
+
 	if (dev->flags & ATA_DFLAG_NCQ) {
 		int depth;
 
@@ -3296,10 +3299,9 @@ static void ata_scsi_handle_link_detach(struct ata_link *link)
  */
 void ata_scsi_media_change_notify(struct ata_device *dev)
 {
-#ifdef OTHER_AN_PATCHES_HAVE_BEEN_APPLIED
 	if (dev->sdev)
-		scsi_device_event_notify(dev->sdev, SDEV_MEDIA_CHANGE);
-#endif
+		sdev_evt_send_simple(dev->sdev, SDEV_EVT_MEDIA_CHANGE,
+				     GFP_ATOMIC);
 }
 
 /**
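
The lguest hunks move code without changing behaviour: async_hcall() becomes static (its extern declaration is dropped from lguest_hcall.h later in this diff) and the functions are reordered so the G:035 and G:037 narrative comments read in sequence. The libata hunks are the producer side of the SCSI event infrastructure introduced below in drivers/scsi/scsi_lib.c: for devices that negotiated asynchronous notification (ATA_DFLAG_AN, typically SATA ATAPI drives), ata_scsi_dev_config marks SDEV_EVT_MEDIA_CHANGE as supported, and ata_scsi_media_change_notify, until now stubbed out behind the OTHER_AN_PATCHES_HAVE_BEEN_APPLIED guard, forwards the notification with sdev_evt_send_simple(). GFP_ATOMIC is required there because the notify path can run from interrupt context.
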
diff --git a/drivers/char/drm/radeon_cp.c b/drivers/char/drm/radeon_cp.c
index 335423c5c186..24fca8ec1379 100644
--- a/drivers/char/drm/radeon_cp.c
+++ b/drivers/char/drm/radeon_cp.c
@@ -1679,7 +1679,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
 		dev_priv->gart_info.bus_addr =
 		    dev_priv->pcigart_offset + dev_priv->fb_location;
 		dev_priv->gart_info.mapping.offset =
-		    dev_priv->gart_info.bus_addr;
+		    dev_priv->pcigart_offset + dev_priv->fb_aper_offset;
 		dev_priv->gart_info.mapping.size =
 		    dev_priv->gart_info.table_size;
 
@@ -2275,7 +2275,8 @@ int radeon_driver_firstopen(struct drm_device *dev)
 	if (ret != 0)
 		return ret;
 
-	ret = drm_addmap(dev, drm_get_resource_start(dev, 0),
+	dev_priv->fb_aper_offset = drm_get_resource_start(dev, 0);
+	ret = drm_addmap(dev, dev_priv->fb_aper_offset,
 			 drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER,
 			 _DRM_WRITE_COMBINING, &map);
 	if (ret != 0)
diff --git a/drivers/char/drm/radeon_drv.h b/drivers/char/drm/radeon_drv.h
index e4077bc212b3..bfbb60a9298c 100644
--- a/drivers/char/drm/radeon_drv.h
+++ b/drivers/char/drm/radeon_drv.h
@@ -293,6 +293,7 @@ typedef struct drm_radeon_private {
 
 	/* starting from here on, data is preserved accross an open */
 	uint32_t flags;		/* see radeon_chip_flags */
+	unsigned long fb_aper_offset;
 } drm_radeon_private_t;
 
 typedef struct drm_radeon_buf_priv {
diff --git a/drivers/char/drm/sis_mm.c b/drivers/char/drm/sis_mm.c
index 6be1c5757580..a6b7ccdaf73d 100644
--- a/drivers/char/drm/sis_mm.c
+++ b/drivers/char/drm/sis_mm.c
@@ -134,6 +134,7 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file_priv,
 		      dev_priv->agp_initialized)) {
 		DRM_ERROR
 		    ("Attempt to allocate from uninitialized memory manager.\n");
+		mutex_unlock(&dev->struct_mutex);
 		return -EINVAL;
 	}
 
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 859f870552e3..5e083d1f57e7 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -193,6 +193,12 @@ dcssblk_segment_warn(int rc, char* seg_name)
 	}
 }
 
+static void dcssblk_unregister_callback(struct device *dev)
+{
+	device_unregister(dev);
+	put_device(dev);
+}
+
 /*
  * device attribute for switching shared/nonshared (exclusive)
  * operation (show + store)
@@ -276,8 +282,7 @@ removeseg:
 	blk_cleanup_queue(dev_info->dcssblk_queue);
 	dev_info->gd->queue = NULL;
 	put_disk(dev_info->gd);
-	device_unregister(dev);
-	put_device(dev);
+	rc = device_schedule_callback(dev, dcssblk_unregister_callback);
 out:
 	up_write(&dcssblk_devices_sem);
 	return rc;
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 725b0dd14269..f4c132ab39ed 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -343,10 +343,10 @@ static int cmf_copy_block(struct ccw_device *cdev)
 
 	if (sch->schib.scsw.fctl & SCSW_FCTL_START_FUNC) {
 		/* Don't copy if a start function is in progress. */
-		if ((!sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED) &&
+		if ((!(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED)) &&
 		    (sch->schib.scsw.actl &
 		     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) &&
-		    (!sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS))
+		    (!(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)))
 			return -EBUSY;
 	}
 	cmb_data = cdev->private->cmb;
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 7ee57f084a89..74f6b539974a 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -738,7 +738,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
 	atomic_set(&cdev->private->onoff, 0);
 	cdev->dev.parent = &sch->dev;
 	cdev->dev.release = ccw_device_release;
-	INIT_LIST_HEAD(&cdev->private->kick_work.entry);
+	INIT_WORK(&cdev->private->kick_work, NULL);
 	cdev->dev.groups = ccwdev_attr_groups;
 	/* Do first half of device_register. */
 	device_initialize(&cdev->dev);
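
The cmf.c hunk above fixes a classic C precedence bug: ! binds tighter than &, so !scsw.actl & SCSW_ACTL_SUSPENDED parses as (!scsw.actl) & SCSW_ACTL_SUSPENDED, a test on whether actl is zero rather than on the SUSPENDED bit. A standalone illustration, with a bit value made up for the demo rather than the real s390 definition:

	#include <stdio.h>

	#define ACTL_SUSPENDED 0x08	/* illustrative bit value */

	int main(void)
	{
		unsigned int actl = 0x10;	/* busy, SUSPENDED clear */

		/* Buggy: (!actl) & 0x08 is 0 whenever actl is non-zero,
		 * regardless of what the SUSPENDED bit says. */
		printf("buggy: %d\n", !actl & ACTL_SUSPENDED);

		/* Intended: true exactly when the bit is clear. */
		printf("fixed: %d\n", !(actl & ACTL_SUSPENDED));
		return 0;
	}

This prints "buggy: 0" and "fixed: 1". The sis_mm.c hunk is the other everyday bug in this batch, an error return that left the function without dropping struct_mutex, while dcssblk.c avoids a device unregistering itself from inside its own sysfs store method by deferring the device_unregister/put_device pair to a scheduled callback, and the radeon hunks record the frame-buffer aperture offset at first open so the GART mapping uses the right base.
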
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index 3ccca5871fdf..47bb47b48581 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -148,6 +148,10 @@ static int __init smsg_init(void)
 {
 	int rc;
 
+	if (!MACHINE_IS_VM) {
+		rc = -EPROTONOSUPPORT;
+		goto out;
+	}
 	rc = driver_register(&smsg_driver);
 	if (rc != 0)
 		goto out;
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 22d91ee173c5..5f2396c03958 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -556,7 +556,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
 				   unsigned long timeout)
 {
 	u64 *crq_as_u64 = (u64 *) &evt_struct->crq;
-	int request_status;
+	int request_status = 0;
 	int rc;
 
 	/* If we have exhausted our request limit, just fail this request,
@@ -574,6 +574,13 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
 		if (request_status < -1)
 			goto send_error;
 		/* Otherwise, we may have run out of requests. */
+		/* If request limit was 0 when we started the adapter is in the
+		 * process of performing a login with the server adapter, or
+		 * we may have run out of requests.
+		 */
+		else if (request_status == -1 &&
+			 evt_struct->iu.srp.login_req.opcode != SRP_LOGIN_REQ)
+			goto send_busy;
 		/* Abort and reset calls should make it through.
 		 * Nothing except abort and reset should use the last two
 		 * slots unless we had two or less to begin with.
@@ -633,7 +640,8 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
 	unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct,
 		       hostdata->dev);
 	free_event_struct(&hostdata->pool, evt_struct);
-	atomic_inc(&hostdata->request_limit);
+	if (request_status != -1)
+		atomic_inc(&hostdata->request_limit);
 	return SCSI_MLQUEUE_HOST_BUSY;
 
 send_error:
@@ -927,10 +935,11 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
 	login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;
 
 	spin_lock_irqsave(hostdata->host->host_lock, flags);
-	/* Start out with a request limit of 1, since this is negotiated in
-	 * the login request we are just sending
+	/* Start out with a request limit of 0, since this is negotiated in
+	 * the login request we are just sending and login requests always
+	 * get sent by the driver regardless of request_limit.
 	 */
-	atomic_set(&hostdata->request_limit, 1);
+	atomic_set(&hostdata->request_limit, 0);
 	rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2);
 	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
 
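
The smsgiucv change simply refuses to load on non-z/VM systems up front (-EPROTONOSUPPORT) instead of registering a driver that can never work. In the ibmvscsi hunks, request_limit is the window of requests the server adapter has granted, and it is only learned from the SRP login response: starting it at 0 instead of 1 means ordinary commands are correctly refused until login completes, the new opcode test is what lets the login request itself through, and the request_status != -1 guard in the busy path keeps the driver from incrementing a limit it never decremented. Distilled to a sketch (the helper name is hypothetical; SRP_LOGIN_REQ is the real opcode):

	#include <linux/errno.h>
	#include <scsi/srp.h>		/* SRP_LOGIN_REQ */

	/* request_status is what atomic_dec_if_positive() returned:
	 * -1 means the limit was already 0, so no slot was taken. */
	static int may_send(int request_status, unsigned char opcode)
	{
		if (request_status < -1)
			return -EIO;	/* limit went hard negative: fail */
		if (request_status == -1 && opcode != SRP_LOGIN_REQ)
			return -EBUSY;	/* nothing granted yet: only login passes */
		return 0;
	}
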
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index c0755565fae9..4e46045dea6d 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -682,6 +682,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
 	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
 	int datadir = scsi_cmnd->sc_data_direction;
+	char tag[2];
 
 	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
 	/* clear task management bits */
@@ -692,8 +693,8 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 
 	memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);
 
-	if (scsi_cmnd->device->tagged_supported) {
-		switch (scsi_cmnd->tag) {
+	if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
+		switch (tag[0]) {
 		case HEAD_OF_QUEUE_TAG:
 			fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
 			break;
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 4652ad22516b..abef7048f25b 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -593,10 +593,11 @@ static int osst_verify_frame(struct osst_tape * STp, int frame_seq_number, int q
 	if (aux->frame_type != OS_FRAME_TYPE_DATA &&
 	    aux->frame_type != OS_FRAME_TYPE_EOD &&
 	    aux->frame_type != OS_FRAME_TYPE_MARKER) {
-		if (!quiet)
+		if (!quiet) {
 #if DEBUG
 			printk(OSST_DEB_MSG "%s:D: Skipping frame, frame type %x\n", name, aux->frame_type);
 #endif
+		}
 		goto err_out;
 	}
 	if (aux->frame_type == OS_FRAME_TYPE_EOD &&
@@ -606,11 +607,12 @@ static int osst_verify_frame(struct osst_tape * STp, int frame_seq_number, int q
 		goto err_out;
 	}
 	if (frame_seq_number != -1 && ntohl(aux->frame_seq_num) != frame_seq_number) {
-		if (!quiet)
+		if (!quiet) {
 #if DEBUG
 			printk(OSST_DEB_MSG "%s:D: Skipping frame, sequence number %u (expected %d)\n",
 					name, ntohl(aux->frame_seq_num), frame_seq_number);
 #endif
+		}
 		goto err_out;
 	}
 	if (aux->frame_type == OS_FRAME_TYPE_MARKER) {
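
The lpfc hunk stops peeking at scsi_cmnd->tag directly and uses scsi_populate_tag_msg(), the midlayer helper that owns tag-message construction, then switches on the returned message byte. The osst.c hunks fix a preprocessor variant of the dangling-statement bug: with DEBUG compiled out, the printk under #if DEBUG vanishes and if (!quiet) silently captures the following goto err_out, so in quiet mode bad frames were never rejected. A standalone illustration:

	#include <stdio.h>

	#define DEBUG 0	/* flip to 1 and the behaviour changes */

	int main(void)
	{
		int quiet = 1;	/* caller asked us to suppress messages */

		if (!quiet)
	#if DEBUG
			printf("D: skipping frame\n");
	#endif
			goto err_out;	/* intended to run unconditionally */

		printf("frame accepted\n");	/* wrongly reached when quiet */
		return 0;

	err_out:
		printf("frame rejected\n");
		return 1;
	}

With DEBUG set to 0 the goto becomes the if-body and the bad frame is "accepted"; with DEBUG set to 1 the printf is the body and the goto runs unconditionally. The added braces make the empty quiet body explicit either way.
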
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 88de771d3569..0e81e4cf8876 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2115,6 +2115,142 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
 EXPORT_SYMBOL(scsi_device_set_state);
 
 /**
+ * scsi_evt_emit - emit a single SCSI device uevent
+ * @sdev: associated SCSI device
+ * @evt: event to emit
+ *
+ * Send a single uevent (scsi_event) to the associated scsi_device.
+ */
+static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
+{
+	int idx = 0;
+	char *envp[3];
+
+	switch (evt->evt_type) {
+	case SDEV_EVT_MEDIA_CHANGE:
+		envp[idx++] = "SDEV_MEDIA_CHANGE=1";
+		break;
+
+	default:
+		/* do nothing */
+		break;
+	}
+
+	envp[idx++] = NULL;
+
+	kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
+}
+
+/**
+ * scsi_evt_thread - send a uevent for each scsi event
+ * @work: work struct for scsi_device
+ *
+ * Dispatch queued events to their associated scsi_device kobjects
+ * as uevents.
+ */
+void scsi_evt_thread(struct work_struct *work)
+{
+	struct scsi_device *sdev;
+	LIST_HEAD(event_list);
+
+	sdev = container_of(work, struct scsi_device, event_work);
+
+	while (1) {
+		struct scsi_event *evt;
+		struct list_head *this, *tmp;
+		unsigned long flags;
+
+		spin_lock_irqsave(&sdev->list_lock, flags);
+		list_splice_init(&sdev->event_list, &event_list);
+		spin_unlock_irqrestore(&sdev->list_lock, flags);
+
+		if (list_empty(&event_list))
+			break;
+
+		list_for_each_safe(this, tmp, &event_list) {
+			evt = list_entry(this, struct scsi_event, node);
+			list_del(&evt->node);
+			scsi_evt_emit(sdev, evt);
+			kfree(evt);
+		}
+	}
+}
+
+/**
+ * sdev_evt_send - send asserted event to uevent thread
+ * @sdev: scsi_device event occurred on
+ * @evt: event to send
+ *
+ * Assert scsi device event asynchronously.
+ */
+void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
+{
+	unsigned long flags;
+
+	if (!test_bit(evt->evt_type, sdev->supported_events)) {
+		kfree(evt);
+		return;
+	}
+
+	spin_lock_irqsave(&sdev->list_lock, flags);
+	list_add_tail(&evt->node, &sdev->event_list);
+	schedule_work(&sdev->event_work);
+	spin_unlock_irqrestore(&sdev->list_lock, flags);
+}
+EXPORT_SYMBOL_GPL(sdev_evt_send);
+
+/**
+ * sdev_evt_alloc - allocate a new scsi event
+ * @evt_type: type of event to allocate
+ * @gfpflags: GFP flags for allocation
+ *
+ * Allocates and returns a new scsi_event.
+ */
+struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
+				  gfp_t gfpflags)
+{
+	struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
+	if (!evt)
+		return NULL;
+
+	evt->evt_type = evt_type;
+	INIT_LIST_HEAD(&evt->node);
+
+	/* evt_type-specific initialization, if any */
+	switch (evt_type) {
+	case SDEV_EVT_MEDIA_CHANGE:
+	default:
+		/* do nothing */
+		break;
+	}
+
+	return evt;
+}
+EXPORT_SYMBOL_GPL(sdev_evt_alloc);
+
+/**
+ * sdev_evt_send_simple - send asserted event to uevent thread
+ * @sdev: scsi_device event occurred on
+ * @evt_type: type of event to send
+ * @gfpflags: GFP flags for allocation
+ *
+ * Assert scsi device event asynchronously, given an event type.
+ */
+void sdev_evt_send_simple(struct scsi_device *sdev,
+			  enum scsi_device_event evt_type, gfp_t gfpflags)
+{
+	struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
+	if (!evt) {
+		sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
+			    evt_type);
+		return;
+	}
+
+	sdev_evt_send(sdev, evt);
+}
+EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
+
+/**
  *	scsi_device_quiesce - Block user issued commands.
  *	@sdev:	scsi device to quiesce.
  *
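
The five functions above are the whole event pipeline: sdev_evt_send_simple allocates and queues a scsi_event, sdev_evt_send drops it unless the event type is marked in sdev->supported_events, and the per-device work item (scsi_evt_thread, wired up in scsi_scan.c below) turns each queued event into a KOBJ_CHANGE uevent carrying SDEV_MEDIA_CHANGE=1. A minimal sketch of the producer side as a low-level driver might use it; the my_host structure and interrupt handler are hypothetical, only the sdev_evt_* calls come from this patch:

	#include <linux/interrupt.h>
	#include <scsi/scsi_device.h>

	struct my_host {			/* hypothetical driver state */
		struct scsi_device *sdev;
		int media_changed;
	};

	static irqreturn_t my_host_intr(int irq, void *data)
	{
		struct my_host *host = data;

		/* Atomic context, so GFP_ATOMIC, as in the libata hookup.
		 * The event is silently dropped unless SDEV_EVT_MEDIA_CHANGE
		 * is set in sdev->supported_events, either by the driver or
		 * by userspace via the evt_media_change attribute below. */
		if (host->media_changed && host->sdev)
			sdev_evt_send_simple(host->sdev, SDEV_EVT_MEDIA_CHANGE,
					     GFP_ATOMIC);
		return IRQ_HANDLED;
	}
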
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index b53c5f67e372..40ea71cd2ca6 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -236,6 +236,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
 	struct scsi_device *sdev;
 	int display_failure_msg = 1, ret;
 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+	extern void scsi_evt_thread(struct work_struct *work);
 
 	sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size,
 		       GFP_ATOMIC);
@@ -254,7 +255,9 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
 	INIT_LIST_HEAD(&sdev->same_target_siblings);
 	INIT_LIST_HEAD(&sdev->cmd_list);
 	INIT_LIST_HEAD(&sdev->starved_entry);
+	INIT_LIST_HEAD(&sdev->event_list);
 	spin_lock_init(&sdev->list_lock);
+	INIT_WORK(&sdev->event_work, scsi_evt_thread);
 
 	sdev->sdev_gendev.parent = get_device(&starget->dev);
 	sdev->sdev_target = starget;
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index d531ceeb0d8c..f374fdcb6815 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -268,6 +268,7 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
 	struct scsi_device *sdev;
 	struct device *parent;
 	struct scsi_target *starget;
+	struct list_head *this, *tmp;
 	unsigned long flags;
 
 	sdev = container_of(work, struct scsi_device, ew.work);
@@ -282,6 +283,16 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
 	list_del(&sdev->starved_entry);
 	spin_unlock_irqrestore(sdev->host->host_lock, flags);
 
+	cancel_work_sync(&sdev->event_work);
+
+	list_for_each_safe(this, tmp, &sdev->event_list) {
+		struct scsi_event *evt;
+
+		evt = list_entry(this, struct scsi_event, node);
+		list_del(&evt->node);
+		kfree(evt);
+	}
+
 	if (sdev->request_queue) {
 		sdev->request_queue->queuedata = NULL;
 		/* user context needed to free queue */
@@ -614,6 +625,41 @@ sdev_show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
 }
 static DEVICE_ATTR(modalias, S_IRUGO, sdev_show_modalias, NULL);
 
+#define DECLARE_EVT_SHOW(name, Cap_name)				\
+static ssize_t								\
+sdev_show_evt_##name(struct device *dev, struct device_attribute *attr,\
+		     char *buf)						\
+{									\
+	struct scsi_device *sdev = to_scsi_device(dev);			\
+	int val = test_bit(SDEV_EVT_##Cap_name, sdev->supported_events);\
+	return snprintf(buf, 20, "%d\n", val);				\
+}
+
+#define DECLARE_EVT_STORE(name, Cap_name)				\
+static ssize_t								\
+sdev_store_evt_##name(struct device *dev, struct device_attribute *attr,\
+		      const char *buf, size_t count)			\
+{									\
+	struct scsi_device *sdev = to_scsi_device(dev);			\
+	int val = simple_strtoul(buf, NULL, 0);				\
+	if (val == 0)							\
+		clear_bit(SDEV_EVT_##Cap_name, sdev->supported_events);	\
+	else if (val == 1)						\
+		set_bit(SDEV_EVT_##Cap_name, sdev->supported_events);	\
+	else								\
+		return -EINVAL;						\
+	return count;							\
+}
+
+#define DECLARE_EVT(name, Cap_name)					\
+	DECLARE_EVT_SHOW(name, Cap_name)				\
+	DECLARE_EVT_STORE(name, Cap_name)				\
+	static DEVICE_ATTR(evt_##name, S_IRUGO, sdev_show_evt_##name,	\
+			   sdev_store_evt_##name);
+#define REF_EVT(name) &dev_attr_evt_##name.attr
+
+DECLARE_EVT(media_change, MEDIA_CHANGE)
+
 /* Default template for device attributes.  May NOT be modified */
 static struct attribute *scsi_sdev_attrs[] = {
 	&dev_attr_device_blocked.attr,
@@ -631,6 +677,7 @@ static struct attribute *scsi_sdev_attrs[] = {
 	&dev_attr_iodone_cnt.attr,
 	&dev_attr_ioerr_cnt.attr,
 	&dev_attr_modalias.attr,
+	REF_EVT(media_change),
 	NULL
 };
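
On the consumer side: each DECLARE_EVT(name, CAP) expands to a show/store pair backing a per-device evt_<name> attribute, so the media change event surfaces as an evt_media_change file under the device's sysfs directory, and writing 1 or 0 there toggles the supported_events bit that sdev_evt_send checks. Userspace can then react to the emitted uevent with a udev rule along these lines (illustrative, not part of this patch):

	ACTION=="change", SUBSYSTEM=="scsi", ENV{SDEV_MEDIA_CHANGE}=="1", \
		RUN+="/usr/bin/logger scsi media change"

Note that DECLARE_EVT registers the attribute with mode S_IRUGO even though a store method is wired up; without a write bit in the mode, only processes that override DAC permissions (root, in practice) can actually use the store path, which looks like an oversight rather than a design choice.
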
diff --git a/include/asm-powerpc/commproc.h b/include/asm-powerpc/commproc.h
index 0307c84a5c1d..a2328b8addd8 100644
--- a/include/asm-powerpc/commproc.h
+++ b/include/asm-powerpc/commproc.h
@@ -91,7 +91,7 @@ extern uint m8xx_cpm_hostalloc(uint size);
 extern int  m8xx_cpm_hostfree(uint start);
 extern void m8xx_cpm_hostdump(void);
 
-extern void cpm_load_patch(volatile immap_t *immr);
+extern void cpm_load_patch(cpm8xx_t *cp);
 
 /* Buffer descriptors used by many of the CPM protocols.
 */
diff --git a/include/asm-x86/lguest_hcall.h b/include/asm-x86/lguest_hcall.h
index 9c5092b6aa9f..2091779e91fb 100644
--- a/include/asm-x86/lguest_hcall.h
+++ b/include/asm-x86/lguest_hcall.h
@@ -54,9 +54,6 @@ hcall(unsigned long call,
 }
 /*:*/
 
-void async_hcall(unsigned long call,
-	unsigned long arg1, unsigned long arg2, unsigned long arg3);
-
 /* Can't use our min() macro here: needs to be a constant */
 #define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32)
 
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 99650353adfa..92d420fe03f8 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -149,10 +149,6 @@ union futex_key {
 		int offset;
 	} both;
 };
-int get_futex_key(u32 __user *uaddr, struct rw_semaphore *shared,
-		  union futex_key *key);
-void get_futex_key_refs(union futex_key *key);
-void drop_futex_key_refs(union futex_key *key);
 
 #ifdef CONFIG_FUTEX
 extern void exit_robust_list(struct task_struct *curr);
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index d5057bc338ff..66e9058357e0 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -46,6 +46,22 @@ enum scsi_device_state {
 				 * to the scsi lld. */
 };
 
+enum scsi_device_event {
+	SDEV_EVT_MEDIA_CHANGE	= 1,	/* media has changed */
+
+	SDEV_EVT_LAST		= SDEV_EVT_MEDIA_CHANGE,
+	SDEV_EVT_MAXBITS	= SDEV_EVT_LAST + 1
+};
+
+struct scsi_event {
+	enum scsi_device_event	evt_type;
+	struct list_head	node;
+
+	/* put union of data structures, for non-simple event types,
+	 * here
+	 */
+};
+
 struct scsi_device {
 	struct Scsi_Host *host;
 	struct request_queue *request_queue;
@@ -127,6 +143,10 @@ struct scsi_device {
 	unsigned guess_capacity:1;	/* READ_CAPACITY might be too high by 1 */
 	unsigned retry_hwerror:1;	/* Retry HARDWARE_ERROR */
 
+	DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */
+	struct list_head event_list;	/* asserted events */
+	struct work_struct event_work;
+
 	unsigned int device_blocked;	/* Device returned QUEUE_FULL. */
 
 	unsigned int max_device_blocked; /* what device_blocked counts down from */
@@ -275,6 +295,11 @@ extern int scsi_test_unit_ready(struct scsi_device *sdev, int timeout,
 				int retries);
 extern int scsi_device_set_state(struct scsi_device *sdev,
 				 enum scsi_device_state state);
+extern struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
+					 gfp_t gfpflags);
+extern void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt);
+extern void sdev_evt_send_simple(struct scsi_device *sdev,
+				 enum scsi_device_event evt_type, gfp_t gfpflags);
 extern int scsi_device_quiesce(struct scsi_device *sdev);
 extern void scsi_device_resume(struct scsi_device *sdev);
 extern void scsi_target_quiesce(struct scsi_target *);
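
A subtlety in the scsi_device.h hunk: event numbering starts at 1, so SDEV_EVT_MAXBITS is SDEV_EVT_LAST + 1; the bitmap needs LAST + 1 bits for test_bit(SDEV_EVT_LAST, ...) to stay in range. New event types are meant to slot in before SDEV_EVT_LAST, and non-trivial ones would carry their payload in the union placeholder left in struct scsi_event. The commproc.h and lguest_hcall.h hunks just keep prototypes in step with their definitions, and the futex.h hunk withdraws declarations for helpers that, as the kernel/futex.c changes below show, no longer have users outside that file.
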
diff --git a/kernel/futex.c b/kernel/futex.c
index 32710451dc20..9dc591ab681a 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -181,8 +181,8 @@ static inline int match_futex(union futex_key *key1, union futex_key *key2)
  * For other futexes, it points to &current->mm->mmap_sem and
  * caller must have taken the reader lock. but NOT any spinlocks.
  */
-int get_futex_key(u32 __user *uaddr, struct rw_semaphore *fshared,
-		  union futex_key *key)
+static int get_futex_key(u32 __user *uaddr, struct rw_semaphore *fshared,
+			 union futex_key *key)
 {
 	unsigned long address = (unsigned long)uaddr;
 	struct mm_struct *mm = current->mm;
@@ -268,14 +268,13 @@ int get_futex_key(u32 __user *uaddr, struct rw_semaphore *fshared,
 	}
 	return err;
 }
-EXPORT_SYMBOL_GPL(get_futex_key);
 
 /*
  * Take a reference to the resource addressed by a key.
  * Can be called while holding spinlocks.
  *
 */
-inline void get_futex_key_refs(union futex_key *key)
+static void get_futex_key_refs(union futex_key *key)
 {
 	if (key->both.ptr == 0)
 		return;
@@ -288,13 +287,12 @@ inline void get_futex_key_refs(union futex_key *key)
 			break;
 	}
 }
-EXPORT_SYMBOL_GPL(get_futex_key_refs);
 
 /*
  * Drop a reference to the resource addressed by a key.
  * The hash bucket spinlock must not be held.
 */
-void drop_futex_key_refs(union futex_key *key)
+static void drop_futex_key_refs(union futex_key *key)
 {
 	if (!key->both.ptr)
 		return;
@@ -307,7 +305,6 @@ void drop_futex_key_refs(union futex_key *key)
 			break;
 	}
 }
-EXPORT_SYMBOL_GPL(drop_futex_key_refs);
 
 static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
 {
diff --git a/mm/memory.c b/mm/memory.c
index eefd5b68bc42..9791e4786843 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2748,4 +2748,3 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
 
 	return buf - old_buf;
 }
-EXPORT_SYMBOL_GPL(access_process_vm);
diff --git a/mm/slub.c b/mm/slub.c
index bcdb2c8941a5..84f59fde1a10 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1511,26 +1511,8 @@ new_slab:
 	if (new) {
 		c = get_cpu_slab(s, smp_processor_id());
-		if (c->page) {
-			/*
-			 * Someone else populated the cpu_slab while we
-			 * enabled interrupts, or we have gotten scheduled
-			 * on another cpu. The page may not be on the
-			 * requested node even if __GFP_THISNODE was
-			 * specified. So we need to recheck.
-			 */
-			if (node_match(c, node)) {
-				/*
-				 * Current cpuslab is acceptable and we
-				 * want the current one since its cache hot
-				 */
-				discard_slab(s, new);
-				slab_lock(c->page);
-				goto load_freelist;
-			}
-			/* New slab does not fit our expectations */
+		if (c->page)
 			flush_slab(s, c);
-		}
 		slab_lock(new);
 		SetSlabFrozen(new);
 		c->page = new;
 		goto load_freelist;
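
The final three files are cleanups in the same spirit: get_futex_key() and its reference helpers lose their EXPORT_SYMBOL_GPLs and become static, and access_process_vm() loses its export, apparently because nothing modular in-tree uses them. The slub.c hunk replaces the "maybe reuse the still-hot cpu slab" recheck with an unconditional flush-and-install of the newly allocated slab; besides being simpler, the straight-line path avoids the memory leak the reuse logic could trigger when another allocator raced in while interrupts were enabled.
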