| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-03-26 11:11:23 -0700 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-03-26 11:11:23 -0700 |
| commit | 928a726b0e12184729900c076e13dbf1c511c96c (patch) | |
| tree | f31a7f23c1b511ebb486598cc746786e1821d48c /drivers | |
| parent | 8ff64b539bfd998792614481ccb67139b97075ef (diff) | |
| parent | eaeed5d31d8ded02fa0a4b608f57418cc0e65b07 (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6: (96 commits)
sh: add support for SMSC Polaris platform
sh: fix the HD64461 level-triggered interrupts handling
sh: sh-rtc wakeup support
sh: sh-rtc invalid time rework
sh: sh-rtc carry interrupt rework
sh: disallow kexec virtual entry
sh: kexec jump: fix for ftrace.
sh: kexec: Drop SR.BL bit toggling.
sh: add kexec jump support
sh: rework kexec segment code
sh: simplify kexec vbr code
sh: Flush only the needed range when unmapping a VMA.
sh: Update debugfs ASID dumping for 16-bit ASID support.
sh: tlb-pteaex: Kill off legacy PTEA updates.
sh: Support for extended ASIDs on PTEAEX-capable SH-X3 cores.
sh: sh7763rdp: Change IRQ number for sh_eth of sh7763rdp
sh: espt-giga board support
sh: dma: Make G2 DMA configurable.
sh: dma: Make PVR2 DMA configurable.
sh: Move IRQ multi definition of DMAC to defconfig
...
Diffstat (limited to 'drivers')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/clocksource/Makefile | 1 |
| -rw-r--r-- | drivers/clocksource/sh_cmt.c | 615 |
| -rw-r--r-- | drivers/input/joystick/maplecontrol.c | 4 |
| -rw-r--r-- | drivers/input/keyboard/maple_keyb.c | 37 |
| -rw-r--r-- | drivers/input/keyboard/sh_keysc.c | 26 |
| -rw-r--r-- | drivers/mtd/maps/Kconfig | 12 |
| -rw-r--r-- | drivers/mtd/maps/Makefile | 1 |
| -rw-r--r-- | drivers/mtd/maps/vmu-flash.c | 832 |
| -rw-r--r-- | drivers/rtc/rtc-sh.c | 246 |
| -rw-r--r-- | drivers/serial/sh-sci.c | 12 |
| -rw-r--r-- | drivers/serial/sh-sci.h | 15 |
| -rw-r--r-- | drivers/sh/intc.c | 47 |
| -rw-r--r-- | drivers/sh/maple/maple.c | 472 |
| -rw-r--r-- | drivers/sh/superhyway/superhyway.c | 4 |
| -rw-r--r-- | drivers/usb/Kconfig | 1 |
| -rw-r--r-- | drivers/usb/host/ohci-hcd.c | 3 |
| -rw-r--r-- | drivers/video/pvr2fb.c | 16 |
| -rw-r--r-- | drivers/video/sh_mobile_lcdcfb.c | 66 |
18 files changed, 2043 insertions, 367 deletions
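Most of the 2,043 added lines belong to two new drivers in the diff below: the SuperH CMT clocksource/clockevent driver (drivers/clocksource/sh_cmt.c) and the Dreamcast VMU flash driver (drivers/mtd/maps/vmu-flash.c). One detail worth calling out is how sh_cmt_get_counter() reads the free-running counter: it samples CMCNT three times and retries until the middle sample is ordered between the other two, the same trick acpi_pm.c uses to avoid reading a value mid-update. Below is a minimal standalone sketch of that pattern, not the kernel code itself; counter_reg and read_counter_reg() are simulated stand-ins for the real CMCNT MMIO accessor.

```c
/*
 * Standalone sketch of the "read until stable" pattern used by
 * sh_cmt_get_counter(): sample a free-running counter three times
 * and retry until the middle sample is consistently ordered between
 * the other two (allowing for at most one wrap).  counter_reg here
 * is a simulated register; in the driver it is an ioread16/ioread32
 * of CMCNT.
 */
#include <stdint.h>
#include <stdio.h>

static volatile uint32_t counter_reg;      /* stand-in for the CMCNT register */

static uint32_t read_counter_reg(void)
{
	counter_reg += 3;                  /* simulate the counter ticking */
	return counter_reg;
}

static uint32_t read_stable_counter(void)
{
	uint32_t v1, v2, v3;

	do {
		v1 = read_counter_reg();
		v2 = read_counter_reg();
		v3 = read_counter_reg();
		/* retry while the three samples are not consistently ordered */
	} while ((v1 > v2 && v1 < v3) || (v2 > v3 && v2 < v1) ||
		 (v3 > v1 && v3 < v2));

	return v2;
}

int main(void)
{
	printf("stable counter sample: %u\n", (unsigned)read_stable_counter());
	return 0;
}
```

The middle sample is the one returned: once the ordering check passes, it cannot have been taken across a counter update or wrap.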
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index 1525882190fd..1efb2879a94f 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -2,3 +2,4 @@ obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o obj-$(CONFIG_X86_CYCLONE_TIMER) += cyclone.o obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o +obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c new file mode 100644 index 000000000000..7783b42f6914 --- /dev/null +++ b/drivers/clocksource/sh_cmt.c @@ -0,0 +1,615 @@ +/* + * SuperH Timer Support - CMT + * + * Copyright (C) 2008 Magnus Damm + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/init.h> +#include <linux/bootmem.h> +#include <linux/platform_device.h> +#include <linux/spinlock.h> +#include <linux/interrupt.h> +#include <linux/ioport.h> +#include <linux/io.h> +#include <linux/clk.h> +#include <linux/irq.h> +#include <linux/err.h> +#include <linux/clocksource.h> +#include <linux/clockchips.h> +#include <linux/sh_cmt.h> + +struct sh_cmt_priv { + void __iomem *mapbase; + struct clk *clk; + unsigned long width; /* 16 or 32 bit version of hardware block */ + unsigned long overflow_bit; + unsigned long clear_bits; + struct irqaction irqaction; + struct platform_device *pdev; + + unsigned long flags; + unsigned long match_value; + unsigned long next_match_value; + unsigned long max_match_value; + unsigned long rate; + spinlock_t lock; + struct clock_event_device ced; + unsigned long total_cycles; +}; + +static DEFINE_SPINLOCK(sh_cmt_lock); + +#define CMSTR -1 /* shared register */ +#define CMCSR 0 /* channel register */ +#define CMCNT 1 /* channel register */ +#define CMCOR 2 /* channel register */ + +static inline unsigned long sh_cmt_read(struct sh_cmt_priv *p, int reg_nr) +{ + struct sh_cmt_config *cfg = p->pdev->dev.platform_data; + void __iomem *base = p->mapbase; + unsigned long offs; + + if (reg_nr == CMSTR) { + offs = 0; + base -= cfg->channel_offset; + } else + offs = reg_nr; + + if (p->width == 16) + offs <<= 1; + else { + offs <<= 2; + if ((reg_nr == CMCNT) || (reg_nr == CMCOR)) + return ioread32(base + offs); + } + + return ioread16(base + offs); +} + +static inline void sh_cmt_write(struct sh_cmt_priv *p, int reg_nr, + unsigned long value) +{ + struct sh_cmt_config *cfg = p->pdev->dev.platform_data; + void __iomem *base = p->mapbase; + unsigned long offs; + + if (reg_nr == CMSTR) { + offs = 0; + base -= cfg->channel_offset; + } else + offs = reg_nr; + + if (p->width == 16) + offs <<= 1; + else { + offs <<= 2; + if ((reg_nr == CMCNT) || (reg_nr == CMCOR)) { + iowrite32(value, base + offs); + return; + } + } + + iowrite16(value, base + offs); +} + +static unsigned long sh_cmt_get_counter(struct sh_cmt_priv *p, + int *has_wrapped) +{ + unsigned long v1, v2, v3; + + /* Make sure the timer value is 
stable. Stolen from acpi_pm.c */ + do { + v1 = sh_cmt_read(p, CMCNT); + v2 = sh_cmt_read(p, CMCNT); + v3 = sh_cmt_read(p, CMCNT); + } while (unlikely((v1 > v2 && v1 < v3) || (v2 > v3 && v2 < v1) + || (v3 > v1 && v3 < v2))); + + *has_wrapped = sh_cmt_read(p, CMCSR) & p->overflow_bit; + return v2; +} + + +static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start) +{ + struct sh_cmt_config *cfg = p->pdev->dev.platform_data; + unsigned long flags, value; + + /* start stop register shared by multiple timer channels */ + spin_lock_irqsave(&sh_cmt_lock, flags); + value = sh_cmt_read(p, CMSTR); + + if (start) + value |= 1 << cfg->timer_bit; + else + value &= ~(1 << cfg->timer_bit); + + sh_cmt_write(p, CMSTR, value); + spin_unlock_irqrestore(&sh_cmt_lock, flags); +} + +static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate) +{ + struct sh_cmt_config *cfg = p->pdev->dev.platform_data; + int ret; + + /* enable clock */ + ret = clk_enable(p->clk); + if (ret) { + pr_err("sh_cmt: cannot enable clock \"%s\"\n", cfg->clk); + return ret; + } + *rate = clk_get_rate(p->clk) / 8; + + /* make sure channel is disabled */ + sh_cmt_start_stop_ch(p, 0); + + /* configure channel, periodic mode and maximum timeout */ + if (p->width == 16) + sh_cmt_write(p, CMCSR, 0); + else + sh_cmt_write(p, CMCSR, 0x01a4); + + sh_cmt_write(p, CMCOR, 0xffffffff); + sh_cmt_write(p, CMCNT, 0); + + /* enable channel */ + sh_cmt_start_stop_ch(p, 1); + return 0; +} + +static void sh_cmt_disable(struct sh_cmt_priv *p) +{ + /* disable channel */ + sh_cmt_start_stop_ch(p, 0); + + /* stop clock */ + clk_disable(p->clk); +} + +/* private flags */ +#define FLAG_CLOCKEVENT (1 << 0) +#define FLAG_CLOCKSOURCE (1 << 1) +#define FLAG_REPROGRAM (1 << 2) +#define FLAG_SKIPEVENT (1 << 3) +#define FLAG_IRQCONTEXT (1 << 4) + +static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p, + int absolute) +{ + unsigned long new_match; + unsigned long value = p->next_match_value; + unsigned long delay = 0; + unsigned long now = 0; + int has_wrapped; + + now = sh_cmt_get_counter(p, &has_wrapped); + p->flags |= FLAG_REPROGRAM; /* force reprogram */ + + if (has_wrapped) { + /* we're competing with the interrupt handler. + * -> let the interrupt handler reprogram the timer. + * -> interrupt number two handles the event. + */ + p->flags |= FLAG_SKIPEVENT; + return; + } + + if (absolute) + now = 0; + + do { + /* reprogram the timer hardware, + * but don't save the new match value yet. + */ + new_match = now + value + delay; + if (new_match > p->max_match_value) + new_match = p->max_match_value; + + sh_cmt_write(p, CMCOR, new_match); + + now = sh_cmt_get_counter(p, &has_wrapped); + if (has_wrapped && (new_match > p->match_value)) { + /* we are changing to a greater match value, + * so this wrap must be caused by the counter + * matching the old value. + * -> first interrupt reprograms the timer. + * -> interrupt number two handles the event. + */ + p->flags |= FLAG_SKIPEVENT; + break; + } + + if (has_wrapped) { + /* we are changing to a smaller match value, + * so the wrap must be caused by the counter + * matching the new value. + * -> save programmed match value. + * -> let isr handle the event. + */ + p->match_value = new_match; + break; + } + + /* be safe: verify hardware settings */ + if (now < new_match) { + /* timer value is below match value, all good. + * this makes sure we won't miss any match events. + * -> save programmed match value. + * -> let isr handle the event. 
+ */ + p->match_value = new_match; + break; + } + + /* the counter has reached a value greater + * than our new match value. and since the + * has_wrapped flag isn't set we must have + * programmed a too close event. + * -> increase delay and retry. + */ + if (delay) + delay <<= 1; + else + delay = 1; + + if (!delay) + pr_warning("sh_cmt: too long delay\n"); + + } while (delay); +} + +static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta) +{ + unsigned long flags; + + if (delta > p->max_match_value) + pr_warning("sh_cmt: delta out of range\n"); + + spin_lock_irqsave(&p->lock, flags); + p->next_match_value = delta; + sh_cmt_clock_event_program_verify(p, 0); + spin_unlock_irqrestore(&p->lock, flags); +} + +static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id) +{ + struct sh_cmt_priv *p = dev_id; + + /* clear flags */ + sh_cmt_write(p, CMCSR, sh_cmt_read(p, CMCSR) & p->clear_bits); + + /* update clock source counter to begin with if enabled + * the wrap flag should be cleared by the timer specific + * isr before we end up here. + */ + if (p->flags & FLAG_CLOCKSOURCE) + p->total_cycles += p->match_value; + + if (!(p->flags & FLAG_REPROGRAM)) + p->next_match_value = p->max_match_value; + + p->flags |= FLAG_IRQCONTEXT; + + if (p->flags & FLAG_CLOCKEVENT) { + if (!(p->flags & FLAG_SKIPEVENT)) { + if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT) { + p->next_match_value = p->max_match_value; + p->flags |= FLAG_REPROGRAM; + } + + p->ced.event_handler(&p->ced); + } + } + + p->flags &= ~FLAG_SKIPEVENT; + + if (p->flags & FLAG_REPROGRAM) { + p->flags &= ~FLAG_REPROGRAM; + sh_cmt_clock_event_program_verify(p, 1); + + if (p->flags & FLAG_CLOCKEVENT) + if ((p->ced.mode == CLOCK_EVT_MODE_SHUTDOWN) + || (p->match_value == p->next_match_value)) + p->flags &= ~FLAG_REPROGRAM; + } + + p->flags &= ~FLAG_IRQCONTEXT; + + return IRQ_HANDLED; +} + +static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag) +{ + int ret = 0; + unsigned long flags; + + spin_lock_irqsave(&p->lock, flags); + + if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) + ret = sh_cmt_enable(p, &p->rate); + + if (ret) + goto out; + p->flags |= flag; + + /* setup timeout if no clockevent */ + if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT))) + sh_cmt_set_next(p, p->max_match_value); + out: + spin_unlock_irqrestore(&p->lock, flags); + + return ret; +} + +static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag) +{ + unsigned long flags; + unsigned long f; + + spin_lock_irqsave(&p->lock, flags); + + f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE); + p->flags &= ~flag; + + if (f && !(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) + sh_cmt_disable(p); + + /* adjust the timeout to maximum if only clocksource left */ + if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE)) + sh_cmt_set_next(p, p->max_match_value); + + spin_unlock_irqrestore(&p->lock, flags); +} + +static struct sh_cmt_priv *ced_to_sh_cmt(struct clock_event_device *ced) +{ + return container_of(ced, struct sh_cmt_priv, ced); +} + +static void sh_cmt_clock_event_start(struct sh_cmt_priv *p, int periodic) +{ + struct clock_event_device *ced = &p->ced; + + sh_cmt_start(p, FLAG_CLOCKEVENT); + + /* TODO: calculate good shift from rate and counter bit width */ + + ced->shift = 32; + ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift); + ced->max_delta_ns = clockevent_delta2ns(p->max_match_value, ced); + ced->min_delta_ns = clockevent_delta2ns(0x1f, ced); + + if (periodic) + sh_cmt_set_next(p, (p->rate + HZ/2) / HZ); + 
else + sh_cmt_set_next(p, p->max_match_value); +} + +static void sh_cmt_clock_event_mode(enum clock_event_mode mode, + struct clock_event_device *ced) +{ + struct sh_cmt_priv *p = ced_to_sh_cmt(ced); + + /* deal with old setting first */ + switch (ced->mode) { + case CLOCK_EVT_MODE_PERIODIC: + case CLOCK_EVT_MODE_ONESHOT: + sh_cmt_stop(p, FLAG_CLOCKEVENT); + break; + default: + break; + } + + switch (mode) { + case CLOCK_EVT_MODE_PERIODIC: + pr_info("sh_cmt: %s used for periodic clock events\n", + ced->name); + sh_cmt_clock_event_start(p, 1); + break; + case CLOCK_EVT_MODE_ONESHOT: + pr_info("sh_cmt: %s used for oneshot clock events\n", + ced->name); + sh_cmt_clock_event_start(p, 0); + break; + case CLOCK_EVT_MODE_SHUTDOWN: + case CLOCK_EVT_MODE_UNUSED: + sh_cmt_stop(p, FLAG_CLOCKEVENT); + break; + default: + break; + } +} + +static int sh_cmt_clock_event_next(unsigned long delta, + struct clock_event_device *ced) +{ + struct sh_cmt_priv *p = ced_to_sh_cmt(ced); + + BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT); + if (likely(p->flags & FLAG_IRQCONTEXT)) + p->next_match_value = delta; + else + sh_cmt_set_next(p, delta); + + return 0; +} + +static void sh_cmt_register_clockevent(struct sh_cmt_priv *p, + char *name, unsigned long rating) +{ + struct clock_event_device *ced = &p->ced; + + memset(ced, 0, sizeof(*ced)); + + ced->name = name; + ced->features = CLOCK_EVT_FEAT_PERIODIC; + ced->features |= CLOCK_EVT_FEAT_ONESHOT; + ced->rating = rating; + ced->cpumask = cpumask_of(0); + ced->set_next_event = sh_cmt_clock_event_next; + ced->set_mode = sh_cmt_clock_event_mode; + + pr_info("sh_cmt: %s used for clock events\n", ced->name); + ced->mult = 1; /* work around misplaced WARN_ON() in clockevents.c */ + clockevents_register_device(ced); +} + +int sh_cmt_register(struct sh_cmt_priv *p, char *name, + unsigned long clockevent_rating, + unsigned long clocksource_rating) +{ + if (p->width == (sizeof(p->max_match_value) * 8)) + p->max_match_value = ~0; + else + p->max_match_value = (1 << p->width) - 1; + + p->match_value = p->max_match_value; + spin_lock_init(&p->lock); + + if (clockevent_rating) + sh_cmt_register_clockevent(p, name, clockevent_rating); + + return 0; +} + +static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev) +{ + struct sh_cmt_config *cfg = pdev->dev.platform_data; + struct resource *res; + int irq, ret; + ret = -ENXIO; + + memset(p, 0, sizeof(*p)); + p->pdev = pdev; + + if (!cfg) { + dev_err(&p->pdev->dev, "missing platform data\n"); + goto err0; + } + + platform_set_drvdata(pdev, p); + + res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&p->pdev->dev, "failed to get I/O memory\n"); + goto err0; + } + + irq = platform_get_irq(p->pdev, 0); + if (irq < 0) { + dev_err(&p->pdev->dev, "failed to get irq\n"); + goto err0; + } + + /* map memory, let mapbase point to our channel */ + p->mapbase = ioremap_nocache(res->start, resource_size(res)); + if (p->mapbase == NULL) { + pr_err("sh_cmt: failed to remap I/O memory\n"); + goto err0; + } + + /* request irq using setup_irq() (too early for request_irq()) */ + p->irqaction.name = cfg->name; + p->irqaction.handler = sh_cmt_interrupt; + p->irqaction.dev_id = p; + p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL; + p->irqaction.mask = CPU_MASK_NONE; + ret = setup_irq(irq, &p->irqaction); + if (ret) { + pr_err("sh_cmt: failed to request irq %d\n", irq); + goto err1; + } + + /* get hold of clock */ + p->clk = clk_get(&p->pdev->dev, cfg->clk); + if (IS_ERR(p->clk)) { + 
pr_err("sh_cmt: cannot get clock \"%s\"\n", cfg->clk); + ret = PTR_ERR(p->clk); + goto err2; + } + + if (resource_size(res) == 6) { + p->width = 16; + p->overflow_bit = 0x80; + p->clear_bits = ~0xc0; + } else { + p->width = 32; + p->overflow_bit = 0x8000; + p->clear_bits = ~0xc000; + } + + return sh_cmt_register(p, cfg->name, + cfg->clockevent_rating, + cfg->clocksource_rating); + err2: + free_irq(irq, p); + err1: + iounmap(p->mapbase); + err0: + return ret; +} + +static int __devinit sh_cmt_probe(struct platform_device *pdev) +{ + struct sh_cmt_priv *p = platform_get_drvdata(pdev); + int ret; + + p = kmalloc(sizeof(*p), GFP_KERNEL); + if (p == NULL) { + dev_err(&pdev->dev, "failed to allocate driver data\n"); + return -ENOMEM; + } + + ret = sh_cmt_setup(p, pdev); + if (ret) { + kfree(p); + + platform_set_drvdata(pdev, NULL); + } + return ret; +} + +static int __devexit sh_cmt_remove(struct platform_device *pdev) +{ + return -EBUSY; /* cannot unregister clockevent and clocksource */ +} + +static struct platform_driver sh_cmt_device_driver = { + .probe = sh_cmt_probe, + .remove = __devexit_p(sh_cmt_remove), + .driver = { + .name = "sh_cmt", + } +}; + +static int __init sh_cmt_init(void) +{ + return platform_driver_register(&sh_cmt_device_driver); +} + +static void __exit sh_cmt_exit(void) +{ + platform_driver_unregister(&sh_cmt_device_driver); +} + +module_init(sh_cmt_init); +module_exit(sh_cmt_exit); + +MODULE_AUTHOR("Magnus Damm"); +MODULE_DESCRIPTION("SuperH CMT Timer Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/input/joystick/maplecontrol.c b/drivers/input/joystick/maplecontrol.c index e50047bfe938..77cfde571bd9 100644 --- a/drivers/input/joystick/maplecontrol.c +++ b/drivers/input/joystick/maplecontrol.c @@ -3,7 +3,7 @@ * Based on drivers/usb/iforce.c * * Copyright Yaegashi Takeshi, 2001 - * Adrian McMenamin, 2008 + * Adrian McMenamin, 2008 - 2009 */ #include <linux/kernel.h> @@ -29,7 +29,7 @@ static void dc_pad_callback(struct mapleq *mq) struct maple_device *mapledev = mq->dev; struct dc_pad *pad = maple_get_drvdata(mapledev); struct input_dev *dev = pad->dev; - unsigned char *res = mq->recvbuf; + unsigned char *res = mq->recvbuf->buf; buttons = ~le16_to_cpup((__le16 *)(res + 8)); diff --git a/drivers/input/keyboard/maple_keyb.c b/drivers/input/keyboard/maple_keyb.c index 22f17a593be7..5aa2361aef95 100644 --- a/drivers/input/keyboard/maple_keyb.c +++ b/drivers/input/keyboard/maple_keyb.c @@ -1,8 +1,8 @@ /* * SEGA Dreamcast keyboard driver * Based on drivers/usb/usbkbd.c - * Copyright YAEGASHI Takeshi, 2001 - * Porting to 2.6 Copyright Adrian McMenamin, 2007, 2008 + * Copyright (c) YAEGASHI Takeshi, 2001 + * Porting to 2.6 Copyright (c) Adrian McMenamin, 2007 - 2009 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -33,7 +33,7 @@ static DEFINE_MUTEX(maple_keyb_mutex); #define NR_SCANCODES 256 -MODULE_AUTHOR("YAEGASHI Takeshi, Adrian McMenamin"); +MODULE_AUTHOR("Adrian McMenamin <adrian@mcmen.demon.co.uk"); MODULE_DESCRIPTION("SEGA Dreamcast keyboard driver"); MODULE_LICENSE("GPL"); @@ -115,7 +115,7 @@ static void dc_scan_kbd(struct dc_kbd *kbd) input_event(dev, EV_MSC, MSC_SCAN, code); input_report_key(dev, keycode, 0); } else - printk(KERN_DEBUG "maple_keyb: " + dev_dbg(&dev->dev, "Unknown key (scancode %#x) released.", code); } @@ -127,7 +127,7 @@ static void dc_scan_kbd(struct dc_kbd *kbd) input_event(dev, EV_MSC, MSC_SCAN, code); input_report_key(dev, keycode, 1); } else - 
printk(KERN_DEBUG "maple_keyb: " + dev_dbg(&dev->dev, "Unknown key (scancode %#x) pressed.", code); } @@ -140,7 +140,7 @@ static void dc_kbd_callback(struct mapleq *mq) { struct maple_device *mapledev = mq->dev; struct dc_kbd *kbd = maple_get_drvdata(mapledev); - unsigned long *buf = mq->recvbuf; + unsigned long *buf = (unsigned long *)(mq->recvbuf->buf); /* * We should always get the lock because the only @@ -159,22 +159,27 @@ static void dc_kbd_callback(struct mapleq *mq) static int probe_maple_kbd(struct device *dev) { - struct maple_device *mdev = to_maple_dev(dev); - struct maple_driver *mdrv = to_maple_driver(dev->driver); + struct maple_device *mdev; + struct maple_driver *mdrv; int i, error; struct dc_kbd *kbd; struct input_dev *idev; - if (!(mdev->function & MAPLE_FUNC_KEYBOARD)) - return -EINVAL; + mdev = to_maple_dev(dev); + mdrv = to_maple_driver(dev->driver); kbd = kzalloc(sizeof(struct dc_kbd), GFP_KERNEL); - idev = input_allocate_device(); - if (!kbd || !idev) { + if (!kbd) { error = -ENOMEM; goto fail; } + idev = input_allocate_device(); + if (!idev) { + error = -ENOMEM; + goto fail_idev_alloc; + } + kbd->dev = idev; memcpy(kbd->keycode, dc_kbd_keycode, sizeof(kbd->keycode)); @@ -195,7 +200,7 @@ static int probe_maple_kbd(struct device *dev) error = input_register_device(idev); if (error) - goto fail; + goto fail_register; /* Maple polling is locked to VBLANK - which may be just 50/s */ maple_getcond_callback(mdev, dc_kbd_callback, HZ/50, @@ -207,10 +212,12 @@ static int probe_maple_kbd(struct device *dev) return error; -fail: +fail_register: + maple_set_drvdata(mdev, NULL); input_free_device(idev); +fail_idev_alloc: kfree(kbd); - maple_set_drvdata(mdev, NULL); +fail: return error; } diff --git a/drivers/input/keyboard/sh_keysc.c b/drivers/input/keyboard/sh_keysc.c index 5c8a1bcf7ca7..e1480fb11de3 100644 --- a/drivers/input/keyboard/sh_keysc.c +++ b/drivers/input/keyboard/sh_keysc.c @@ -219,6 +219,8 @@ static int __devinit sh_keysc_probe(struct platform_device *pdev) pdata->scan_timing, priv->iomem_base + KYCR1_OFFS); iowrite16(0, priv->iomem_base + KYOUTDR_OFFS); iowrite16(KYCR2_IRQ_LEVEL, priv->iomem_base + KYCR2_OFFS); + + device_init_wakeup(&pdev->dev, 1); return 0; err5: free_irq(irq, pdev); @@ -253,17 +255,33 @@ static int __devexit sh_keysc_remove(struct platform_device *pdev) return 0; } +static int sh_keysc_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct sh_keysc_priv *priv = platform_get_drvdata(pdev); + unsigned short value; + + value = ioread16(priv->iomem_base + KYCR1_OFFS); + + if (device_may_wakeup(dev)) + value |= 0x80; + else + value &= ~0x80; -#define sh_keysc_suspend NULL -#define sh_keysc_resume NULL + iowrite16(value, priv->iomem_base + KYCR1_OFFS); + return 0; +} + +static struct dev_pm_ops sh_keysc_dev_pm_ops = { + .suspend = sh_keysc_suspend, +}; struct platform_driver sh_keysc_device_driver = { .probe = sh_keysc_probe, .remove = __devexit_p(sh_keysc_remove), - .suspend = sh_keysc_suspend, - .resume = sh_keysc_resume, .driver = { .name = "sh_keysc", + .pm = &sh_keysc_dev_pm_ops, } }; diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig index 043d50fb6ef6..729f899a5cd5 100644 --- a/drivers/mtd/maps/Kconfig +++ b/drivers/mtd/maps/Kconfig @@ -551,5 +551,15 @@ config MTD_PLATRAM This selection automatically selects the map_ram driver. 
-endmenu +config MTD_VMU + tristate "Map driver for Dreamcast VMU" + depends on MAPLE + help + This driver enables access to the Dreamcast Visual Memory Unit (VMU). + + Most Dreamcast users will want to say Y here. + To build this as a module select M here, the module will be called + vmu-flash. + +endmenu diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile index 6d9ba35caf11..26b28a7a90b5 100644 --- a/drivers/mtd/maps/Makefile +++ b/drivers/mtd/maps/Makefile @@ -61,3 +61,4 @@ obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o obj-$(CONFIG_MTD_OMAP_NOR) += omap_nor.o obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o obj-$(CONFIG_MTD_BFIN_ASYNC) += bfin-async-flash.o +obj-$(CONFIG_MTD_VMU) += vmu-flash.o diff --git a/drivers/mtd/maps/vmu-flash.c b/drivers/mtd/maps/vmu-flash.c new file mode 100644 index 000000000000..1f73297e7776 --- /dev/null +++ b/drivers/mtd/maps/vmu-flash.c @@ -0,0 +1,832 @@ +/* vmu-flash.c + * Driver for SEGA Dreamcast Visual Memory Unit + * + * Copyright (c) Adrian McMenamin 2002 - 2009 + * Copyright (c) Paul Mundt 2001 + * + * Licensed under version 2 of the + * GNU General Public Licence + */ +#include <linux/init.h> +#include <linux/sched.h> +#include <linux/delay.h> +#include <linux/maple.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/map.h> + +struct vmu_cache { + unsigned char *buffer; /* Cache */ + unsigned int block; /* Which block was cached */ + unsigned long jiffies_atc; /* When was it cached? */ + int valid; +}; + +struct mdev_part { + struct maple_device *mdev; + int partition; +}; + +struct vmupart { + u16 user_blocks; + u16 root_block; + u16 numblocks; + char *name; + struct vmu_cache *pcache; +}; + +struct memcard { + u16 tempA; + u16 tempB; + u32 partitions; + u32 blocklen; + u32 writecnt; + u32 readcnt; + u32 removeable; + int partition; + int read; + unsigned char *blockread; + struct vmupart *parts; + struct mtd_info *mtd; +}; + +struct vmu_block { + unsigned int num; /* block number */ + unsigned int ofs; /* block offset */ +}; + +static struct vmu_block *ofs_to_block(unsigned long src_ofs, + struct mtd_info *mtd, int partition) +{ + struct vmu_block *vblock; + struct maple_device *mdev; + struct memcard *card; + struct mdev_part *mpart; + int num; + + mpart = mtd->priv; + mdev = mpart->mdev; + card = maple_get_drvdata(mdev); + + if (src_ofs >= card->parts[partition].numblocks * card->blocklen) + goto failed; + + num = src_ofs / card->blocklen; + if (num > card->parts[partition].numblocks) + goto failed; + + vblock = kmalloc(sizeof(struct vmu_block), GFP_KERNEL); + if (!vblock) + goto failed; + + vblock->num = num; + vblock->ofs = src_ofs % card->blocklen; + return vblock; + +failed: + return NULL; +} + +/* Maple bus callback function for reads */ +static void vmu_blockread(struct mapleq *mq) +{ + struct maple_device *mdev; + struct memcard *card; + + mdev = mq->dev; + card = maple_get_drvdata(mdev); + /* copy the read in data */ + + if (unlikely(!card->blockread)) + return; + + memcpy(card->blockread, mq->recvbuf->buf + 12, + card->blocklen/card->readcnt); + +} + +/* Interface with maple bus to read blocks + * caching the results so that other parts + * of the driver can access block reads */ +static int maple_vmu_read_block(unsigned int num, unsigned char *buf, + struct mtd_info *mtd) +{ + struct memcard *card; + struct mdev_part *mpart; + struct maple_device *mdev; + int partition, error = 0, x, wait; + unsigned char *blockread = NULL; + struct vmu_cache *pcache; + __be32 sendbuf; + + mpart = mtd->priv; + mdev = mpart->mdev; + partition 
= mpart->partition; + card = maple_get_drvdata(mdev); + pcache = card->parts[partition].pcache; + pcache->valid = 0; + + /* prepare the cache for this block */ + if (!pcache->buffer) { + pcache->buffer = kmalloc(card->blocklen, GFP_KERNEL); + if (!pcache->buffer) { + dev_err(&mdev->dev, "VMU at (%d, %d) - read fails due" + " to lack of memory\n", mdev->port, + mdev->unit); + error = -ENOMEM; + goto outB; + } + } + + /* + * Reads may be phased - again the hardware spec + * supports this - though may not be any devices in + * the wild that implement it, but we will here + */ + for (x = 0; x < card->readcnt; x++) { + sendbuf = cpu_to_be32(partition << 24 | x << 16 | num); + + if (atomic_read(&mdev->busy) == 1) { + wait_event_interruptible_timeout(mdev->maple_wait, + atomic_read(&mdev->busy) == 0, HZ); + if (atomic_read(&mdev->busy) == 1) { + dev_notice(&mdev->dev, "VMU at (%d, %d)" + " is busy\n", mdev->port, mdev->unit); + error = -EAGAIN; + goto outB; + } + } + + atomic_set(&mdev->busy, 1); + blockread = kmalloc(card->blocklen/card->readcnt, GFP_KERNEL); + if (!blockread) { + error = -ENOMEM; + atomic_set(&mdev->busy, 0); + goto outB; + } + card->blockread = blockread; + + maple_getcond_callback(mdev, vmu_blockread, 0, + MAPLE_FUNC_MEMCARD); + error = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD, + MAPLE_COMMAND_BREAD, 2, &sendbuf); + /* Very long timeouts seem to be needed when box is stressed */ + wait = wait_event_interruptible_timeout(mdev->maple_wait, + (atomic_read(&mdev->busy) == 0 || + atomic_read(&mdev->busy) == 2), HZ * 3); + /* + * MTD layer does not handle hotplugging well + * so have to return errors when VMU is unplugged + * in the middle of a read (busy == 2) + */ + if (error || atomic_read(&mdev->busy) == 2) { + if (atomic_read(&mdev->busy) == 2) + error = -ENXIO; + atomic_set(&mdev->busy, 0); + card->blockread = NULL; + goto outA; + } + if (wait == 0 || wait == -ERESTARTSYS) { + card->blockread = NULL; + atomic_set(&mdev->busy, 0); + error = -EIO; + list_del_init(&(mdev->mq->list)); + kfree(mdev->mq->sendbuf); + mdev->mq->sendbuf = NULL; + if (wait == -ERESTARTSYS) { + dev_warn(&mdev->dev, "VMU read on (%d, %d)" + " interrupted on block 0x%X\n", + mdev->port, mdev->unit, num); + } else + dev_notice(&mdev->dev, "VMU read on (%d, %d)" + " timed out on block 0x%X\n", + mdev->port, mdev->unit, num); + goto outA; + } + + memcpy(buf + (card->blocklen/card->readcnt) * x, blockread, + card->blocklen/card->readcnt); + + memcpy(pcache->buffer + (card->blocklen/card->readcnt) * x, + card->blockread, card->blocklen/card->readcnt); + card->blockread = NULL; + pcache->block = num; + pcache->jiffies_atc = jiffies; + pcache->valid = 1; + kfree(blockread); + } + + return error; + +outA: + kfree(blockread); +outB: + return error; +} + +/* communicate with maple bus for phased writing */ +static int maple_vmu_write_block(unsigned int num, const unsigned char *buf, + struct mtd_info *mtd) +{ + struct memcard *card; + struct mdev_part *mpart; + struct maple_device *mdev; + int partition, error, locking, x, phaselen, wait; + __be32 *sendbuf; + + mpart = mtd->priv; + mdev = mpart->mdev; + partition = mpart->partition; + card = maple_get_drvdata(mdev); + + phaselen = card->blocklen/card->writecnt; + + sendbuf = kmalloc(phaselen + 4, GFP_KERNEL); + if (!sendbuf) { + error = -ENOMEM; + goto fail_nosendbuf; + } + for (x = 0; x < card->writecnt; x++) { + sendbuf[0] = cpu_to_be32(partition << 24 | x << 16 | num); + memcpy(&sendbuf[1], buf + phaselen * x, phaselen); + /* wait until the device is not busy 
doing something else + * or 1 second - which ever is longer */ + if (atomic_read(&mdev->busy) == 1) { + wait_event_interruptible_timeout(mdev->maple_wait, + atomic_read(&mdev->busy) == 0, HZ); + if (atomic_read(&mdev->busy) == 1) { + error = -EBUSY; + dev_notice(&mdev->dev, "VMU write at (%d, %d)" + "failed - device is busy\n", + mdev->port, mdev->unit); + goto fail_nolock; + } + } + atomic_set(&mdev->busy, 1); + + locking = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD, + MAPLE_COMMAND_BWRITE, phaselen / 4 + 2, sendbuf); + wait = wait_event_interruptible_timeout(mdev->maple_wait, + atomic_read(&mdev->busy) == 0, HZ/10); + if (locking) { + error = -EIO; + atomic_set(&mdev->busy, 0); + goto fail_nolock; + } + if (atomic_read(&mdev->busy) == 2) { + atomic_set(&mdev->busy, 0); + } else if (wait == 0 || wait == -ERESTARTSYS) { + error = -EIO; + dev_warn(&mdev->dev, "Write at (%d, %d) of block" + " 0x%X at phase %d failed: could not" + " communicate with VMU", mdev->port, + mdev->unit, num, x); + atomic_set(&mdev->busy, 0); + kfree(mdev->mq->sendbuf); + mdev->mq->sendbuf = NULL; + list_del_init(&(mdev->mq->list)); + goto fail_nolock; + } + } + kfree(sendbuf); + + return card->blocklen; + +fail_nolock: + kfree(sendbuf); +fail_nosendbuf: + dev_err(&mdev->dev, "VMU (%d, %d): write failed\n", mdev->port, + mdev->unit); + return error; +} + +/* mtd function to simulate reading byte by byte */ +static unsigned char vmu_flash_read_char(unsigned long ofs, int *retval, + struct mtd_info *mtd) +{ + struct vmu_block *vblock; + struct memcard *card; + struct mdev_part *mpart; + struct maple_device *mdev; + unsigned char *buf, ret; + int partition, error; + + mpart = mtd->priv; + mdev = mpart->mdev; + partition = mpart->partition; + card = maple_get_drvdata(mdev); + *retval = 0; + + buf = kmalloc(card->blocklen, GFP_KERNEL); + if (!buf) { + *retval = 1; + ret = -ENOMEM; + goto finish; + } + + vblock = ofs_to_block(ofs, mtd, partition); + if (!vblock) { + *retval = 3; + ret = -ENOMEM; + goto out_buf; + } + + error = maple_vmu_read_block(vblock->num, buf, mtd); + if (error) { + ret = error; + *retval = 2; + goto out_vblock; + } + + ret = buf[vblock->ofs]; + +out_vblock: + kfree(vblock); +out_buf: + kfree(buf); +finish: + return ret; +} + +/* mtd higher order function to read flash */ +static int vmu_flash_read(struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, u_char *buf) +{ + struct maple_device *mdev; + struct memcard *card; + struct mdev_part *mpart; + struct vmu_cache *pcache; + struct vmu_block *vblock; + int index = 0, retval, partition, leftover, numblocks; + unsigned char cx; + + if (len < 1) + return -EIO; + + mpart = mtd->priv; + mdev = mpart->mdev; + partition = mpart->partition; + card = maple_get_drvdata(mdev); + + numblocks = card->parts[partition].numblocks; + if (from + len > numblocks * card->blocklen) + len = numblocks * card->blocklen - from; + if (len == 0) + return -EIO; + /* Have we cached this bit already? */ + pcache = card->parts[partition].pcache; + do { + vblock = ofs_to_block(from + index, mtd, partition); + if (!vblock) + return -ENOMEM; + /* Have we cached this and is the cache valid and timely? 
*/ + if (pcache->valid && + time_before(jiffies, pcache->jiffies_atc + HZ) && + (pcache->block == vblock->num)) { + /* we have cached it, so do necessary copying */ + leftover = card->blocklen - vblock->ofs; + if (vblock->ofs + len - index < card->blocklen) { + /* only a bit of this block to copy */ + memcpy(buf + index, + pcache->buffer + vblock->ofs, + len - index); + index = len; + } else { + /* otherwise copy remainder of whole block */ + memcpy(buf + index, pcache->buffer + + vblock->ofs, leftover); + index += leftover; + } + } else { + /* + * Not cached so read one byte - + * but cache the rest of the block + */ + cx = vmu_flash_read_char(from + index, &retval, mtd); + if (retval) { + *retlen = index; + kfree(vblock); + return cx; + } + memset(buf + index, cx, 1); + index++; + } + kfree(vblock); + } while (len > index); + *retlen = index; + + return 0; +} + +static int vmu_flash_write(struct mtd_info *mtd, loff_t to, size_t len, + size_t *retlen, const u_char *buf) +{ + struct maple_device *mdev; + struct memcard *card; + struct mdev_part *mpart; + int index = 0, partition, error = 0, numblocks; + struct vmu_cache *pcache; + struct vmu_block *vblock; + unsigned char *buffer; + + mpart = mtd->priv; + mdev = mpart->mdev; + partition = mpart->partition; + card = maple_get_drvdata(mdev); + + /* simple sanity checks */ + if (len < 1) { + error = -EIO; + goto failed; + } + numblocks = card->parts[partition].numblocks; + if (to + len > numblocks * card->blocklen) + len = numblocks * card->blocklen - to; + if (len == 0) { + error = -EIO; + goto failed; + } + + vblock = ofs_to_block(to, mtd, partition); + if (!vblock) { + error = -ENOMEM; + goto failed; + } + + buffer = kmalloc(card->blocklen, GFP_KERNEL); + if (!buffer) { + error = -ENOMEM; + goto fail_buffer; + } + + do { + /* Read in the block we are to write to */ + error = maple_vmu_read_block(vblock->num, buffer, mtd); + if (error) + goto fail_io; + + do { + buffer[vblock->ofs] = buf[index]; + vblock->ofs++; + index++; + if (index >= len) + break; + } while (vblock->ofs < card->blocklen); + + /* write out new buffer */ + error = maple_vmu_write_block(vblock->num, buffer, mtd); + /* invalidate the cache */ + pcache = card->parts[partition].pcache; + pcache->valid = 0; + + if (error != card->blocklen) + goto fail_io; + + vblock->num++; + vblock->ofs = 0; + } while (len > index); + + kfree(buffer); + *retlen = index; + kfree(vblock); + return 0; + +fail_io: + kfree(buffer); +fail_buffer: + kfree(vblock); +failed: + dev_err(&mdev->dev, "VMU write failing with error %d\n", error); + return error; +} + +static void vmu_flash_sync(struct mtd_info *mtd) +{ + /* Do nothing here */ +} + +/* Maple bus callback function to recursively query hardware details */ +static void vmu_queryblocks(struct mapleq *mq) +{ + struct maple_device *mdev; + unsigned short *res; + struct memcard *card; + __be32 partnum; + struct vmu_cache *pcache; + struct mdev_part *mpart; + struct mtd_info *mtd_cur; + struct vmupart *part_cur; + int error; + + mdev = mq->dev; + card = maple_get_drvdata(mdev); + res = (unsigned short *) (mq->recvbuf->buf); + card->tempA = res[12]; + card->tempB = res[6]; + + dev_info(&mdev->dev, "VMU device at partition %d has %d user " + "blocks with a root block at %d\n", card->partition, + card->tempA, card->tempB); + + part_cur = &card->parts[card->partition]; + part_cur->user_blocks = card->tempA; + part_cur->root_block = card->tempB; + part_cur->numblocks = card->tempB + 1; + part_cur->name = kmalloc(12, GFP_KERNEL); + if (!part_cur->name) + 
goto fail_name; + + sprintf(part_cur->name, "vmu%d.%d.%d", + mdev->port, mdev->unit, card->partition); + mtd_cur = &card->mtd[card->partition]; + mtd_cur->name = part_cur->name; + mtd_cur->type = 8; + mtd_cur->flags = MTD_WRITEABLE|MTD_NO_ERASE; + mtd_cur->size = part_cur->numblocks * card->blocklen; + mtd_cur->erasesize = card->blocklen; + mtd_cur->write = vmu_flash_write; + mtd_cur->read = vmu_flash_read; + mtd_cur->sync = vmu_flash_sync; + mtd_cur->writesize = card->blocklen; + + mpart = kmalloc(sizeof(struct mdev_part), GFP_KERNEL); + if (!mpart) + goto fail_mpart; + + mpart->mdev = mdev; + mpart->partition = card->partition; + mtd_cur->priv = mpart; + mtd_cur->owner = THIS_MODULE; + + pcache = kzalloc(sizeof(struct vmu_cache), GFP_KERNEL); + if (!pcache) + goto fail_cache_create; + part_cur->pcache = pcache; + + error = add_mtd_device(mtd_cur); + if (error) + goto fail_mtd_register; + + maple_getcond_callback(mdev, NULL, 0, + MAPLE_FUNC_MEMCARD); + + /* + * Set up a recursive call to the (probably theoretical) + * second or more partition + */ + if (++card->partition < card->partitions) { + partnum = cpu_to_be32(card->partition << 24); + maple_getcond_callback(mdev, vmu_queryblocks, 0, + MAPLE_FUNC_MEMCARD); + maple_add_packet(mdev, MAPLE_FUNC_MEMCARD, + MAPLE_COMMAND_GETMINFO, 2, &partnum); + } + return; + +fail_mtd_register: + dev_err(&mdev->dev, "Could not register maple device at (%d, %d)" + "error is 0x%X\n", mdev->port, mdev->unit, error); + for (error = 0; error <= card->partition; error++) { + kfree(((card->parts)[error]).pcache); + ((card->parts)[error]).pcache = NULL; + } +fail_cache_create: +fail_mpart: + for (error = 0; error <= card->partition; error++) { + kfree(((card->mtd)[error]).priv); + ((card->mtd)[error]).priv = NULL; + } + maple_getcond_callback(mdev, NULL, 0, + MAPLE_FUNC_MEMCARD); + kfree(part_cur->name); +fail_name: + return; +} + +/* Handles very basic info about the flash, queries for details */ +static int __devinit vmu_connect(struct maple_device *mdev) +{ + unsigned long test_flash_data, basic_flash_data; + int c, error; + struct memcard *card; + u32 partnum = 0; + + test_flash_data = be32_to_cpu(mdev->devinfo.function); + /* Need to count how many bits are set - to find out which + * function_data element has details of the memory card: + * using Brian Kernighan's/Peter Wegner's method */ + for (c = 0; test_flash_data; c++) + test_flash_data &= test_flash_data - 1; + + basic_flash_data = be32_to_cpu(mdev->devinfo.function_data[c - 1]); + + card = kmalloc(sizeof(struct memcard), GFP_KERNEL); + if (!card) { + error = ENOMEM; + goto fail_nomem; + } + + card->partitions = (basic_flash_data >> 24 & 0xFF) + 1; + card->blocklen = ((basic_flash_data >> 16 & 0xFF) + 1) << 5; + card->writecnt = basic_flash_data >> 12 & 0xF; + card->readcnt = basic_flash_data >> 8 & 0xF; + card->removeable = basic_flash_data >> 7 & 1; + + card->partition = 0; + + /* + * Not sure there are actually any multi-partition devices in the + * real world, but the hardware supports them, so, so will we + */ + card->parts = kmalloc(sizeof(struct vmupart) * card->partitions, + GFP_KERNEL); + if (!card->parts) { + error = -ENOMEM; + goto fail_partitions; + } + + card->mtd = kmalloc(sizeof(struct mtd_info) * card->partitions, + GFP_KERNEL); + if (!card->mtd) { + error = -ENOMEM; + goto fail_mtd_info; + } + + maple_set_drvdata(mdev, card); + + /* + * We want to trap meminfo not get cond + * so set interval to zero, but rely on maple bus + * driver to pass back the results of the meminfo + */ + 
maple_getcond_callback(mdev, vmu_queryblocks, 0, + MAPLE_FUNC_MEMCARD); + + /* Make sure we are clear to go */ + if (atomic_read(&mdev->busy) == 1) { + wait_event_interruptible_timeout(mdev->maple_wait, + atomic_read(&mdev->busy) == 0, HZ); + if (atomic_read(&mdev->busy) == 1) { + dev_notice(&mdev->dev, "VMU at (%d, %d) is busy\n", + mdev->port, mdev->unit); + error = -EAGAIN; + goto fail_device_busy; + } + } + + atomic_set(&mdev->busy, 1); + + /* + * Set up the minfo call: vmu_queryblocks will handle + * the information passed back + */ + error = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD, + MAPLE_COMMAND_GETMINFO, 2, &partnum); + if (error) { + dev_err(&mdev->dev, "Could not lock VMU at (%d, %d)" + " error is 0x%X\n", mdev->port, mdev->unit, error); + goto fail_mtd_info; + } + return 0; + +fail_device_busy: + kfree(card->mtd); +fail_mtd_info: + kfree(card->parts); +fail_partitions: + kfree(card); +fail_nomem: + return error; +} + +static void __devexit vmu_disconnect(struct maple_device *mdev) +{ + struct memcard *card; + struct mdev_part *mpart; + int x; + + mdev->callback = NULL; + card = maple_get_drvdata(mdev); + for (x = 0; x < card->partitions; x++) { + mpart = ((card->mtd)[x]).priv; + mpart->mdev = NULL; + del_mtd_device(&((card->mtd)[x])); + kfree(((card->parts)[x]).name); + } + kfree(card->parts); + kfree(card->mtd); + kfree(card); +} + +/* Callback to handle eccentricities of both mtd subsystem + * and general flakyness of Dreamcast VMUs + */ +static int vmu_can_unload(struct maple_device *mdev) +{ + struct memcard *card; + int x; + struct mtd_info *mtd; + + card = maple_get_drvdata(mdev); + for (x = 0; x < card->partitions; x++) { + mtd = &((card->mtd)[x]); + if (mtd->usecount > 0) + return 0; + } + return 1; +} + +#define ERRSTR "VMU at (%d, %d) file error -" + +static void vmu_file_error(struct maple_device *mdev, void *recvbuf) +{ + enum maple_file_errors error = ((int *)recvbuf)[1]; + + switch (error) { + + case MAPLE_FILEERR_INVALID_PARTITION: + dev_notice(&mdev->dev, ERRSTR " invalid partition number\n", + mdev->port, mdev->unit); + break; + + case MAPLE_FILEERR_PHASE_ERROR: + dev_notice(&mdev->dev, ERRSTR " phase error\n", + mdev->port, mdev->unit); + break; + + case MAPLE_FILEERR_INVALID_BLOCK: + dev_notice(&mdev->dev, ERRSTR " invalid block number\n", + mdev->port, mdev->unit); + break; + + case MAPLE_FILEERR_WRITE_ERROR: + dev_notice(&mdev->dev, ERRSTR " write error\n", + mdev->port, mdev->unit); + break; + + case MAPLE_FILEERR_INVALID_WRITE_LENGTH: + dev_notice(&mdev->dev, ERRSTR " invalid write length\n", + mdev->port, mdev->unit); + break; + + case MAPLE_FILEERR_BAD_CRC: + dev_notice(&mdev->dev, ERRSTR " bad CRC\n", + mdev->port, mdev->unit); + break; + + default: + dev_notice(&mdev->dev, ERRSTR " 0x%X\n", + mdev->port, mdev->unit, error); + } +} + + +static int __devinit probe_maple_vmu(struct device *dev) +{ + int error; + struct maple_device *mdev = to_maple_dev(dev); + struct maple_driver *mdrv = to_maple_driver(dev->driver); + + mdev->can_unload = vmu_can_unload; + mdev->fileerr_handler = vmu_file_error; + mdev->driver = mdrv; + + error = vmu_connect(mdev); + if (error) + return error; + + return 0; +} + +static int __devexit remove_maple_vmu(struct device *dev) +{ + struct maple_device *mdev = to_maple_dev(dev); + + vmu_disconnect(mdev); + return 0; +} + +static struct maple_driver vmu_flash_driver = { + .function = MAPLE_FUNC_MEMCARD, + .drv = { + .name = "Dreamcast_visual_memory", + .probe = probe_maple_vmu, + .remove = __devexit_p(remove_maple_vmu), + }, 
+}; + +static int __init vmu_flash_map_init(void) +{ + return maple_driver_register(&vmu_flash_driver); +} + +static void __exit vmu_flash_map_exit(void) +{ + maple_driver_unregister(&vmu_flash_driver); +} + +module_init(vmu_flash_map_init); +module_exit(vmu_flash_map_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Adrian McMenamin"); +MODULE_DESCRIPTION("Flash mapping for Sega Dreamcast visual memory"); diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c index 1c3fc6b428e9..4898f7fe8518 100644 --- a/drivers/rtc/rtc-sh.c +++ b/drivers/rtc/rtc-sh.c @@ -28,7 +28,7 @@ #include <asm/rtc.h> #define DRV_NAME "sh-rtc" -#define DRV_VERSION "0.2.0" +#define DRV_VERSION "0.2.1" #define RTC_REG(r) ((r) * rtc_reg_size) @@ -99,56 +99,51 @@ struct sh_rtc { unsigned short periodic_freq; }; -static irqreturn_t sh_rtc_interrupt(int irq, void *dev_id) +static int __sh_rtc_interrupt(struct sh_rtc *rtc) { - struct sh_rtc *rtc = dev_id; - unsigned int tmp; - - spin_lock(&rtc->lock); + unsigned int tmp, pending; tmp = readb(rtc->regbase + RCR1); + pending = tmp & RCR1_CF; tmp &= ~RCR1_CF; writeb(tmp, rtc->regbase + RCR1); /* Users have requested One x Second IRQ */ - if (rtc->periodic_freq & PF_OXS) + if (pending && rtc->periodic_freq & PF_OXS) rtc_update_irq(rtc->rtc_dev, 1, RTC_UF | RTC_IRQF); - spin_unlock(&rtc->lock); - - return IRQ_HANDLED; + return pending; } -static irqreturn_t sh_rtc_alarm(int irq, void *dev_id) +static int __sh_rtc_alarm(struct sh_rtc *rtc) { - struct sh_rtc *rtc = dev_id; - unsigned int tmp; - - spin_lock(&rtc->lock); + unsigned int tmp, pending; tmp = readb(rtc->regbase + RCR1); + pending = tmp & RCR1_AF; tmp &= ~(RCR1_AF | RCR1_AIE); - writeb(tmp, rtc->regbase + RCR1); - - rtc_update_irq(rtc->rtc_dev, 1, RTC_AF | RTC_IRQF); + writeb(tmp, rtc->regbase + RCR1); - spin_unlock(&rtc->lock); + if (pending) + rtc_update_irq(rtc->rtc_dev, 1, RTC_AF | RTC_IRQF); - return IRQ_HANDLED; + return pending; } -static irqreturn_t sh_rtc_periodic(int irq, void *dev_id) +static int __sh_rtc_periodic(struct sh_rtc *rtc) { - struct sh_rtc *rtc = dev_id; struct rtc_device *rtc_dev = rtc->rtc_dev; - unsigned int tmp; - - spin_lock(&rtc->lock); + struct rtc_task *irq_task; + unsigned int tmp, pending; tmp = readb(rtc->regbase + RCR2); + pending = tmp & RCR2_PEF; tmp &= ~RCR2_PEF; writeb(tmp, rtc->regbase + RCR2); + if (!pending) + return 0; + /* Half period enabled than one skipped and the next notified */ if ((rtc->periodic_freq & PF_HP) && (rtc->periodic_freq & PF_COUNT)) rtc->periodic_freq &= ~PF_COUNT; @@ -157,16 +152,65 @@ static irqreturn_t sh_rtc_periodic(int irq, void *dev_id) rtc->periodic_freq |= PF_COUNT; if (rtc->periodic_freq & PF_KOU) { spin_lock(&rtc_dev->irq_task_lock); - if (rtc_dev->irq_task) - rtc_dev->irq_task->func(rtc_dev->irq_task->private_data); + irq_task = rtc_dev->irq_task; + if (irq_task) + irq_task->func(irq_task->private_data); spin_unlock(&rtc_dev->irq_task_lock); } else rtc_update_irq(rtc->rtc_dev, 1, RTC_PF | RTC_IRQF); } + return pending; +} + +static irqreturn_t sh_rtc_interrupt(int irq, void *dev_id) +{ + struct sh_rtc *rtc = dev_id; + int ret; + + spin_lock(&rtc->lock); + ret = __sh_rtc_interrupt(rtc); + spin_unlock(&rtc->lock); + + return IRQ_RETVAL(ret); +} + +static irqreturn_t sh_rtc_alarm(int irq, void *dev_id) +{ + struct sh_rtc *rtc = dev_id; + int ret; + + spin_lock(&rtc->lock); + ret = __sh_rtc_alarm(rtc); + spin_unlock(&rtc->lock); + + return IRQ_RETVAL(ret); +} + +static irqreturn_t sh_rtc_periodic(int irq, void *dev_id) +{ + struct sh_rtc *rtc = 
dev_id; + int ret; + + spin_lock(&rtc->lock); + ret = __sh_rtc_periodic(rtc); spin_unlock(&rtc->lock); - return IRQ_HANDLED; + return IRQ_RETVAL(ret); +} + +static irqreturn_t sh_rtc_shared(int irq, void *dev_id) +{ + struct sh_rtc *rtc = dev_id; + int ret; + + spin_lock(&rtc->lock); + ret = __sh_rtc_interrupt(rtc); + ret |= __sh_rtc_alarm(rtc); + ret |= __sh_rtc_periodic(rtc); + spin_unlock(&rtc->lock); + + return IRQ_RETVAL(ret); } static inline void sh_rtc_setpie(struct device *dev, unsigned int enable) @@ -275,6 +319,25 @@ static int sh_rtc_proc(struct device *dev, struct seq_file *seq) return 0; } +static inline void sh_rtc_setcie(struct device *dev, unsigned int enable) +{ + struct sh_rtc *rtc = dev_get_drvdata(dev); + unsigned int tmp; + + spin_lock_irq(&rtc->lock); + + tmp = readb(rtc->regbase + RCR1); + + if (!enable) + tmp &= ~RCR1_CIE; + else + tmp |= RCR1_CIE; + + writeb(tmp, rtc->regbase + RCR1); + + spin_unlock_irq(&rtc->lock); +} + static int sh_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) { struct sh_rtc *rtc = dev_get_drvdata(dev); @@ -291,9 +354,11 @@ static int sh_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) break; case RTC_UIE_OFF: rtc->periodic_freq &= ~PF_OXS; + sh_rtc_setcie(dev, 0); break; case RTC_UIE_ON: rtc->periodic_freq |= PF_OXS; + sh_rtc_setcie(dev, 1); break; case RTC_IRQP_READ: ret = put_user(rtc->rtc_dev->irq_freq, @@ -356,18 +421,17 @@ static int sh_rtc_read_time(struct device *dev, struct rtc_time *tm) tm->tm_sec--; #endif + /* only keep the carry interrupt enabled if UIE is on */ + if (!(rtc->periodic_freq & PF_OXS)) + sh_rtc_setcie(dev, 0); + dev_dbg(dev, "%s: tm is secs=%d, mins=%d, hours=%d, " "mday=%d, mon=%d, year=%d, wday=%d\n", __func__, tm->tm_sec, tm->tm_min, tm->tm_hour, tm->tm_mday, tm->tm_mon + 1, tm->tm_year, tm->tm_wday); - if (rtc_valid_tm(tm) < 0) { - dev_err(dev, "invalid date\n"); - rtc_time_to_tm(0, tm); - } - - return 0; + return rtc_valid_tm(tm); } static int sh_rtc_set_time(struct device *dev, struct rtc_time *tm) @@ -572,7 +636,7 @@ static int __devinit sh_rtc_probe(struct platform_device *pdev) { struct sh_rtc *rtc; struct resource *res; - unsigned int tmp; + struct rtc_time r; int ret; rtc = kzalloc(sizeof(struct sh_rtc), GFP_KERNEL); @@ -585,26 +649,12 @@ static int __devinit sh_rtc_probe(struct platform_device *pdev) ret = platform_get_irq(pdev, 0); if (unlikely(ret <= 0)) { ret = -ENOENT; - dev_err(&pdev->dev, "No IRQ for period\n"); + dev_err(&pdev->dev, "No IRQ resource\n"); goto err_badres; } rtc->periodic_irq = ret; - - ret = platform_get_irq(pdev, 1); - if (unlikely(ret <= 0)) { - ret = -ENOENT; - dev_err(&pdev->dev, "No IRQ for carry\n"); - goto err_badres; - } - rtc->carry_irq = ret; - - ret = platform_get_irq(pdev, 2); - if (unlikely(ret <= 0)) { - ret = -ENOENT; - dev_err(&pdev->dev, "No IRQ for alarm\n"); - goto err_badres; - } - rtc->alarm_irq = ret; + rtc->carry_irq = platform_get_irq(pdev, 1); + rtc->alarm_irq = platform_get_irq(pdev, 2); res = platform_get_resource(pdev, IORESOURCE_IO, 0); if (unlikely(res == NULL)) { @@ -646,47 +696,66 @@ static int __devinit sh_rtc_probe(struct platform_device *pdev) } rtc->rtc_dev->max_user_freq = 256; - rtc->rtc_dev->irq_freq = 1; - rtc->periodic_freq = 0x60; platform_set_drvdata(pdev, rtc); - /* register periodic/carry/alarm irqs */ - ret = request_irq(rtc->periodic_irq, sh_rtc_periodic, IRQF_DISABLED, - "sh-rtc period", rtc); - if (unlikely(ret)) { - dev_err(&pdev->dev, - "request period IRQ failed with %d, IRQ %d\n", ret, - 
rtc->periodic_irq); - goto err_unmap; - } + if (rtc->carry_irq <= 0) { + /* register shared periodic/carry/alarm irq */ + ret = request_irq(rtc->periodic_irq, sh_rtc_shared, + IRQF_DISABLED, "sh-rtc", rtc); + if (unlikely(ret)) { + dev_err(&pdev->dev, + "request IRQ failed with %d, IRQ %d\n", ret, + rtc->periodic_irq); + goto err_unmap; + } + } else { + /* register periodic/carry/alarm irqs */ + ret = request_irq(rtc->periodic_irq, sh_rtc_periodic, + IRQF_DISABLED, "sh-rtc period", rtc); + if (unlikely(ret)) { + dev_err(&pdev->dev, + "request period IRQ failed with %d, IRQ %d\n", + ret, rtc->periodic_irq); + goto err_unmap; + } - ret = request_irq(rtc->carry_irq, sh_rtc_interrupt, IRQF_DISABLED, - "sh-rtc carry", rtc); - if (unlikely(ret)) { - dev_err(&pdev->dev, - "request carry IRQ failed with %d, IRQ %d\n", ret, - rtc->carry_irq); - free_irq(rtc->periodic_irq, rtc); - goto err_unmap; - } + ret = request_irq(rtc->carry_irq, sh_rtc_interrupt, + IRQF_DISABLED, "sh-rtc carry", rtc); + if (unlikely(ret)) { + dev_err(&pdev->dev, + "request carry IRQ failed with %d, IRQ %d\n", + ret, rtc->carry_irq); + free_irq(rtc->periodic_irq, rtc); + goto err_unmap; + } - ret = request_irq(rtc->alarm_irq, sh_rtc_alarm, IRQF_DISABLED, - "sh-rtc alarm", rtc); - if (unlikely(ret)) { - dev_err(&pdev->dev, - "request alarm IRQ failed with %d, IRQ %d\n", ret, - rtc->alarm_irq); - free_irq(rtc->carry_irq, rtc); - free_irq(rtc->periodic_irq, rtc); - goto err_unmap; + ret = request_irq(rtc->alarm_irq, sh_rtc_alarm, + IRQF_DISABLED, "sh-rtc alarm", rtc); + if (unlikely(ret)) { + dev_err(&pdev->dev, + "request alarm IRQ failed with %d, IRQ %d\n", + ret, rtc->alarm_irq); + free_irq(rtc->carry_irq, rtc); + free_irq(rtc->periodic_irq, rtc); + goto err_unmap; + } } - tmp = readb(rtc->regbase + RCR1); - tmp &= ~RCR1_CF; - tmp |= RCR1_CIE; - writeb(tmp, rtc->regbase + RCR1); + /* everything disabled by default */ + rtc->periodic_freq = 0; + rtc->rtc_dev->irq_freq = 0; + sh_rtc_setpie(&pdev->dev, 0); + sh_rtc_setaie(&pdev->dev, 0); + sh_rtc_setcie(&pdev->dev, 0); + + /* reset rtc to epoch 0 if time is invalid */ + if (rtc_read_time(rtc->rtc_dev, &r) < 0) { + rtc_time_to_tm(0, &r); + rtc_set_time(rtc->rtc_dev, &r); + } + device_init_wakeup(&pdev->dev, 1); return 0; err_unmap: @@ -708,10 +777,13 @@ static int __devexit sh_rtc_remove(struct platform_device *pdev) sh_rtc_setpie(&pdev->dev, 0); sh_rtc_setaie(&pdev->dev, 0); + sh_rtc_setcie(&pdev->dev, 0); - free_irq(rtc->carry_irq, rtc); free_irq(rtc->periodic_irq, rtc); - free_irq(rtc->alarm_irq, rtc); + if (rtc->carry_irq > 0) { + free_irq(rtc->carry_irq, rtc); + free_irq(rtc->alarm_irq, rtc); + } release_resource(rtc->res); diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c index 557b54ab2f25..dbf5357a77b3 100644 --- a/drivers/serial/sh-sci.c +++ b/drivers/serial/sh-sci.c @@ -139,7 +139,7 @@ static void sci_poll_put_char(struct uart_port *port, unsigned char c) } while (!(status & SCxSR_TDxE(port))); sci_in(port, SCxSR); /* Dummy read */ - sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port)); + sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port)); sci_out(port, SCxTDR, c); } #endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */ @@ -263,6 +263,7 @@ static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) #elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \ defined(CONFIG_CPU_SUBTYPE_SH7780) || \ defined(CONFIG_CPU_SUBTYPE_SH7785) || \ + defined(CONFIG_CPU_SUBTYPE_SH7786) || \ defined(CONFIG_CPU_SUBTYPE_SHX3) static inline void 
sci_init_pins(struct uart_port *port, unsigned int cflag) { @@ -284,7 +285,8 @@ static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) #if defined(CONFIG_CPU_SUBTYPE_SH7760) || \ defined(CONFIG_CPU_SUBTYPE_SH7780) || \ - defined(CONFIG_CPU_SUBTYPE_SH7785) + defined(CONFIG_CPU_SUBTYPE_SH7785) || \ + defined(CONFIG_CPU_SUBTYPE_SH7786) static inline int scif_txroom(struct uart_port *port) { return SCIF_TXROOM_MAX - (sci_in(port, SCTFDR) & 0xff); @@ -1095,6 +1097,7 @@ static void serial_console_write(struct console *co, const char *s, unsigned count) { struct uart_port *port = &serial_console_port->port; + unsigned short bits; int i; for (i = 0; i < count; i++) { @@ -1103,6 +1106,11 @@ static void serial_console_write(struct console *co, const char *s, sci_poll_put_char(port, *s++); } + + /* wait until fifo is empty and last bit has been transmitted */ + bits = SCxSR_TDxE(port) | SCxSR_TEND(port); + while ((sci_in(port, SCxSR) & bits) != bits) + cpu_relax(); } static int __init serial_console_setup(struct console *co, char *options) diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h index 022e89ffec1d..d0aa82d7fce0 100644 --- a/drivers/serial/sh-sci.h +++ b/drivers/serial/sh-sci.h @@ -1,6 +1,6 @@ #include <linux/serial_core.h> #include <asm/io.h> -#include <asm/gpio.h> +#include <linux/gpio.h> #if defined(CONFIG_H83007) || defined(CONFIG_H83068) #include <asm/regs306x.h> @@ -126,7 +126,8 @@ # define SCSPTR1 0xffe10024 /* 16 bit SCIF */ # define SCIF_ORER 0x0001 /* Overrun error bit */ # define SCSCR_INIT(port) 0x3a /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ -#elif defined(CONFIG_CPU_SUBTYPE_SH7785) +#elif defined(CONFIG_CPU_SUBTYPE_SH7785) || \ + defined(CONFIG_CPU_SUBTYPE_SH7786) # define SCSPTR0 0xffea0024 /* 16 bit SCIF */ # define SCSPTR1 0xffeb0024 /* 16 bit SCIF */ # define SCSPTR2 0xffec0024 /* 16 bit SCIF */ @@ -182,6 +183,7 @@ defined(CONFIG_CPU_SUBTYPE_SH7763) || \ defined(CONFIG_CPU_SUBTYPE_SH7780) || \ defined(CONFIG_CPU_SUBTYPE_SH7785) || \ + defined(CONFIG_CPU_SUBTYPE_SH7786) || \ defined(CONFIG_CPU_SUBTYPE_SHX3) #define SCI_CTRL_FLAGS_REIE 0x08 /* 7750 SCIF */ #else @@ -413,7 +415,8 @@ SCIx_FNS(SCxRDR, 0x0a, 8, 0x14, 8, 0x0A, 8, 0x14, 8, 0x05, 8) SCIF_FNS(SCFCR, 0x0c, 8, 0x18, 16) #if defined(CONFIG_CPU_SUBTYPE_SH7760) || \ defined(CONFIG_CPU_SUBTYPE_SH7780) || \ - defined(CONFIG_CPU_SUBTYPE_SH7785) + defined(CONFIG_CPU_SUBTYPE_SH7785) || \ + defined(CONFIG_CPU_SUBTYPE_SH7786) SCIF_FNS(SCFDR, 0x0e, 16, 0x1C, 16) SCIF_FNS(SCTFDR, 0x0e, 16, 0x1C, 16) SCIF_FNS(SCRFDR, 0x0e, 16, 0x20, 16) @@ -644,7 +647,8 @@ static inline int sci_rxd_in(struct uart_port *port) return ctrl_inw(SCSPTR1) & 0x0001 ? 
1 : 0; /* SCIF */ return 1; } -#elif defined(CONFIG_CPU_SUBTYPE_SH7785) +#elif defined(CONFIG_CPU_SUBTYPE_SH7785) || \ + defined(CONFIG_CPU_SUBTYPE_SH7786) static inline int sci_rxd_in(struct uart_port *port) { if (port->mapbase == 0xffea0000) @@ -746,7 +750,8 @@ static inline int sci_rxd_in(struct uart_port *port) */ #if defined(CONFIG_CPU_SUBTYPE_SH7780) || \ - defined(CONFIG_CPU_SUBTYPE_SH7785) + defined(CONFIG_CPU_SUBTYPE_SH7785) || \ + defined(CONFIG_CPU_SUBTYPE_SH7786) #define SCBRR_VALUE(bps, clk) ((clk+16*bps)/(16*bps)-1) #elif defined(CONFIG_CPU_SUBTYPE_SH7705) || \ defined(CONFIG_CPU_SUBTYPE_SH7720) || \ diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c index 58d24c5a76ce..2269fbcaa182 100644 --- a/drivers/sh/intc.c +++ b/drivers/sh/intc.c @@ -568,6 +568,10 @@ static void __init intc_register_irq(struct intc_desc *desc, if (!data[0] && data[1]) primary = 1; + if (!data[0] && !data[1]) + pr_warning("intc: missing unique irq mask for " + "irq %d (vect 0x%04x)\n", irq, irq2evt(irq)); + data[0] = data[0] ? data[0] : intc_mask_data(desc, d, enum_id, 1); data[1] = data[1] ? data[1] : intc_prio_data(desc, d, enum_id, 1); @@ -641,6 +645,17 @@ static unsigned int __init save_reg(struct intc_desc_int *d, return 0; } +static unsigned char *intc_evt2irq_table; + +unsigned int intc_evt2irq(unsigned int vector) +{ + unsigned int irq = evt2irq(vector); + + if (intc_evt2irq_table && intc_evt2irq_table[irq]) + irq = intc_evt2irq_table[irq]; + + return irq; +} void __init register_intc_controller(struct intc_desc *desc) { @@ -705,9 +720,41 @@ void __init register_intc_controller(struct intc_desc *desc) BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */ + /* keep the first vector only if same enum is used multiple times */ + for (i = 0; i < desc->nr_vectors; i++) { + struct intc_vect *vect = desc->vectors + i; + int first_irq = evt2irq(vect->vect); + + if (!vect->enum_id) + continue; + + for (k = i + 1; k < desc->nr_vectors; k++) { + struct intc_vect *vect2 = desc->vectors + k; + + if (vect->enum_id != vect2->enum_id) + continue; + + vect2->enum_id = 0; + + if (!intc_evt2irq_table) + intc_evt2irq_table = alloc_bootmem(NR_IRQS); + + if (!intc_evt2irq_table) { + pr_warning("intc: cannot allocate evt2irq!\n"); + continue; + } + + intc_evt2irq_table[evt2irq(vect2->vect)] = first_irq; + } + } + + /* register the vectors one by one */ for (i = 0; i < desc->nr_vectors; i++) { struct intc_vect *vect = desc->vectors + i; + if (!vect->enum_id) + continue; + intc_register_irq(desc, d, vect->enum_id, evt2irq(vect->vect)); } } diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c index 63f0de29aa14..c71bb4b4ce84 100644 --- a/drivers/sh/maple/maple.c +++ b/drivers/sh/maple/maple.c @@ -1,16 +1,10 @@ /* * Core maple bus functionality * - * Copyright (C) 2007, 2008 Adrian McMenamin + * Copyright (C) 2007 - 2009 Adrian McMenamin * Copyright (C) 2001 - 2008 Paul Mundt - * - * Based on 2.4 code by: - * - * Copyright (C) 2000-2001 YAEGASHI Takeshi + * Copyright (C) 2000 - 2001 YAEGASHI Takeshi * Copyright (C) 2001 M. R. Brown - * Copyright (C) 2001 Paul Mundt - * - * and others. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive @@ -32,7 +26,7 @@ #include <mach/dma.h> #include <mach/sysasic.h> -MODULE_AUTHOR("Yaegashi Takeshi, Paul Mundt, M. R. 
Brown, Adrian McMenamin"); +MODULE_AUTHOR("Adrian McMenamin <adrian@mcmen.demon.co.uk>"); MODULE_DESCRIPTION("Maple bus driver for Dreamcast"); MODULE_LICENSE("GPL v2"); MODULE_SUPPORTED_DEVICE("{{SEGA, Dreamcast/Maple}}"); @@ -49,7 +43,7 @@ static LIST_HEAD(maple_sentq); /* mutex to protect queue of waiting packets */ static DEFINE_MUTEX(maple_wlist_lock); -static struct maple_driver maple_dummy_driver; +static struct maple_driver maple_unsupported_device; static struct device maple_bus; static int subdevice_map[MAPLE_PORTS]; static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr; @@ -62,8 +56,9 @@ struct maple_device_specify { int unit; }; -static bool checked[4]; -static struct maple_device *baseunits[4]; +static bool checked[MAPLE_PORTS]; +static bool empty[MAPLE_PORTS]; +static struct maple_device *baseunits[MAPLE_PORTS]; /** * maple_driver_register - register a maple driver @@ -97,12 +92,20 @@ void maple_driver_unregister(struct maple_driver *drv) EXPORT_SYMBOL_GPL(maple_driver_unregister); /* set hardware registers to enable next round of dma */ -static void maplebus_dma_reset(void) +static void maple_dma_reset(void) { ctrl_outl(MAPLE_MAGIC, MAPLE_RESET); /* set trig type to 0 for software trigger, 1 for hardware (VBLANK) */ ctrl_outl(1, MAPLE_TRIGTYPE); - ctrl_outl(MAPLE_2MBPS | MAPLE_TIMEOUT(50000), MAPLE_SPEED); + /* + * Maple system register + * bits 31 - 16 timeout in units of 20nsec + * bit 12 hard trigger - set 0 to keep responding to VBLANK + * bits 9 - 8 set 00 for 2 Mbps, 01 for 1 Mbps + * bits 3 - 0 delay (in 1.3ms) between VBLANK and start of DMA + * max delay is 11 + */ + ctrl_outl(MAPLE_2MBPS | MAPLE_TIMEOUT(0xFFFF), MAPLE_SPEED); ctrl_outl(PHYSADDR(maple_sendbuf), MAPLE_DMAADDR); ctrl_outl(1, MAPLE_ENABLE); } @@ -134,21 +137,16 @@ static void maple_release_device(struct device *dev) { struct maple_device *mdev; struct mapleq *mq; - if (!dev) - return; + mdev = to_maple_dev(dev); mq = mdev->mq; - if (mq) { - if (mq->recvbufdcsp) - kmem_cache_free(maple_queue_cache, mq->recvbufdcsp); - kfree(mq); - mq = NULL; - } + kmem_cache_free(maple_queue_cache, mq->recvbuf); + kfree(mq); kfree(mdev); } /** - * maple_add_packet - add a single instruction to the queue + * maple_add_packet - add a single instruction to the maple bus queue * @mdev: maple device * @function: function on device being queried * @command: maple command to add @@ -158,68 +156,12 @@ static void maple_release_device(struct device *dev) int maple_add_packet(struct maple_device *mdev, u32 function, u32 command, size_t length, void *data) { - int locking, ret = 0; - void *sendbuf = NULL; - - mutex_lock(&maple_wlist_lock); - /* bounce if device already locked */ - locking = mutex_is_locked(&mdev->mq->mutex); - if (locking) { - ret = -EBUSY; - goto out; - } - - mutex_lock(&mdev->mq->mutex); - - if (length) { - sendbuf = kmalloc(length * 4, GFP_KERNEL); - if (!sendbuf) { - mutex_unlock(&mdev->mq->mutex); - ret = -ENOMEM; - goto out; - } - ((__be32 *)sendbuf)[0] = cpu_to_be32(function); - } - - mdev->mq->command = command; - mdev->mq->length = length; - if (length > 1) - memcpy(sendbuf + 4, data, (length - 1) * 4); - mdev->mq->sendbuf = sendbuf; - - list_add(&mdev->mq->list, &maple_waitq); -out: - mutex_unlock(&maple_wlist_lock); - return ret; -} -EXPORT_SYMBOL_GPL(maple_add_packet); - -/** - * maple_add_packet_sleeps - add a single instruction to the queue - * @mdev: maple device - * @function: function on device being queried - * @command: maple command to add - * @length: length of command string (in 
32 bit words) - * @data: remainder of command string - * - * Same as maple_add_packet(), but waits for the lock to become free. - */ -int maple_add_packet_sleeps(struct maple_device *mdev, u32 function, - u32 command, size_t length, void *data) -{ - int locking, ret = 0; + int ret = 0; void *sendbuf = NULL; - locking = mutex_lock_interruptible(&mdev->mq->mutex); - if (locking) { - ret = -EIO; - goto out; - } - if (length) { - sendbuf = kmalloc(length * 4, GFP_KERNEL); + sendbuf = kzalloc(length * 4, GFP_KERNEL); if (!sendbuf) { - mutex_unlock(&mdev->mq->mutex); ret = -ENOMEM; goto out; } @@ -233,38 +175,35 @@ int maple_add_packet_sleeps(struct maple_device *mdev, u32 function, mdev->mq->sendbuf = sendbuf; mutex_lock(&maple_wlist_lock); - list_add(&mdev->mq->list, &maple_waitq); + list_add_tail(&mdev->mq->list, &maple_waitq); mutex_unlock(&maple_wlist_lock); out: return ret; } -EXPORT_SYMBOL_GPL(maple_add_packet_sleeps); +EXPORT_SYMBOL_GPL(maple_add_packet); static struct mapleq *maple_allocq(struct maple_device *mdev) { struct mapleq *mq; - mq = kmalloc(sizeof(*mq), GFP_KERNEL); + mq = kzalloc(sizeof(*mq), GFP_KERNEL); if (!mq) goto failed_nomem; + INIT_LIST_HEAD(&mq->list); mq->dev = mdev; - mq->recvbufdcsp = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL); - mq->recvbuf = (void *) P2SEGADDR(mq->recvbufdcsp); + mq->recvbuf = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL); if (!mq->recvbuf) goto failed_p2; - /* - * most devices do not need the mutex - but - * anything that injects block reads or writes - * will rely on it - */ - mutex_init(&mq->mutex); + mq->recvbuf->buf = &((mq->recvbuf->bufx)[0]); return mq; failed_p2: kfree(mq); failed_nomem: + dev_err(&mdev->dev, "could not allocate memory for device (%d, %d)\n", + mdev->port, mdev->unit); return NULL; } @@ -272,12 +211,16 @@ static struct maple_device *maple_alloc_dev(int port, int unit) { struct maple_device *mdev; + /* zero this out to avoid kobj subsystem + * thinking it has already been registered */ + mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); if (!mdev) return NULL; mdev->port = port; mdev->unit = unit; + mdev->mq = maple_allocq(mdev); if (!mdev->mq) { @@ -286,19 +229,14 @@ static struct maple_device *maple_alloc_dev(int port, int unit) } mdev->dev.bus = &maple_bus_type; mdev->dev.parent = &maple_bus; + init_waitqueue_head(&mdev->maple_wait); return mdev; } static void maple_free_dev(struct maple_device *mdev) { - if (!mdev) - return; - if (mdev->mq) { - if (mdev->mq->recvbufdcsp) - kmem_cache_free(maple_queue_cache, - mdev->mq->recvbufdcsp); - kfree(mdev->mq); - } + kmem_cache_free(maple_queue_cache, mdev->mq->recvbuf); + kfree(mdev->mq); kfree(mdev); } @@ -320,7 +258,7 @@ static void maple_build_block(struct mapleq *mq) maple_lastptr = maple_sendptr; *maple_sendptr++ = (port << 16) | len | 0x80000000; - *maple_sendptr++ = PHYSADDR(mq->recvbuf); + *maple_sendptr++ = PHYSADDR(mq->recvbuf->buf); *maple_sendptr++ = mq->command | (to << 8) | (from << 16) | (len << 24); while (len-- > 0) @@ -333,20 +271,28 @@ static void maple_send(void) int i, maple_packets = 0; struct mapleq *mq, *nmq; - if (!list_empty(&maple_sentq)) + if (!maple_dma_done()) return; + + /* disable DMA */ + ctrl_outl(0, MAPLE_ENABLE); + + if (!list_empty(&maple_sentq)) + goto finish; + mutex_lock(&maple_wlist_lock); - if (list_empty(&maple_waitq) || !maple_dma_done()) { + if (list_empty(&maple_waitq)) { mutex_unlock(&maple_wlist_lock); - return; + goto finish; } - mutex_unlock(&maple_wlist_lock); + maple_lastptr = maple_sendbuf; maple_sendptr = maple_sendbuf; - 
mutex_lock(&maple_wlist_lock); + list_for_each_entry_safe(mq, nmq, &maple_waitq, list) { maple_build_block(mq); - list_move(&mq->list, &maple_sentq); + list_del_init(&mq->list); + list_add_tail(&mq->list, &maple_sentq); if (maple_packets++ > MAPLE_MAXPACKETS) break; } @@ -356,10 +302,13 @@ static void maple_send(void) dma_cache_sync(0, maple_sendbuf + i * PAGE_SIZE, PAGE_SIZE, DMA_BIDIRECTIONAL); } + +finish: + maple_dma_reset(); } /* check if there is a driver registered likely to match this device */ -static int check_matching_maple_driver(struct device_driver *driver, +static int maple_check_matching_driver(struct device_driver *driver, void *devptr) { struct maple_driver *maple_drv; @@ -374,10 +323,7 @@ static int check_matching_maple_driver(struct device_driver *driver, static void maple_detach_driver(struct maple_device *mdev) { - if (!mdev) - return; device_unregister(&mdev->dev); - mdev = NULL; } /* process initial MAPLE_COMMAND_DEVINFO for each device or port */ @@ -385,9 +331,9 @@ static void maple_attach_driver(struct maple_device *mdev) { char *p, *recvbuf; unsigned long function; - int matched, retval; + int matched, error; - recvbuf = mdev->mq->recvbuf; + recvbuf = mdev->mq->recvbuf->buf; /* copy the data as individual elements in * case of memory optimisation */ memcpy(&mdev->devinfo.function, recvbuf + 4, 4); @@ -395,7 +341,6 @@ static void maple_attach_driver(struct maple_device *mdev) memcpy(&mdev->devinfo.area_code, recvbuf + 20, 1); memcpy(&mdev->devinfo.connector_direction, recvbuf + 21, 1); memcpy(&mdev->devinfo.product_name[0], recvbuf + 22, 30); - memcpy(&mdev->devinfo.product_licence[0], recvbuf + 52, 60); memcpy(&mdev->devinfo.standby_power, recvbuf + 112, 2); memcpy(&mdev->devinfo.max_power, recvbuf + 114, 2); memcpy(mdev->product_name, mdev->devinfo.product_name, 30); @@ -414,43 +359,40 @@ static void maple_attach_driver(struct maple_device *mdev) else break; - printk(KERN_INFO "Maple device detected: %s\n", - mdev->product_name); - printk(KERN_INFO "Maple device: %s\n", mdev->product_licence); - function = be32_to_cpu(mdev->devinfo.function); + dev_info(&mdev->dev, "detected %s: function 0x%lX: at (%d, %d)\n", + mdev->product_name, function, mdev->port, mdev->unit); + if (function > 0x200) { /* Do this silently - as not a real device */ function = 0; - mdev->driver = &maple_dummy_driver; - sprintf(mdev->dev.bus_id, "%d:0.port", mdev->port); + mdev->driver = &maple_unsupported_device; + dev_set_name(&mdev->dev, "%d:0.port", mdev->port); } else { - printk(KERN_INFO - "Maple bus at (%d, %d): Function 0x%lX\n", - mdev->port, mdev->unit, function); - matched = bus_for_each_drv(&maple_bus_type, NULL, mdev, - check_matching_maple_driver); + maple_check_matching_driver); if (matched == 0) { /* Driver does not exist yet */ - printk(KERN_INFO - "No maple driver found.\n"); - mdev->driver = &maple_dummy_driver; + dev_info(&mdev->dev, "no driver found\n"); + mdev->driver = &maple_unsupported_device; } - sprintf(mdev->dev.bus_id, "%d:0%d.%lX", mdev->port, - mdev->unit, function); + + dev_set_name(&mdev->dev, "%d:0%d.%lX", mdev->port, + mdev->unit, function); } + mdev->function = function; mdev->dev.release = &maple_release_device; - retval = device_register(&mdev->dev); - if (retval) { - printk(KERN_INFO - "Maple bus: Attempt to register device" - " (%x, %x) failed.\n", - mdev->port, mdev->unit); + + atomic_set(&mdev->busy, 0); + error = device_register(&mdev->dev); + if (error) { + dev_warn(&mdev->dev, "could not register device at" + " (%d, %d), with error 0x%X\n", 
mdev->unit, + mdev->port, error); maple_free_dev(mdev); mdev = NULL; return; @@ -462,7 +404,7 @@ static void maple_attach_driver(struct maple_device *mdev) * port and unit then return 1 - allows identification * of which devices need to be attached or detached */ -static int detach_maple_device(struct device *device, void *portptr) +static int check_maple_device(struct device *device, void *portptr) { struct maple_device_specify *ds; struct maple_device *mdev; @@ -477,21 +419,25 @@ static int detach_maple_device(struct device *device, void *portptr) static int setup_maple_commands(struct device *device, void *ignored) { int add; - struct maple_device *maple_dev = to_maple_dev(device); - - if ((maple_dev->interval > 0) - && time_after(jiffies, maple_dev->when)) { - /* bounce if we cannot lock */ - add = maple_add_packet(maple_dev, - be32_to_cpu(maple_dev->devinfo.function), + struct maple_device *mdev = to_maple_dev(device); + if (mdev->interval > 0 && atomic_read(&mdev->busy) == 0 && + time_after(jiffies, mdev->when)) { + /* bounce if we cannot add */ + add = maple_add_packet(mdev, + be32_to_cpu(mdev->devinfo.function), MAPLE_COMMAND_GETCOND, 1, NULL); if (!add) - maple_dev->when = jiffies + maple_dev->interval; + mdev->when = jiffies + mdev->interval; } else { if (time_after(jiffies, maple_pnp_time)) - /* This will also bounce */ - maple_add_packet(maple_dev, 0, - MAPLE_COMMAND_DEVINFO, 0, NULL); + /* Ensure we don't have block reads and devinfo + * calls interfering with one another - so flag the + * device as busy */ + if (atomic_read(&mdev->busy) == 0) { + atomic_set(&mdev->busy, 1); + maple_add_packet(mdev, 0, + MAPLE_COMMAND_DEVINFO, 0, NULL); + } } return 0; } @@ -499,29 +445,50 @@ static int setup_maple_commands(struct device *device, void *ignored) /* VBLANK bottom half - implemented via workqueue */ static void maple_vblank_handler(struct work_struct *work) { - if (!list_empty(&maple_sentq) || !maple_dma_done()) + int x, locking; + struct maple_device *mdev; + + if (!maple_dma_done()) return; ctrl_outl(0, MAPLE_ENABLE); + if (!list_empty(&maple_sentq)) + goto finish; + + /* + * Set up essential commands - to fetch data and + * check devices are still present + */ bus_for_each_dev(&maple_bus_type, NULL, NULL, - setup_maple_commands); + setup_maple_commands); + + if (time_after(jiffies, maple_pnp_time)) { + /* + * Scan the empty ports - bus is flakey and may have + * mis-reported emptyness + */ + for (x = 0; x < MAPLE_PORTS; x++) { + if (checked[x] && empty[x]) { + mdev = baseunits[x]; + if (!mdev) + break; + atomic_set(&mdev->busy, 1); + locking = maple_add_packet(mdev, 0, + MAPLE_COMMAND_DEVINFO, 0, NULL); + if (!locking) + break; + } + } - if (time_after(jiffies, maple_pnp_time)) maple_pnp_time = jiffies + MAPLE_PNP_INTERVAL; - - mutex_lock(&maple_wlist_lock); - if (!list_empty(&maple_waitq) && list_empty(&maple_sentq)) { - mutex_unlock(&maple_wlist_lock); - maple_send(); - } else { - mutex_unlock(&maple_wlist_lock); } - maplebus_dma_reset(); +finish: + maple_send(); } -/* handle devices added via hotplugs - placing them on queue for DEVINFO*/ +/* handle devices added via hotplugs - placing them on queue for DEVINFO */ static void maple_map_subunits(struct maple_device *mdev, int submask) { int retval, k, devcheck; @@ -533,7 +500,7 @@ static void maple_map_subunits(struct maple_device *mdev, int submask) ds.unit = k + 1; retval = bus_for_each_dev(&maple_bus_type, NULL, &ds, - detach_maple_device); + check_maple_device); if (retval) { submask = submask >> 1; continue; @@ -543,6 
+510,7 @@ static void maple_map_subunits(struct maple_device *mdev, int submask) mdev_add = maple_alloc_dev(mdev->port, k + 1); if (!mdev_add) return; + atomic_set(&mdev_add->busy, 1); maple_add_packet(mdev_add, 0, MAPLE_COMMAND_DEVINFO, 0, NULL); /* mark that we are checking sub devices */ @@ -564,27 +532,45 @@ static void maple_clean_submap(struct maple_device *mdev) } /* handle empty port or hotplug removal */ -static void maple_response_none(struct maple_device *mdev, - struct mapleq *mq) -{ - if (mdev->unit != 0) { - list_del(&mq->list); - maple_clean_submap(mdev); - printk(KERN_INFO - "Maple bus device detaching at (%d, %d)\n", - mdev->port, mdev->unit); +static void maple_response_none(struct maple_device *mdev) +{ + maple_clean_submap(mdev); + + if (likely(mdev->unit != 0)) { + /* + * Block devices play up + * and give the impression they have + * been removed even when still in place or + * trip the mtd layer when they have + * really gone - this code traps that eventuality + * and ensures we aren't overloaded with useless + * error messages + */ + if (mdev->can_unload) { + if (!mdev->can_unload(mdev)) { + atomic_set(&mdev->busy, 2); + wake_up(&mdev->maple_wait); + return; + } + } + + dev_info(&mdev->dev, "detaching device at (%d, %d)\n", + mdev->port, mdev->unit); maple_detach_driver(mdev); return; - } - if (!started || !fullscan) { - if (checked[mdev->port] == false) { - checked[mdev->port] = true; - printk(KERN_INFO "No maple devices attached" - " to port %d\n", mdev->port); + } else { + if (!started || !fullscan) { + if (checked[mdev->port] == false) { + checked[mdev->port] = true; + empty[mdev->port] = true; + dev_info(&mdev->dev, "no devices" + " to port %d\n", mdev->port); + } + return; } - return; } - maple_clean_submap(mdev); + /* Some hardware devices generate false detach messages on unit 0 */ + atomic_set(&mdev->busy, 0); } /* preprocess hotplugs or scans */ @@ -599,8 +585,11 @@ static void maple_response_devinfo(struct maple_device *mdev, } else { if (mdev->unit != 0) maple_attach_driver(mdev); + if (mdev->unit == 0) { + empty[mdev->port] = false; + maple_attach_driver(mdev); + } } - return; } if (mdev->unit == 0) { submask = recvbuf[2] & 0x1F; @@ -611,6 +600,17 @@ static void maple_response_devinfo(struct maple_device *mdev, } } +static void maple_response_fileerr(struct maple_device *mdev, void *recvbuf) +{ + if (mdev->fileerr_handler) { + mdev->fileerr_handler(mdev, recvbuf); + return; + } else + dev_warn(&mdev->dev, "device at (%d, %d) reports" + "file error 0x%X\n", mdev->port, mdev->unit, + ((int *)recvbuf)[1]); +} + static void maple_port_rescan(void) { int i; @@ -621,12 +621,6 @@ static void maple_port_rescan(void) if (checked[i] == false) { fullscan = 0; mdev = baseunits[i]; - /* - * test lock in case scan has failed - * but device is still locked - */ - if (mutex_is_locked(&mdev->mq->mutex)) - mutex_unlock(&mdev->mq->mutex); maple_add_packet(mdev, 0, MAPLE_COMMAND_DEVINFO, 0, NULL); } @@ -637,7 +631,7 @@ static void maple_port_rescan(void) static void maple_dma_handler(struct work_struct *work) { struct mapleq *mq, *nmq; - struct maple_device *dev; + struct maple_device *mdev; char *recvbuf; enum maple_code code; @@ -646,43 +640,56 @@ static void maple_dma_handler(struct work_struct *work) ctrl_outl(0, MAPLE_ENABLE); if (!list_empty(&maple_sentq)) { list_for_each_entry_safe(mq, nmq, &maple_sentq, list) { - recvbuf = mq->recvbuf; + mdev = mq->dev; + recvbuf = mq->recvbuf->buf; + dma_cache_sync(&mdev->dev, recvbuf, 0x400, + DMA_FROM_DEVICE); code = recvbuf[0]; 
- dev = mq->dev; kfree(mq->sendbuf); - mutex_unlock(&mq->mutex); list_del_init(&mq->list); - switch (code) { case MAPLE_RESPONSE_NONE: - maple_response_none(dev, mq); + maple_response_none(mdev); break; case MAPLE_RESPONSE_DEVINFO: - maple_response_devinfo(dev, recvbuf); + maple_response_devinfo(mdev, recvbuf); + atomic_set(&mdev->busy, 0); break; case MAPLE_RESPONSE_DATATRF: - if (dev->callback) - dev->callback(mq); + if (mdev->callback) + mdev->callback(mq); + atomic_set(&mdev->busy, 0); + wake_up(&mdev->maple_wait); break; case MAPLE_RESPONSE_FILEERR: + maple_response_fileerr(mdev, recvbuf); + atomic_set(&mdev->busy, 0); + wake_up(&mdev->maple_wait); + break; + case MAPLE_RESPONSE_AGAIN: case MAPLE_RESPONSE_BADCMD: case MAPLE_RESPONSE_BADFUNC: - printk(KERN_DEBUG - "Maple non-fatal error 0x%X\n", - code); + dev_warn(&mdev->dev, "non-fatal error" + " 0x%X at (%d, %d)\n", code, + mdev->port, mdev->unit); + atomic_set(&mdev->busy, 0); break; case MAPLE_RESPONSE_ALLINFO: - printk(KERN_DEBUG - "Maple - extended device information" - " not supported\n"); + dev_notice(&mdev->dev, "extended" + " device information request for (%d, %d)" + " but call is not supported\n", mdev->port, + mdev->unit); + atomic_set(&mdev->busy, 0); break; case MAPLE_RESPONSE_OK: + atomic_set(&mdev->busy, 0); + wake_up(&mdev->maple_wait); break; default: @@ -699,20 +706,19 @@ static void maple_dma_handler(struct work_struct *work) if (!fullscan) maple_port_rescan(); /* mark that we have been through the first scan */ - if (started == 0) - started = 1; + started = 1; } - maplebus_dma_reset(); + maple_send(); } -static irqreturn_t maplebus_dma_interrupt(int irq, void *dev_id) +static irqreturn_t maple_dma_interrupt(int irq, void *dev_id) { /* Load everything into the bottom half */ schedule_work(&maple_dma_process); return IRQ_HANDLED; } -static irqreturn_t maplebus_vblank_interrupt(int irq, void *dev_id) +static irqreturn_t maple_vblank_interrupt(int irq, void *dev_id) { schedule_work(&maple_vblank_process); return IRQ_HANDLED; @@ -720,14 +726,14 @@ static irqreturn_t maplebus_vblank_interrupt(int irq, void *dev_id) static int maple_set_dma_interrupt_handler(void) { - return request_irq(HW_EVENT_MAPLE_DMA, maplebus_dma_interrupt, - IRQF_SHARED, "maple bus DMA", &maple_dummy_driver); + return request_irq(HW_EVENT_MAPLE_DMA, maple_dma_interrupt, + IRQF_SHARED, "maple bus DMA", &maple_unsupported_device); } static int maple_set_vblank_interrupt_handler(void) { - return request_irq(HW_EVENT_VSYNC, maplebus_vblank_interrupt, - IRQF_SHARED, "maple bus VBLANK", &maple_dummy_driver); + return request_irq(HW_EVENT_VSYNC, maple_vblank_interrupt, + IRQF_SHARED, "maple bus VBLANK", &maple_unsupported_device); } static int maple_get_dma_buffer(void) @@ -740,7 +746,7 @@ static int maple_get_dma_buffer(void) return 0; } -static int match_maple_bus_driver(struct device *devptr, +static int maple_match_bus_driver(struct device *devptr, struct device_driver *drvptr) { struct maple_driver *maple_drv = to_maple_driver(drvptr); @@ -765,22 +771,24 @@ static void maple_bus_release(struct device *dev) { } -static struct maple_driver maple_dummy_driver = { +static struct maple_driver maple_unsupported_device = { .drv = { - .name = "maple_dummy_driver", + .name = "maple_unsupported_device", .bus = &maple_bus_type, }, }; - +/** + * maple_bus_type - core maple bus structure + */ struct bus_type maple_bus_type = { .name = "maple", - .match = match_maple_bus_driver, + .match = maple_match_bus_driver, .uevent = maple_bus_uevent, }; 
EXPORT_SYMBOL_GPL(maple_bus_type); static struct device maple_bus = { - .bus_id = "maple", + .init_name = "maple", .release = maple_bus_release, }; @@ -788,7 +796,8 @@ static int __init maple_bus_init(void) { int retval, i; struct maple_device *mdev[MAPLE_PORTS]; - ctrl_outl(0, MAPLE_STATE); + + ctrl_outl(0, MAPLE_ENABLE); retval = device_register(&maple_bus); if (retval) @@ -798,36 +807,33 @@ static int __init maple_bus_init(void) if (retval) goto cleanup_device; - retval = driver_register(&maple_dummy_driver.drv); + retval = driver_register(&maple_unsupported_device.drv); if (retval) goto cleanup_bus; /* allocate memory for maple bus dma */ retval = maple_get_dma_buffer(); if (retval) { - printk(KERN_INFO - "Maple bus: Failed to allocate Maple DMA buffers\n"); + dev_err(&maple_bus, "failed to allocate DMA buffers\n"); goto cleanup_basic; } /* set up DMA interrupt handler */ retval = maple_set_dma_interrupt_handler(); if (retval) { - printk(KERN_INFO - "Maple bus: Failed to grab maple DMA IRQ\n"); + dev_err(&maple_bus, "bus failed to grab maple " + "DMA IRQ\n"); goto cleanup_dma; } /* set up VBLANK interrupt handler */ retval = maple_set_vblank_interrupt_handler(); if (retval) { - printk(KERN_INFO "Maple bus: Failed to grab VBLANK IRQ\n"); + dev_err(&maple_bus, "bus failed to grab VBLANK IRQ\n"); goto cleanup_irq; } - maple_queue_cache = - kmem_cache_create("maple_queue_cache", 0x400, 0, - SLAB_HWCACHE_ALIGN, NULL); + maple_queue_cache = KMEM_CACHE(maple_buffer, SLAB_HWCACHE_ALIGN); if (!maple_queue_cache) goto cleanup_bothirqs; @@ -838,23 +844,23 @@ static int __init maple_bus_init(void) /* setup maple ports */ for (i = 0; i < MAPLE_PORTS; i++) { checked[i] = false; + empty[i] = false; mdev[i] = maple_alloc_dev(i, 0); - baseunits[i] = mdev[i]; if (!mdev[i]) { while (i-- > 0) maple_free_dev(mdev[i]); goto cleanup_cache; } + baseunits[i] = mdev[i]; + atomic_set(&mdev[i]->busy, 1); maple_add_packet(mdev[i], 0, MAPLE_COMMAND_DEVINFO, 0, NULL); subdevice_map[i] = 0; } - /* setup maplebus hardware */ - maplebus_dma_reset(); - /* initial detection */ + maple_pnp_time = jiffies + HZ; + /* prepare initial queue */ maple_send(); - maple_pnp_time = jiffies; - printk(KERN_INFO "Maple bus core now registered.\n"); + dev_info(&maple_bus, "bus core now registered\n"); return 0; @@ -871,7 +877,7 @@ cleanup_dma: free_pages((unsigned long) maple_sendbuf, MAPLE_DMA_PAGES); cleanup_basic: - driver_unregister(&maple_dummy_driver.drv); + driver_unregister(&maple_unsupported_device.drv); cleanup_bus: bus_unregister(&maple_bus_type); @@ -880,7 +886,7 @@ cleanup_device: device_unregister(&maple_bus); cleanup: - printk(KERN_INFO "Maple bus registration failed\n"); + printk(KERN_ERR "Maple bus registration failed\n"); return retval; } /* Push init to later to ensure hardware gets detected */ diff --git a/drivers/sh/superhyway/superhyway.c b/drivers/sh/superhyway/superhyway.c index 4d0282b821b5..2d9e7f3d5611 100644 --- a/drivers/sh/superhyway/superhyway.c +++ b/drivers/sh/superhyway/superhyway.c @@ -22,7 +22,7 @@ static int superhyway_devices; static struct device superhyway_bus_device = { - .bus_id = "superhyway", + .init_name = "superhyway", }; static void superhyway_device_release(struct device *dev) @@ -83,7 +83,7 @@ int superhyway_add_device(unsigned long base, struct superhyway_device *sdev, dev->id.id = dev->vcr.mod_id; sprintf(dev->name, "SuperHyway device %04x", dev->id.id); - sprintf(dev->dev.bus_id, "%02x", superhyway_devices); + dev_set_name(&dev->dev, "%02x", superhyway_devices); superhyway_devices++; 
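The maple and superhyway hunks above share one driver-model change: device names are no longer written straight into the fixed-size bus_id array but are set through dev_set_name(), or through .init_name for statically allocated devices. A minimal sketch of that pattern, assuming a generic bus simply called "example" rather than either of the buses touched here:

#include <linux/device.h>
#include <linux/slab.h>

/* statically allocated bus device: named through .init_name,
 * assumed to be registered once with device_register() at init time */
static void example_bus_release(struct device *dev)
{
	/* nothing to free for the static bus device */
}

static struct device example_bus = {
	.init_name = "example",
	.release   = example_bus_release,
};

static void example_dev_release(struct device *dev)
{
	kfree(dev);
}

/* dynamically allocated child: named with dev_set_name() */
static int example_add_device(int index)
{
	struct device *dev;
	int error;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->parent = &example_bus;
	dev->release = example_dev_release;
	/* replaces the old sprintf(dev->bus_id, "%02x", index) */
	dev_set_name(dev, "%02x", index);

	error = device_register(dev);
	if (error)
		put_device(dev);	/* drops the reference, frees via release */

	return error;
}

device_register() takes its own reference on success, so the error path only needs put_device(), which ends up in the release callback rather than in a bare kfree().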
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig index 83babb0a1df7..c6c816b7ecb5 100644 --- a/drivers/usb/Kconfig +++ b/drivers/usb/Kconfig @@ -47,6 +47,7 @@ config USB_ARCH_HAS_OHCI default y if CPU_SUBTYPE_SH7720 default y if CPU_SUBTYPE_SH7721 default y if CPU_SUBTYPE_SH7763 + default y if CPU_SUBTYPE_SH7786 # more: default PCI diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 5cf5f1eca4f4..7658589edb1c 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c @@ -1049,7 +1049,8 @@ MODULE_LICENSE ("GPL"); #if defined(CONFIG_CPU_SUBTYPE_SH7720) || \ defined(CONFIG_CPU_SUBTYPE_SH7721) || \ - defined(CONFIG_CPU_SUBTYPE_SH7763) + defined(CONFIG_CPU_SUBTYPE_SH7763) || \ + defined(CONFIG_CPU_SUBTYPE_SH7786) #include "ohci-sh.c" #define PLATFORM_DRIVER ohci_hcd_sh_driver #endif diff --git a/drivers/video/pvr2fb.c b/drivers/video/pvr2fb.c index 0a0fd48a8566..53f8f1100e81 100644 --- a/drivers/video/pvr2fb.c +++ b/drivers/video/pvr2fb.c @@ -61,7 +61,7 @@ #include <mach-dreamcast/mach/sysasic.h> #endif -#ifdef CONFIG_SH_DMA +#ifdef CONFIG_PVR2_DMA #include <linux/pagemap.h> #include <mach/dma.h> #include <asm/dma.h> @@ -188,7 +188,7 @@ static unsigned int is_blanked = 0; /* Is the screen blanked? */ static unsigned long pvr2fb_map; #endif -#ifdef CONFIG_SH_DMA +#ifdef CONFIG_PVR2_DMA static unsigned int shdma = PVR2_CASCADE_CHAN; static unsigned int pvr2dma = ONCHIP_NR_DMA_CHANNELS; #endif @@ -207,7 +207,7 @@ static irqreturn_t pvr2fb_interrupt(int irq, void *dev_id); static int pvr2_init_cable(void); static int pvr2_get_param(const struct pvr2_params *p, const char *s, int val, int size); -#ifdef CONFIG_SH_DMA +#ifdef CONFIG_PVR2_DMA static ssize_t pvr2fb_write(struct fb_info *info, const char *buf, size_t count, loff_t *ppos); #endif @@ -218,7 +218,7 @@ static struct fb_ops pvr2fb_ops = { .fb_blank = pvr2fb_blank, .fb_check_var = pvr2fb_check_var, .fb_set_par = pvr2fb_set_par, -#ifdef CONFIG_SH_DMA +#ifdef CONFIG_PVR2_DMA .fb_write = pvr2fb_write, #endif .fb_fillrect = cfb_fillrect, @@ -671,7 +671,7 @@ static int pvr2_init_cable(void) return cable_type; } -#ifdef CONFIG_SH_DMA +#ifdef CONFIG_PVR2_DMA static ssize_t pvr2fb_write(struct fb_info *info, const char *buf, size_t count, loff_t *ppos) { @@ -743,7 +743,7 @@ out_unmap: return ret; } -#endif /* CONFIG_SH_DMA */ +#endif /* CONFIG_PVR2_DMA */ /** * pvr2fb_common_init @@ -893,7 +893,7 @@ static int __init pvr2fb_dc_init(void) return -EBUSY; } -#ifdef CONFIG_SH_DMA +#ifdef CONFIG_PVR2_DMA if (request_dma(pvr2dma, "pvr2") != 0) { free_irq(HW_EVENT_VSYNC, 0); return -EBUSY; @@ -915,7 +915,7 @@ static void __exit pvr2fb_dc_exit(void) } free_irq(HW_EVENT_VSYNC, 0); -#ifdef CONFIG_SH_DMA +#ifdef CONFIG_PVR2_DMA free_dma(pvr2dma); #endif } diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c index 2c5d069e5f06..b433b8ac76d9 100644 --- a/drivers/video/sh_mobile_lcdcfb.c +++ b/drivers/video/sh_mobile_lcdcfb.c @@ -33,6 +33,8 @@ struct sh_mobile_lcdc_chan { struct fb_info info; dma_addr_t dma_handle; struct fb_deferred_io defio; + unsigned long frame_end; + wait_queue_head_t frame_end_wait; }; struct sh_mobile_lcdc_priv { @@ -226,7 +228,10 @@ static void sh_mobile_lcdc_deferred_io_touch(struct fb_info *info) static irqreturn_t sh_mobile_lcdc_irq(int irq, void *data) { struct sh_mobile_lcdc_priv *priv = data; + struct sh_mobile_lcdc_chan *ch; unsigned long tmp; + int is_sub; + int k; /* acknowledge interrupt */ tmp = lcdc_read(priv, _LDINTR); @@ -234,8 +239,24 @@ static 
irqreturn_t sh_mobile_lcdc_irq(int irq, void *data) tmp |= 0x000000ff ^ LDINTR_FS; /* status in low 8 */ lcdc_write(priv, _LDINTR, tmp); - /* disable clocks */ - sh_mobile_lcdc_clk_off(priv); + /* figure out if this interrupt is for main or sub lcd */ + is_sub = (lcdc_read(priv, _LDSR) & (1 << 10)) ? 1 : 0; + + /* wake up channel and disable clocks*/ + for (k = 0; k < ARRAY_SIZE(priv->ch); k++) { + ch = &priv->ch[k]; + + if (!ch->enabled) + continue; + + if (is_sub == lcdc_chan_is_sublcd(ch)) { + ch->frame_end = 1; + wake_up(&ch->frame_end_wait); + + sh_mobile_lcdc_clk_off(priv); + } + } + return IRQ_HANDLED; } @@ -448,18 +469,27 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv) struct sh_mobile_lcdc_board_cfg *board_cfg; int k; - /* tell the board code to disable the panel */ + /* clean up deferred io and ask board code to disable panel */ for (k = 0; k < ARRAY_SIZE(priv->ch); k++) { ch = &priv->ch[k]; - board_cfg = &ch->cfg.board_cfg; - if (board_cfg->display_off) - board_cfg->display_off(board_cfg->board_data); - /* cleanup deferred io if enabled */ + /* deferred io mode: + * flush frame, and wait for frame end interrupt + * clean up deferred io and enable clock + */ if (ch->info.fbdefio) { + ch->frame_end = 0; + schedule_delayed_work(&ch->info.deferred_work, 0); + wait_event(ch->frame_end_wait, ch->frame_end); fb_deferred_io_cleanup(&ch->info); ch->info.fbdefio = NULL; + sh_mobile_lcdc_clk_on(priv); } + + board_cfg = &ch->cfg.board_cfg; + if (board_cfg->display_off) + board_cfg->display_off(board_cfg->board_data); + } /* stop the lcdc */ @@ -652,6 +682,26 @@ static int sh_mobile_lcdc_set_bpp(struct fb_var_screeninfo *var, int bpp) return 0; } +static int sh_mobile_lcdc_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + + sh_mobile_lcdc_stop(platform_get_drvdata(pdev)); + return 0; +} + +static int sh_mobile_lcdc_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + + return sh_mobile_lcdc_start(platform_get_drvdata(pdev)); +} + +static struct dev_pm_ops sh_mobile_lcdc_dev_pm_ops = { + .suspend = sh_mobile_lcdc_suspend, + .resume = sh_mobile_lcdc_resume, +}; + static int sh_mobile_lcdc_remove(struct platform_device *pdev); static int __init sh_mobile_lcdc_probe(struct platform_device *pdev) @@ -707,6 +757,7 @@ static int __init sh_mobile_lcdc_probe(struct platform_device *pdev) dev_err(&pdev->dev, "unsupported interface type\n"); goto err1; } + init_waitqueue_head(&priv->ch[i].frame_end_wait); switch (pdata->ch[i].chan) { case LCDC_CHAN_MAINLCD: @@ -860,6 +911,7 @@ static struct platform_driver sh_mobile_lcdc_driver = { .driver = { .name = "sh_mobile_lcdc_fb", .owner = THIS_MODULE, + .pm = &sh_mobile_lcdc_dev_pm_ops, }, .probe = sh_mobile_lcdc_probe, .remove = sh_mobile_lcdc_remove, |
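The sh_mobile_lcdcfb changes above add a per-channel frame_end flag and wait queue so the stop path can flush deferred io and then sleep until the frame-end interrupt confirms the last frame has been scanned out. The handshake reduces to the classic flag-plus-wait-queue pattern; a condensed sketch, assuming a single channel and with the fb deferred-io machinery stood in for by a plain delayed work item:

#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

struct lcdc_chan {
	unsigned long frame_end;		/* set from the interrupt handler */
	wait_queue_head_t frame_end_wait;
	struct delayed_work deferred_work;	/* stands in for fb deferred io */
};

/* frame-end interrupt: flag the event and wake any waiter */
static irqreturn_t lcdc_irq(int irq, void *data)
{
	struct lcdc_chan *ch = data;

	ch->frame_end = 1;
	wake_up(&ch->frame_end_wait);
	return IRQ_HANDLED;
}

static void lcdc_flush_work(struct work_struct *work)
{
	/* in the driver this is the deferred-io path pushing out the frame */
}

/* stop path: flush pending updates, then sleep until the IRQ confirms */
static void lcdc_stop(struct lcdc_chan *ch)
{
	ch->frame_end = 0;
	schedule_delayed_work(&ch->deferred_work, 0);
	wait_event(ch->frame_end_wait, ch->frame_end);
}

static void lcdc_chan_init(struct lcdc_chan *ch)
{
	init_waitqueue_head(&ch->frame_end_wait);
	INIT_DELAYED_WORK(&ch->deferred_work, lcdc_flush_work);
}

Clearing the flag before scheduling the flush is what makes the wait race-free: if the interrupt lands before wait_event() is reached, the condition is already true and the stop path does not block.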
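The same file also gains suspend/resume support through a dev_pm_ops table attached to the platform driver, reusing the existing start and stop paths. A skeletal version of that wiring, with an invented "example" driver name and the hardware start/stop reduced to comments:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int example_suspend(struct device *dev)
{
	/* a real callback recovers its state with
	 * platform_get_drvdata(to_platform_device(dev))
	 * and runs the driver's normal stop path */
	return 0;
}

static int example_resume(struct device *dev)
{
	/* ...and the normal start path, returning its error code */
	return 0;
}

static struct dev_pm_ops example_dev_pm_ops = {
	.suspend = example_suspend,
	.resume  = example_resume,
};

static struct platform_driver example_driver = {
	.driver = {
		.name	= "example",
		.owner	= THIS_MODULE,
		.pm	= &example_dev_pm_ops,
	},
};

Because the dev_pm_ops callbacks take a plain struct device pointer, the same table can be shared by any bus type, which is the main gain over the older per-bus suspend/resume hooks.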