Commit 5ff4cea4 authored by Tony Lindgren

Merge with /home/tmlind/src/kernel/linux-2.6

parents 123ae842 2f12c74f
...@@ -42,6 +42,7 @@ EXPORT_SYMBOL(clear_page); ...@@ -42,6 +42,7 @@ EXPORT_SYMBOL(clear_page);
#ifdef CONFIG_VIRTUAL_MEM_MAP #ifdef CONFIG_VIRTUAL_MEM_MAP
#include <linux/bootmem.h> #include <linux/bootmem.h>
EXPORT_SYMBOL(min_low_pfn); /* defined by bootmem.c, but not exported by generic code */
EXPORT_SYMBOL(max_low_pfn); /* defined by bootmem.c, but not exported by generic code */ EXPORT_SYMBOL(max_low_pfn); /* defined by bootmem.c, but not exported by generic code */
#endif #endif
......
...@@ -740,7 +740,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, ...@@ -740,7 +740,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
switch(val) { switch(val) {
case DIE_BREAK: case DIE_BREAK:
/* err is break number from ia64_bad_break() */ /* err is break number from ia64_bad_break() */
if (args->err == 0x80200 || args->err == 0x80300) if (args->err == 0x80200 || args->err == 0x80300 || args->err == 0)
if (pre_kprobes_handler(args)) if (pre_kprobes_handler(args))
ret = NOTIFY_STOP; ret = NOTIFY_STOP;
break; break;
......
...@@ -132,24 +132,6 @@ __kprobes ia64_bad_break (unsigned long break_num, struct pt_regs *regs) ...@@ -132,24 +132,6 @@ __kprobes ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
siginfo_t siginfo; siginfo_t siginfo;
int sig, code; int sig, code;
/* break.b always sets cr.iim to 0, which causes problems for
* debuggers. Get the real break number from the original instruction,
* but only for kernel code. User space break.b is left alone, to
* preserve the existing behaviour. All break codings have the same
* format, so there is no need to check the slot type.
*/
if (break_num == 0 && !user_mode(regs)) {
struct ia64_psr *ipsr = ia64_psr(regs);
unsigned long *bundle = (unsigned long *)regs->cr_iip;
unsigned long slot;
switch (ipsr->ri) {
case 0: slot = (bundle[0] >> 5); break;
case 1: slot = (bundle[0] >> 46) | (bundle[1] << 18); break;
default: slot = (bundle[1] >> 23); break;
}
break_num = ((slot >> 36 & 1) << 20) | (slot >> 6 & 0xfffff);
}
/* SIGILL, SIGFPE, SIGSEGV, and SIGBUS want these field initialized: */ /* SIGILL, SIGFPE, SIGSEGV, and SIGBUS want these field initialized: */
siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri); siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
siginfo.si_imm = break_num; siginfo.si_imm = break_num;
......
...@@ -163,15 +163,13 @@ EXPORT_SYMBOL(giveup_altivec); ...@@ -163,15 +163,13 @@ EXPORT_SYMBOL(giveup_altivec);
EXPORT_SYMBOL(giveup_spe); EXPORT_SYMBOL(giveup_spe);
#endif /* CONFIG_SPE */ #endif /* CONFIG_SPE */
#ifdef CONFIG_PPC64 #ifndef CONFIG_PPC64
EXPORT_SYMBOL(__flush_icache_range);
#else
EXPORT_SYMBOL(flush_instruction_cache); EXPORT_SYMBOL(flush_instruction_cache);
EXPORT_SYMBOL(flush_icache_range);
EXPORT_SYMBOL(flush_tlb_kernel_range); EXPORT_SYMBOL(flush_tlb_kernel_range);
EXPORT_SYMBOL(flush_tlb_page); EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(_tlbie); EXPORT_SYMBOL(_tlbie);
#endif #endif
EXPORT_SYMBOL(__flush_icache_range);
EXPORT_SYMBOL(flush_dcache_range); EXPORT_SYMBOL(flush_dcache_range);
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
......
...@@ -184,7 +184,7 @@ EXPORT_SYMBOL(kernel_thread); ...@@ -184,7 +184,7 @@ EXPORT_SYMBOL(kernel_thread);
EXPORT_SYMBOL(flush_instruction_cache); EXPORT_SYMBOL(flush_instruction_cache);
EXPORT_SYMBOL(giveup_fpu); EXPORT_SYMBOL(giveup_fpu);
EXPORT_SYMBOL(flush_icache_range); EXPORT_SYMBOL(__flush_icache_range);
EXPORT_SYMBOL(flush_dcache_range); EXPORT_SYMBOL(flush_dcache_range);
EXPORT_SYMBOL(flush_icache_user_range); EXPORT_SYMBOL(flush_icache_user_range);
EXPORT_SYMBOL(flush_dcache_page); EXPORT_SYMBOL(flush_dcache_page);
......
...@@ -77,6 +77,7 @@ static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte, ...@@ -77,6 +77,7 @@ static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte,
BUG_ON(!pte_none(*pte)); BUG_ON(!pte_none(*pte));
set_pte_at(mm, address, pte, entry); set_pte_at(mm, address, pte, entry);
address += PAGE_SIZE; address += PAGE_SIZE;
pte_val(entry) += PAGE_SIZE;
pte++; pte++;
} while (address < curend); } while (address < curend);
} while (address < end); } while (address < end);
......
...@@ -91,9 +91,9 @@ static int mfcounter = 0; ...@@ -91,9 +91,9 @@ static int mfcounter = 0;
* Public data... * Public data...
*/ */
int mpt_lan_index = -1; int mpt_lan_index = -1;
static int mpt_stm_index = -1; int mpt_stm_index = -1;
static struct proc_dir_entry *mpt_proc_root_dir; struct proc_dir_entry *mpt_proc_root_dir;
#define WHOINIT_UNKNOWN 0xAA #define WHOINIT_UNKNOWN 0xAA
...@@ -6271,6 +6271,7 @@ EXPORT_SYMBOL(mpt_resume); ...@@ -6271,6 +6271,7 @@ EXPORT_SYMBOL(mpt_resume);
EXPORT_SYMBOL(mpt_suspend); EXPORT_SYMBOL(mpt_suspend);
#endif #endif
EXPORT_SYMBOL(ioc_list); EXPORT_SYMBOL(ioc_list);
EXPORT_SYMBOL(mpt_proc_root_dir);
EXPORT_SYMBOL(mpt_register); EXPORT_SYMBOL(mpt_register);
EXPORT_SYMBOL(mpt_deregister); EXPORT_SYMBOL(mpt_deregister);
EXPORT_SYMBOL(mpt_event_register); EXPORT_SYMBOL(mpt_event_register);
...@@ -6288,6 +6289,7 @@ EXPORT_SYMBOL(mpt_verify_adapter); ...@@ -6288,6 +6289,7 @@ EXPORT_SYMBOL(mpt_verify_adapter);
EXPORT_SYMBOL(mpt_GetIocState); EXPORT_SYMBOL(mpt_GetIocState);
EXPORT_SYMBOL(mpt_print_ioc_summary); EXPORT_SYMBOL(mpt_print_ioc_summary);
EXPORT_SYMBOL(mpt_lan_index); EXPORT_SYMBOL(mpt_lan_index);
EXPORT_SYMBOL(mpt_stm_index);
EXPORT_SYMBOL(mpt_HardResetHandler); EXPORT_SYMBOL(mpt_HardResetHandler);
EXPORT_SYMBOL(mpt_config); EXPORT_SYMBOL(mpt_config);
EXPORT_SYMBOL(mpt_toolbox); EXPORT_SYMBOL(mpt_toolbox);
......
...@@ -1006,8 +1006,10 @@ extern int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode); ...@@ -1006,8 +1006,10 @@ extern int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode);
* Public data decl's... * Public data decl's...
*/ */
extern struct list_head ioc_list; extern struct list_head ioc_list;
extern struct proc_dir_entry *mpt_proc_root_dir;
extern int mpt_lan_index; /* needed by mptlan.c */ extern int mpt_lan_index; /* needed by mptlan.c */
extern int mpt_stm_index; /* needed by mptstm.c */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#endif /* } __KERNEL__ */ #endif /* } __KERNEL__ */
......
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
* *
* (C) 2000 Red Hat. GPL'd * (C) 2000 Red Hat. GPL'd
* *
* $Id: cfi_cmdset_0001.c,v 1.185 2005/11/07 11:14:22 gleixner Exp $ * $Id: cfi_cmdset_0001.c,v 1.186 2005/11/23 22:07:52 nico Exp $
* *
* *
* 10/10/2000 Nicolas Pitre <nico@cam.org> * 10/10/2000 Nicolas Pitre <nico@cam.org>
...@@ -644,9 +644,8 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr ...@@ -644,9 +644,8 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
* *
* - contension arbitration is handled in the owner's context. * - contension arbitration is handled in the owner's context.
* *
* The 'shared' struct can be read when its lock is taken. * The 'shared' struct can be read and/or written only when
* However any writes to it can only be made when the current * its lock is taken.
* owner's lock is also held.
*/ */
struct flchip_shared *shared = chip->priv; struct flchip_shared *shared = chip->priv;
struct flchip *contender; struct flchip *contender;
...@@ -675,14 +674,13 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr ...@@ -675,14 +674,13 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
} }
timeo = jiffies + HZ; timeo = jiffies + HZ;
spin_lock(&shared->lock); spin_lock(&shared->lock);
spin_unlock(contender->mutex);
} }
/* We now own it */ /* We now own it */
shared->writing = chip; shared->writing = chip;
if (mode == FL_ERASING) if (mode == FL_ERASING)
shared->erasing = chip; shared->erasing = chip;
if (contender && contender != chip)
spin_unlock(contender->mutex);
spin_unlock(&shared->lock); spin_unlock(&shared->lock);
} }
......
/* /*
Common Flash Interface probe code. Common Flash Interface probe code.
(C) 2000 Red Hat. GPL'd. (C) 2000 Red Hat. GPL'd.
$Id: cfi_probe.c,v 1.84 2005/11/07 11:14:23 gleixner Exp $ $Id: cfi_probe.c,v 1.86 2005/11/29 14:48:31 gleixner Exp $
*/ */
#include <linux/config.h> #include <linux/config.h>
...@@ -230,8 +230,8 @@ static int __xipram cfi_chip_setup(struct map_info *map, ...@@ -230,8 +230,8 @@ static int __xipram cfi_chip_setup(struct map_info *map,
cfi_send_gen_cmd(0xaa, 0x555, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0xaa, 0x555, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, 0x2aa, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x55, 0x2aa, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x90, 0x555, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x90, 0x555, base, map, cfi, cfi->device_type, NULL);
cfi->mfr = cfi_read_query(map, base); cfi->mfr = cfi_read_query16(map, base);
cfi->id = cfi_read_query(map, base + ofs_factor); cfi->id = cfi_read_query16(map, base + ofs_factor);
/* Put it back into Read Mode */ /* Put it back into Read Mode */
cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
...@@ -426,7 +426,7 @@ static struct mtd_chip_driver cfi_chipdrv = { ...@@ -426,7 +426,7 @@ static struct mtd_chip_driver cfi_chipdrv = {
.module = THIS_MODULE .module = THIS_MODULE
}; };
int __init cfi_probe_init(void) static int __init cfi_probe_init(void)
{ {
register_mtd_chip_driver(&cfi_chipdrv); register_mtd_chip_driver(&cfi_chipdrv);
return 0; return 0;
......
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
* Copyright 2000,2001 David A. Schleef <ds@schleef.org> * Copyright 2000,2001 David A. Schleef <ds@schleef.org>
* 2000,2001 Lineo, Inc. * 2000,2001 Lineo, Inc.
* *
* $Id: sharp.c,v 1.16 2005/11/07 11:14:23 gleixner Exp $ * $Id: sharp.c,v 1.17 2005/11/29 14:28:28 gleixner Exp $
* *
* Devices supported: * Devices supported:
* LH28F016SCT Symmetrical block flash memory, 2Mx8 * LH28F016SCT Symmetrical block flash memory, 2Mx8
...@@ -160,22 +160,28 @@ struct mtd_info *sharp_probe(struct map_info *map) ...@@ -160,22 +160,28 @@ struct mtd_info *sharp_probe(struct map_info *map)
return mtd; return mtd;
} }
static inline void sharp_send_cmd(struct map_info *map, unsigned long cmd, unsigned long adr)
{
map_word map_cmd;
map_cmd.x[0] = cmd;
map_write(map, map_cmd, adr);
}
static int sharp_probe_map(struct map_info *map,struct mtd_info *mtd) static int sharp_probe_map(struct map_info *map,struct mtd_info *mtd)
{ {
unsigned long tmp; map_word tmp, read0, read4;
unsigned long base = 0; unsigned long base = 0;
u32 read0, read4;
int width = 4; int width = 4;
tmp = map_read32(map, base+0); tmp = map_read(map, base+0);
map_write32(map, CMD_READ_ID, base+0); sharp_send_cmd(map, CMD_READ_ID, base+0);
read0=map_read32(map, base+0); read0 = map_read(map, base+0);
read4=map_read32(map, base+4); read4 = map_read(map, base+4);
if(read0 == 0x89898989){ if(read0.x[0] == 0x89898989){
printk("Looks like sharp flash\n"); printk("Looks like sharp flash\n");
switch(read4){ switch(read4.x[0]){
case 0xaaaaaaaa: case 0xaaaaaaaa:
case 0xa0a0a0a0: case 0xa0a0a0a0:
/* aa - LH28F016SCT-L95 2Mx8, 32 64k blocks*/ /* aa - LH28F016SCT-L95 2Mx8, 32 64k blocks*/
...@@ -197,16 +203,16 @@ static int sharp_probe_map(struct map_info *map,struct mtd_info *mtd) ...@@ -197,16 +203,16 @@ static int sharp_probe_map(struct map_info *map,struct mtd_info *mtd)
return width; return width;
#endif #endif
default: default:
printk("Sort-of looks like sharp flash, 0x%08x 0x%08x\n", printk("Sort-of looks like sharp flash, 0x%08lx 0x%08lx\n",
read0,read4); read0.x[0], read4.x[0]);
} }
}else if((map_read32(map, base+0) == CMD_READ_ID)){ }else if((map_read(map, base+0).x[0] == CMD_READ_ID)){
/* RAM, probably */ /* RAM, probably */
printk("Looks like RAM\n"); printk("Looks like RAM\n");
map_write32(map, tmp, base+0); map_write(map, tmp, base+0);
}else{ }else{
printk("Doesn't look like sharp flash, 0x%08x 0x%08x\n", printk("Doesn't look like sharp flash, 0x%08lx 0x%08lx\n",
read0,read4); read0.x[0], read4.x[0]);
} }
return 0; return 0;
...@@ -215,7 +221,8 @@ static int sharp_probe_map(struct map_info *map,struct mtd_info *mtd) ...@@ -215,7 +221,8 @@ static int sharp_probe_map(struct map_info *map,struct mtd_info *mtd)
/* This function returns with the chip->mutex lock held. */ /* This function returns with the chip->mutex lock held. */
static int sharp_wait(struct map_info *map, struct flchip *chip) static int sharp_wait(struct map_info *map, struct flchip *chip)
{ {
int status, i; int i;
map_word status;
unsigned long timeo = jiffies + HZ; unsigned long timeo = jiffies + HZ;
DECLARE_WAITQUEUE(wait, current); DECLARE_WAITQUEUE(wait, current);
int adr = 0; int adr = 0;
...@@ -225,12 +232,12 @@ retry: ...@@ -225,12 +232,12 @@ retry:
switch(chip->state){ switch(chip->state){
case FL_READY: case FL_READY:
map_write32(map,CMD_READ_STATUS,adr); sharp_send_cmd(map, CMD_READ_STATUS, adr);
chip->state = FL_STATUS; chip->state = FL_STATUS;
case FL_STATUS: case FL_STATUS:
for(i=0;i<100;i++){ for(i=0;i<100;i++){
status = map_read32(map,adr); status = map_read(map, adr);
if((status & SR_READY)==SR_READY) if((status.x[0] & SR_READY)==SR_READY)
break; break;
udelay(1); udelay(1);
} }
...@@ -254,7 +261,7 @@ retry: ...@@ -254,7 +261,7 @@ retry:
goto retry; goto retry;
} }
map_write32(map,CMD_RESET, adr); sharp_send_cmd(map, CMD_RESET, adr);
chip->state = FL_READY; chip->state = FL_READY;
...@@ -351,37 +358,39 @@ static int sharp_write_oneword(struct map_info *map, struct flchip *chip, ...@@ -351,37 +358,39 @@ static int sharp_write_oneword(struct map_info *map, struct flchip *chip,
int timeo; int timeo;
int try; int try;
int i; int i;
int status = 0; map_word data, status;
status.x[0] = 0;
ret = sharp_wait(map,chip); ret = sharp_wait(map,chip);
for(try=0;try<10;try++){ for(try=0;try<10;try++){
map_write32(map,CMD_BYTE_WRITE,adr); sharp_send_cmd(map, CMD_BYTE_WRITE, adr);
/* cpu_to_le32 -> hack to fix the writel be->le conversion */ /* cpu_to_le32 -> hack to fix the writel be->le conversion */
map_write32(map,cpu_to_le32(datum),adr); data.x[0] = cpu_to_le32(datum);
map_write(map, data, adr);
chip->state = FL_WRITING; chip->state = FL_WRITING;
timeo = jiffies + (HZ/2); timeo = jiffies + (HZ/2);
map_write32(map,CMD_READ_STATUS,adr); sharp_send_cmd(map, CMD_READ_STATUS, adr);
for(i=0;i<100;i++){ for(i=0;i<100;i++){
status = map_read32(map,adr); status = map_read(map, adr);
if((status & SR_READY)==SR_READY) if((status.x[0] & SR_READY) == SR_READY)
break; break;
} }
if(i==100){ if(i==100){
printk("sharp: timed out writing\n"); printk("sharp: timed out writing\n");
} }
if(!(status&SR_ERRORS)) if(!(status.x[0] & SR_ERRORS))
break; break;
printk("sharp: error writing byte at addr=%08lx status=%08x\n",adr,status); printk("sharp: error writing byte at addr=%08lx status=%08lx\n", adr, status.x[0]);
map_write32(map,CMD_CLEAR_STATUS,adr); sharp_send_cmd(map, CMD_CLEAR_STATUS, adr);
} }
map_write32(map,CMD_RESET,adr); sharp_send_cmd(map, CMD_RESET, adr);
chip->state = FL_READY; chip->state = FL_READY;
wake_up(&chip->wq); wake_up(&chip->wq);
...@@ -434,18 +443,18 @@ static int sharp_do_wait_for_ready(struct map_info *map, struct flchip *chip, ...@@ -434,18 +443,18 @@ static int sharp_do_wait_for_ready(struct map_info *map, struct flchip *chip,
{ {
int ret; int ret;
unsigned long timeo; unsigned long timeo;
int status; map_word status;
DECLARE_WAITQUEUE(wait, current); DECLARE_WAITQUEUE(wait, current);
map_write32(map,CMD_READ_STATUS,adr); sharp_send_cmd(map, CMD_READ_STATUS, adr);
status = map_read32(map,adr); status = map_read(map, adr);
timeo = jiffies + HZ; timeo = jiffies + HZ;
while(time_before(jiffies, timeo)){ while(time_before(jiffies, timeo)){
map_write32(map,CMD_READ_STATUS,adr); sharp_send_cmd(map, CMD_READ_STATUS, adr);
status = map_read32(map,adr); status = map_read(map, adr);
if((status & SR_READY)==SR_READY){ if((status.x[0] & SR_READY)==SR_READY){
ret = 0; ret = 0;
goto out; goto out;
} }
...@@ -476,7 +485,7 @@ static int sharp_erase_oneblock(struct map_info *map, struct flchip *chip, ...@@ -476,7 +485,7 @@ static int sharp_erase_oneblock(struct map_info *map, struct flchip *chip,
{ {
int ret; int ret;
//int timeo; //int timeo;
int status; map_word status;
//int i; //int i;
//printk("sharp_erase_oneblock()\n"); //printk("sharp_erase_oneblock()\n");
...@@ -486,26 +495,26 @@ static int sharp_erase_oneblock(struct map_info *map, struct flchip *chip, ...@@ -486,26 +495,26 @@ static int sharp_erase_oneblock(struct map_info *map, struct flchip *chip,
sharp_unlock_oneblock(map,chip,adr); sharp_unlock_oneblock(map,chip,adr);
#endif #endif
map_write32(map,CMD_BLOCK_ERASE_1,adr); sharp_send_cmd(map, CMD_BLOCK_ERASE_1, adr);
map_write32(map,CMD_BLOCK_ERASE_2,adr); sharp_send_cmd(map, CMD_BLOCK_ERASE_2, adr);
chip->state = FL_ERASING; chip->state = FL_ERASING;
ret = sharp_do_wait_for_ready(map,chip,adr); ret = sharp_do_wait_for_ready(map,chip,adr);
if(ret<0)return ret; if(ret<0)return ret;
map_write32(map,CMD_READ_STATUS,adr); sharp_send_cmd(map, CMD_READ_STATUS, adr);
status = map_read32(map,adr); status = map_read(map, adr);
if(!(status&SR_ERRORS)){ if(!(status.x[0] & SR_ERRORS)){
map_write32(map,CMD_RESET,adr); sharp_send_cmd(map, CMD_RESET, adr);
chip->state = FL_READY; chip->state = FL_READY;
//spin_unlock_bh(chip->mutex); //spin_unlock_bh(chip->mutex);
return 0; return 0;
} }
printk("sharp: error erasing block at addr=%08lx status=%08x\n",adr,status); printk("sharp: error erasing block at addr=%08lx status=%08lx\n", adr, status.x[0]);
map_write32(map,CMD_CLEAR_STATUS,adr); sharp_send_cmd(map, CMD_CLEAR_STATUS, adr);
//spin_unlock_bh(chip->mutex); //spin_unlock_bh(chip->mutex);
...@@ -517,20 +526,20 @@ static void sharp_unlock_oneblock(struct map_info *map, struct flchip *chip, ...@@ -517,20 +526,20 @@ static void sharp_unlock_oneblock(struct map_info *map, struct flchip *chip,
unsigned long adr) unsigned long adr)
{ {
int i; int i;
int status; map_word status;
map_write32(map,CMD_CLEAR_BLOCK_LOCKS_1,adr); sharp_send_cmd(map, CMD_CLEAR_BLOCK_LOCKS_1, adr);
map_write32(map,CMD_CLEAR_BLOCK_LOCKS_2,adr); sharp_send_cmd(map, CMD_CLEAR_BLOCK_LOCKS_2, adr);
udelay(100); udelay(100);
status = map_read32(map,adr); status = map_read(map, adr);
printk("status=%08x\n",status); printk("status=%08lx\n", status.x[0]);
for(i=0;i<1000;i++){ for(i=0;i<1000;i++){
//map_write32(map,CMD_READ_STATUS,adr); //sharp_send_cmd(map, CMD_READ_STATUS, adr);
status = map_read32(map,adr); status = map_read(map, adr);
if((status & SR_READY)==SR_READY) if((status.x[0] & SR_READY) == SR_READY)
break; break;
udelay(100); udelay(100);
} }
...@@ -538,14 +547,14 @@ static void sharp_unlock_oneblock(struct map_info *map, struct flchip *chip, ...@@ -538,14 +547,14 @@ static void sharp_unlock_oneblock(struct map_info *map, struct flchip *chip,
printk("sharp: timed out unlocking block\n"); printk("sharp: timed out unlocking block\n");
} }
if(!(status&SR_ERRORS)){ if(!(status.x[0] & SR_ERRORS)){
map_write32(map,CMD_RESET,adr); sharp_send_cmd(map, CMD_RESET, adr);
chip->state = FL_READY; chip->state = FL_READY;
return; return;
} }
printk("sharp: error unlocking block at addr=%08lx status=%08x\n",adr,status); printk("sharp: error unlocking block at addr=%08lx status=%08lx\n", adr, status.x[0]);
map_write32(map,CMD_CLEAR_STATUS,adr); sharp_send_cmd(map, CMD_CLEAR_STATUS, adr);
} }
#endif #endif
......
/* /*
* $Id: block2mtd.c,v 1.29 2005/11/07 11:14:24 gleixner Exp $ * $Id: block2mtd.c,v 1.30 2005/11/29 14:48:32 gleixner Exp $
* *
* block2mtd.c - create an mtd from a block device * block2mtd.c - create an mtd from a block device
* *
...@@ -19,7 +19,7 @@ ...@@ -19,7 +19,7 @@
#include <linux/mtd/mtd.h> #include <linux/mtd/mtd.h>
#include <linux/buffer_head.h> #include <linux/buffer_head.h>
#define VERSION "$Revision: 1.29 $" #define VERSION "$Revision: 1.30 $"
#define ERROR(fmt, args...) printk(KERN_ERR "block2mtd: " fmt "\n" , ## args) #define ERROR(fmt, args...) printk(KERN_ERR "block2mtd: " fmt "\n" , ## args)
...@@ -40,7 +40,7 @@ static LIST_HEAD(blkmtd_device_list); ...@@ -40,7 +40,7 @@ static LIST_HEAD(blkmtd_device_list);
#define PAGE_READAHEAD 64 #define PAGE_READAHEAD 64
void cache_readahead(struct address_space *mapping, int index) static void cache_readahead(struct address_space *mapping, int index)
{ {
filler_t *filler = (filler_t*)mapping->a_ops->readpage; filler_t *filler = (filler_t*)mapping->a_ops->readpage;
int i, pagei; int i, pagei;
......
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
* as published by the Free Software Foundation; either version * as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version. * 2 of the License, or (at your option) any later version.
* *
* $Id: ms02-nv.c,v 1.10 2005/06/20 12:24:41 macro Exp $ * $Id: ms02-nv.c,v 1.11 2005/11/14 13:41:47 macro Exp $
*/ */
#include <linux/init.h> #include <linux/init.h>
...@@ -293,13 +293,13 @@ static int __init ms02nv_init(void) ...@@ -293,13 +293,13 @@ static int __init ms02nv_init(void)
switch (mips_machtype) { switch (mips_machtype) {
case MACH_DS5000_200: case MACH_DS5000_200:
csr = (volatile u32 *)KN02_CSR_BASE; csr = (volatile u32 *)CKSEG1ADDR(KN02_SLOT_BASE + KN02_CSR);
if (*csr & KN02_CSR_BNK32M) if (*csr & KN02_CSR_BNK32M)
stride = 2; stride = 2;
break; break;
case MACH_DS5000_2X0: case MACH_DS5000_2X0:
case MACH_DS5900: case MACH_DS5900:
csr = (volatile u32 *)KN03_MCR_BASE; csr = (volatile u32 *)CKSEG1ADDR(KN03_SLOT_BASE + IOASIC_MCR);
if (*csr & KN03_MCR_BNK32M) if (*csr & KN03_MCR_BNK32M)
stride = 2; stride = 2;
break; break;
......
/* This version ported to the Linux-MTD system by dwmw2@infradead.org /* This version ported to the Linux-MTD system by dwmw2@infradead.org
* $Id: ftl.c,v 1.58 2005/11/07 11:14:19 gleixner Exp $ * $Id: ftl.c,v 1.59 2005/11/29 14:48:31 gleixner Exp $
* *
* Fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br> * Fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
* - fixes some leaks on failure in build_maps and ftl_notify_add, cleanups * - fixes some leaks on failure in build_maps and ftl_notify_add, cleanups
...@@ -1084,9 +1084,9 @@ struct mtd_blktrans_ops ftl_tr = { ...@@ -1084,9 +1084,9 @@ struct mtd_blktrans_ops ftl_tr = {
.owner = THIS_MODULE, .owner = THIS_MODULE,
}; };
int init_ftl(void) static int init_ftl(void)
{ {
DEBUG(0, "$Id: ftl.c,v 1.58 2005/11/07 11:14:19 gleixner Exp $\n"); DEBUG(0, "$Id: ftl.c,v 1.59 2005/11/29 14:48:31 gleixner Exp $\n");
return register_mtd_blktrans(&ftl_tr); return register_mtd_blktrans(&ftl_tr);
} }
......
...@@ -538,12 +538,6 @@ config MTD_MPC1211 ...@@ -538,12 +538,6 @@ config MTD_MPC1211
This enables access to the flash chips on the Interface MPC-1211(CTP/PCI/MPC-SH02). This enables access to the flash chips on the Interface MPC-1211(CTP/PCI/MPC-SH02).
If you have such a board, say 'Y'. If you have such a board, say 'Y'.
config MTD_PQ2FADS
tristate "JEDEC flash SIMM mapped on PQ2FADS and 8272ADS boards"
depends on (ADS8272 || PQ2FADS) && MTD_PARTITIONS && MTD_JEDECPROBE && MTD_PHYSMAP && MTD_CFI_GEOMETRY && MTD_CFI_INTELEXT
help
This enables access to flash SIMM on PQ2FADS-like boards
config MTD_OMAP_NOR config MTD_OMAP_NOR
tristate "TI OMAP board mappings" tristate "TI OMAP board mappings"
depends on MTD_CFI && ARCH_OMAP depends on MTD_CFI && ARCH_OMAP
......
...@@ -70,6 +70,5 @@ obj-$(CONFIG_MTD_DMV182) += dmv182.o ...@@ -70,6 +70,5 @@ obj-$(CONFIG_MTD_DMV182) += dmv182.o
obj-$(CONFIG_MTD_SHARP_SL) += sharpsl-flash.o obj-$(CONFIG_MTD_SHARP_SL) += sharpsl-flash.o
obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o
obj-$(CONFIG_MTD_OMAP_NOR) += omap_nor.o obj-$(CONFIG_MTD_OMAP_NOR) += omap_nor.o
obj-$(CONFIG_MTD_PQ2FADS) += pq2fads.o
obj-$(CONFIG_MTD_MTX1) += mtx-1_flash.o obj-$(CONFIG_MTD_MTX1) += mtx-1_flash.o
obj-$(CONFIG_MTD_TQM834x) += tqm834x.o obj-$(CONFIG_MTD_TQM834x) += tqm834x.o
/* /*
* $Id: ixp4xx.c,v 1.12 2005/11/07 11:14:27 gleixner Exp $ * $Id: ixp4xx.c,v 1.13 2005/11/16 16:23:21 dvrabel Exp $
* *
* drivers/mtd/maps/ixp4xx.c * drivers/mtd/maps/ixp4xx.c
* *
...@@ -34,10 +34,55 @@ ...@@ -34,10 +34,55 @@
#include <linux/reboot.h> #include <linux/reboot.h>
/*
* Read/write a 16 bit word from flash address 'addr'.
*
* When the cpu is in little-endian mode it swizzles the address lines
* ('address coherency') so we need to undo the swizzling to ensure commands
* and the like end up on the correct flash address.
*
* To further complicate matters, due to the way the expansion bus controller
* handles 32 bit reads, the byte stream ABCD is stored on the flash as:
* D15 D0
* +---+---+
* | A | B | 0
* +---+---+
* | C | D | 2
* +---+---+
* This means that on LE systems each 16 bit word must be swapped. Note that
* this requires CONFIG_MTD_CFI_BE_BYTE_SWAP to be enabled to 'unswap' the CFI
* data and other flash commands which are always in D7-D0.
*/
#ifndef __ARMEB__ #ifndef __ARMEB__
#ifndef CONFIG_MTD_CFI_BE_BYTE_SWAP
# error CONFIG_MTD_CFI_BE_BYTE_SWAP required
#endif
static inline u16 flash_read16(void __iomem *addr)
{
return be16_to_cpu(__raw_readw((void __iomem *)((unsigned long)addr ^ 0x2)));
}
static inline void flash_write16(u16 d, void __iomem *addr)
{
__raw_writew(cpu_to_be16(d), (void __iomem *)((unsigned long)addr ^ 0x2));
}
#define BYTE0(h) ((h) & 0xFF) #define BYTE0(h) ((h) & 0xFF)
#define BYTE1(h) (((h) >> 8) & 0xFF) #define BYTE1(h) (((h) >> 8) & 0xFF)
#else #else
static inline u16 flash_read16(const void __iomem *addr)
{
return __raw_readw(addr);
}
static inline void flash_write16(u16 d, void __iomem *addr)
{
__raw_writew(d, addr);
}
#define BYTE0(h) (((h) >> 8) & 0xFF) #define BYTE0(h) (((h) >> 8) & 0xFF)
#define BYTE1(h) ((h) & 0xFF) #define BYTE1(h) ((h) & 0xFF)
#endif #endif
...@@ -45,7 +90,7 @@ ...@@ -45,7 +90,7 @@
static map_word ixp4xx_read16(struct map_info *map, unsigned long ofs) static map_word ixp4xx_read16(struct map_info *map, unsigned long ofs)
{ {
map_word val; map_word val;
val.x[0] = le16_to_cpu(readw(map->virt + ofs)); val.x[0] = flash_read16(map->virt + ofs);
return val; return val;
} }
...@@ -57,19 +102,28 @@ static map_word ixp4xx_read16(struct map_info *map, unsigned long ofs) ...@@ -57,19 +102,28 @@ static map_word ixp4xx_read16(struct map_info *map, unsigned long ofs)
static void ixp4xx_copy_from(struct map_info *map, void *to, static void ixp4xx_copy_from(struct map_info *map, void *to,
unsigned long from, ssize_t len) unsigned long from, ssize_t len)
{ {
int i;
u8 *dest = (u8 *) to; u8 *dest = (u8 *) to;
void __iomem *src = map->virt + from; void __iomem *src = map->virt + from;
u16 data;
for (i = 0; i < (len / 2); i++) { if (len <= 0)
data = le16_to_cpu(readw(src + 2*i)); return;
dest[i * 2] = BYTE0(data);
dest[i * 2 + 1] = BYTE1(data); if (from & 1) {
*dest++ = BYTE1(flash_read16(src));
src++;
--len;
} }
if (len & 1) while (len >= 2) {
dest[len - 1] = BYTE0(le16_to_cpu(readw(src + 2*i))); u16 data = flash_read16(src);
*dest++ = BYTE0(data);
*dest++ = BYTE1(data);
src += 2;
len -= 2;
}
if (len > 0)
*dest++ = BYTE0(flash_read16(src));
} }
/* /*
...@@ -79,7 +133,7 @@ static void ixp4xx_copy_from(struct map_info *map, void *to, ...@@ -79,7 +133,7 @@ static void ixp4xx_copy_from(struct map_info *map, void *to,
static void ixp4xx_probe_write16(struct map_info *map, map_word d, unsigned long adr) static void ixp4xx_probe_write16(struct map_info *map, map_word d, unsigned long adr)
{ {
if (!(adr & 1)) if (!(adr & 1))
writew(cpu_to_le16(d.x[0]), map->virt + adr); flash_write16(d.x[0], map->virt + adr);
} }
/* /*
...@@ -87,7 +141,7 @@ static void ixp4xx_probe_write16(struct map_info *map, map_word d, unsigned long ...@@ -87,7 +141,7 @@ static void ixp4xx_probe_write16(struct map_info *map, map_word d, unsigned long
*/ */
static void ixp4xx_write16(struct map_info *map, map_word d, unsigned long adr) static void ixp4xx_write16(struct map_info *map, map_word d, unsigned long adr)
{ {
writew(cpu_to_le16(d.x[0]), map->virt + adr); flash_write16(d.x[0], map->virt + adr);
} }
struct ixp4xx_flash_info { struct ixp4xx_flash_info {
......
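(Illustration, not part of the patch.) The reworked ixp4xx_copy_from() above has to handle odd source offsets and odd lengths while reading the flash strictly as 16-bit words and splitting them with the BYTE0/BYTE1 macros. A minimal host-side sketch of that loop, with a made-up flash_read16() stub (indexed by offset into a byte array rather than a real __iomem pointer) and the little-endian variants of the macros:

/* Host-side sketch: exercises the unaligned copy loop from ixp4xx_copy_from().
 * flash_read16() here is a stand-in for the real MMIO accessor. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static const uint8_t flash[] = "ABCDEFGHIJ";	/* pretend flash contents */

/* Stand-in: return the 16-bit word holding byte 'off' (low byte = even offset),
 * roughly what the LE-mode accessor yields after its address fixup. */
static uint16_t flash_read16(unsigned long off)
{
	off &= ~1UL;
	return (uint16_t)(flash[off] | (flash[off + 1] << 8));
}

#define BYTE0(h) ((h) & 0xFF)		/* little-endian variants of the macros */
#define BYTE1(h) (((h) >> 8) & 0xFF)

static void copy_from(uint8_t *dest, unsigned long from, long len)
{
	if (len <= 0)
		return;
	if (from & 1) {			/* leading odd byte */
		*dest++ = BYTE1(flash_read16(from));
		from++;
		len--;
	}
	while (len >= 2) {		/* aligned 16-bit words */
		uint16_t data = flash_read16(from);
		*dest++ = BYTE0(data);
		*dest++ = BYTE1(data);
		from += 2;
		len -= 2;
	}
	if (len > 0)			/* trailing odd byte */
		*dest = BYTE0(flash_read16(from));
}

int main(void)
{
	uint8_t out[8];
	copy_from(out, 1, 7);			/* odd offset, odd length */
	assert(memcmp(out, flash + 1, 7) == 0);
	printf("copied: %.7s\n", out);		/* BCDEFGH */
	return 0;
}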
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
* (C) Copyright 2000-2001, Greg Ungerer (gerg@snapgear.com) * (C) Copyright 2000-2001, Greg Ungerer (gerg@snapgear.com)
* (C) Copyright 2001-2002, SnapGear (www.snapgear.com) * (C) Copyright 2001-2002, SnapGear (www.snapgear.com)
* *
* $Id: nettel.c,v 1.11 2005/11/07 11:14:27 gleixner Exp $ * $Id: nettel.c,v 1.12 2005/11/29 14:30:00 gleixner Exp $
*/ */
/****************************************************************************/ /****************************************************************************/
...@@ -479,7 +479,7 @@ void __exit nettel_cleanup(void) ...@@ -479,7 +479,7 @@ void __exit nettel_cleanup(void)
} }
if (nettel_intel_map.virt) { if (nettel_intel_map.virt) {
iounmap(nettel_intel_map.virt); iounmap(nettel_intel_map.virt);
nettel_intel_map.virt = 0; nettel_intel_map.virt = NULL;
} }
#endif #endif
} }
......
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
* it under the terms of the GNU General Public License version 2 as * it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation. * published by the Free Software Foundation.
* *
* $Id: pci.c,v 1.13 2005/11/07 11:14:27 gleixner Exp $ * $Id: pci.c,v 1.14 2005/11/17 08:20:27 dwmw2 Exp $
* *
* Generic PCI memory map driver. We support the following boards: * Generic PCI memory map driver. We support the following boards:
* - Intel IQ80310 ATU. * - Intel IQ80310 ATU.
...@@ -102,7 +102,7 @@ static void mtd_pci_copyto(struct map_info *_map, unsigned long to, const void * ...@@ -102,7 +102,7 @@ static void mtd_pci_copyto(struct map_info *_map, unsigned long to, const void *
memcpy_toio(map->base + map->translate(map, to), from, len); memcpy_toio(map->base + map->translate(map, to), from, len);
} }
static struct map_info mtd_pci_map = { static const struct map_info mtd_pci_map = {
.phys = NO_XIP, .phys = NO_XIP,
.copy_from = mtd_pci_copyfrom, .copy_from = mtd_pci_copyfrom,
.copy_to = mtd_pci_copyto, .copy_to = mtd_pci_copyto,
......
/* /*
* $Id: physmap.c,v 1.38 2005/11/07 11:14:28 gleixner Exp $ * $Id: physmap.c,v 1.39 2005/11/29 14:49:36 gleixner Exp $
* *
* Normal mappings of chips in physical memory * Normal mappings of chips in physical memory
* *
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
#include <linux/mtd/map.h> #include <linux/mtd/map.h>
#include <linux/config.h> #include <linux/config.h>
#include <linux/mtd/partitions.h> #include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
static struct mtd_info *mymtd; static struct mtd_info *mymtd;
......
...@@ -16,7 +16,7 @@ ...@@ -16,7 +16,7 @@
* along with this program; if not, write to the Free Software * along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
* *
* $Id: sc520cdp.c,v 1.22 2005/11/07 11:14:28 gleixner Exp $ * $Id: sc520cdp.c,v 1.23 2005/11/17 08:20:27 dwmw2 Exp $
* *
* *
* The SC520CDP is an evaluation board for the Elan SC520 processor available * The SC520CDP is an evaluation board for the Elan SC520 processor available
...@@ -164,7 +164,7 @@ struct sc520_par_table ...@@ -164,7 +164,7 @@ struct sc520_par_table
unsigned long default_address; unsigned long default_address;
}; };
static struct sc520_par_table par_table[NUM_FLASH_BANKS] = static const struct sc520_par_table par_table[NUM_FLASH_BANKS] =
{ {
{ /* Flash Bank #0: selected by ROMCS0 */ { /* Flash Bank #0: selected by ROMCS0 */
SC520_PAR_ROMCS0, SC520_PAR_ROMCS0,
......
...@@ -1486,7 +1486,7 @@ ns_nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len) ...@@ -1486,7 +1486,7 @@ ns_nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
/* /*
* Module initialization function * Module initialization function
*/ */
int __init ns_init_module(void) static int __init ns_init_module(void)
{ {
struct nand_chip *chip; struct nand_chip *chip;
struct nandsim *nand; struct nandsim *nand;
......
...@@ -30,11 +30,9 @@ MODULE_PARM_DESC(block_size, "Block size to use by RFD, defaults to erase unit s ...@@ -30,11 +30,9 @@ MODULE_PARM_DESC(block_size, "Block size to use by RFD, defaults to erase unit s
#define PREFIX "rfd_ftl: " #define PREFIX "rfd_ftl: "
/* Major device # for FTL device */ /* This major has been assigned by device@lanana.org */
/* A request for this major has been sent to device@lanana.org */
#ifndef RFD_FTL_MAJOR #ifndef RFD_FTL_MAJOR
#define RFD_FTL_MAJOR 95 #define RFD_FTL_MAJOR 256
#endif #endif
/* Maximum number of partitions in an FTL region */ /* Maximum number of partitions in an FTL region */
......
...@@ -380,23 +380,23 @@ megaraid_queue(Scsi_Cmnd *scmd, void (*done)(Scsi_Cmnd *)) ...@@ -380,23 +380,23 @@ megaraid_queue(Scsi_Cmnd *scmd, void (*done)(Scsi_Cmnd *))
spin_lock_irqsave(&adapter->lock, flags); spin_lock_irqsave(&adapter->lock, flags);
scb = mega_build_cmd(adapter, scmd, &busy); scb = mega_build_cmd(adapter, scmd, &busy);
if (!scb)
goto out;
if(scb) { scb->state |= SCB_PENDQ;
scb->state |= SCB_PENDQ; list_add_tail(&scb->list, &adapter->pending_list);
list_add_tail(&scb->list, &adapter->pending_list);
/* /*
* Check if the HBA is in quiescent state, e.g., during a * Check if the HBA is in quiescent state, e.g., during a
* delete logical drive opertion. If it is, don't run * delete logical drive opertion. If it is, don't run
* the pending_list. * the pending_list.
*/ */
if(atomic_read(&adapter->quiescent) == 0) { if (atomic_read(&adapter->quiescent) == 0)
mega_runpendq(adapter); mega_runpendq(adapter);
}
return 0;
}
spin_unlock_irqrestore(&adapter->lock, flags);
busy = 0;
out:
spin_unlock_irqrestore(&adapter->lock, flags);
return busy; return busy;
} }
...@@ -4677,7 +4677,6 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ...@@ -4677,7 +4677,6 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
adapter->flag = flag; adapter->flag = flag;
spin_lock_init(&adapter->lock); spin_lock_init(&adapter->lock);
scsi_assign_lock(host, &adapter->lock);
host->cmd_per_lun = max_cmd_per_lun; host->cmd_per_lun = max_cmd_per_lun;
host->max_sectors = max_sectors_per_io; host->max_sectors = max_sectors_per_io;
......
...@@ -306,9 +306,6 @@ void install_arg_page(struct vm_area_struct *vma, ...@@ -306,9 +306,6 @@ void install_arg_page(struct vm_area_struct *vma,
struct page *page, unsigned long address) struct page *page, unsigned long address)
{ {
struct mm_struct *mm = vma->vm_mm; struct mm_struct *mm = vma->vm_mm;
pgd_t * pgd;
pud_t * pud;
pmd_t * pmd;
pte_t * pte; pte_t * pte;
spinlock_t *ptl; spinlock_t *ptl;
...@@ -316,14 +313,7 @@ void install_arg_page(struct vm_area_struct *vma, ...@@ -316,14 +313,7 @@ void install_arg_page(struct vm_area_struct *vma,
goto out; goto out;
flush_dcache_page(page); flush_dcache_page(page);
pgd = pgd_offset(mm, address); pte = get_locked_pte(mm, address, &ptl);
pud = pud_alloc(mm, pgd, address);
if (!pud)
goto out;
pmd = pmd_alloc(mm, pud, address);
if (!pmd)
goto out;
pte = pte_alloc_map_lock(mm, pmd, address, &ptl);
if (!pte) if (!pte)
goto out; goto out;
if (!pte_none(*pte)) { if (!pte_none(*pte)) {
......
...@@ -234,6 +234,7 @@ void jffs2_read_inode (struct inode *inode) ...@@ -234,6 +234,7 @@ void jffs2_read_inode (struct inode *inode)
c = JFFS2_SB_INFO(inode->i_sb); c = JFFS2_SB_INFO(inode->i_sb);
jffs2_init_inode_info(f); jffs2_init_inode_info(f);
down(&f->sem);
ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node); ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node);
...@@ -400,6 +401,7 @@ struct inode *jffs2_new_inode (struct inode *dir_i, int mode, struct jffs2_raw_i ...@@ -400,6 +401,7 @@ struct inode *jffs2_new_inode (struct inode *dir_i, int mode, struct jffs2_raw_i
f = JFFS2_INODE_INFO(inode); f = JFFS2_INODE_INFO(inode);
jffs2_init_inode_info(f); jffs2_init_inode_info(f);
down(&f->sem);
memset(ri, 0, sizeof(*ri)); memset(ri, 0, sizeof(*ri));
/* Set OS-specific defaults for new inodes */ /* Set OS-specific defaults for new inodes */
......
...@@ -51,7 +51,7 @@ static void jffs2_i_init_once(void * foo, kmem_cache_t * cachep, unsigned long f ...@@ -51,7 +51,7 @@ static void jffs2_i_init_once(void * foo, kmem_cache_t * cachep, unsigned long f
if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
SLAB_CTOR_CONSTRUCTOR) { SLAB_CTOR_CONSTRUCTOR) {
init_MUTEX_LOCKED(&ei->sem); init_MUTEX(&ei->sem);
inode_init_once(&ei->vfs_inode); inode_init_once(&ei->vfs_inode);
} }
} }
......
...@@ -110,8 +110,9 @@ extern int ia64_pfn_valid (unsigned long pfn); ...@@ -110,8 +110,9 @@ extern int ia64_pfn_valid (unsigned long pfn);
# define pfn_to_page(pfn) (mem_map + (pfn)) # define pfn_to_page(pfn) (mem_map + (pfn))
#elif defined(CONFIG_DISCONTIGMEM) #elif defined(CONFIG_DISCONTIGMEM)
extern struct page *vmem_map; extern struct page *vmem_map;
extern unsigned long min_low_pfn;
extern unsigned long max_low_pfn; extern unsigned long max_low_pfn;
# define pfn_valid(pfn) (((pfn) < max_low_pfn) && ia64_pfn_valid(pfn)) # define pfn_valid(pfn) (((pfn) >= min_low_pfn) && ((pfn) < max_low_pfn) && ia64_pfn_valid(pfn))
# define page_to_pfn(page) ((unsigned long) (page - vmem_map)) # define page_to_pfn(page) ((unsigned long) (page - vmem_map))
# define pfn_to_page(pfn) (vmem_map + (pfn)) # define pfn_to_page(pfn) (vmem_map + (pfn))
#endif #endif
......
...@@ -163,6 +163,7 @@ extern unsigned int kobjsize(const void *objp); ...@@ -163,6 +163,7 @@ extern unsigned int kobjsize(const void *objp);
#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */ #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
#define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */ #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
#define VM_MAPPED_COPY 0x01000000 /* T if mapped copy of data (nommu mmap) */ #define VM_MAPPED_COPY 0x01000000 /* T if mapped copy of data (nommu mmap) */
#define VM_INCOMPLETE 0x02000000 /* Strange partial PFN mapping marker */
#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */ #ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
...@@ -741,6 +742,8 @@ struct shrinker; ...@@ -741,6 +742,8 @@ struct shrinker;
extern struct shrinker *set_shrinker(int, shrinker_t); extern struct shrinker *set_shrinker(int, shrinker_t);
extern void remove_shrinker(struct shrinker *shrinker); extern void remove_shrinker(struct shrinker *shrinker);
extern pte_t *FASTCALL(get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl));
int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address); int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address); int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address); int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
......
/* Common Flash Interface structures /* Common Flash Interface structures
* See http://support.intel.com/design/flash/technote/index.htm * See http://support.intel.com/design/flash/technote/index.htm
* $Id: cfi.h,v 1.56 2005/11/07 11:14:54 gleixner Exp $ * $Id: cfi.h,v 1.57 2005/11/15 23:28:17 tpoynor Exp $
*/ */
#ifndef __MTD_CFI_H__ #ifndef __MTD_CFI_H__
...@@ -426,6 +426,22 @@ static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr) ...@@ -426,6 +426,22 @@ static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr)
} }
} }
static inline uint16_t cfi_read_query16(struct map_info *map, uint32_t addr)
{
map_word val = map_read(map, addr);
if (map_bankwidth_is_1(map)) {
return val.x[0] & 0xff;
} else if (map_bankwidth_is_2(map)) {
return cfi16_to_cpu(val.x[0]);
} else {
/* No point in a 64-bit byteswap since that would just be
swapping the responses from different chips, and we are
only interested in one chip (a representative sample) */
return cfi32_to_cpu(val.x[0]);
}
}
static inline void cfi_udelay(int us) static inline void cfi_udelay(int us)
{ {
if (us >= 1000) { if (us >= 1000) {
......
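(Illustration, not part of the patch.) The new cfi_read_query16() exists because manufacturer and device IDs occupy a full 16-bit query location on x16 parts, so the 8-bit cfi_read_query() previously used in cfi_probe.c truncates them. A host-side sketch with a stubbed map_read() and made-up ID values shows the difference; the real helper additionally applies cfi16_to_cpu()/cfi32_to_cpu() according to the bank width:

/* Sketch only: models a bankwidth-2 map and compares the 8-bit and 16-bit
 * query decodes. The ID values are illustrative, not from a real chip. */
#include <stdint.h>
#include <stdio.h>

typedef struct { unsigned long x[1]; } map_word;

/* Stand-in for map_read(): returns canned manufacturer/device IDs. */
static map_word map_read_stub(uint32_t addr)
{
	map_word val;
	val.x[0] = (addr == 0) ? 0x0089 : 0x8801;	/* mfr id, device id */
	return val;
}

static uint8_t read_query8(uint32_t addr)	/* like cfi_read_query() */
{
	return map_read_stub(addr).x[0] & 0xff;
}

static uint16_t read_query16(uint32_t addr)	/* like cfi_read_query16(),
						   minus the endian fixup */
{
	return map_read_stub(addr).x[0] & 0xffff;
}

int main(void)
{
	printf("mfr: 8-bit=0x%02x 16-bit=0x%04x\n", read_query8(0), read_query16(0));
	printf("id : 8-bit=0x%02x 16-bit=0x%04x\n", read_query8(2), read_query16(2));
	/* the 8-bit read reports the device id as 0x01; the 16-bit read keeps 0x8801 */
	return 0;
}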
...@@ -55,20 +55,10 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma, ...@@ -55,20 +55,10 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
pgoff_t size; pgoff_t size;
int err = -ENOMEM; int err = -ENOMEM;
pte_t *pte; pte_t *pte;
pmd_t *pmd;
pud_t *pud;
pgd_t *pgd;
pte_t pte_val; pte_t pte_val;
spinlock_t *ptl; spinlock_t *ptl;
pgd = pgd_offset(mm, addr); pte = get_locked_pte(mm, addr, &ptl);
pud = pud_alloc(mm, pgd, addr);
if (!pud)
goto out;
pmd = pmd_alloc(mm, pud, addr);
if (!pmd)
goto out;
pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
if (!pte) if (!pte)
goto out; goto out;
...@@ -110,20 +100,10 @@ int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, ...@@ -110,20 +100,10 @@ int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
{ {
int err = -ENOMEM; int err = -ENOMEM;
pte_t *pte; pte_t *pte;
pmd_t *pmd;
pud_t *pud;
pgd_t *pgd;
pte_t pte_val; pte_t pte_val;
spinlock_t *ptl; spinlock_t *ptl;
pgd = pgd_offset(mm, addr); pte = get_locked_pte(mm, addr, &ptl);
pud = pud_alloc(mm, pgd, addr);
if (!pud)
goto out;
pmd = pmd_alloc(mm, pud, addr);
if (!pmd)
goto out;
pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
if (!pte) if (!pte)
goto out; goto out;
......
...@@ -988,7 +988,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, ...@@ -988,7 +988,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
return i ? : -EFAULT; return i ? : -EFAULT;
} }
if (pages) { if (pages) {
struct page *page = vm_normal_page(vma, start, *pte); struct page *page = vm_normal_page(gate_vma, start, *pte);
pages[i] = page; pages[i] = page;
if (page) if (page)
get_page(page); get_page(page);
...@@ -1146,6 +1146,97 @@ int zeromap_page_range(struct vm_area_struct *vma, ...@@ -1146,6 +1146,97 @@ int zeromap_page_range(struct vm_area_struct *vma,
return err; return err;
} }
pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl)
{
pgd_t * pgd = pgd_offset(mm, addr);
pud_t * pud = pud_alloc(mm, pgd, addr);
if (pud) {
pmd_t * pmd = pmd_alloc(mm, pud, addr);
if (pmd)
return pte_alloc_map_lock(mm, pmd, addr, ptl);
}
return NULL;
}
/*
* This is the old fallback for page remapping.
*
* For historical reasons, it only allows reserved pages. Only
* old drivers should use this, and they needed to mark their
* pages reserved for the old functions anyway.
*/
static int insert_page(struct mm_struct *mm, unsigned long addr, struct page *page, pgprot_t prot)
{
int retval;
pte_t *pte;
spinlock_t *ptl;
retval = -EINVAL;
if (PageAnon(page) || !PageReserved(page))
goto out;
retval = -ENOMEM;
flush_dcache_page(page);
pte = get_locked_pte(mm, addr, &ptl);
if (!pte)
goto out;
retval = -EBUSY;
if (!pte_none(*pte))
goto out_unlock;
/* Ok, finally just insert the thing.. */
get_page(page);
inc_mm_counter(mm, file_rss);
page_add_file_rmap(page);
set_pte_at(mm, addr, pte, mk_pte(page, prot));
retval = 0;
out_unlock:
pte_unmap_unlock(pte, ptl);
out:
return retval;
}
/*
* Somebody does a pfn remapping that doesn't actually work as a vma.
*
* Do it as individual pages instead, and warn about it. It's bad form,
* and very inefficient.
*/
static int incomplete_pfn_remap(struct vm_area_struct *vma,
unsigned long start, unsigned long end,
unsigned long pfn, pgprot_t prot)
{
static int warn = 10;
struct page *page;
int retval;
if (!(vma->vm_flags & VM_INCOMPLETE)) {
if (warn) {
warn--;
printk("%s does an incomplete pfn remapping", current->comm);
dump_stack();
}
}
vma->vm_flags |= VM_INCOMPLETE | VM_IO | VM_RESERVED;
if (start < vma->vm_start || end > vma->vm_end)
return -EINVAL;
if (!pfn_valid(pfn))
return -EINVAL;
retval = 0;
page = pfn_to_page(pfn);
while (start < end) {
retval = insert_page(vma->vm_mm, start, page, prot);
if (retval < 0)
break;
start += PAGE_SIZE;
page++;
}
return retval;
}
/* /*
* maps a range of physical memory into the requested pages. the old * maps a range of physical memory into the requested pages. the old
* mappings are removed. any references to nonexistent pages results * mappings are removed. any references to nonexistent pages results
...@@ -1220,6 +1311,9 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, ...@@ -1220,6 +1311,9 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
struct mm_struct *mm = vma->vm_mm; struct mm_struct *mm = vma->vm_mm;
int err; int err;
if (addr != vma->vm_start || end != vma->vm_end)
return incomplete_pfn_remap(vma, addr, end, pfn, prot);
/* /*
* Physically remapped pages are special. Tell the * Physically remapped pages are special. Tell the
* rest of the world about it: * rest of the world about it:
...@@ -1300,8 +1394,15 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo ...@@ -1300,8 +1394,15 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
*/ */
if (unlikely(!src)) { if (unlikely(!src)) {
void *kaddr = kmap_atomic(dst, KM_USER0); void *kaddr = kmap_atomic(dst, KM_USER0);
unsigned long left = __copy_from_user_inatomic(kaddr, (void __user *)va, PAGE_SIZE); void __user *uaddr = (void __user *)(va & PAGE_MASK);
if (left)
/*
* This really shouldn't fail, because the page is there
* in the page tables. But it might just be unreadable,
* in which case we just give up and fill the result with
* zeroes.
*/
if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
memset(kaddr, 0, PAGE_SIZE); memset(kaddr, 0, PAGE_SIZE);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr, KM_USER0);
return; return;
...@@ -1332,12 +1433,11 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, ...@@ -1332,12 +1433,11 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd, unsigned long address, pte_t *page_table, pmd_t *pmd,
spinlock_t *ptl, pte_t orig_pte) spinlock_t *ptl, pte_t orig_pte)
{ {
struct page *old_page, *src_page, *new_page; struct page *old_page, *new_page;
pte_t entry; pte_t entry;
int ret = VM_FAULT_MINOR; int ret = VM_FAULT_MINOR;
old_page = vm_normal_page(vma, address, orig_pte); old_page = vm_normal_page(vma, address, orig_pte);
src_page = old_page;
if (!old_page) if (!old_page)
goto gotten; goto gotten;
...@@ -1345,7 +1445,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, ...@@ -1345,7 +1445,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
int reuse = can_share_swap_page(old_page); int reuse = can_share_swap_page(old_page);
unlock_page(old_page); unlock_page(old_page);
if (reuse) { if (reuse) {
flush_cache_page(vma, address, pfn); flush_cache_page(vma, address, pte_pfn(orig_pte));
entry = pte_mkyoung(orig_pte); entry = pte_mkyoung(orig_pte);
entry = maybe_mkwrite(pte_mkdirty(entry), vma); entry = maybe_mkwrite(pte_mkdirty(entry), vma);
ptep_set_access_flags(vma, address, page_table, entry, 1); ptep_set_access_flags(vma, address, page_table, entry, 1);
...@@ -1365,7 +1465,7 @@ gotten: ...@@ -1365,7 +1465,7 @@ gotten:
if (unlikely(anon_vma_prepare(vma))) if (unlikely(anon_vma_prepare(vma)))
goto oom; goto oom;
if (src_page == ZERO_PAGE(address)) { if (old_page == ZERO_PAGE(address)) {
new_page = alloc_zeroed_user_highpage(vma, address); new_page = alloc_zeroed_user_highpage(vma, address);
if (!new_page) if (!new_page)
goto oom; goto oom;
...@@ -1373,7 +1473,7 @@ gotten: ...@@ -1373,7 +1473,7 @@ gotten:
new_page = alloc_page_vma(GFP_HIGHUSER, vma, address); new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
if (!new_page) if (!new_page)
goto oom; goto oom;
cow_user_page(new_page, src_page, address); cow_user_page(new_page, old_page, address);
} }
/* /*
...@@ -1389,7 +1489,7 @@ gotten: ...@@ -1389,7 +1489,7 @@ gotten:
} }
} else } else
inc_mm_counter(mm, anon_rss); inc_mm_counter(mm, anon_rss);
flush_cache_page(vma, address, pfn); flush_cache_page(vma, address, pte_pfn(orig_pte));
entry = mk_pte(new_page, vma->vm_page_prot); entry = mk_pte(new_page, vma->vm_page_prot);
entry = maybe_mkwrite(pte_mkdirty(entry), vma); entry = maybe_mkwrite(pte_mkdirty(entry), vma);
ptep_establish(vma, address, page_table, entry); ptep_establish(vma, address, page_table, entry);
...@@ -1909,6 +2009,8 @@ static int do_no_page(struct mm_struct *mm, struct vm_area_struct *vma, ...@@ -1909,6 +2009,8 @@ static int do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
int anon = 0; int anon = 0;
pte_unmap(page_table); pte_unmap(page_table);
BUG_ON(vma->vm_flags & VM_PFNMAP);
if (vma->vm_file) { if (vma->vm_file) {
mapping = vma->vm_file->f_mapping; mapping = vma->vm_file->f_mapping;
sequence = mapping->truncate_count; sequence = mapping->truncate_count;
...@@ -1941,7 +2043,7 @@ retry: ...@@ -1941,7 +2043,7 @@ retry:
page = alloc_page_vma(GFP_HIGHUSER, vma, address); page = alloc_page_vma(GFP_HIGHUSER, vma, address);
if (!page) if (!page)
goto oom; goto oom;
cow_user_page(page, new_page, address); copy_user_highpage(page, new_page, address);
page_cache_release(new_page); page_cache_release(new_page);
new_page = page; new_page = page;
anon = 1; anon = 1;
......
...@@ -641,7 +641,7 @@ static void try_to_unmap_cluster(unsigned long cursor, ...@@ -641,7 +641,7 @@ static void try_to_unmap_cluster(unsigned long cursor,
continue; continue;
/* Nuke the page table entry. */ /* Nuke the page table entry. */
flush_cache_page(vma, address, pfn); flush_cache_page(vma, address, pte_pfn(*pte));
pteval = ptep_clear_flush(vma, address, pte); pteval = ptep_clear_flush(vma, address, pte);
/* If nonlinear, store the file page offset in the pte. */ /* If nonlinear, store the file page offset in the pte. */
......
...@@ -985,6 +985,8 @@ int ipv6_dev_get_saddr(struct net_device *daddr_dev, ...@@ -985,6 +985,8 @@ int ipv6_dev_get_saddr(struct net_device *daddr_dev,
} }
/* Rule 4: Prefer home address -- not implemented yet */ /* Rule 4: Prefer home address -- not implemented yet */
if (hiscore.rule < 4)
hiscore.rule++;
/* Rule 5: Prefer outgoing interface */ /* Rule 5: Prefer outgoing interface */
if (hiscore.rule < 5) { if (hiscore.rule < 5) {
......