Commit 88032b32 authored by Linus Torvalds

Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6

* 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6:
  [S390] Poison init section before freeing it.
  [S390] Use add_active_range() and free_area_init_nodes().
  [S390] Virtual memmap for s390.
  [S390] Update documentation for dynamic subchannel mapping.
  [S390] Use dev->groups for adding/removing the subchannel attribute group.
  [S390] Support for disconnected devices reappearing on another subchannel.
  [S390] subchannel lock conversion.
  [S390] Some preparations for the dynamic subchannel mapping patch.
  [S390] runtime switch for qdio performance statistics
  [S390] New DASD feature for ERP related logging
  [S390] add reset call handler to the ap bus.
  [S390] more workqueue fixes.
  [S390] workqueue fixes.
  [S390] uaccess_pt: add missing down_read() and convert to is_init().
parents 63f3861d 028d9b3c
@@ -18,11 +18,18 @@ devices/
        - 0.0.0002/
        - 0.1.0000/0.1.1234/
        ...
+       - defunct/

 In this example, device 0815 is accessed via subchannel 0 in subchannel set 0,
 device 4711 via subchannel 1 in subchannel set 0, and subchannel 2 is a non-I/O
 subchannel. Device 1234 is accessed via subchannel 0 in subchannel set 1.
+
+The subchannel named 'defunct' does not represent any real subchannel on the
+system; it is a pseudo subchannel where disconnected ccw devices are moved to
+if they are displaced by another ccw device becoming operational on their
+former subchannel. The ccw devices will be moved again to a proper subchannel
+if they become operational again on that subchannel.

 You should address a ccw device via its bus id (e.g. 0.0.4711); the device can
 be found under bus/ccw/devices/.
...
@@ -241,8 +241,14 @@ config WARN_STACK_SIZE
	  This allows you to specify the maximum frame size a function may
	  have without the compiler complaining about it.

+config ARCH_POPULATES_NODE_MAP
+	def_bool y
+
 source "mm/Kconfig"

+config HOLES_IN_ZONE
+	def_bool y
+
 comment "I/O subsystem configuration"

 config MACHCHK_WARNING
@@ -266,14 +272,6 @@ config QDIO
	  If unsure, say Y.

-config QDIO_PERF_STATS
-	bool "Performance statistics in /proc"
-	depends on QDIO
-	help
-	  Say Y here to get performance statistics in /proc/qdio_perf
-
-	  If unsure, say N.
-
 config QDIO_DEBUG
	bool "Extended debugging information"
	depends on QDIO
...
@@ -134,7 +134,6 @@ CONFIG_RESOURCES_64BIT=y
 #
 CONFIG_MACHCHK_WARNING=y
 CONFIG_QDIO=y
-# CONFIG_QDIO_PERF_STATS is not set
 # CONFIG_QDIO_DEBUG is not set

 #
...
@@ -64,9 +64,8 @@ unsigned int console_devno = -1;
 unsigned int console_irq = -1;
 unsigned long machine_flags = 0;

-struct mem_chunk memory_chunk[MEMORY_CHUNKS];
+struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS];
 volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
-unsigned long __initdata zholes_size[MAX_NR_ZONES];
 static unsigned long __initdata memory_end;

 /*
@@ -354,21 +353,6 @@ void machine_power_off(void)
  */
 void (*pm_power_off)(void) = machine_power_off;

-static void __init
-add_memory_hole(unsigned long start, unsigned long end)
-{
-	unsigned long dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
-
-	if (end <= dma_pfn)
-		zholes_size[ZONE_DMA] += end - start + 1;
-	else if (start > dma_pfn)
-		zholes_size[ZONE_NORMAL] += end - start + 1;
-	else {
-		zholes_size[ZONE_DMA] += dma_pfn - start + 1;
-		zholes_size[ZONE_NORMAL] += end - dma_pfn;
-	}
-}
-
 static int __init early_parse_mem(char *p)
 {
	memory_end = memparse(p, &p);
@@ -521,7 +505,6 @@ setup_memory(void)
 {
	unsigned long bootmap_size;
	unsigned long start_pfn, end_pfn, init_pfn;
-	unsigned long last_rw_end;
	int i;

	/*
@@ -577,39 +560,27 @@ setup_memory(void)
	/*
	 * Register RAM areas with the bootmem allocator.
	 */
-	last_rw_end = start_pfn;

	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
-		unsigned long start_chunk, end_chunk;
+		unsigned long start_chunk, end_chunk, pfn;

		if (memory_chunk[i].type != CHUNK_READ_WRITE)
			continue;
-		start_chunk = (memory_chunk[i].addr + PAGE_SIZE - 1);
-		start_chunk >>= PAGE_SHIFT;
-		end_chunk = (memory_chunk[i].addr + memory_chunk[i].size);
-		end_chunk >>= PAGE_SHIFT;
-		if (start_chunk < start_pfn)
-			start_chunk = start_pfn;
-		if (end_chunk > end_pfn)
-			end_chunk = end_pfn;
-		if (start_chunk < end_chunk) {
-			/* Initialize storage key for RAM pages */
-			for (init_pfn = start_chunk ; init_pfn < end_chunk;
-			     init_pfn++)
-				page_set_storage_key(init_pfn << PAGE_SHIFT,
						     PAGE_DEFAULT_KEY);
-			free_bootmem(start_chunk << PAGE_SHIFT,
				     (end_chunk - start_chunk) << PAGE_SHIFT);
-			if (last_rw_end < start_chunk)
-				add_memory_hole(last_rw_end, start_chunk - 1);
-			last_rw_end = end_chunk;
-		}
+		start_chunk = PFN_DOWN(memory_chunk[i].addr);
+		end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size) - 1;
+		end_chunk = min(end_chunk, end_pfn);
+		if (start_chunk >= end_chunk)
+			continue;
+		add_active_range(0, start_chunk, end_chunk);
+		pfn = max(start_chunk, start_pfn);
+		for (; pfn <= end_chunk; pfn++)
+			page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY);
	}

	psw_set_key(PAGE_DEFAULT_KEY);

-	if (last_rw_end < end_pfn - 1)
-		add_memory_hole(last_rw_end, end_pfn - 1);
+	free_bootmem_with_active_regions(0, max_pfn);
+	reserve_bootmem(0, PFN_PHYS(start_pfn));

	/*
	 * Reserve the bootmem bitmap itself as well. We do this in two
...
@@ -8,8 +8,8 @@
  */

 #include <linux/errno.h>
-#include <asm/uaccess.h>
 #include <linux/mm.h>
+#include <asm/uaccess.h>
 #include <asm/futex.h>

 static inline int __handle_fault(struct mm_struct *mm, unsigned long address,
@@ -60,8 +60,9 @@ out:
 out_of_memory:
	up_read(&mm->mmap_sem);
-	if (current->pid == 1) {
+	if (is_init(current)) {
		yield();
+		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", current->comm);
...
@@ -2,6 +2,6 @@
 # Makefile for the linux s390-specific parts of the memory manager.
 #

-obj-y	 := init.o fault.o ioremap.o extmem.o mmap.o
+obj-y	 := init.o fault.o ioremap.o extmem.o mmap.o vmem.o
 obj-$(CONFIG_CMM) += cmm.o
@@ -16,6 +16,7 @@
 #include <linux/bootmem.h>
 #include <linux/ctype.h>
 #include <asm/page.h>
+#include <asm/pgtable.h>
 #include <asm/ebcdic.h>
 #include <asm/errno.h>
 #include <asm/extmem.h>
@@ -237,65 +238,6 @@ query_segment_type (struct dcss_segment *seg)
	return rc;
 }

-/*
- * check if the given segment collides with guest storage.
- * returns 1 if this is the case, 0 if no collision was found
- */
-static int
-segment_overlaps_storage(struct dcss_segment *seg)
-{
-	int i;
-
-	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
-		if (memory_chunk[i].type != CHUNK_READ_WRITE)
-			continue;
-		if ((memory_chunk[i].addr >> 20) > (seg->end >> 20))
-			continue;
-		if (((memory_chunk[i].addr + memory_chunk[i].size - 1) >> 20)
-		    < (seg->start_addr >> 20))
-			continue;
-		return 1;
-	}
-	return 0;
-}
-
-/*
- * check if segment collides with other segments that are currently loaded
- * returns 1 if this is the case, 0 if no collision was found
- */
-static int
-segment_overlaps_others (struct dcss_segment *seg)
-{
-	struct list_head *l;
-	struct dcss_segment *tmp;
-
-	BUG_ON(!mutex_is_locked(&dcss_lock));
-	list_for_each(l, &dcss_list) {
-		tmp = list_entry(l, struct dcss_segment, list);
-		if ((tmp->start_addr >> 20) > (seg->end >> 20))
-			continue;
-		if ((tmp->end >> 20) < (seg->start_addr >> 20))
-			continue;
-		if (seg == tmp)
-			continue;
-		return 1;
-	}
-	return 0;
-}
-
-/*
- * check if segment exceeds the kernel mapping range (detected or set via mem=)
- * returns 1 if this is the case, 0 if segment fits into the range
- */
-static inline int
-segment_exceeds_range (struct dcss_segment *seg)
-{
-	int seg_last_pfn = (seg->end) >> PAGE_SHIFT;
-	if (seg_last_pfn > max_pfn)
-		return 1;
-	return 0;
-}
-
 /*
  * get info about a segment
  * possible return values:
@@ -341,24 +283,26 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
	rc = query_segment_type (seg);
	if (rc < 0)
		goto out_free;
-	if (segment_exceeds_range(seg)) {
-		PRINT_WARN ("segment_load: not loading segment %s - exceeds"
-			    " kernel mapping range\n",name);
-		rc = -ERANGE;
+
+	rc = add_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
+
+	switch (rc) {
+	case 0:
+		break;
+	case -ENOSPC:
+		PRINT_WARN("segment_load: not loading segment %s - overlaps "
			   "storage/segment\n", name);
		goto out_free;
-	}
-	if (segment_overlaps_storage(seg)) {
-		PRINT_WARN ("segment_load: not loading segment %s - overlaps"
-			    " storage\n",name);
-		rc = -ENOSPC;
+	case -ERANGE:
+		PRINT_WARN("segment_load: not loading segment %s - exceeds "
			   "kernel mapping range\n", name);
		goto out_free;
-	}
-	if (segment_overlaps_others(seg)) {
-		PRINT_WARN ("segment_load: not loading segment %s - overlaps"
-			    " other segments\n",name);
-		rc = -EBUSY;
+	default:
+		PRINT_WARN("segment_load: not loading segment %s (rc: %d)\n",
			   name, rc);
		goto out_free;
	}
+
	if (do_nonshared)
		dcss_command = DCSS_LOADNSR;
	else
@@ -372,7 +316,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
		rc = dcss_diag_translate_rc (seg->end);
		dcss_diag(DCSS_PURGESEG, seg->dcss_name,
				&seg->start_addr, &seg->end);
-		goto out_free;
+		goto out_shared;
	}
	seg->do_nonshared = do_nonshared;
	atomic_set(&seg->ref_count, 1);
@@ -391,6 +335,8 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
			(void*)seg->start_addr, (void*)seg->end,
			segtype_string[seg->vm_segtype]);
	goto out;
+ out_shared:
+	remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
 out_free:
	kfree(seg);
 out:
@@ -530,12 +476,12 @@ segment_unload(char *name)
			"please report to linux390@de.ibm.com\n",name);
		goto out_unlock;
	}
-	if (atomic_dec_return(&seg->ref_count) == 0) {
-		list_del(&seg->list);
-		dcss_diag(DCSS_PURGESEG, seg->dcss_name,
-			  &dummy, &dummy);
-		kfree(seg);
-	}
+	if (atomic_dec_return(&seg->ref_count) != 0)
+		goto out_unlock;
+	remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
+	list_del(&seg->list);
+	dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
+	kfree(seg);
 out_unlock:
	mutex_unlock(&dcss_lock);
 }
...
@@ -24,6 +24,7 @@
 #include <linux/pagemap.h>
 #include <linux/bootmem.h>
 #include <linux/pfn.h>
+#include <linux/poison.h>
 #include <asm/processor.h>
 #include <asm/system.h>
@@ -69,6 +70,8 @@ void show_mem(void)
	printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	i = max_mapnr;
	while (i-- > 0) {
+		if (!pfn_valid(i))
+			continue;
		page = pfn_to_page(i);
		total++;
		if (PageReserved(page))
@@ -84,150 +87,52 @@ void show_mem(void)
	printk("%d pages swap cached\n",cached);
 }

-extern unsigned long __initdata zholes_size[];
-/*
- * paging_init() sets up the page tables
- */
-#ifndef CONFIG_64BIT
-void __init paging_init(void)
+static void __init setup_ro_region(void)
 {
-	pgd_t * pg_dir;
-	pte_t * pg_table;
-	pte_t   pte;
-	int     i;
-	unsigned long tmp;
-	unsigned long pfn = 0;
-	unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
-	static const int ssm_mask = 0x04000000L;
-	unsigned long ro_start_pfn, ro_end_pfn;
-	unsigned long zones_size[MAX_NR_ZONES];
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+	pte_t new_pte;
+	unsigned long address, end;

-	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
-	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
+	address = ((unsigned long)&__start_rodata) & PAGE_MASK;
+	end = PFN_ALIGN((unsigned long)&__end_rodata);

-	memset(zones_size, 0, sizeof(zones_size));
-	zones_size[ZONE_DMA] = max_low_pfn;
-	free_area_init_node(0, &contig_page_data, zones_size,
-			    __pa(PAGE_OFFSET) >> PAGE_SHIFT,
-			    zholes_size);
-
-	/* unmap whole virtual address space */
-
-	pg_dir = swapper_pg_dir;
-
-	for (i = 0; i < PTRS_PER_PGD; i++)
-		pmd_clear((pmd_t *) pg_dir++);
-
-	/*
-	 * map whole physical memory to virtual memory (identity mapping)
-	 */
-
-	pg_dir = swapper_pg_dir;
-
-	while (pfn < max_low_pfn) {
-		/*
-		 * pg_table is physical at this point
-		 */
-		pg_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
-		pmd_populate_kernel(&init_mm, (pmd_t *) pg_dir, pg_table);
-		pg_dir++;
-
-		for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) {
-			if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
-				pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
-			else
-				pte = pfn_pte(pfn, PAGE_KERNEL);
-			if (pfn >= max_low_pfn)
-				pte_val(pte) = _PAGE_TYPE_EMPTY;
-			set_pte(pg_table, pte);
-			pfn++;
-		}
+	for (; address < end; address += PAGE_SIZE) {
+		pgd = pgd_offset_k(address);
+		pmd = pmd_offset(pgd, address);
+		pte = pte_offset_kernel(pmd, address);
+		new_pte = mk_pte_phys(address, __pgprot(_PAGE_RO));
+		set_pte(pte, new_pte);
	}
-
-	S390_lowcore.kernel_asce = pgdir_k;
-
-	/* enable virtual mapping in kernel mode */
-	__ctl_load(pgdir_k, 1, 1);
-	__ctl_load(pgdir_k, 7, 7);
-	__ctl_load(pgdir_k, 13, 13);
-	__raw_local_irq_ssm(ssm_mask);
-
-	local_flush_tlb();
 }

-#else /* CONFIG_64BIT */
+extern void vmem_map_init(void);

 /*
  * paging_init() sets up the page tables
  */
 void __init paging_init(void)
 {
-	pgd_t * pg_dir;
-	pmd_t * pm_dir;
-	pte_t * pt_dir;
-	pte_t   pte;
-	int     i,j,k;
-	unsigned long pfn = 0;
-	unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) |
-	  _KERN_REGION_TABLE;
+	pgd_t *pg_dir;
+	int i;
+	unsigned long pgdir_k;
	static const int ssm_mask = 0x04000000L;
-	unsigned long zones_size[MAX_NR_ZONES];
-	unsigned long dma_pfn, high_pfn;
-	unsigned long ro_start_pfn, ro_end_pfn;
+	unsigned long max_zone_pfns[MAX_NR_ZONES];

-	memset(zones_size, 0, sizeof(zones_size));
-	dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
-	high_pfn = max_low_pfn;
-	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
-	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
-
-	if (dma_pfn > high_pfn)
-		zones_size[ZONE_DMA] = high_pfn;
-	else {
-		zones_size[ZONE_DMA] = dma_pfn;
-		zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
-	}
-
-	/* Initialize mem_map[]. */
-	free_area_init_node(0, &contig_page_data, zones_size,
-			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
-
-	/*
-	 * map whole physical memory to virtual memory (identity mapping)
-	 */
-
	pg_dir = swapper_pg_dir;
-	for (i = 0 ; i < PTRS_PER_PGD ; i++,pg_dir++) {

-		if (pfn >= max_low_pfn) {
-			pgd_clear(pg_dir);
-			continue;
-		}
-
-		pm_dir = (pmd_t *) alloc_bootmem_pages(PAGE_SIZE * 4);
-		pgd_populate(&init_mm, pg_dir, pm_dir);
-
-		for (j = 0 ; j < PTRS_PER_PMD ; j++,pm_dir++) {
-			if (pfn >= max_low_pfn) {
-				pmd_clear(pm_dir);
-				continue;
-			}
-
-			pt_dir = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
-			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
-
-			for (k = 0 ; k < PTRS_PER_PTE ; k++,pt_dir++) {
-				if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
-					pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
-				else
-					pte = pfn_pte(pfn, PAGE_KERNEL);
-				if (pfn >= max_low_pfn)
-					pte_val(pte) = _PAGE_TYPE_EMPTY;
-				set_pte(pt_dir, pte);
-				pfn++;
-			}
-		}
-	}
+#ifdef CONFIG_64BIT
+	pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERN_REGION_TABLE;
+	for (i = 0; i < PTRS_PER_PGD; i++)
+		pgd_clear(pg_dir + i);
+#else
+	pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
+	for (i = 0; i < PTRS_PER_PGD; i++)
+		pmd_clear((pmd_t *)(pg_dir + i));
+#endif
+	vmem_map_init();
+	setup_ro_region();

	S390_lowcore.kernel_asce = pgdir_k;
@@ -237,9 +142,11 @@ void __init paging_init(void)
	__ctl_load(pgdir_k, 13, 13);
	__raw_local_irq_ssm(ssm_mask);

-	local_flush_tlb();
+	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
+	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+	free_area_init_nodes(max_zone_pfns);
 }
-#endif /* CONFIG_64BIT */

 void __init mem_init(void)
 {
@@ -269,6 +176,8 @@ void __init mem_init(void)
	printk("Write protected kernel read-only data: %#lx - %#lx\n",
	       (unsigned long)&__start_rodata,
	       PFN_ALIGN((unsigned long)&__end_rodata) - 1);
+	printk("Virtual memmap size: %ldk\n",
+	       (max_pfn * sizeof(struct page)) >> 10);
 }

 void free_initmem(void)
@@ -279,6 +188,7 @@ void free_initmem(void)
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
+		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
...
/*
* arch/s390/mm/vmem.c
*
* Copyright IBM Corp. 2006
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
unsigned long vmalloc_end;
EXPORT_SYMBOL(vmalloc_end);
static struct page *vmem_map;
static DEFINE_MUTEX(vmem_mutex);
struct memory_segment {
struct list_head list;
unsigned long start;
unsigned long size;
};
static LIST_HEAD(mem_segs);
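/*
 * s390 override of the generic memmap_init(): only initialize struct pages
 * that are backed by an actual memory chunk, skipping holes in the memmap.
 */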
void memmap_init(unsigned long size, int nid, unsigned long zone,
unsigned long start_pfn)
{
struct page *start, *end;
struct page *map_start, *map_end;
int i;
start = pfn_to_page(start_pfn);
end = start + size;
for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
unsigned long cstart, cend;
cstart = PFN_DOWN(memory_chunk[i].addr);
cend = cstart + PFN_DOWN(memory_chunk[i].size);
map_start = mem_map + cstart;
map_end = mem_map + cend;
if (map_start < start)
map_start = start;
if (map_end > end)
map_end = end;
map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1))
/ sizeof(struct page);
map_end += ((PFN_ALIGN((unsigned long) map_end)
- (unsigned long) map_end)
/ sizeof(struct page));
if (map_start < map_end)
memmap_init_zone((unsigned long)(map_end - map_start),
nid, zone, page_to_pfn(map_start));
}
}
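/*
 * Allocate pages from the bootmem allocator while the slab allocator is
 * not yet available, and from the buddy allocator afterwards.
 */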
static inline void *vmem_alloc_pages(unsigned int order)
{
if (slab_is_available())
return (void *)__get_free_pages(GFP_KERNEL, order);
return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}
static inline pmd_t *vmem_pmd_alloc(void)
{
pmd_t *pmd;
int i;
pmd = vmem_alloc_pages(PMD_ALLOC_ORDER);
if (!pmd)
return NULL;
for (i = 0; i < PTRS_PER_PMD; i++)
pmd_clear(pmd + i);
return pmd;
}
static inline pte_t *vmem_pte_alloc(void)
{
pte_t *pte;
pte_t empty_pte;
int i;
pte = vmem_alloc_pages(PTE_ALLOC_ORDER);
if (!pte)
return NULL;
pte_val(empty_pte) = _PAGE_TYPE_EMPTY;
for (i = 0; i < PTRS_PER_PTE; i++)
set_pte(pte + i, empty_pte);
return pte;
}
/*
* Add a physical memory range to the 1:1 mapping.
*/
static int vmem_add_range(unsigned long start, unsigned long size)
{
unsigned long address;
pgd_t *pg_dir;
pmd_t *pm_dir;
pte_t *pt_dir;
pte_t pte;
int ret = -ENOMEM;
for (address = start; address < start + size; address += PAGE_SIZE) {
pg_dir = pgd_offset_k(address);
if (pgd_none(*pg_dir)) {
pm_dir = vmem_pmd_alloc();
if (!pm_dir)
goto out;
pgd_populate(&init_mm, pg_dir, pm_dir);
}
pm_dir = pmd_offset(pg_dir, address);
if (pmd_none(*pm_dir)) {
pt_dir = vmem_pte_alloc();
if (!pt_dir)
goto out;
pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
}
pt_dir = pte_offset_kernel(pm_dir, address);
pte = pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL);
set_pte(pt_dir, pte);
}
ret = 0;
out:
flush_tlb_kernel_range(start, start + size);
return ret;
}
/*
* Remove a physical memory range from the 1:1 mapping.
* Currently only invalidates page table entries.
*/
static void vmem_remove_range(unsigned long start, unsigned long size)
{
unsigned long address;
pgd_t *pg_dir;
pmd_t *pm_dir;
pte_t *pt_dir;
pte_t pte;
pte_val(pte) = _PAGE_TYPE_EMPTY;
for (address = start; address < start + size; address += PAGE_SIZE) {
pg_dir = pgd_offset_k(address);
if (pgd_none(*pg_dir))
continue;
pm_dir = pmd_offset(pg_dir, address);
if (pmd_none(*pm_dir))
continue;
pt_dir = pte_offset_kernel(pm_dir, address);
set_pte(pt_dir, pte);
}
flush_tlb_kernel_range(start, start + size);
}
/*
* Add a backed mem_map array to the virtual mem_map array.
*/
static int vmem_add_mem_map(unsigned long start, unsigned long size)
{
unsigned long address, start_addr, end_addr;
struct page *map_start, *map_end;
pgd_t *pg_dir;
pmd_t *pm_dir;
pte_t *pt_dir;
pte_t pte;
int ret = -ENOMEM;
map_start = vmem_map + PFN_DOWN(start);
map_end = vmem_map + PFN_DOWN(start + size);
start_addr = (unsigned long) map_start & PAGE_MASK;
end_addr = PFN_ALIGN((unsigned long) map_end);
for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
pg_dir = pgd_offset_k(address);
if (pgd_none(*pg_dir)) {
pm_dir = vmem_pmd_alloc();
if (!pm_dir)
goto out;
pgd_populate(&init_mm, pg_dir, pm_dir);
}
pm_dir = pmd_offset(pg_dir, address);
if (pmd_none(*pm_dir)) {
pt_dir = vmem_pte_alloc();
if (!pt_dir)
goto out;
pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
}
pt_dir = pte_offset_kernel(pm_dir, address);
if (pte_none(*pt_dir)) {
unsigned long new_page;
new_page = __pa(vmem_alloc_pages(0));
if (!new_page)
goto out;
pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
set_pte(pt_dir, pte);
}
}
ret = 0;
out:
flush_tlb_kernel_range(start_addr, end_addr);
return ret;
}
static int vmem_add_mem(unsigned long start, unsigned long size)
{
int ret;
ret = vmem_add_range(start, size);
if (ret)
return ret;
return vmem_add_mem_map(start, size);
}
/*
* Add memory segment to the segment list if it doesn't overlap with
* an already present segment.
*/
static int insert_memory_segment(struct memory_segment *seg)
{
struct memory_segment *tmp;
if (PFN_DOWN(seg->start + seg->size) > max_pfn ||
seg->start + seg->size < seg->start)
return -ERANGE;
list_for_each_entry(tmp, &mem_segs, list) {
if (seg->start >= tmp->start + tmp->size)
continue;
if (seg->start + seg->size <= tmp->start)
continue;
return -ENOSPC;
}
list_add(&seg->list, &mem_segs);
return 0;
}
/*
* Remove memory segment from the segment list.
*/
static void remove_memory_segment(struct memory_segment *seg)
{
list_del(&seg->list);
}
static void __remove_shared_memory(struct memory_segment *seg)
{
remove_memory_segment(seg);
vmem_remove_range(seg->start, seg->size);
}
int remove_shared_memory(unsigned long start, unsigned long size)
{
struct memory_segment *seg;
int ret;
mutex_lock(&vmem_mutex);
ret = -ENOENT;
list_for_each_entry(seg, &mem_segs, list) {
if (seg->start == start && seg->size == size)
break;
}
if (seg->start != start || seg->size != size)
goto out;
ret = 0;
__remove_shared_memory(seg);
kfree(seg);
out:
mutex_unlock(&vmem_mutex);
return ret;
}
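/*
 * Add a memory segment (e.g. a DCSS) to the 1:1 mapping and initialize the
 * struct pages that back it. Called via add_shared_memory() from extmem.
 */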
int add_shared_memory(unsigned long start, unsigned long size)
{
struct memory_segment *seg;
struct page *page;
unsigned long pfn, num_pfn, end_pfn;
int ret;
mutex_lock(&vmem_mutex);
ret = -ENOMEM;
seg = kzalloc(sizeof(*seg), GFP_KERNEL);
if (!seg)
goto out;
seg->start = start;
seg->size = size;
ret = insert_memory_segment(seg);
if (ret)
goto out_free;
ret = vmem_add_mem(start, size);
if (ret)
goto out_remove;
pfn = PFN_DOWN(start);
num_pfn = PFN_DOWN(size);
end_pfn = pfn + num_pfn;
page = pfn_to_page(pfn);
memset(page, 0, num_pfn * sizeof(struct page));
for (; pfn < end_pfn; pfn++) {
page = pfn_to_page(pfn);
init_page_count(page);
reset_page_mapcount(page);
SetPageReserved(page);
INIT_LIST_HEAD(&page->lru);
}
goto out;
out_remove:
__remove_shared_memory(seg);
out_free:
kfree(seg);
out:
mutex_unlock(&vmem_mutex);
return ret;
}
/*
* map whole physical memory to virtual memory (identity mapping)
*/
void __init vmem_map_init(void)
{
unsigned long map_size;
int i;
map_size = ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * sizeof(struct page);
vmalloc_end = PFN_ALIGN(VMALLOC_END_INIT) - PFN_ALIGN(map_size);
vmem_map = (struct page *) vmalloc_end;
NODE_DATA(0)->node_mem_map = vmem_map;
for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++)
vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size);
}
/*
* Convert memory chunk array to a memory segment list so there is a single
* list that contains both r/w memory and shared memory segments.
*/
static int __init vmem_convert_memory_chunk(void)
{
struct memory_segment *seg;
int i;
mutex_lock(&vmem_mutex);
for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
if (!memory_chunk[i].size)
continue;
seg = kzalloc(sizeof(*seg), GFP_KERNEL);
if (!seg)
panic("Out of memory...\n");
seg->start = memory_chunk[i].addr;
seg->size = memory_chunk[i].size;
insert_memory_segment(seg);
}
mutex_unlock(&vmem_mutex);
return 0;
}
core_initcall(vmem_convert_memory_chunk);
@@ -1050,10 +1050,10 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		}
	} else {		/* error */
		memcpy(&cqr->irb, irb, sizeof (struct irb));
-#ifdef ERP_DEBUG
-		/* dump sense data */
-		dasd_log_sense(cqr, irb);
-#endif
+		if (device->features & DASD_FEATURE_ERPLOG) {
+			/* dump sense data */
+			dasd_log_sense(cqr, irb);
+		}
		switch (era) {
		case dasd_era_fatal:
			cqr->status = DASD_CQR_FAILED;
...
@@ -2641,14 +2641,12 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
	struct dasd_ccw_req *erp = NULL;
	struct dasd_device *device = cqr->device;
	__u32 cpa = cqr->irb.scsw.cpa;
+	struct dasd_ccw_req *temp_erp = NULL;

-#ifdef ERP_DEBUG
-	/* print current erp_chain */
-	DEV_MESSAGE(KERN_ERR, device, "%s",
-		    "ERP chain at BEGINNING of ERP-ACTION");
-	{
-		struct dasd_ccw_req *temp_erp = NULL;
+	if (device->features & DASD_FEATURE_ERPLOG) {
+		/* print current erp_chain */
+		DEV_MESSAGE(KERN_ERR, device, "%s",
+			    "ERP chain at BEGINNING of ERP-ACTION");

		for (temp_erp = cqr;
		     temp_erp != NULL; temp_erp = temp_erp->refers) {
@@ -2658,7 +2656,6 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
			    temp_erp->refers);
		}
	}
-#endif /* ERP_DEBUG */

	/* double-check if current erp/cqr was successfull */
	if ((cqr->irb.scsw.cstat == 0x00) &&
@@ -2695,11 +2692,10 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
		erp = dasd_3990_erp_handle_match_erp(cqr, erp);
	}

-#ifdef ERP_DEBUG
-	/* print current erp_chain */
-	DEV_MESSAGE(KERN_ERR, device, "%s", "ERP chain at END of ERP-ACTION");
-	{
-		struct dasd_ccw_req *temp_erp = NULL;
+	if (device->features & DASD_FEATURE_ERPLOG) {
+		/* print current erp_chain */
+		DEV_MESSAGE(KERN_ERR, device, "%s",
+			    "ERP chain at END of ERP-ACTION");

		for (temp_erp = erp;
		     temp_erp != NULL; temp_erp = temp_erp->refers) {
@@ -2709,7 +2705,6 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
			    temp_erp->refers);
		}
	}
-#endif /* ERP_DEBUG */

	if (erp->status == DASD_CQR_FAILED)
		dasd_log_ccw(erp, 1, cpa);
...
@@ -202,6 +202,8 @@ dasd_feature_list(char *str, char **endp)
			features |= DASD_FEATURE_READONLY;
		else if (len == 4 && !strncmp(str, "diag", 4))
			features |= DASD_FEATURE_USEDIAG;
+		else if (len == 6 && !strncmp(str, "erplog", 6))
+			features |= DASD_FEATURE_ERPLOG;
		else {
			MESSAGE(KERN_WARNING,
				"unsupported feature: %*s, "
@@ -709,6 +711,52 @@ dasd_ro_store(struct device *dev, struct device_attribute *attr,
 }

 static DEVICE_ATTR(readonly, 0644, dasd_ro_show, dasd_ro_store);

+/*
+ * erplog controls the logging of ERP related data
+ * (e.g. failing channel programs).
+ */
+static ssize_t
+dasd_erplog_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct dasd_devmap *devmap;
+	int erplog;
+
+	devmap = dasd_find_busid(dev->bus_id);
+	if (!IS_ERR(devmap))
+		erplog = (devmap->features & DASD_FEATURE_ERPLOG) != 0;
+	else
+		erplog = (DASD_FEATURE_DEFAULT & DASD_FEATURE_ERPLOG) != 0;
+	return snprintf(buf, PAGE_SIZE, erplog ? "1\n" : "0\n");
+}
+
+static ssize_t
+dasd_erplog_store(struct device *dev, struct device_attribute *attr,
+		  const char *buf, size_t count)
+{
+	struct dasd_devmap *devmap;
+	int val;
+	char *endp;
+
+	devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(devmap))
+		return PTR_ERR(devmap);
+
+	val = simple_strtoul(buf, &endp, 0);
+	if (((endp + 1) < (buf + count)) || (val > 1))
+		return -EINVAL;
+
+	spin_lock(&dasd_devmap_lock);
+	if (val)
+		devmap->features |= DASD_FEATURE_ERPLOG;
+	else
+		devmap->features &= ~DASD_FEATURE_ERPLOG;
+	if (devmap->device)
+		devmap->device->features = devmap->features;
+	spin_unlock(&dasd_devmap_lock);
+	return count;
+}
+
+static DEVICE_ATTR(erplog, 0644, dasd_erplog_show, dasd_erplog_store);
+
 /*
  * use_diag controls whether the driver should use diag rather than ssch
@@ -896,6 +944,7 @@ static struct attribute * dasd_attrs[] = {
	&dev_attr_uid.attr,
	&dev_attr_use_diag.attr,
	&dev_attr_eer_enabled.attr,
+	&dev_attr_erplog.attr,
	NULL,
 };
...
@@ -13,10 +13,6 @@
 #ifdef __KERNEL__

-/* erp debugging in dasd.c and dasd_3990_erp.c */
-#define ERP_DEBUG
-
 /* we keep old device allocation scheme; IOW, minors are still in 0..255 */
 #define DASD_PER_MAJOR (1U << (MINORBITS - DASD_PARTN_BITS))
 #define DASD_PARTN_MASK ((1 << DASD_PARTN_BITS) - 1)
...
@@ -16,14 +16,15 @@
 #ifdef CONFIG_MAGIC_SYSRQ
 static int ctrlchar_sysrq_key;
+static struct tty_struct *sysrq_tty;

 static void
-ctrlchar_handle_sysrq(void *tty)
+ctrlchar_handle_sysrq(struct work_struct *work)
 {
-	handle_sysrq(ctrlchar_sysrq_key, (struct tty_struct *) tty);
+	handle_sysrq(ctrlchar_sysrq_key, sysrq_tty);
 }

-static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq, NULL);
+static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq);
 #endif
@@ -53,7 +54,7 @@ ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty)
		/* racy */
		if (len == 3 && buf[1] == '-') {
			ctrlchar_sysrq_key = buf[2];
-			ctrlchar_work.data = tty;
+			sysrq_tty = tty;
			schedule_work(&ctrlchar_work);
			return CTRLCHAR_SYSRQ;
		}
...
@@ -179,6 +179,7 @@ struct tape_char_data {
 /* Block Frontend Data */
 struct tape_blk_data
 {
+	struct tape_device *	device;
	/* Block device request queue. */
	request_queue_t *	request_queue;
	spinlock_t		request_queue_lock;
@@ -240,7 +241,7 @@ struct tape_device {
 #endif

	/* Function to start or stop the next request later. */
-	struct work_struct		tape_dnr;
+	struct delayed_work		tape_dnr;
 };

 /* Externals from tape_core.c */
...
@@ -95,6 +95,12 @@ tape_34xx_medium_sense(struct tape_device *device)
	return rc;
 }

+struct tape_34xx_work {
+	struct tape_device	*device;
+	enum tape_op		op;
+	struct work_struct	work;
+};
+
 /*
  * These functions are currently used only to schedule a medium_sense for
  * later execution. This is because we get an interrupt whenever a medium
@@ -103,13 +109,10 @@ tape_34xx_medium_sense(struct tape_device *device)
  * interrupt handler.
  */
 static void
-tape_34xx_work_handler(void *data)
+tape_34xx_work_handler(struct work_struct *work)
 {
-	struct {
-		struct tape_device	*device;
-		enum tape_op		op;
-		struct work_struct	work;
-	} *p = data;
+	struct tape_34xx_work *p =
+		container_of(work, struct tape_34xx_work, work);

	switch(p->op) {
	case TO_MSEN:
@@ -126,17 +129,13 @@ tape_34xx_work_handler(void *data)
 static int
 tape_34xx_schedule_work(struct tape_device *device, enum tape_op op)
 {
-	struct {
-		struct tape_device	*device;
-		enum tape_op		op;
-		struct work_struct	work;
-	} *p;
+	struct tape_34xx_work *p;

	if ((p = kmalloc(sizeof(*p), GFP_ATOMIC)) == NULL)
		return -ENOMEM;

	memset(p, 0, sizeof(*p));
-	INIT_WORK(&p->work, tape_34xx_work_handler, p);
+	INIT_WORK(&p->work, tape_34xx_work_handler);

	p->device = tape_get_device_reference(device);
	p->op = op;
...
@@ -236,9 +236,10 @@ struct work_handler_data {
 };

 static void
-tape_3590_work_handler(void *data)
+tape_3590_work_handler(struct work_struct *work)
 {
-	struct work_handler_data *p = data;
+	struct work_handler_data *p =
+		container_of(work, struct work_handler_data, work);

	switch (p->op) {
	case TO_MSEN:
@@ -263,7 +264,7 @@ tape_3590_schedule_work(struct tape_device *device, enum tape_op op)
	if ((p = kzalloc(sizeof(*p), GFP_ATOMIC)) == NULL)
		return -ENOMEM;

-	INIT_WORK(&p->work, tape_3590_work_handler, p);
+	INIT_WORK(&p->work, tape_3590_work_handler);

	p->device = tape_get_device_reference(device);
	p->op = op;
...
@@ -15,6 +15,7 @@
 #include <linux/blkdev.h>
 #include <linux/interrupt.h>
 #include <linux/buffer_head.h>
+#include <linux/kernel.h>

 #include <asm/debug.h>
@@ -143,7 +144,8 @@ tapeblock_start_request(struct tape_device *device, struct request *req)
  * queue.
  */
 static void
-tapeblock_requeue(void *data) {
+tapeblock_requeue(struct work_struct *work) {
+	struct tape_blk_data *	blkdat;
	struct tape_device *	device;
	request_queue_t *	queue;
	int			nr_queued;
@@ -151,7 +153,8 @@ tapeblock_requeue(void *data) {
	struct list_head *	l;
	int			rc;

-	device = (struct tape_device *) data;
+	blkdat = container_of(work, struct tape_blk_data, requeue_task);
+	device = blkdat->device;
	if (!device)
		return;
@@ -212,6 +215,7 @@ tapeblock_setup_device(struct tape_device * device)
	int rc;

	blkdat = &device->blk_data;
+	blkdat->device = device;
	spin_lock_init(&blkdat->request_queue_lock);
	atomic_set(&blkdat->requeue_scheduled, 0);
@@ -255,8 +259,8 @@ tapeblock_setup_device(struct tape_device * device)
	add_disk(disk);

-	INIT_WORK(&blkdat->requeue_task, tapeblock_requeue,
-		tape_get_device_reference(device));
+	tape_get_device_reference(device);
+	INIT_WORK(&blkdat->requeue_task, tapeblock_requeue);

	return 0;
@@ -271,7 +275,7 @@ void
 tapeblock_cleanup_device(struct tape_device *device)
 {
	flush_scheduled_work();
-	device->blk_data.requeue_task.data = tape_put_device(device);
+	tape_put_device(device);

	if (!device->blk_data.disk) {
		PRINT_ERR("(%s): No gendisk to clean up!\n",
...
@@ -28,7 +28,7 @@
 #define PRINTK_HEADER "TAPE_CORE: "

 static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
-static void tape_delayed_next_request(void * data);
+static void tape_delayed_next_request(struct work_struct *);

 /*
  * One list to contain all tape devices of all disciplines, so
@@ -272,7 +272,7 @@ __tape_cancel_io(struct tape_device *device, struct tape_request *request)
		return 0;
	case -EBUSY:
		request->status	= TAPE_REQUEST_CANCEL;
-		schedule_work(&device->tape_dnr);
+		schedule_delayed_work(&device->tape_dnr, 0);
		return 0;
	case -ENODEV:
		DBF_EXCEPTION(2, "device gone, retry\n");
@@ -470,7 +470,7 @@ tape_alloc_device(void)
	*device->modeset_byte = 0;
	device->first_minor = -1;
	atomic_set(&device->ref_count, 1);
-	INIT_WORK(&device->tape_dnr, tape_delayed_next_request, device);
+	INIT_DELAYED_WORK(&device->tape_dnr, tape_delayed_next_request);

	return device;
 }
@@ -724,7 +724,7 @@ __tape_start_io(struct tape_device *device, struct tape_request *request)
	} else if (rc == -EBUSY) {
		/* The common I/O subsystem is currently busy. Retry later. */
		request->status = TAPE_REQUEST_QUEUED;
-		schedule_work(&device->tape_dnr);
+		schedule_delayed_work(&device->tape_dnr, 0);
		rc = 0;
	} else {
		/* Start failed. Remove request and indicate failure. */
@@ -790,11 +790,11 @@ __tape_start_next_request(struct tape_device *device)
 }

 static void
-tape_delayed_next_request(void *data)
+tape_delayed_next_request(struct work_struct *work)
 {
-	struct tape_device *	device;
+	struct tape_device *device =
+		container_of(work, struct tape_device, tape_dnr.work);

-	device = (struct tape_device *) data;
	DBF_LH(6, "tape_delayed_next_request(%p)\n", device);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	__tape_start_next_request(device);
...
@@ -183,7 +183,7 @@ css_get_ssd_info(struct subchannel *sch)
	page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!page)
		return -ENOMEM;
-	spin_lock_irq(&sch->lock);
+	spin_lock_irq(sch->lock);
	ret = chsc_get_sch_desc_irq(sch, page);
	if (ret) {
		static int cio_chsc_err_msg;
@@ -197,7 +197,7 @@ css_get_ssd_info(struct subchannel *sch)
			cio_chsc_err_msg = 1;
		}
	}
-	spin_unlock_irq(&sch->lock);
+	spin_unlock_irq(sch->lock);
	free_page((unsigned long)page);
	if (!ret) {
		int j, chpid, mask;
@@ -233,7 +233,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
	if (j >= 8)
		return 0;

-	spin_lock_irq(&sch->lock);
+	spin_lock_irq(sch->lock);

	stsch(sch->schid, &schib);
	if (!schib.pmcw.dnv)
@@ -265,10 +265,10 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
	else if (sch->lpm == mask)
		goto out_unreg;
 out_unlock:
-	spin_unlock_irq(&sch->lock);
+	spin_unlock_irq(sch->lock);
	return 0;
 out_unreg:
-	spin_unlock_irq(&sch->lock);
+	spin_unlock_irq(sch->lock);
	sch->lpm = 0;
	if (css_enqueue_subchannel_slow(sch->schid)) {
		css_clear_subchannel_slow_list();
@@ -378,12 +378,12 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
		/* Check if a subchannel is newly available. */
		return s390_process_res_acc_new_sch(schid);

-	spin_lock_irq(&sch->lock);
+	spin_lock_irq(sch->lock);
	chp_mask = s390_process_res_acc_sch(res_data, sch);

	if (chp_mask == 0) {
-		spin_unlock_irq(&sch->lock);
+		spin_unlock_irq(sch->lock);
		put_device(&sch->dev);
		return 0;
	}
@@ -397,7 +397,7 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
	else if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);

-	spin_unlock_irq(&sch->lock);
+	spin_unlock_irq(sch->lock);
	put_device(&sch->dev);
	return 0;
 }
@@ -635,21 +635,21 @@ __chp_add(struct subchannel_id schid, void *data)
	if (!sch)
		/* Check if the subchannel is now available. */
		return __chp_add_new_sch(schid);
-	spin_lock_irq(&sch->lock);
+	spin_lock_irq(sch->lock);
	for (i=0; i<8; i++) {
		mask = 0x80 >> i;
		if ((sch->schib.pmcw.pim & mask) &&
		    (sch->schib.pmcw.chpid[i] == chp->id)) {
			if (stsch(sch->schid, &sch->schib) != 0) {
				/* Endgame. */
-				spin_unlock_irq(&sch->lock);
+				spin_unlock_irq(sch->lock);
				return -ENXIO;
			}
			break;
		}
	}
	if (i==8) {
-		spin_unlock_irq(&sch->lock);
+		spin_unlock_irq(sch->lock);
		return 0;
	}
	sch->lpm = ((sch->schib.pmcw.pim &
@@ -660,7 +660,7 @@ __chp_add(struct subchannel_id schid, void *data)
	if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);

-	spin_unlock_irq(&sch->lock);
+	spin_unlock_irq(sch->lock);
	put_device(&sch->dev);
	return 0;
 }
@@ -750,7 +750,7 @@ __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
	if (!sch->ssd_info.valid)
		return;

-	spin_lock_irqsave(&sch->lock, flags);
+	spin_lock_irqsave(sch->lock, flags);
	old_lpm = sch->lpm;
	for (chp = 0; chp < 8; chp++) {
		if (sch->ssd_info.chpid[chp] != chpid)
@@ -785,7 +785,7 @@ __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
			sch->driver->verify(&sch->dev);
		break;
	}
-	spin_unlock_irqrestore(&sch->lock, flags);
+	spin_unlock_irqrestore(sch->lock, flags);
 }

 static int
...
@@ -143,11 +143,11 @@ cio_tpi(void)
		return 1;
	local_bh_disable();
	irq_enter ();
-	spin_lock(&sch->lock);
+	spin_lock(sch->lock);
	memcpy (&sch->schib.scsw, &irb->scsw, sizeof (struct scsw));
	if (sch->driver && sch->driver->irq)
		sch->driver->irq(&sch->dev);
-	spin_unlock(&sch->lock);
+	spin_unlock(sch->lock);
	irq_exit ();
	_local_bh_enable();
	return 1;
@@ -415,6 +415,8 @@ cio_enable_subchannel (struct subchannel *sch, unsigned int isc)
	CIO_TRACE_EVENT (2, "ensch");
	CIO_TRACE_EVENT (2, sch->dev.bus_id);

+	if (sch_is_pseudo_sch(sch))
+		return -EINVAL;
	ccode = stsch (sch->schid, &sch->schib);
	if (ccode)
		return -ENODEV;
@@ -462,6 +464,8 @@ cio_disable_subchannel (struct subchannel *sch)
	CIO_TRACE_EVENT (2, "dissch");
	CIO_TRACE_EVENT (2, sch->dev.bus_id);

+	if (sch_is_pseudo_sch(sch))
+		return 0;
	ccode = stsch (sch->schid, &sch->schib);
	if (ccode == 3)		/* Not operational. */
		return -ENODEV;
@@ -496,6 +500,15 @@ cio_disable_subchannel (struct subchannel *sch)
	return ret;
 }

+int cio_create_sch_lock(struct subchannel *sch)
+{
+	sch->lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
+	if (!sch->lock)
+		return -ENOMEM;
+	spin_lock_init(sch->lock);
+	return 0;
+}
+
 /*
  * cio_validate_subchannel()
  *
@@ -513,6 +526,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
 {
	char dbf_txt[15];
	int ccode;
+	int err;

	sprintf (dbf_txt, "valsch%x", schid.sch_no);
	CIO_TRACE_EVENT (4, dbf_txt);
@@ -520,9 +534,15 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
	/* Nuke all fields. */
	memset(sch, 0, sizeof(struct subchannel));

-	spin_lock_init(&sch->lock);
+	sch->schid = schid;
+	if (cio_is_console(schid)) {
+		sch->lock = cio_get_console_lock();
+	} else {
+		err = cio_create_sch_lock(sch);
+		if (err)
+			goto out;
+	}
	mutex_init(&sch->reg_mutex);
	/* Set a name for the subchannel */
	snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", schid.ssid,
		  schid.sch_no);
@@ -534,10 +554,10 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
	 * is not valid.
	 */
	ccode = stsch_err (schid, &sch->schib);
-	if (ccode)
-		return (ccode == 3) ? -ENXIO : ccode;
-
-	sch->schid = schid;
+	if (ccode) {
+		err = (ccode == 3) ? -ENXIO : ccode;
+		goto out;
+	}
	/* Copy subchannel type from path management control word. */
	sch->st = sch->schib.pmcw.st;
@@ -550,14 +570,16 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
			      "non-I/O subchannel type %04X\n",
			      sch->schid.ssid, sch->schid.sch_no, sch->st);
		/* We stop here for non-io subchannels. */
-		return sch->st;
+		err = sch->st;
+		goto out;
	}

	/* Initialization for io subchannels. */
-	if (!sch->schib.pmcw.dnv)
+	if (!sch->schib.pmcw.dnv) {
		/* io subchannel but device number is invalid. */
-		return -ENODEV;
+		err = -ENODEV;
+		goto out;
+	}
	/* Devno is valid. */
	if (is_blacklisted (sch->schid.ssid, sch->schib.pmcw.dev)) {
		/*
@@ -567,7 +589,8 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
		CIO_MSG_EVENT(0, "Blacklisted device detected "
			      "at devno %04X, subchannel set %x\n",
			      sch->schib.pmcw.dev, sch->schid.ssid);
-		return -ENODEV;
+		err = -ENODEV;
+		goto out;
	}
	sch->opm = 0xff;
	if (!cio_is_console(sch->schid))
@@ -595,6 +618,11 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
	if ((sch->lpm & (sch->lpm - 1)) != 0)
		sch->schib.pmcw.mp = 1;	/* multipath mode */

	return 0;
+out:
+	if (!cio_is_console(schid))
+		kfree(sch->lock);
+	sch->lock = NULL;
+	return err;
 }

 /*
@@ -637,7 +665,7 @@ do_IRQ (struct pt_regs *regs)
		}
		sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
		if (sch)
-			spin_lock(&sch->lock);
+			spin_lock(sch->lock);
		/* Store interrupt response block to lowcore. */
		if (tsch (tpi_info->schid, irb) == 0 && sch) {
			/* Keep subchannel information word up to date. */
@@ -648,7 +676,7 @@ do_IRQ (struct pt_regs *regs)
				sch->driver->irq(&sch->dev);
		}
		if (sch)
-			spin_unlock(&sch->lock);
+			spin_unlock(sch->lock);
		/*
		 * Are more interrupts pending?
		 * If so, the tpi instruction will update the lowcore
@@ -687,10 +715,10 @@ wait_cons_dev (void)
	__ctl_load (cr6, 6, 6);

	do {
-		spin_unlock(&console_subchannel.lock);
+		spin_unlock(console_subchannel.lock);
		if (!cio_tpi())
			cpu_relax();
-		spin_lock(&console_subchannel.lock);
+		spin_lock(console_subchannel.lock);
	} while (console_subchannel.schib.scsw.actl != 0);
	/*
	 * restore previous isc value
...
@@ -87,7 +87,7 @@ struct orb {
 /* subchannel data structure used by I/O subroutines */
 struct subchannel {
	struct subchannel_id schid;
-	spinlock_t lock;	/* subchannel lock */
+	spinlock_t *lock;	/* subchannel lock */
	struct mutex reg_mutex;
	enum {
		SUBCHANNEL_TYPE_IO = 0,
...@@ -131,15 +131,19 @@ extern int cio_set_options (struct subchannel *, int); ...@@ -131,15 +131,19 @@ extern int cio_set_options (struct subchannel *, int);
extern int cio_get_options (struct subchannel *); extern int cio_get_options (struct subchannel *);
extern int cio_modify (struct subchannel *); extern int cio_modify (struct subchannel *);
int cio_create_sch_lock(struct subchannel *);
/* Use with care. */ /* Use with care. */
#ifdef CONFIG_CCW_CONSOLE #ifdef CONFIG_CCW_CONSOLE
extern struct subchannel *cio_probe_console(void); extern struct subchannel *cio_probe_console(void);
extern void cio_release_console(void); extern void cio_release_console(void);
extern int cio_is_console(struct subchannel_id); extern int cio_is_console(struct subchannel_id);
extern struct subchannel *cio_get_console_subchannel(void); extern struct subchannel *cio_get_console_subchannel(void);
extern spinlock_t * cio_get_console_lock(void);
#else #else
#define cio_is_console(schid) 0 #define cio_is_console(schid) 0
#define cio_get_console_subchannel() NULL #define cio_get_console_subchannel() NULL
#define cio_get_console_lock() NULL;
#endif #endif
extern int cio_show_msg; extern int cio_show_msg;
......
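The new cio_create_sch_lock() is only declared in the hunks shown; its body is not part of this excerpt. A minimal sketch consistent with the conversion of sch->lock to a spinlock_t pointer (an assumption, not the commit's verbatim code):

    int cio_create_sch_lock(struct subchannel *sch)
    {
        /* Allocate the lock separately now that it is no longer embedded. */
        sch->lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
        if (!sch->lock)
            return -ENOMEM;
        spin_lock_init(sch->lock);
        return 0;
    }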
@@ -91,9 +91,9 @@ css_free_subchannel(struct subchannel *sch)
 		/* Reset intparm to zeroes. */
 		sch->schib.pmcw.intparm = 0;
 		cio_modify(sch);
+		kfree(sch->lock);
 		kfree(sch);
 	}
 }
 static void
@@ -102,8 +102,10 @@ css_subchannel_release(struct device *dev)
 	struct subchannel *sch;
 	sch = to_subchannel(dev);
-	if (!cio_is_console(sch->schid))
+	if (!cio_is_console(sch->schid)) {
+		kfree(sch->lock);
 		kfree(sch);
+	}
 }
 extern int css_get_ssd_info(struct subchannel *sch);
@@ -135,13 +137,15 @@ css_register_subchannel(struct subchannel *sch)
 	sch->dev.parent = &css[0]->device;
 	sch->dev.bus = &css_bus_type;
 	sch->dev.release = &css_subchannel_release;
+	sch->dev.groups = subch_attr_groups;
 	/* make it known to the system */
 	ret = css_sch_device_register(sch);
-	if (ret)
+	if (ret) {
 		printk (KERN_WARNING "%s: could not register %s\n",
 			__func__, sch->dev.bus_id);
-	else
+		return ret;
+	}
 	css_get_ssd_info(sch);
 	return ret;
 }
@@ -201,18 +205,18 @@ static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
 	unsigned long flags;
 	enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action;
-	spin_lock_irqsave(&sch->lock, flags);
+	spin_lock_irqsave(sch->lock, flags);
 	disc = device_is_disconnected(sch);
 	if (disc && slow) {
 		/* Disconnected devices are evaluated directly only.*/
-		spin_unlock_irqrestore(&sch->lock, flags);
+		spin_unlock_irqrestore(sch->lock, flags);
 		return 0;
 	}
 	/* No interrupt after machine check - kill pending timers. */
 	device_kill_pending_timer(sch);
 	if (!disc && !slow) {
 		/* Non-disconnected devices are evaluated on the slow path. */
-		spin_unlock_irqrestore(&sch->lock, flags);
+		spin_unlock_irqrestore(sch->lock, flags);
 		return -EAGAIN;
 	}
 	event = css_get_subchannel_status(sch);
@@ -237,9 +241,9 @@ static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
 	/* Ask driver what to do with device. */
 	action = UNREGISTER;
 	if (sch->driver && sch->driver->notify) {
-		spin_unlock_irqrestore(&sch->lock, flags);
+		spin_unlock_irqrestore(sch->lock, flags);
 		ret = sch->driver->notify(&sch->dev, event);
-		spin_lock_irqsave(&sch->lock, flags);
+		spin_lock_irqsave(sch->lock, flags);
 		if (ret)
 			action = NONE;
 	}
@@ -264,9 +268,9 @@ static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
 	case UNREGISTER:
 	case UNREGISTER_PROBE:
 		/* Unregister device (will use subchannel lock). */
-		spin_unlock_irqrestore(&sch->lock, flags);
+		spin_unlock_irqrestore(sch->lock, flags);
 		css_sch_device_unregister(sch);
-		spin_lock_irqsave(&sch->lock, flags);
+		spin_lock_irqsave(sch->lock, flags);
 		/* Reset intparm to zeroes. */
 		sch->schib.pmcw.intparm = 0;
@@ -278,7 +282,7 @@ static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
 	default:
 		break;
 	}
-	spin_unlock_irqrestore(&sch->lock, flags);
+	spin_unlock_irqrestore(sch->lock, flags);
 	/* Probe if necessary. */
 	if (action == UNREGISTER_PROBE)
 		ret = css_probe_device(sch->schid);
@@ -573,12 +577,24 @@ css_cm_enable_store(struct device *dev, struct device_attribute *attr,
 static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);
-static inline void __init
-setup_css(int nr)
+static inline int __init setup_css(int nr)
 {
 	u32 tod_high;
+	int ret;
 	memset(css[nr], 0, sizeof(struct channel_subsystem));
+	css[nr]->pseudo_subchannel =
+		kzalloc(sizeof(*css[nr]->pseudo_subchannel), GFP_KERNEL);
+	if (!css[nr]->pseudo_subchannel)
+		return -ENOMEM;
+	css[nr]->pseudo_subchannel->dev.parent = &css[nr]->device;
+	css[nr]->pseudo_subchannel->dev.release = css_subchannel_release;
+	sprintf(css[nr]->pseudo_subchannel->dev.bus_id, "defunct");
+	ret = cio_create_sch_lock(css[nr]->pseudo_subchannel);
+	if (ret) {
+		kfree(css[nr]->pseudo_subchannel);
+		return ret;
+	}
 	mutex_init(&css[nr]->mutex);
 	css[nr]->valid = 1;
 	css[nr]->cssid = nr;
@@ -586,6 +602,7 @@ setup_css(int nr)
 	css[nr]->device.release = channel_subsystem_release;
 	tod_high = (u32) (get_clock() >> 32);
 	css_generate_pgid(css[nr], tod_high);
+	return 0;
 }
 /*
@@ -622,10 +639,12 @@ init_channel_subsystem (void)
 			ret = -ENOMEM;
 			goto out_unregister;
 		}
-		setup_css(i);
-		ret = device_register(&css[i]->device);
+		ret = setup_css(i);
 		if (ret)
 			goto out_free;
+		ret = device_register(&css[i]->device);
+		if (ret)
+			goto out_free_all;
 		if (css_characteristics_avail &&
 		    css_chsc_characteristics.secm) {
 			ret = device_create_file(&css[i]->device,
@@ -633,6 +652,9 @@ init_channel_subsystem (void)
 			if (ret)
 				goto out_device;
 		}
+		ret = device_register(&css[i]->pseudo_subchannel->dev);
+		if (ret)
+			goto out_file;
 	}
 	css_init_done = 1;
@@ -640,13 +662,19 @@ init_channel_subsystem (void)
 	for_each_subchannel(__init_channel_subsystem, NULL);
 	return 0;
+out_file:
+	device_remove_file(&css[i]->device, &dev_attr_cm_enable);
 out_device:
 	device_unregister(&css[i]->device);
+out_free_all:
+	kfree(css[i]->pseudo_subchannel->lock);
+	kfree(css[i]->pseudo_subchannel);
 out_free:
 	kfree(css[i]);
 out_unregister:
 	while (i > 0) {
 		i--;
+		device_unregister(&css[i]->pseudo_subchannel->dev);
 		if (css_characteristics_avail && css_chsc_characteristics.secm)
 			device_remove_file(&css[i]->device,
 					   &dev_attr_cm_enable);
@@ -658,6 +686,11 @@ out:
 	return ret;
 }
+int sch_is_pseudo_sch(struct subchannel *sch)
+{
+	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
+}
 /*
  * find a driver for a subchannel. They identify by the subchannel
  * type with the exception that the console subchannel driver has its own
...
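Code that walks subchannels can use the new helper to skip the pseudo subchannel, which backs no real hardware. A hypothetical caller, for illustration only:

    static int example_handle_subchannel(struct subchannel *sch)
    {
        /* 'defunct' only hosts orphaned ccw devices; nothing to probe. */
        if (sch_is_pseudo_sch(sch))
            return -ENODEV;
        /* normal subchannel handling would follow here */
        return 0;
    }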
@@ -73,6 +73,8 @@ struct senseid {
 } __attribute__ ((packed,aligned(4)));
 struct ccw_device_private {
+	struct ccw_device *cdev;
+	struct subchannel *sch;
 	int state;		/* device state */
 	atomic_t onoff;
 	unsigned long registered;
@@ -158,6 +160,8 @@ struct channel_subsystem {
 	int cm_enabled;
 	void *cub_addr1;
 	void *cub_addr2;
+	/* for orphaned ccw devices */
+	struct subchannel *pseudo_subchannel;
 };
 #define to_css(dev) container_of(dev, struct channel_subsystem, device)
@@ -185,6 +189,11 @@ void css_clear_subchannel_slow_list(void);
 int css_slow_subchannels_exist(void);
 extern int need_rescan;
+int sch_is_pseudo_sch(struct subchannel *);
 extern struct workqueue_struct *slow_path_wq;
 extern struct work_struct slow_path_work;
+int subchannel_add_files (struct device *);
+extern struct attribute_group *subch_attr_groups[];
 #endif
...
@@ -78,8 +78,10 @@ void io_subchannel_recog_done(struct ccw_device *cdev);
 int ccw_device_cancel_halt_clear(struct ccw_device *);
-void ccw_device_do_unreg_rereg(void *);
-void ccw_device_call_sch_unregister(void *);
+void ccw_device_do_unreg_rereg(struct work_struct *);
+void ccw_device_call_sch_unregister(struct work_struct *);
+void ccw_device_move_to_orphanage(struct work_struct *);
+int ccw_device_is_orphan(struct ccw_device *);
 int ccw_device_recognition(struct ccw_device *);
 int ccw_device_online(struct ccw_device *);
...
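ccw_device_is_orphan() is declared here but defined in a diff that is not expanded above. Given the pseudo subchannel introduced in this series, a plausible sketch of it (an assumption, not code shown in this excerpt):

    int ccw_device_is_orphan(struct ccw_device *cdev)
    {
        /* A device is orphaned iff its parent is the pseudo subchannel. */
        return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
    }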
@@ -186,15 +186,14 @@ ccw_device_handle_oper(struct ccw_device *cdev)
 	/*
 	 * Check if cu type and device type still match. If
 	 * not, it is certainly another device and we have to
-	 * de- and re-register. Also check here for non-matching devno.
+	 * de- and re-register.
 	 */
 	if (cdev->id.cu_type != cdev->private->senseid.cu_type ||
 	    cdev->id.cu_model != cdev->private->senseid.cu_model ||
 	    cdev->id.dev_type != cdev->private->senseid.dev_type ||
-	    cdev->id.dev_model != cdev->private->senseid.dev_model ||
-	    cdev->private->dev_id.devno != sch->schib.pmcw.dev) {
+	    cdev->id.dev_model != cdev->private->senseid.dev_model) {
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_do_unreg_rereg, cdev);
+			     ccw_device_do_unreg_rereg);
 		queue_work(ccw_device_work, &cdev->private->kick_work);
 		return 0;
 	}
@@ -329,19 +328,21 @@ ccw_device_sense_id_done(struct ccw_device *cdev, int err)
 }
 static void
-ccw_device_oper_notify(void *data)
+ccw_device_oper_notify(struct work_struct *work)
 {
+	struct ccw_device_private *priv;
 	struct ccw_device *cdev;
 	struct subchannel *sch;
 	int ret;
-	cdev = data;
+	priv = container_of(work, struct ccw_device_private, kick_work);
+	cdev = priv->cdev;
 	sch = to_subchannel(cdev->dev.parent);
 	ret = (sch->driver && sch->driver->notify) ?
 		sch->driver->notify(&sch->dev, CIO_OPER) : 0;
 	if (!ret)
 		/* Driver doesn't want device back. */
-		ccw_device_do_unreg_rereg(cdev);
+		ccw_device_do_unreg_rereg(work);
 	else {
 		/* Reenable channel measurements, if needed. */
 		cmf_reenable(cdev);
@@ -377,8 +378,7 @@ ccw_device_done(struct ccw_device *cdev, int state)
 	if (cdev->private->flags.donotify) {
 		cdev->private->flags.donotify = 0;
-		PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify,
-			     cdev);
+		PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify);
 		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
 	}
 	wake_up(&cdev->private->wait_q);
@@ -528,13 +528,15 @@ ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
 static void
-ccw_device_nopath_notify(void *data)
+ccw_device_nopath_notify(struct work_struct *work)
 {
+	struct ccw_device_private *priv;
 	struct ccw_device *cdev;
 	struct subchannel *sch;
 	int ret;
-	cdev = data;
+	priv = container_of(work, struct ccw_device_private, kick_work);
+	cdev = priv->cdev;
 	sch = to_subchannel(cdev->dev.parent);
 	/* Extra sanity. */
 	if (sch->lpm)
@@ -547,8 +549,7 @@ ccw_device_nopath_notify(void *data)
 		cio_disable_subchannel(sch);
 	if (get_device(&cdev->dev)) {
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_call_sch_unregister,
-			     cdev);
+			     ccw_device_call_sch_unregister);
 		queue_work(ccw_device_work,
 			   &cdev->private->kick_work);
 	} else
@@ -607,7 +608,7 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
 		/* Reset oper notify indication after verify error. */
 		cdev->private->flags.donotify = 0;
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_nopath_notify, cdev);
+			     ccw_device_nopath_notify);
 		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
 		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
 		break;
@@ -674,6 +675,10 @@ ccw_device_offline(struct ccw_device *cdev)
 {
 	struct subchannel *sch;
+	if (ccw_device_is_orphan(cdev)) {
+		ccw_device_done(cdev, DEV_STATE_OFFLINE);
+		return 0;
+	}
 	sch = to_subchannel(cdev->dev.parent);
 	if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv)
 		return -ENODEV;
@@ -738,7 +743,7 @@ ccw_device_offline_notoper(struct ccw_device *cdev, enum dev_event dev_event)
 	sch = to_subchannel(cdev->dev.parent);
 	if (get_device(&cdev->dev)) {
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_call_sch_unregister, cdev);
+			     ccw_device_call_sch_unregister);
 		queue_work(ccw_device_work, &cdev->private->kick_work);
 	}
 	wake_up(&cdev->private->wait_q);
@@ -769,7 +774,7 @@ ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
 	}
 	if (get_device(&cdev->dev)) {
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_call_sch_unregister, cdev);
+			     ccw_device_call_sch_unregister);
 		queue_work(ccw_device_work, &cdev->private->kick_work);
 	}
 	wake_up(&cdev->private->wait_q);
@@ -874,7 +879,7 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
 	sch = to_subchannel(cdev->dev.parent);
 	if (!sch->lpm) {
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_nopath_notify, cdev);
+			     ccw_device_nopath_notify);
 		queue_work(ccw_device_notify_work,
 			   &cdev->private->kick_work);
 	} else
@@ -969,7 +974,7 @@ ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
 			      ERR_PTR(-EIO));
 	if (!sch->lpm) {
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_nopath_notify, cdev);
+			     ccw_device_nopath_notify);
 		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
 	} else if (cdev->private->flags.doverify)
 		/* Start delayed path verification. */
@@ -992,7 +997,7 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
 	sch = to_subchannel(cdev->dev.parent);
 	if (!sch->lpm) {
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_nopath_notify, cdev);
+			     ccw_device_nopath_notify);
 		queue_work(ccw_device_notify_work,
 			   &cdev->private->kick_work);
 	} else
@@ -1021,7 +1026,7 @@ void device_kill_io(struct subchannel *sch)
 	if (ret == -ENODEV) {
 		if (!sch->lpm) {
 			PREPARE_WORK(&cdev->private->kick_work,
-				     ccw_device_nopath_notify, cdev);
+				     ccw_device_nopath_notify);
 			queue_work(ccw_device_notify_work,
 				   &cdev->private->kick_work);
 		} else
@@ -1033,7 +1038,7 @@ void device_kill_io(struct subchannel *sch)
 			      ERR_PTR(-EIO));
 	if (!sch->lpm) {
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_nopath_notify, cdev);
+			     ccw_device_nopath_notify);
 		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
 	} else
 		/* Start delayed path verification. */
@@ -1104,7 +1109,8 @@ device_trigger_reprobe(struct subchannel *sch)
 	/* Update some values. */
 	if (stsch(sch->schid, &sch->schib))
 		return;
+	if (!sch->schib.pmcw.dnv)
+		return;
 	/*
 	 * The pim, pam, pom values may not be accurate, but they are the best
 	 * we have before performing device selection :/
@@ -1118,6 +1124,12 @@ device_trigger_reprobe(struct subchannel *sch)
 	sch->schib.pmcw.mp = 1;
 	sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
 	/* We should also udate ssd info, but this has to wait. */
-	ccw_device_start_id(cdev, 0);
+	/* Check if this is another device which appeared on the same sch. */
+	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
+		PREPARE_WORK(&cdev->private->kick_work,
+			     ccw_device_move_to_orphanage);
+		queue_work(ccw_device_work, &cdev->private->kick_work);
+	} else
+		ccw_device_start_id(cdev, 0);
 }
...
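The repeated PREPARE_WORK() edits above all follow the workqueue API change in this merge: a work function now receives the work_struct itself instead of a separate void * argument, and recovers its context with container_of(). A self-contained illustration of the pattern (a generic example, not driver code from this commit):

    #include <linux/workqueue.h>

    struct example_ctx {
        struct work_struct work;
        int count;
    };

    static void example_handler(struct work_struct *work)
    {
        /* Recover the enclosing context from the embedded work item. */
        struct example_ctx *ctx = container_of(work, struct example_ctx, work);

        ctx->count++;
    }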
@@ -316,9 +316,9 @@ __ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, _
 		ccw_device_set_timeout(cdev, 0);
 		if (ret == -EBUSY) {
 			/* Try again later. */
-			spin_unlock_irq(&sch->lock);
+			spin_unlock_irq(sch->lock);
 			msleep(10);
-			spin_lock_irq(&sch->lock);
+			spin_lock_irq(sch->lock);
 			continue;
 		}
 		if (ret != 0)
@@ -326,12 +326,12 @@ __ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, _
 			break;
 		/* Wait for end of request. */
 		cdev->private->intparm = magic;
-		spin_unlock_irq(&sch->lock);
+		spin_unlock_irq(sch->lock);
 		wait_event(cdev->private->wait_q,
 			   (cdev->private->intparm == -EIO) ||
 			   (cdev->private->intparm == -EAGAIN) ||
 			   (cdev->private->intparm == 0));
-		spin_lock_irq(&sch->lock);
+		spin_lock_irq(sch->lock);
 		/* Check at least for channel end / device end */
 		if (cdev->private->intparm == -EIO) {
 			/* Non-retryable error. */
@@ -342,9 +342,9 @@ __ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, _
 			/* Success. */
 			break;
 		/* Try again later. */
-		spin_unlock_irq(&sch->lock);
+		spin_unlock_irq(sch->lock);
 		msleep(10);
-		spin_lock_irq(&sch->lock);
+		spin_lock_irq(sch->lock);
 	} while (1);
 	return ret;
@@ -389,7 +389,7 @@ read_dev_chars (struct ccw_device *cdev, void **buffer, int length)
 		return ret;
 	}
-	spin_lock_irq(&sch->lock);
+	spin_lock_irq(sch->lock);
 	/* Save interrupt handler. */
 	handler = cdev->handler;
 	/* Temporarily install own handler. */
@@ -406,7 +406,7 @@ read_dev_chars (struct ccw_device *cdev, void **buffer, int length)
 	/* Restore interrupt handler. */
 	cdev->handler = handler;
-	spin_unlock_irq(&sch->lock);
+	spin_unlock_irq(sch->lock);
 	clear_normalized_cda (rdc_ccw);
 	kfree(rdc_ccw);
@@ -463,7 +463,7 @@ read_conf_data_lpm (struct ccw_device *cdev, void **buffer, int *length, __u8 lp
 	rcd_ccw->count = ciw->count;
 	rcd_ccw->flags = CCW_FLAG_SLI;
-	spin_lock_irq(&sch->lock);
+	spin_lock_irq(sch->lock);
 	/* Save interrupt handler. */
 	handler = cdev->handler;
 	/* Temporarily install own handler. */
@@ -480,7 +480,7 @@ read_conf_data_lpm (struct ccw_device *cdev, void **buffer, int *length, __u8 lp
 	/* Restore interrupt handler. */
 	cdev->handler = handler;
-	spin_unlock_irq(&sch->lock);
+	spin_unlock_irq(sch->lock);
 	/*
 	 * on success we update the user input parms
@@ -537,7 +537,7 @@ ccw_device_stlck(struct ccw_device *cdev)
 		kfree(buf);
 		return -ENOMEM;
 	}
-	spin_lock_irqsave(&sch->lock, flags);
+	spin_lock_irqsave(sch->lock, flags);
 	ret = cio_enable_subchannel(sch, 3);
 	if (ret)
 		goto out_unlock;
@@ -559,9 +559,9 @@ ccw_device_stlck(struct ccw_device *cdev)
 		goto out_unlock;
 	}
 	cdev->private->irb.scsw.actl |= SCSW_ACTL_START_PEND;
-	spin_unlock_irqrestore(&sch->lock, flags);
+	spin_unlock_irqrestore(sch->lock, flags);
 	wait_event(cdev->private->wait_q, cdev->private->irb.scsw.actl == 0);
-	spin_lock_irqsave(&sch->lock, flags);
+	spin_lock_irqsave(sch->lock, flags);
 	cio_disable_subchannel(sch); //FIXME: return code?
 	if ((cdev->private->irb.scsw.dstat !=
 	     (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
@@ -572,7 +572,7 @@ ccw_device_stlck(struct ccw_device *cdev)
 out_unlock:
 	kfree(buf);
 	kfree(buf2);
-	spin_unlock_irqrestore(&sch->lock, flags);
+	spin_unlock_irqrestore(sch->lock, flags);
 	return ret;
 }
...
@@ -12,10 +12,6 @@
 #endif /* CONFIG_QDIO_DEBUG */
 #define QDIO_USE_PROCESSING_STATE
-#ifdef CONFIG_QDIO_PERF_STATS
-#define QDIO_PERFORMANCE_STATS
-#endif /* CONFIG_QDIO_PERF_STATS */
 #define QDIO_MINIMAL_BH_RELIEF_TIME 16
 #define QDIO_TIMER_POLL_VALUE 1
 #define IQDIO_TIMER_POLL_VALUE 1
@@ -409,25 +405,23 @@ do_clear_global_summary(void)
 #define CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS	0x08
 #define CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS	0x04
-#ifdef QDIO_PERFORMANCE_STATS
 struct qdio_perf_stats {
-	unsigned int tl_runs;
-	unsigned int siga_outs;
-	unsigned int siga_ins;
-	unsigned int siga_syncs;
-	unsigned int pcis;
-	unsigned int thinints;
-	unsigned int fast_reqs;
+	unsigned long tl_runs;
+	unsigned long siga_outs;
+	unsigned long siga_ins;
+	unsigned long siga_syncs;
+	unsigned long pcis;
+	unsigned long thinints;
+	unsigned long fast_reqs;
 	__u64 start_time_outbound;
-	unsigned int outbound_cnt;
-	unsigned int outbound_time;
+	unsigned long outbound_cnt;
+	unsigned long outbound_time;
 	__u64 start_time_inbound;
-	unsigned int inbound_cnt;
-	unsigned int inbound_time;
+	unsigned long inbound_cnt;
+	unsigned long inbound_time;
 };
-#endif /* QDIO_PERFORMANCE_STATS */
 /* unlikely as the later the better */
 #define SYNC_MEMORY if (unlikely(q->siga_sync)) qdio_siga_sync_q(q)
...
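With CONFIG_QDIO_PERF_STATS removed, qdio_perf_stats is compiled unconditionally and, per this merge's "runtime switch for qdio performance statistics" entry, collection is toggled at run time rather than at build time. A hedged sketch of what such a runtime switch looks like (names illustrative, not necessarily the driver's actual interface):

    static int qdio_performance_stats;	/* flipped at run time, e.g. via /proc */

    static inline void qdio_perf_count_tl_run(struct qdio_perf_stats *stats)
    {
        /* Counting costs one branch when statistics are disabled. */
        if (qdio_performance_stats)
            stats->tl_runs++;
    }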
@@ -33,6 +33,7 @@
 #include <linux/kthread.h>
 #include <linux/mutex.h>
 #include <asm/s390_rdev.h>
+#include <asm/reset.h>
 #include "ap_bus.h"
@@ -1128,6 +1129,19 @@ static void ap_poll_thread_stop(void)
 	mutex_unlock(&ap_poll_thread_mutex);
 }
+static void ap_reset(void)
+{
+	int i, j;
+
+	for (i = 0; i < AP_DOMAINS; i++)
+		for (j = 0; j < AP_DEVICES; j++)
+			ap_reset_queue(AP_MKQID(j, i));
+}
+
+static struct reset_call ap_reset_call = {
+	.fn = ap_reset,
+};
 /**
  * The module initialization code.
  */
@@ -1144,6 +1158,7 @@ int __init ap_module_init(void)
 		printk(KERN_WARNING "AP instructions not installed.\n");
 		return -ENODEV;
 	}
+	register_reset_call(&ap_reset_call);
 	/* Create /sys/bus/ap. */
 	rc = bus_register(&ap_bus_type);
@@ -1197,6 +1212,7 @@ out_bus:
 		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
 	bus_unregister(&ap_bus_type);
 out:
+	unregister_reset_call(&ap_reset_call);
 	return rc;
 }
@@ -1227,6 +1243,7 @@ void ap_module_exit(void)
 	for (i = 0; ap_bus_attrs[i]; i++)
 		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
 	bus_unregister(&ap_bus_type);
+	unregister_reset_call(&ap_reset_call);
 }
 #ifndef CONFIG_ZCRYPT_MONOLITHIC
...
@@ -69,11 +69,13 @@ typedef struct dasd_information2_t {
  * 0x01: readonly (ro)
  * 0x02: use diag discipline (diag)
  * 0x04: set the device initially online (internal use only)
+ * 0x08: enable ERP related logging
  */
 #define DASD_FEATURE_DEFAULT	0x00
 #define DASD_FEATURE_READONLY	0x01
 #define DASD_FEATURE_USEDIAG	0x02
 #define DASD_FEATURE_INITIAL_ONLINE	0x04
+#define DASD_FEATURE_ERPLOG	0x08
 #define DASD_PARTN_BITS 2
...
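The DASD_FEATURE_* values are bit flags, so the new ERP logging feature is tested with a bitwise AND, in the same way as the existing flags. For example (illustrative helper, not from the patch):

    static inline int dasd_erplog_enabled(unsigned int features)
    {
        return (features & DASD_FEATURE_ERPLOG) != 0;
    }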
@@ -127,6 +127,26 @@ page_get_storage_key(unsigned long addr)
 	return skey;
 }
+extern unsigned long max_pfn;
+
+static inline int pfn_valid(unsigned long pfn)
+{
+	unsigned long dummy;
+	int ccode;
+
+	if (pfn >= max_pfn)
+		return 0;
+
+	asm volatile(
+		"	lra	%0,0(%2)\n"
+		"	ipm	%1\n"
+		"	srl	%1,28\n"
+		: "=d" (dummy), "=d" (ccode)
+		: "a" (pfn << PAGE_SHIFT)
+		: "cc");
+	return !ccode;
+}
 #endif /* !__ASSEMBLY__ */
 /* to align the pointer to the (next) page boundary */
@@ -138,8 +158,6 @@ page_get_storage_key(unsigned long addr)
 #define __va(x)                 (void *)(unsigned long)(x)
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
-#define pfn_valid(pfn)		((pfn) < max_mapnr)
 #define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 #define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
...
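The old pfn_valid() was a pure bounds check. With the virtual memmap introduced in this series, the address range backing the struct page array can contain unmapped holes, so the new helper additionally asks the hardware whether a mapping exists: lra (load real address) sets a nonzero condition code when translation fails. A simplified model of the logic (illustrative C only, not a substitute for the inline assembly above):

    /* mapping_exists() stands in for the lra-based translation probe. */
    static inline int pfn_backed(unsigned long pfn, unsigned long max_pfn,
                                 int (*mapping_exists)(unsigned long addr))
    {
        if (pfn >= max_pfn)
            return 0;                       /* beyond the last page frame */
        return mapping_exists(pfn << 12);   /* PAGE_SHIFT is 12 on s390 */
    }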
@@ -25,8 +25,11 @@ extern void diag10(unsigned long addr);
  * Page allocation orders.
  */
 #ifndef __s390x__
+# define PTE_ALLOC_ORDER	0
+# define PMD_ALLOC_ORDER	0
 # define PGD_ALLOC_ORDER	1
 #else /* __s390x__ */
+# define PTE_ALLOC_ORDER	0
 # define PMD_ALLOC_ORDER	2
 # define PGD_ALLOC_ORDER	2
 #endif /* __s390x__ */
...
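For reference, an allocation order n means 2^n contiguous pages, so with 4 KB pages the constants work out as follows (simple arithmetic, not part of the patch):

    PTE_ALLOC_ORDER 0  ->  1 page  =  4 KB
    PMD_ALLOC_ORDER 0  ->  1 page  =  4 KB   (31-bit)
    PGD_ALLOC_ORDER 1  ->  2 pages =  8 KB   (31-bit)
    PMD_ALLOC_ORDER 2  ->  4 pages = 16 KB   (64-bit)
    PGD_ALLOC_ORDER 2  ->  4 pages = 16 KB   (64-bit)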
@@ -107,23 +107,25 @@ extern char empty_zero_page[PAGE_SIZE];
  * The vmalloc() routines leaves a hole of 4kB between each vmalloced
  * area for the same reason. ;)
  */
+extern unsigned long vmalloc_end;
 #define VMALLOC_OFFSET	(8*1024*1024)
 #define VMALLOC_START	(((unsigned long) high_memory + VMALLOC_OFFSET) \
 			 & ~(VMALLOC_OFFSET-1))
+#define VMALLOC_END	vmalloc_end
 /*
  * We need some free virtual space to be able to do vmalloc.
  * VMALLOC_MIN_SIZE defines the minimum size of the vmalloc
  * area. On a machine with 2GB memory we make sure that we
  * have at least 128MB free space for vmalloc. On a machine
- * with 4TB we make sure we have at least 1GB.
+ * with 4TB we make sure we have at least 128GB.
 */
 #ifndef __s390x__
 #define VMALLOC_MIN_SIZE	0x8000000UL
-#define VMALLOC_END	0x80000000UL
+#define VMALLOC_END_INIT	0x80000000UL
 #else /* __s390x__ */
-#define VMALLOC_MIN_SIZE	0x40000000UL
-#define VMALLOC_END	0x40000000000UL
+#define VMALLOC_MIN_SIZE	0x2000000000UL
+#define VMALLOC_END_INIT	0x40000000000UL
 #endif /* __s390x__ */
 /*
@@ -815,11 +817,17 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
 #define kern_addr_valid(addr)   (1)
+extern int add_shared_memory(unsigned long start, unsigned long size);
+extern int remove_shared_memory(unsigned long start, unsigned long size);
 /*
  * No page table caches to initialise
  */
 #define pgtable_cache_init()	do { } while (0)
+#define __HAVE_ARCH_MEMMAP_INIT
+extern void memmap_init(unsigned long, int, unsigned long, unsigned long);
 #define __HAVE_ARCH_PTEP_ESTABLISH
 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
...
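The new constants match the updated comment: 0x8000000 = 2^27 bytes = 128 MB on 31-bit, 0x2000000000 = 2^37 bytes = 128 GB on 64-bit, and VMALLOC_END_INIT = 0x40000000000 = 2^42 bytes = 4 TB of 64-bit address space.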