Commit b30fc14c authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6

* 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6:
  [S390] s390: Fix build for !CONFIG_S390_GUEST + CONFIG_VIRTIO_CONSOLE
  [S390] No more 4kb stacks.
  [S390] Change default IPL method to IPL_VM.
  [S390] tape: disable interrupts in tape_open and tape_release
  [S390] appldata: unsigned ops->size cannot be negative
  [S390] tape block: complete request with correct locking
  [S390] Fix sysdev class file creation.
  [S390] pgtables: Fix race in enable_sie vs. page table ops
  [S390] qdio: remove incorrect memset
  [S390] qdio: prevent double qdio shutdown in case of I/O errors
parents 3c136f29 ea4bfdf5
...@@ -241,19 +241,17 @@ config PACK_STACK ...@@ -241,19 +241,17 @@ config PACK_STACK
Say Y if you are unsure. Say Y if you are unsure.
config SMALL_STACK config SMALL_STACK
bool "Use 4kb/8kb for kernel stack instead of 8kb/16kb" bool "Use 8kb for kernel stack instead of 16kb"
depends on PACK_STACK && !LOCKDEP depends on PACK_STACK && 64BIT && !LOCKDEP
help help
If you say Y here and the compiler supports the -mkernel-backchain If you say Y here and the compiler supports the -mkernel-backchain
option the kernel will use a smaller kernel stack size. For 31 bit option the kernel will use a smaller kernel stack size. The reduced
the reduced size is 4kb instead of 8kb and for 64 bit it is 8kb size is 8kb instead of 16kb. This allows to run more threads on a
instead of 16kb. This allows to run more thread on a system and system and reduces the pressure on the memory management for higher
reduces the pressure on the memory management for higher order order page allocations.
page allocations.
Say N if you are unsure. Say N if you are unsure.
config CHECK_STACK config CHECK_STACK
bool "Detect kernel stack overflow" bool "Detect kernel stack overflow"
help help
...@@ -384,7 +382,7 @@ config IPL ...@@ -384,7 +382,7 @@ config IPL
choice choice
prompt "IPL method generated into head.S" prompt "IPL method generated into head.S"
depends on IPL depends on IPL
default IPL_TAPE default IPL_VM
help help
Select "tape" if you want to IPL the image from a Tape. Select "tape" if you want to IPL the image from a Tape.
......
...@@ -424,7 +424,7 @@ out: ...@@ -424,7 +424,7 @@ out:
*/ */
int appldata_register_ops(struct appldata_ops *ops) int appldata_register_ops(struct appldata_ops *ops)
{ {
if ((ops->size > APPLDATA_MAX_REC_SIZE) || (ops->size < 0)) if (ops->size > APPLDATA_MAX_REC_SIZE)
return -EINVAL; return -EINVAL;
ops->ctl_table = kzalloc(4 * sizeof(struct ctl_table), GFP_KERNEL); ops->ctl_table = kzalloc(4 * sizeof(struct ctl_table), GFP_KERNEL);
......
...@@ -52,7 +52,7 @@ struct kvm_vqconfig { ...@@ -52,7 +52,7 @@ struct kvm_vqconfig {
#ifdef __KERNEL__ #ifdef __KERNEL__
/* early virtio console setup */ /* early virtio console setup */
#ifdef CONFIG_VIRTIO_CONSOLE #ifdef CONFIG_S390_GUEST
extern void s390_virtio_console_init(void); extern void s390_virtio_console_init(void);
#else #else
static inline void s390_virtio_console_init(void) static inline void s390_virtio_console_init(void)
......
...@@ -7,7 +7,8 @@ typedef struct { ...@@ -7,7 +7,8 @@ typedef struct {
unsigned long asce_bits; unsigned long asce_bits;
unsigned long asce_limit; unsigned long asce_limit;
int noexec; int noexec;
int pgstes; int has_pgste; /* The mmu context has extended page tables */
int alloc_pgste; /* cloned contexts will have extended page tables */
} mm_context_t; } mm_context_t;
#endif #endif
...@@ -20,12 +20,25 @@ static inline int init_new_context(struct task_struct *tsk, ...@@ -20,12 +20,25 @@ static inline int init_new_context(struct task_struct *tsk,
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
mm->context.asce_bits |= _ASCE_TYPE_REGION3; mm->context.asce_bits |= _ASCE_TYPE_REGION3;
#endif #endif
if (current->mm->context.pgstes) { if (current->mm->context.alloc_pgste) {
/*
* alloc_pgste indicates, that any NEW context will be created
* with extended page tables. The old context is unchanged. The
* page table allocation and the page table operations will
* look at has_pgste to distinguish normal and extended page
* tables. The only way to create extended page tables is to
* set alloc_pgste and then create a new context (e.g. dup_mm).
* The page table allocation is called after init_new_context
* and if has_pgste is set, it will create extended page
* tables.
*/
mm->context.noexec = 0; mm->context.noexec = 0;
mm->context.pgstes = 1; mm->context.has_pgste = 1;
mm->context.alloc_pgste = 1;
} else { } else {
mm->context.noexec = s390_noexec; mm->context.noexec = s390_noexec;
mm->context.pgstes = 0; mm->context.has_pgste = 0;
mm->context.alloc_pgste = 0;
} }
mm->context.asce_limit = STACK_TOP_MAX; mm->context.asce_limit = STACK_TOP_MAX;
crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm)); crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
......
...@@ -679,7 +679,7 @@ static inline void pmd_clear(pmd_t *pmd) ...@@ -679,7 +679,7 @@ static inline void pmd_clear(pmd_t *pmd)
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{ {
if (mm->context.pgstes) if (mm->context.has_pgste)
ptep_rcp_copy(ptep); ptep_rcp_copy(ptep);
pte_val(*ptep) = _PAGE_TYPE_EMPTY; pte_val(*ptep) = _PAGE_TYPE_EMPTY;
if (mm->context.noexec) if (mm->context.noexec)
...@@ -763,7 +763,7 @@ static inline int kvm_s390_test_and_clear_page_dirty(struct mm_struct *mm, ...@@ -763,7 +763,7 @@ static inline int kvm_s390_test_and_clear_page_dirty(struct mm_struct *mm,
struct page *page; struct page *page;
unsigned int skey; unsigned int skey;
if (!mm->context.pgstes) if (!mm->context.has_pgste)
return -EINVAL; return -EINVAL;
rcp_lock(ptep); rcp_lock(ptep);
pgste = (unsigned long *) (ptep + PTRS_PER_PTE); pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
...@@ -794,7 +794,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, ...@@ -794,7 +794,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
int young; int young;
unsigned long *pgste; unsigned long *pgste;
if (!vma->vm_mm->context.pgstes) if (!vma->vm_mm->context.has_pgste)
return 0; return 0;
physpage = pte_val(*ptep) & PAGE_MASK; physpage = pte_val(*ptep) & PAGE_MASK;
pgste = (unsigned long *) (ptep + PTRS_PER_PTE); pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
...@@ -844,7 +844,7 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep) ...@@ -844,7 +844,7 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
static inline void ptep_invalidate(struct mm_struct *mm, static inline void ptep_invalidate(struct mm_struct *mm,
unsigned long address, pte_t *ptep) unsigned long address, pte_t *ptep)
{ {
if (mm->context.pgstes) { if (mm->context.has_pgste) {
rcp_lock(ptep); rcp_lock(ptep);
__ptep_ipte(address, ptep); __ptep_ipte(address, ptep);
ptep_rcp_copy(ptep); ptep_rcp_copy(ptep);
......
...@@ -15,13 +15,8 @@ ...@@ -15,13 +15,8 @@
* Size of kernel stack for each process * Size of kernel stack for each process
*/ */
#ifndef __s390x__ #ifndef __s390x__
#ifndef __SMALL_STACK
#define THREAD_ORDER 1 #define THREAD_ORDER 1
#define ASYNC_ORDER 1 #define ASYNC_ORDER 1
#else
#define THREAD_ORDER 0
#define ASYNC_ORDER 0
#endif
#else /* __s390x__ */ #else /* __s390x__ */
#ifndef __SMALL_STACK #ifndef __SMALL_STACK
#define THREAD_ORDER 2 #define THREAD_ORDER 2
......
...@@ -1119,9 +1119,7 @@ out: ...@@ -1119,9 +1119,7 @@ out:
return rc; return rc;
} }
static ssize_t __ref rescan_store(struct sys_device *dev, static ssize_t __ref rescan_store(struct sysdev_class *class, const char *buf,
struct sysdev_attribute *attr,
const char *buf,
size_t count) size_t count)
{ {
int rc; int rc;
...@@ -1129,12 +1127,10 @@ static ssize_t __ref rescan_store(struct sys_device *dev, ...@@ -1129,12 +1127,10 @@ static ssize_t __ref rescan_store(struct sys_device *dev,
rc = smp_rescan_cpus(); rc = smp_rescan_cpus();
return rc ? rc : count; return rc ? rc : count;
} }
static SYSDEV_ATTR(rescan, 0200, NULL, rescan_store); static SYSDEV_CLASS_ATTR(rescan, 0200, NULL, rescan_store);
#endif /* CONFIG_HOTPLUG_CPU */ #endif /* CONFIG_HOTPLUG_CPU */
static ssize_t dispatching_show(struct sys_device *dev, static ssize_t dispatching_show(struct sysdev_class *class, char *buf)
struct sysdev_attribute *attr,
char *buf)
{ {
ssize_t count; ssize_t count;
...@@ -1144,9 +1140,8 @@ static ssize_t dispatching_show(struct sys_device *dev, ...@@ -1144,9 +1140,8 @@ static ssize_t dispatching_show(struct sys_device *dev,
return count; return count;
} }
static ssize_t dispatching_store(struct sys_device *dev, static ssize_t dispatching_store(struct sysdev_class *dev, const char *buf,
struct sysdev_attribute *attr, size_t count)
const char *buf, size_t count)
{ {
int val, rc; int val, rc;
char delim; char delim;
...@@ -1168,7 +1163,8 @@ out: ...@@ -1168,7 +1163,8 @@ out:
put_online_cpus(); put_online_cpus();
return rc ? rc : count; return rc ? rc : count;
} }
static SYSDEV_ATTR(dispatching, 0644, dispatching_show, dispatching_store); static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show,
dispatching_store);
static int __init topology_init(void) static int __init topology_init(void)
{ {
...@@ -1178,13 +1174,11 @@ static int __init topology_init(void) ...@@ -1178,13 +1174,11 @@ static int __init topology_init(void)
register_cpu_notifier(&smp_cpu_nb); register_cpu_notifier(&smp_cpu_nb);
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj, rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_rescan);
&attr_rescan.attr);
if (rc) if (rc)
return rc; return rc;
#endif #endif
rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj, rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_dispatching);
&attr_dispatching.attr);
if (rc) if (rc)
return rc; return rc;
for_each_present_cpu(cpu) { for_each_present_cpu(cpu) {
......
...@@ -169,7 +169,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm) ...@@ -169,7 +169,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
unsigned long *table; unsigned long *table;
unsigned long bits; unsigned long bits;
bits = (mm->context.noexec || mm->context.pgstes) ? 3UL : 1UL; bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
spin_lock(&mm->page_table_lock); spin_lock(&mm->page_table_lock);
page = NULL; page = NULL;
if (!list_empty(&mm->context.pgtable_list)) { if (!list_empty(&mm->context.pgtable_list)) {
...@@ -186,7 +186,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm) ...@@ -186,7 +186,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
pgtable_page_ctor(page); pgtable_page_ctor(page);
page->flags &= ~FRAG_MASK; page->flags &= ~FRAG_MASK;
table = (unsigned long *) page_to_phys(page); table = (unsigned long *) page_to_phys(page);
if (mm->context.pgstes) if (mm->context.has_pgste)
clear_table_pgstes(table); clear_table_pgstes(table);
else else
clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE); clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
...@@ -210,7 +210,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table) ...@@ -210,7 +210,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
struct page *page; struct page *page;
unsigned long bits; unsigned long bits;
bits = (mm->context.noexec || mm->context.pgstes) ? 3UL : 1UL; bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long); bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
page = pfn_to_page(__pa(table) >> PAGE_SHIFT); page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
spin_lock(&mm->page_table_lock); spin_lock(&mm->page_table_lock);
...@@ -257,7 +257,7 @@ int s390_enable_sie(void) ...@@ -257,7 +257,7 @@ int s390_enable_sie(void)
struct mm_struct *mm, *old_mm; struct mm_struct *mm, *old_mm;
/* Do we have pgstes? if yes, we are done */ /* Do we have pgstes? if yes, we are done */
if (tsk->mm->context.pgstes) if (tsk->mm->context.has_pgste)
return 0; return 0;
/* lets check if we are allowed to replace the mm */ /* lets check if we are allowed to replace the mm */
...@@ -269,14 +269,14 @@ int s390_enable_sie(void) ...@@ -269,14 +269,14 @@ int s390_enable_sie(void)
} }
task_unlock(tsk); task_unlock(tsk);
/* we copy the mm with pgstes enabled */ /* we copy the mm and let dup_mm create the page tables with_pgstes */
tsk->mm->context.pgstes = 1; tsk->mm->context.alloc_pgste = 1;
mm = dup_mm(tsk); mm = dup_mm(tsk);
tsk->mm->context.pgstes = 0; tsk->mm->context.alloc_pgste = 0;
if (!mm) if (!mm)
return -ENOMEM; return -ENOMEM;
/* Now lets check again if somebody attached ptrace etc */ /* Now lets check again if something happened */
task_lock(tsk); task_lock(tsk);
if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 || if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) { tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) {
......
...@@ -76,7 +76,7 @@ tapeblock_trigger_requeue(struct tape_device *device) ...@@ -76,7 +76,7 @@ tapeblock_trigger_requeue(struct tape_device *device)
static void static void
tapeblock_end_request(struct request *req, int error) tapeblock_end_request(struct request *req, int error)
{ {
if (__blk_end_request(req, error, blk_rq_bytes(req))) if (blk_end_request(req, error, blk_rq_bytes(req)))
BUG(); BUG();
} }
...@@ -166,7 +166,7 @@ tapeblock_requeue(struct work_struct *work) { ...@@ -166,7 +166,7 @@ tapeblock_requeue(struct work_struct *work) {
nr_queued++; nr_queued++;
spin_unlock(get_ccwdev_lock(device->cdev)); spin_unlock(get_ccwdev_lock(device->cdev));
spin_lock(&device->blk_data.request_queue_lock); spin_lock_irq(&device->blk_data.request_queue_lock);
while ( while (
!blk_queue_plugged(queue) && !blk_queue_plugged(queue) &&
elv_next_request(queue) && elv_next_request(queue) &&
...@@ -176,7 +176,9 @@ tapeblock_requeue(struct work_struct *work) { ...@@ -176,7 +176,9 @@ tapeblock_requeue(struct work_struct *work) {
if (rq_data_dir(req) == WRITE) { if (rq_data_dir(req) == WRITE) {
DBF_EVENT(1, "TBLOCK: Rejecting write request\n"); DBF_EVENT(1, "TBLOCK: Rejecting write request\n");
blkdev_dequeue_request(req); blkdev_dequeue_request(req);
spin_unlock_irq(&device->blk_data.request_queue_lock);
tapeblock_end_request(req, -EIO); tapeblock_end_request(req, -EIO);
spin_lock_irq(&device->blk_data.request_queue_lock);
continue; continue;
} }
blkdev_dequeue_request(req); blkdev_dequeue_request(req);
......
...@@ -1200,7 +1200,7 @@ tape_open(struct tape_device *device) ...@@ -1200,7 +1200,7 @@ tape_open(struct tape_device *device)
{ {
int rc; int rc;
spin_lock(get_ccwdev_lock(device->cdev)); spin_lock_irq(get_ccwdev_lock(device->cdev));
if (device->tape_state == TS_NOT_OPER) { if (device->tape_state == TS_NOT_OPER) {
DBF_EVENT(6, "TAPE:nodev\n"); DBF_EVENT(6, "TAPE:nodev\n");
rc = -ENODEV; rc = -ENODEV;
...@@ -1218,7 +1218,7 @@ tape_open(struct tape_device *device) ...@@ -1218,7 +1218,7 @@ tape_open(struct tape_device *device)
tape_state_set(device, TS_IN_USE); tape_state_set(device, TS_IN_USE);
rc = 0; rc = 0;
} }
spin_unlock(get_ccwdev_lock(device->cdev)); spin_unlock_irq(get_ccwdev_lock(device->cdev));
return rc; return rc;
} }
...@@ -1228,11 +1228,11 @@ tape_open(struct tape_device *device) ...@@ -1228,11 +1228,11 @@ tape_open(struct tape_device *device)
int int
tape_release(struct tape_device *device) tape_release(struct tape_device *device)
{ {
spin_lock(get_ccwdev_lock(device->cdev)); spin_lock_irq(get_ccwdev_lock(device->cdev));
if (device->tape_state == TS_IN_USE) if (device->tape_state == TS_IN_USE)
tape_state_set(device, TS_UNUSED); tape_state_set(device, TS_UNUSED);
module_put(device->discipline->owner); module_put(device->discipline->owner);
spin_unlock(get_ccwdev_lock(device->cdev)); spin_unlock_irq(get_ccwdev_lock(device->cdev));
return 0; return 0;
} }
......
...@@ -20,6 +20,7 @@ static struct dentry *debugfs_root; ...@@ -20,6 +20,7 @@ static struct dentry *debugfs_root;
#define MAX_DEBUGFS_QUEUES 32 #define MAX_DEBUGFS_QUEUES 32
static struct dentry *debugfs_queues[MAX_DEBUGFS_QUEUES] = { NULL }; static struct dentry *debugfs_queues[MAX_DEBUGFS_QUEUES] = { NULL };
static DEFINE_MUTEX(debugfs_mutex); static DEFINE_MUTEX(debugfs_mutex);
#define QDIO_DEBUGFS_NAME_LEN 40
void qdio_allocate_do_dbf(struct qdio_initialize *init_data) void qdio_allocate_do_dbf(struct qdio_initialize *init_data)
{ {
...@@ -152,17 +153,6 @@ static int qstat_seq_open(struct inode *inode, struct file *filp) ...@@ -152,17 +153,6 @@ static int qstat_seq_open(struct inode *inode, struct file *filp)
filp->f_path.dentry->d_inode->i_private); filp->f_path.dentry->d_inode->i_private);
} }
static void get_queue_name(struct qdio_q *q, struct ccw_device *cdev, char *name)
{
memset(name, 0, sizeof(name));
sprintf(name, "%s", dev_name(&cdev->dev));
if (q->is_input_q)
sprintf(name + strlen(name), "_input");
else
sprintf(name + strlen(name), "_output");
sprintf(name + strlen(name), "_%d", q->nr);
}
static void remove_debugfs_entry(struct qdio_q *q) static void remove_debugfs_entry(struct qdio_q *q)
{ {
int i; int i;
...@@ -189,14 +179,17 @@ static struct file_operations debugfs_fops = { ...@@ -189,14 +179,17 @@ static struct file_operations debugfs_fops = {
static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev) static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev)
{ {
int i = 0; int i = 0;
char name[40]; char name[QDIO_DEBUGFS_NAME_LEN];
while (debugfs_queues[i] != NULL) { while (debugfs_queues[i] != NULL) {
i++; i++;
if (i >= MAX_DEBUGFS_QUEUES) if (i >= MAX_DEBUGFS_QUEUES)
return; return;
} }
get_queue_name(q, cdev, name); snprintf(name, QDIO_DEBUGFS_NAME_LEN, "%s_%s_%d",
dev_name(&cdev->dev),
q->is_input_q ? "input" : "output",
q->nr);
debugfs_queues[i] = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR, debugfs_queues[i] = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR,
debugfs_root, q, &debugfs_fops); debugfs_root, q, &debugfs_fops);
} }
......
...@@ -1083,7 +1083,6 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, ...@@ -1083,7 +1083,6 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
case -EIO: case -EIO:
sprintf(dbf_text, "ierr%4x", irq_ptr->schid.sch_no); sprintf(dbf_text, "ierr%4x", irq_ptr->schid.sch_no);
QDIO_DBF_TEXT2(1, setup, dbf_text); QDIO_DBF_TEXT2(1, setup, dbf_text);
qdio_int_error(cdev);
return; return;
case -ETIMEDOUT: case -ETIMEDOUT:
sprintf(dbf_text, "qtoh%4x", irq_ptr->schid.sch_no); sprintf(dbf_text, "qtoh%4x", irq_ptr->schid.sch_no);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment