Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6

* 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6: (28 commits)
  [S390] rework of channel measurement facility.
  [S390] appldata enhancements.
  [S390] Add vmpanic parameter.
  [S390] add PAV support to the dasd driver.
  [S390] remove export of sys_call_table
  [S390] remove unused macros from binfmt_elf32.c
  [S390] fix duplicate export of overflow{ug}id
  [S390] cio chpid offline.
  [S390] avenrun export in appdata_base.c
  Convert s390_collect_crw_info() in s390mach.c from being started
  [S390] dasd eer data format.
  [S390] preempt_count initialization.
  [S390] head.S code moving.
  [S390] dasd whitespace and other cosmetics.
  [S390] virtual cpu accounting vs. machine checks.
  [S390] add __cpuinit to appldata cpu hotplug notifier.
  [S390] dasd_eckd_dump_sense bug.
  [S390] missing check in dasd_eer_open.
  [S390] modular 3270 driver.
  [S390] console_unblank woes.
  ...
Commit b026188e82 by Linus Torvalds, 2006-06-29 11:01:17 -07:00
48 changed files with 1457 additions and 988 deletions

diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt

@@ -1689,9 +1689,14 @@ running once the system is up.
 			decrease the size and leave more room for directly
 			mapped kernel RAM.
 
-	vmhalt=		[KNL,S390]
+	vmhalt=		[KNL,S390] Perform z/VM CP command after system halt.
+			Format: <command>
 
-	vmpoff=		[KNL,S390]
+	vmpanic=	[KNL,S390] Perform z/VM CP command after kernel panic.
+			Format: <command>
+
+	vmpoff=		[KNL,S390] Perform z/VM CP command after power off.
+			Format: <command>
 
 	waveartist=	[HW,OSS]
 			Format: <io>,<irq>,<dma>,<dma2>
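
Usage note: each of these parameters takes a single CP command string that the kernel issues to z/VM at the corresponding event (the new vmpanic= command is run from a panic notifier via cpcmd(), as the setup.c hunk further down shows). A purely illustrative guest command line, with made-up CP commands rather than anything taken from this patch:

	vmhalt="LOGOFF" vmpanic="MSG OPERATOR Linux panicked" vmpoff="LOGOFF"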

diff --git a/arch/s390/appldata/appldata.h b/arch/s390/appldata/appldata.h

@@ -3,9 +3,9 @@
  *
  * Definitions and interface for Linux - z/VM Monitor Stream.
  *
- * Copyright (C) 2003 IBM Corporation, IBM Deutschland Entwicklung GmbH.
+ * Copyright (C) 2003,2006 IBM Corporation, IBM Deutschland Entwicklung GmbH.
  *
- * Author: Gerald Schaefer <geraldsc@de.ibm.com>
+ * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
  */
 
 //#define APPLDATA_DEBUG		/* Debug messages on/off */
@@ -29,6 +29,22 @@
 #define CTL_APPLDATA_NET_SUM	2125
 #define CTL_APPLDATA_PROC	2126
 
+#ifndef CONFIG_64BIT
+
+#define APPLDATA_START_INTERVAL_REC	0x00	/* Function codes for */
+#define APPLDATA_STOP_REC		0x01	/* DIAG 0xDC */
+#define APPLDATA_GEN_EVENT_RECORD	0x02
+#define APPLDATA_START_CONFIG_REC	0x03
+
+#else
+
+#define APPLDATA_START_INTERVAL_REC	0x80
+#define APPLDATA_STOP_REC		0x81
+#define APPLDATA_GEN_EVENT_RECORD	0x82
+#define APPLDATA_START_CONFIG_REC	0x83
+
+#endif /* CONFIG_64BIT */
+
 #define P_INFO(x...)	printk(KERN_INFO MY_PRINT_NAME " info: " x)
 #define P_ERROR(x...)	printk(KERN_ERR MY_PRINT_NAME " error: " x)
 #define P_WARNING(x...)	printk(KERN_WARNING MY_PRINT_NAME " status: " x)
@@ -53,7 +69,11 @@ struct appldata_ops {
 	void *data;			/* record data */
 	unsigned int size;		/* size of record */
 	struct module *owner;		/* THIS_MODULE */
+	char mod_lvl[2];		/* modification level, EBCDIC */
 };
 
 extern int appldata_register_ops(struct appldata_ops *ops);
 extern void appldata_unregister_ops(struct appldata_ops *ops);
+extern int appldata_diag(char record_nr, u16 function, unsigned long buffer,
+			 u16 length, char *mod_lvl);
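
Illustrative sketch only, not part of this commit: a minimal data-gathering module written against the interface as extended above. The base code issues DIAG 0xDC with the module's mod_lvl when monitoring is switched on, and appldata_diag() is now exported for modules (such as appldata_os below) that restart their own record. The record layout, record number 0x99 and sysctl number 2199 are invented for the example; see appldata_mem.c and appldata_os.c in this series for the real modules. Assumes the file is built in arch/s390/appldata/.

	#include <linux/module.h>
	#include <linux/init.h>
	#include <linux/types.h>
	#include <asm/timex.h>

	#include "appldata.h"

	struct appldata_example_data {
		u64 timestamp;		/* TOD clock value, set in the callback */
		u32 sample_count;	/* whatever the record is meant to report */
	} __attribute__((packed));

	static struct appldata_example_data example_data;

	/* Called by appldata_base before each record is sent with DIAG 0xDC. */
	static void appldata_example_callback(void *data)
	{
		struct appldata_example_data *d = data;

		d->sample_count++;
		d->timestamp = get_clock();
	}

	static struct appldata_ops example_ops = {
		.ctl_nr    = 2199,		/* hypothetical sysctl number */
		.name      = "example",		/* /proc/sys/appldata/example */
		.record_nr = 0x99,		/* hypothetical record number */
		.size      = sizeof(struct appldata_example_data),
		.callback  = &appldata_example_callback,
		.data      = &example_data,
		.owner     = THIS_MODULE,
		.mod_lvl   = {0xF0, 0xF0},	/* EBCDIC "00", as in appldata_mem.c */
	};

	static int __init appldata_example_init(void)
	{
		/* Registers the sysctl; the base code starts/stops DIAG 0xDC,
		   passing example_ops.mod_lvl in the product id. */
		return appldata_register_ops(&example_ops);
	}

	static void __exit appldata_example_exit(void)
	{
		appldata_unregister_ops(&example_ops);
	}

	module_init(appldata_example_init);
	module_exit(appldata_example_exit);
	MODULE_LICENSE("GPL");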

diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c

@@ -5,9 +5,9 @@
  * Exports appldata_register_ops() and appldata_unregister_ops() for the
  * data gathering modules.
  *
- * Copyright (C) 2003 IBM Corporation, IBM Deutschland Entwicklung GmbH.
+ * Copyright (C) 2003,2006 IBM Corporation, IBM Deutschland Entwicklung GmbH.
  *
- * Author: Gerald Schaefer <geraldsc@de.ibm.com>
+ * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
  */
 
 #include <linux/config.h>
@@ -40,22 +40,6 @@
 #define TOD_MICRO	0x01000		/* nr. of TOD clock units
 					   for 1 microsecond */
-
-#ifndef CONFIG_64BIT
-
-#define APPLDATA_START_INTERVAL_REC	0x00	/* Function codes for */
-#define APPLDATA_STOP_REC		0x01	/* DIAG 0xDC */
-#define APPLDATA_GEN_EVENT_RECORD	0x02
-#define APPLDATA_START_CONFIG_REC	0x03
-
-#else
-
-#define APPLDATA_START_INTERVAL_REC	0x80
-#define APPLDATA_STOP_REC		0x81
-#define APPLDATA_GEN_EVENT_RECORD	0x82
-#define APPLDATA_START_CONFIG_REC	0x83
-
-#endif /* CONFIG_64BIT */
 
 /*
  * Parameter list for DIAGNOSE X'DC'
@@ -195,8 +179,8 @@ static void appldata_work_fn(void *data)
  *
  * prepare parameter list, issue DIAG 0xDC
  */
-static int appldata_diag(char record_nr, u16 function, unsigned long buffer,
-			 u16 length)
+int appldata_diag(char record_nr, u16 function, unsigned long buffer,
+		  u16 length, char *mod_lvl)
 {
 	unsigned long ry;
 	struct appldata_product_id {
@@ -214,7 +198,7 @@ static int appldata_diag(char record_nr, u16 function, unsigned long buffer,
 		.record_nr  = record_nr,
 		.version_nr = {0xF2, 0xF6},	/* "26" */
 		.release_nr = {0xF0, 0xF1},	/* "01" */
-		.mod_lvl    = {0xF0, 0xF0},	/* "00" */
+		.mod_lvl    = {mod_lvl[0], mod_lvl[1]},
 	};
 	struct appldata_parameter_list appldata_parameter_list = {
 			.diag = 0xDC,
@@ -467,24 +451,25 @@ appldata_generic_handler(ctl_table *ctl, int write, struct file *filp,
 			module_put(ops->owner);
 			return -ENODEV;
 		}
+		ops->active = 1;
 		ops->callback(ops->data);	// init record
 		rc = appldata_diag(ops->record_nr,
 					APPLDATA_START_INTERVAL_REC,
-					(unsigned long) ops->data, ops->size);
+					(unsigned long) ops->data, ops->size,
+					ops->mod_lvl);
 		if (rc != 0) {
 			P_ERROR("START DIAG 0xDC for %s failed, "
 				"return code: %d\n", ops->name, rc);
 			module_put(ops->owner);
+			ops->active = 0;
 		} else {
 			P_INFO("Monitoring %s data enabled, "
 				"DIAG 0xDC started.\n", ops->name);
-			ops->active = 1;
 		}
 	} else if ((buf[0] == '0') && (ops->active == 1)) {
 		ops->active = 0;
 		rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
-				(unsigned long) ops->data, ops->size);
+				(unsigned long) ops->data, ops->size,
+				ops->mod_lvl);
 		if (rc != 0) {
 			P_ERROR("STOP DIAG 0xDC for %s failed, "
 				"return code: %d\n", ops->name, rc);
@@ -633,7 +618,7 @@ appldata_offline_cpu(int cpu)
 	spin_unlock(&appldata_timer_lock);
 }
 
-static int
+static int __cpuinit
 appldata_cpu_notify(struct notifier_block *self,
 		    unsigned long action, void *hcpu)
 {
@@ -710,7 +695,8 @@ static void __exit appldata_exit(void)
 	list_for_each(lh, &appldata_ops_list) {
 		ops = list_entry(lh, struct appldata_ops, list);
 		rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
-				(unsigned long) ops->data, ops->size);
+				(unsigned long) ops->data, ops->size,
+				ops->mod_lvl);
 		if (rc != 0) {
 			P_ERROR("STOP DIAG 0xDC for %s failed, "
 				"return code: %d\n", ops->name, rc);
@@ -739,6 +725,7 @@ MODULE_DESCRIPTION("Linux-VM Monitor Stream, base infrastructure");
 
 EXPORT_SYMBOL_GPL(appldata_register_ops);
 EXPORT_SYMBOL_GPL(appldata_unregister_ops);
+EXPORT_SYMBOL_GPL(appldata_diag);
 
 #ifdef MODULE
 /*
@@ -779,7 +766,6 @@ unsigned long nr_iowait(void)
 #endif /* MODULE */
 EXPORT_SYMBOL_GPL(si_swapinfo);
 EXPORT_SYMBOL_GPL(nr_threads);
-EXPORT_SYMBOL_GPL(avenrun);
 EXPORT_SYMBOL_GPL(get_full_page_state);
 EXPORT_SYMBOL_GPL(nr_running);
 EXPORT_SYMBOL_GPL(nr_iowait);

diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c

@@ -4,9 +4,9 @@
  * Data gathering module for Linux-VM Monitor Stream, Stage 1.
  * Collects data related to memory management.
  *
- * Copyright (C) 2003 IBM Corporation, IBM Deutschland Entwicklung GmbH.
+ * Copyright (C) 2003,2006 IBM Corporation, IBM Deutschland Entwicklung GmbH.
  *
- * Author: Gerald Schaefer <geraldsc@de.ibm.com>
+ * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
  */
 
 #include <linux/config.h>
@@ -152,6 +152,7 @@ static struct appldata_ops ops = {
 	.callback  = &appldata_get_mem_data,
 	.data      = &appldata_mem_data,
 	.owner     = THIS_MODULE,
+	.mod_lvl   = {0xF0, 0xF0},		/* EBCDIC "00" */
 };

diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c

@@ -5,9 +5,9 @@
  * Collects accumulated network statistics (Packets received/transmitted,
  * dropped, errors, ...).
  *
- * Copyright (C) 2003 IBM Corporation, IBM Deutschland Entwicklung GmbH.
+ * Copyright (C) 2003,2006 IBM Corporation, IBM Deutschland Entwicklung GmbH.
  *
- * Author: Gerald Schaefer <geraldsc@de.ibm.com>
+ * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
  */
 
 #include <linux/config.h>
@@ -152,6 +152,7 @@ static struct appldata_ops ops = {
 	.callback  = &appldata_get_net_sum_data,
 	.data      = &appldata_net_sum_data,
 	.owner     = THIS_MODULE,
+	.mod_lvl   = {0xF0, 0xF0},		/* EBCDIC "00" */
 };

diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c

@@ -4,9 +4,9 @@
  * Data gathering module for Linux-VM Monitor Stream, Stage 1.
  * Collects misc. OS related data (CPU utilization, running processes).
  *
- * Copyright (C) 2003 IBM Corporation, IBM Deutschland Entwicklung GmbH.
+ * Copyright (C) 2003,2006 IBM Corporation, IBM Deutschland Entwicklung GmbH.
  *
- * Author: Gerald Schaefer <geraldsc@de.ibm.com>
+ * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
  */
 
 #include <linux/config.h>
@@ -44,11 +44,14 @@ struct appldata_os_per_cpu {
 	u32 per_cpu_system;	/* ... spent in kernel mode */
 	u32 per_cpu_idle;	/* ... spent in idle mode */
 
-// New in 2.6 -->
+	/* New in 2.6 */
 	u32 per_cpu_irq;	/* ... spent in interrupts */
 	u32 per_cpu_softirq;	/* ... spent in softirqs */
 	u32 per_cpu_iowait;	/* ... spent while waiting for I/O */
-// <-- New in 2.6
+
+	/* New in modification level 01 */
+	u32 per_cpu_steal;	/* ... stolen by hypervisor */
+	u32 cpu_id;		/* number of this CPU */
 } __attribute__((packed));
 
 struct appldata_os_data {
@@ -68,10 +71,9 @@ struct appldata_os_data {
 	u32 avenrun[3];		/* average nr. of running processes during */
 				/* the last 1, 5 and 15 minutes */
 
-// New in 2.6 -->
+	/* New in 2.6 */
 	u32 nr_iowait;		/* number of blocked threads
 				   (waiting for I/O) */
-// <-- New in 2.6
 
 	/* per cpu data */
 	struct appldata_os_per_cpu os_cpu[0];
@@ -79,6 +81,14 @@ struct appldata_os_data {
 
 static struct appldata_os_data *appldata_os_data;
 
+static struct appldata_ops ops = {
+	.ctl_nr    = CTL_APPLDATA_OS,
+	.name      = "os",
+	.record_nr = APPLDATA_RECORD_OS_ID,
+	.owner     = THIS_MODULE,
+	.mod_lvl   = {0xF0, 0xF1},		/* EBCDIC "01" */
+};
+
 
 static inline void appldata_print_debug(struct appldata_os_data *os_data)
 {
@@ -100,15 +110,17 @@ static inline void appldata_print_debug(struct appldata_os_data *os_data)
 	P_DEBUG("nr_cpus = %u\n", os_data->nr_cpus);
 	for (i = 0; i < os_data->nr_cpus; i++) {
 		P_DEBUG("cpu%u : user = %u, nice = %u, system = %u, "
-			"idle = %u, irq = %u, softirq = %u, iowait = %u\n",
-			i,
+			"idle = %u, irq = %u, softirq = %u, iowait = %u, "
+			"steal = %u\n",
+			os_data->os_cpu[i].cpu_id,
 			os_data->os_cpu[i].per_cpu_user,
 			os_data->os_cpu[i].per_cpu_nice,
 			os_data->os_cpu[i].per_cpu_system,
 			os_data->os_cpu[i].per_cpu_idle,
 			os_data->os_cpu[i].per_cpu_irq,
 			os_data->os_cpu[i].per_cpu_softirq,
-			os_data->os_cpu[i].per_cpu_iowait);
+			os_data->os_cpu[i].per_cpu_iowait,
+			os_data->os_cpu[i].per_cpu_steal);
 	}
 
 	P_DEBUG("sync_count_1 = %u\n", os_data->sync_count_1);
@@ -123,14 +135,13 @@ static inline void appldata_print_debug(struct appldata_os_data *os_data)
  */
 static void appldata_get_os_data(void *data)
 {
-	int i, j;
+	int i, j, rc;
 	struct appldata_os_data *os_data;
+	unsigned int new_size;
 
 	os_data = data;
 	os_data->sync_count_1++;
 
-	os_data->nr_cpus = num_online_cpus();
 	os_data->nr_threads = nr_threads;
 	os_data->nr_running = nr_running();
 	os_data->nr_iowait = nr_iowait();
@@ -154,9 +165,44 @@ static void appldata_get_os_data(void *data)
 			cputime_to_jiffies(kstat_cpu(i).cpustat.softirq);
 		os_data->os_cpu[j].per_cpu_iowait =
 			cputime_to_jiffies(kstat_cpu(i).cpustat.iowait);
+		os_data->os_cpu[j].per_cpu_steal =
+			cputime_to_jiffies(kstat_cpu(i).cpustat.steal);
+		os_data->os_cpu[j].cpu_id = i;
 		j++;
 	}
+	os_data->nr_cpus = j;
+
+	new_size = sizeof(struct appldata_os_data) +
+		   (os_data->nr_cpus * sizeof(struct appldata_os_per_cpu));
+	if (ops.size != new_size) {
+		if (ops.active) {
+			rc = appldata_diag(APPLDATA_RECORD_OS_ID,
+					   APPLDATA_START_INTERVAL_REC,
+					   (unsigned long) ops.data, new_size,
+					   ops.mod_lvl);
+			if (rc != 0) {
+				P_ERROR("os: START NEW DIAG 0xDC failed, "
+					"return code: %d, new size = %i\n", rc,
+					new_size);
+				P_INFO("os: stopping old record now\n");
+			} else
+				P_INFO("os: new record size = %i\n", new_size);
+
+			rc = appldata_diag(APPLDATA_RECORD_OS_ID,
+					   APPLDATA_STOP_REC,
+					   (unsigned long) ops.data, ops.size,
+					   ops.mod_lvl);
+			if (rc != 0)
+				P_ERROR("os: STOP OLD DIAG 0xDC failed, "
+					"return code: %d, old size = %i\n", rc,
+					ops.size);
+			else
+				P_INFO("os: old record size = %i stopped\n",
+				       ops.size);
+		}
+		ops.size = new_size;
+	}
+
 	os_data->timestamp = get_clock();
 	os_data->sync_count_2++;
+
 #ifdef APPLDATA_DEBUG
@@ -165,15 +211,6 @@ static void appldata_get_os_data(void *data)
 }
 
-static struct appldata_ops ops = {
-	.ctl_nr    = CTL_APPLDATA_OS,
-	.name      = "os",
-	.record_nr = APPLDATA_RECORD_OS_ID,
-	.callback  = &appldata_get_os_data,
-	.owner     = THIS_MODULE,
-};
-
 /*
  * appldata_os_init()
  *
@@ -181,26 +218,25 @@ static struct appldata_ops ops = {
  */
 static int __init appldata_os_init(void)
 {
-	int rc, size;
+	int rc, max_size;
 
-	size = sizeof(struct appldata_os_data) +
+	max_size = sizeof(struct appldata_os_data) +
 		(NR_CPUS * sizeof(struct appldata_os_per_cpu));
-	if (size > APPLDATA_MAX_REC_SIZE) {
-		P_ERROR("Size of record = %i, bigger than maximum (%i)!\n",
-			size, APPLDATA_MAX_REC_SIZE);
+	if (max_size > APPLDATA_MAX_REC_SIZE) {
+		P_ERROR("Max. size of OS record = %i, bigger than maximum "
			"record size (%i)\n", max_size, APPLDATA_MAX_REC_SIZE);
 		rc = -ENOMEM;
 		goto out;
 	}
-	P_DEBUG("sizeof(os) = %i, sizeof(os_cpu) = %lu\n", size,
+	P_DEBUG("max. sizeof(os) = %i, sizeof(os_cpu) = %lu\n", max_size,
 		sizeof(struct appldata_os_per_cpu));
 
-	appldata_os_data = kmalloc(size, GFP_DMA);
+	appldata_os_data = kzalloc(max_size, GFP_DMA);
 	if (appldata_os_data == NULL) {
 		P_ERROR("No memory for %s!\n", ops.name);
 		rc = -ENOMEM;
 		goto out;
 	}
-	memset(appldata_os_data, 0, size);
 
 	appldata_os_data->per_cpu_size = sizeof(struct appldata_os_per_cpu);
 	appldata_os_data->cpu_offset   = offsetof(struct appldata_os_data,
@@ -208,7 +244,7 @@ static int __init appldata_os_init(void)
 	P_DEBUG("cpu offset = %u\n", appldata_os_data->cpu_offset);
 
 	ops.data = appldata_os_data;
-	ops.size = size;
+	ops.callback  = &appldata_get_os_data;
 	rc = appldata_register_ops(&ops);
 	if (rc != 0) {
 		P_ERROR("Error registering ops, rc = %i\n", rc);

diff --git a/arch/s390/kernel/binfmt_elf32.c b/arch/s390/kernel/binfmt_elf32.c

@@ -177,11 +177,6 @@ struct elf_prpsinfo32
 
 #include <linux/highuid.h>
 
-#undef NEW_TO_OLD_UID
-#undef NEW_TO_OLD_GID
-#define NEW_TO_OLD_UID(uid) ((uid) > 65535) ? (u16)overflowuid : (u16)(uid)
-#define NEW_TO_OLD_GID(gid) ((gid) > 65535) ? (u16)overflowgid : (u16)(gid)
-
 #define elf_addr_t	u32
 /*
 #define init_elf_binfmt init_elf32_binfmt

diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S

@@ -93,13 +93,22 @@ STACK_SIZE  = 1 << STACK_SHIFT
 	l	%r13,__LC_SVC_NEW_PSW+4	# load &system_call to %r13
 	.endm
 
-	.macro	SAVE_ALL psworg,savearea,sync
+	.macro	SAVE_ALL_SYNC psworg,savearea
 	la	%r12,\psworg
-	.if	\sync
 	tm	\psworg+1,0x01		# test problem state bit
 	bz	BASED(2f)		# skip stack setup save
 	l	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
-	.else
+#ifdef CONFIG_CHECK_STACK
+	b	BASED(3f)
+2:	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
+	bz	BASED(stack_overflow)
+3:
+#endif
+2:
+	.endm
+
+	.macro	SAVE_ALL_ASYNC psworg,savearea
+	la	%r12,\psworg
 	tm	\psworg+1,0x01		# test problem state bit
 	bnz	BASED(1f)		# from user -> load async stack
 	clc	\psworg+4(4),BASED(.Lcritical_end)
@@ -115,7 +124,6 @@ STACK_SIZE  = 1 << STACK_SHIFT
 	sra	%r14,STACK_SHIFT
 	be	BASED(2f)
 1:	l	%r15,__LC_ASYNC_STACK
-	.endif
 #ifdef CONFIG_CHECK_STACK
 	b	BASED(3f)
 2:	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
@@ -196,7 +204,7 @@ system_call:
 	STORE_TIMER __LC_SYNC_ENTER_TIMER
 sysc_saveall:
 	SAVE_ALL_BASE __LC_SAVE_AREA
-	SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+	SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
 	CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
 	lh	%r7,0x8a		# get svc number from lowcore
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
@@ -425,7 +433,7 @@ pgm_check_handler:
 	SAVE_ALL_BASE __LC_SAVE_AREA
 	tm	__LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
 	bnz	BASED(pgm_per)		# got per exception -> special case
-	SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
+	SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
 	CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
@@ -464,7 +472,7 @@ pgm_per:
 # Normal per exception
 #
 pgm_per_std:
-	SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
+	SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
 	CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
@@ -490,7 +498,7 @@ pgm_no_vtime2:
 # it was a single stepped SVC that is causing all the trouble
 #
 pgm_svcper:
-	SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+	SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
 	CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
@@ -519,7 +527,7 @@ io_int_handler:
 	STORE_TIMER __LC_ASYNC_ENTER_TIMER
 	stck	__LC_INT_CLOCK
 	SAVE_ALL_BASE __LC_SAVE_AREA+16
-	SAVE_ALL __LC_IO_OLD_PSW,__LC_SAVE_AREA+16,0
+	SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
 	CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
@@ -631,7 +639,7 @@ ext_int_handler:
 	STORE_TIMER __LC_ASYNC_ENTER_TIMER
 	stck	__LC_INT_CLOCK
 	SAVE_ALL_BASE __LC_SAVE_AREA+16
-	SAVE_ALL __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16,0
+	SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
 	CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
@@ -657,21 +665,31 @@ __critical_end:
 	.globl mcck_int_handler
 mcck_int_handler:
 	spt	__LC_CPU_TIMER_SAVE_AREA	# revalidate cpu timer
-	mvc	__LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA
 	lm	%r0,%r15,__LC_GPREGS_SAVE_AREA	# revalidate gprs
 	SAVE_ALL_BASE __LC_SAVE_AREA+32
 	la	%r12,__LC_MCK_OLD_PSW
 	tm	__LC_MCCK_CODE,0x80	# system damage?
 	bo	BASED(mcck_int_main)	# yes -> rest of mcck code invalid
-	tm	__LC_MCCK_CODE+5,0x02	# stored cpu timer value valid?
-	bo	BASED(0f)
-	spt	__LC_LAST_UPDATE_TIMER	# revalidate cpu timer
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
-	mvc	__LC_ASYNC_ENTER_TIMER(8),__LC_LAST_UPDATE_TIMER
-	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_LAST_UPDATE_TIMER
-	mvc	__LC_EXIT_TIMER(8),__LC_LAST_UPDATE_TIMER
+	mvc	__LC_SAVE_AREA+52(8),__LC_ASYNC_ENTER_TIMER
+	mvc	__LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA
+	tm	__LC_MCCK_CODE+5,0x02	# stored cpu timer value valid?
+	bo	BASED(1f)
+	la	%r14,__LC_SYNC_ENTER_TIMER
+	clc	0(8,%r14),__LC_ASYNC_ENTER_TIMER
+	bl	BASED(0f)
+	la	%r14,__LC_ASYNC_ENTER_TIMER
+0:	clc	0(8,%r14),__LC_EXIT_TIMER
+	bl	BASED(0f)
+	la	%r14,__LC_EXIT_TIMER
+0:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
+	bl	BASED(0f)
+	la	%r14,__LC_LAST_UPDATE_TIMER
+0:	spt	0(%r14)
+	mvc	__LC_ASYNC_ENTER_TIMER(8),0(%r14)
+1:
 #endif
-0:	tm	__LC_MCCK_CODE+2,0x09	# mwp + ia of old psw valid?
+	tm	__LC_MCCK_CODE+2,0x09	# mwp + ia of old psw valid?
 	bno	BASED(mcck_int_main)	# no -> skip cleanup critical
 	tm	__LC_MCK_OLD_PSW+1,0x01	# test problem state bit
 	bnz	BASED(mcck_int_main)	# from user -> load async stack
@@ -691,7 +709,7 @@ mcck_int_main:
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	tm	__LC_MCCK_CODE+2,0x08	# mwp of old psw valid?
 	bno	BASED(mcck_no_vtime)	# no -> skip cleanup critical
-	tm	__LC_MCK_OLD_PSW+1,0x01	# interrupting from user ?
+	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
 	bz	BASED(mcck_no_vtime)
 	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
 	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
@@ -715,6 +733,20 @@ mcck_no_vtime:
 	l	%r1,BASED(.Ls390_handle_mcck)
 	basr	%r14,%r1		# call machine check handler
 mcck_return:
+	mvc	__LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW
+	ni	__LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	mvc	__LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+52
+	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
+	bno	BASED(0f)
+	lm	%r0,%r15,SP_R0(%r15)	# load gprs 0-15
+	stpt	__LC_EXIT_TIMER
+	lpsw	__LC_RETURN_MCCK_PSW	# back to caller
+0:
+#endif
+	lm	%r0,%r15,SP_R0(%r15)	# load gprs 0-15
+	lpsw	__LC_RETURN_MCCK_PSW	# back to caller
-	RESTORE_ALL __LC_RETURN_MCCK_PSW,0
 
 #ifdef CONFIG_SMP
@@ -781,6 +813,8 @@ cleanup_table_sysc_leave:
 	.long	sysc_leave + 0x80000000, sysc_work_loop + 0x80000000
 cleanup_table_sysc_work_loop:
 	.long	sysc_work_loop + 0x80000000, sysc_reschedule + 0x80000000
+cleanup_table_io_return:
+	.long	io_return + 0x80000000, io_leave + 0x80000000
 cleanup_table_io_leave:
 	.long	io_leave + 0x80000000, io_done + 0x80000000
 cleanup_table_io_work_loop:
@@ -806,6 +840,11 @@ cleanup_critical:
 	bl	BASED(0f)
 	clc	4(4,%r12),BASED(cleanup_table_sysc_work_loop+4)
 	bl	BASED(cleanup_sysc_return)
+0:
+	clc	4(4,%r12),BASED(cleanup_table_io_return)
+	bl	BASED(0f)
+	clc	4(4,%r12),BASED(cleanup_table_io_return+4)
+	bl	BASED(cleanup_io_return)
 0:
 	clc	4(4,%r12),BASED(cleanup_table_io_leave)
 	bl	BASED(0f)
@@ -839,7 +878,7 @@ cleanup_system_call:
 	mvc	__LC_SAVE_AREA(16),0(%r12)
 0:	st	%r13,4(%r12)
 	st	%r12,__LC_SAVE_AREA+48	# argh
-	SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+	SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
 	CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
 	l	%r12,__LC_SAVE_AREA+48	# argh
 	st	%r15,12(%r12)
@@ -980,7 +1019,6 @@ cleanup_io_leave_insn:
 	.long	cleanup_critical
 
 #define SYSCALL(esa,esame,emu)	.long esa
-	.globl	sys_call_table
 sys_call_table:
 #include "syscalls.S"
 #undef SYSCALL

diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S

@@ -87,13 +87,22 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
 	larl	%r13,system_call
 	.endm
 
-	.macro	SAVE_ALL psworg,savearea,sync
+	.macro	SAVE_ALL_SYNC psworg,savearea
 	la	%r12,\psworg
-	.if	\sync
 	tm	\psworg+1,0x01		# test problem state bit
 	jz	2f			# skip stack setup save
 	lg	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
-	.else
+#ifdef CONFIG_CHECK_STACK
+	j	3f
+2:	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
+	jz	stack_overflow
+3:
+#endif
+2:
+	.endm
+
+	.macro	SAVE_ALL_ASYNC psworg,savearea
+	la	%r12,\psworg
 	tm	\psworg+1,0x01		# test problem state bit
 	jnz	1f			# from user -> load kernel stack
 	clc	\psworg+8(8),BASED(.Lcritical_end)
@@ -108,7 +117,6 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
 	srag	%r14,%r14,STACK_SHIFT
 	jz	2f
 1:	lg	%r15,__LC_ASYNC_STACK	# load async stack
-	.endif
 #ifdef CONFIG_CHECK_STACK
 	j	3f
 2:	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
@@ -187,7 +195,7 @@ system_call:
 	STORE_TIMER __LC_SYNC_ENTER_TIMER
 sysc_saveall:
 	SAVE_ALL_BASE __LC_SAVE_AREA
-	SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+	SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
 	CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
 	llgh	%r7,__LC_SVC_INT_CODE	# get svc number from lowcore
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
@@ -446,7 +454,7 @@ pgm_check_handler:
 	SAVE_ALL_BASE __LC_SAVE_AREA
 	tm	__LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
 	jnz	pgm_per			# got per exception -> special case
-	SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
+	SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
 	CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
@@ -485,7 +493,7 @@ pgm_per:
 # Normal per exception
 #
 pgm_per_std:
-	SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
+	SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
 	CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
@@ -511,7 +519,7 @@ pgm_no_vtime2:
 # it was a single stepped SVC that is causing all the trouble
 #
 pgm_svcper:
-	SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+	SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
 	CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
@@ -539,7 +547,7 @@ io_int_handler:
 	STORE_TIMER __LC_ASYNC_ENTER_TIMER
 	stck	__LC_INT_CLOCK
 	SAVE_ALL_BASE __LC_SAVE_AREA+32
-	SAVE_ALL __LC_IO_OLD_PSW,__LC_SAVE_AREA+32,0
+	SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+32
 	CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+32
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
@@ -647,7 +655,7 @@ ext_int_handler:
 	STORE_TIMER __LC_ASYNC_ENTER_TIMER
 	stck	__LC_INT_CLOCK
 	SAVE_ALL_BASE __LC_SAVE_AREA+32
-	SAVE_ALL __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32,0
+	SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32
 	CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
@@ -672,21 +680,32 @@ __critical_end:
 mcck_int_handler:
 	la	%r1,4095		# revalidate r1
 	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# revalidate cpu timer
-	mvc	__LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA-4095(%r1)
 	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
 	SAVE_ALL_BASE __LC_SAVE_AREA+64
 	la	%r12,__LC_MCK_OLD_PSW
 	tm	__LC_MCCK_CODE,0x80	# system damage?
 	jo	mcck_int_main		# yes -> rest of mcck code invalid
-	tm	__LC_MCCK_CODE+5,0x02	# stored cpu timer value valid?
-	jo	0f
-	spt	__LC_LAST_UPDATE_TIMER
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
-	mvc	__LC_ASYNC_ENTER_TIMER(8),__LC_LAST_UPDATE_TIMER
-	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_LAST_UPDATE_TIMER
-	mvc	__LC_EXIT_TIMER(8),__LC_LAST_UPDATE_TIMER
+	la	%r14,4095
+	mvc	__LC_SAVE_AREA+104(8),__LC_ASYNC_ENTER_TIMER
+	mvc	__LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA-4095(%r14)
+	tm	__LC_MCCK_CODE+5,0x02	# stored cpu timer value valid?
+	jo	1f
+	la	%r14,__LC_SYNC_ENTER_TIMER
+	clc	0(8,%r14),__LC_ASYNC_ENTER_TIMER
+	jl	0f
+	la	%r14,__LC_ASYNC_ENTER_TIMER
+0:	clc	0(8,%r14),__LC_EXIT_TIMER
+	jl	0f
+	la	%r14,__LC_EXIT_TIMER
+0:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
+	jl	0f
+	la	%r14,__LC_LAST_UPDATE_TIMER
+0:	spt	0(%r14)
+	mvc	__LC_ASYNC_ENTER_TIMER(8),0(%r14)
+1:
 #endif
-0:	tm	__LC_MCCK_CODE+2,0x09	# mwp + ia of old psw valid?
+	tm	__LC_MCCK_CODE+2,0x09	# mwp + ia of old psw valid?
 	jno	mcck_int_main		# no -> skip cleanup critical
 	tm	__LC_MCK_OLD_PSW+1,0x01	# test problem state bit
 	jnz	mcck_int_main		# from user -> load kernel stack
@@ -705,7 +724,7 @@ mcck_int_main:
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	tm	__LC_MCCK_CODE+2,0x08	# mwp of old psw valid?
 	jno	mcck_no_vtime		# no -> no timer update
-	tm	__LC_MCK_OLD_PSW+1,0x01	# interrupting from user ?
+	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
 	jz	mcck_no_vtime
 	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
 	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
@@ -727,7 +746,17 @@ mcck_no_vtime:
 	jno	mcck_return
 	brasl	%r14,s390_handle_mcck
 mcck_return:
-	RESTORE_ALL __LC_RETURN_MCCK_PSW,0
+	mvc	__LC_RETURN_MCCK_PSW(16),SP_PSW(%r15) # move return PSW
+	ni	__LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
+	lmg	%r0,%r15,SP_R0(%r15)	# load gprs 0-15
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	mvc	__LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+104
+	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
+	jno	0f
+	stpt	__LC_EXIT_TIMER
+0:
+#endif
+	lpswe	__LC_RETURN_MCCK_PSW	# back to caller
 
 #ifdef CONFIG_SMP
 /*
@@ -789,6 +818,8 @@ cleanup_table_sysc_leave:
 	.quad	sysc_leave, sysc_work_loop
 cleanup_table_sysc_work_loop:
 	.quad	sysc_work_loop, sysc_reschedule
+cleanup_table_io_return:
+	.quad	io_return, io_leave
 cleanup_table_io_leave:
 	.quad	io_leave, io_done
 cleanup_table_io_work_loop:
@@ -814,6 +845,11 @@ cleanup_critical:
 	jl	0f
 	clc	8(8,%r12),BASED(cleanup_table_sysc_work_loop+8)
 	jl	cleanup_sysc_return
+0:
+	clc	8(8,%r12),BASED(cleanup_table_io_return)
+	jl	0f
+	clc	8(8,%r12),BASED(cleanup_table_io_return+8)
+	jl	cleanup_io_return
 0:
 	clc	8(8,%r12),BASED(cleanup_table_io_leave)
 	jl	0f
@@ -847,7 +883,7 @@ cleanup_system_call:
 	mvc	__LC_SAVE_AREA(32),0(%r12)
 0:	stg	%r13,8(%r12)
 	stg	%r12,__LC_SAVE_AREA+96	# argh
-	SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+	SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
 	CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
 	lg	%r12,__LC_SAVE_AREA+96	# argh
 	stg	%r15,24(%r12)
@@ -957,7 +993,6 @@ cleanup_io_leave_insn:
 	.quad	__critical_end
 
 #define SYSCALL(esa,esame,emu)	.long esame
-	.globl	sys_call_table
 sys_call_table:
 #include "syscalls.S"
 #undef SYSCALL
@@ -965,7 +1000,6 @@ sys_call_table:
 
 #ifdef CONFIG_COMPAT
 #define SYSCALL(esa,esame,emu)	.long emu
-	.globl	sys_call_table_emu
 sys_call_table_emu:
 #include "syscalls.S"
 #undef SYSCALL

diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S

@@ -1,7 +1,7 @@
 /*
  *  arch/s390/kernel/head.S
  *
- * (C) Copyright IBM Corp. 1999, 2005
+ * Copyright (C) IBM Corp. 1999,2006
  *
  *   Author(s): Hartmut Penner <hp@de.ibm.com>
  *		Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -482,24 +482,23 @@ start:
 	.macro GET_IPL_DEVICE
 .Lget_ipl_device:
-	basr	%r12,0
-.LGID:	l	%r1,0xb8		# get sid
+	l	%r1,0xb8		# get sid
 	sll	%r1,15			# test if subchannel is enabled
 	srl	%r1,31
 	ltr	%r1,%r1
-	bz	0(%r14)			# subchannel disabled
+	bz	2f-.LPG1(%r13)		# subchannel disabled
 	l	%r1,0xb8
-	la	%r5,.Lipl_schib-.LGID(%r12)
+	la	%r5,.Lipl_schib-.LPG1(%r13)
 	stsch	0(%r5)			# get schib of subchannel
-	bnz	0(%r14)			# schib not available
+	bnz	2f-.LPG1(%r13)		# schib not available
 	tm	5(%r5),0x01		# devno valid?
-	bno	0(%r14)
-	la	%r6,ipl_parameter_flags-.LGID(%r12)
+	bno	2f-.LPG1(%r13)
+	la	%r6,ipl_parameter_flags-.LPG1(%r13)
 	oi	3(%r6),0x01		# set flag
-	la	%r2,ipl_devno-.LGID(%r12)
+	la	%r2,ipl_devno-.LPG1(%r13)
 	mvc	0(2,%r2),6(%r5)		# store devno
 	tm	4(%r5),0x80		# qdio capable device?
-	bno	0(%r14)
+	bno	2f-.LPG1(%r13)
 	oi	3(%r6),0x02		# set flag
 
 	# copy ipl parameters
@@ -523,7 +522,7 @@ start:
 	ar	%r2,%r1
 	sr	%r0,%r4
 	jne	1b
-	b	0(%r14)
+	b	2f-.LPG1(%r13)
 
 	.align 4
 .Lipl_schib:
@@ -537,6 +536,7 @@ ipl_parameter_flags:
 	.globl ipl_devno
 ipl_devno:
 	.word 0
+2:
 	.endm
 
 #ifdef CONFIG_64BIT

diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S

@@ -1,7 +1,7 @@
 /*
  *  arch/s390/kernel/head31.S
  *
- * (C) Copyright IBM Corp. 2005
+ * Copyright (C) IBM Corp. 2005,2006
  *
  *   Author(s): Hartmut Penner <hp@de.ibm.com>
  *		Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -16,12 +16,31 @@
 # or linload or SALIPL
 #
 	.org	0x10000
 startup:basr	%r13,0			# get base
-.LPG1:	l	%r1, .Lget_ipl_device_addr-.LPG1(%r13)
-	basr	%r14, %r1
+.LPG0:	l	%r13,0f-.LPG0(%r13)
+	b	0(%r13)
+0:	.long	startup_continue
+
+#
+# params at 10400 (setup.h)
+#
+	.org	PARMAREA
+	.long	0,0			# IPL_DEVICE
+	.long	0,RAMDISK_ORIGIN	# INITRD_START
+	.long	0,RAMDISK_SIZE		# INITRD_SIZE
+
+	.org	COMMAND_LINE
+	.byte	"root=/dev/ram0 ro"
+	.byte	0
+
+	.org	0x11000
+startup_continue:
+	basr	%r13,0			# get base
+.LPG1:	GET_IPL_DEVICE
 	lctl	%c0,%c15,.Lctl-.LPG1(%r13) # load control registers
-	la	%r12,_pstart-.LPG1(%r13) # pointer to parameter area
+	l	%r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area
 	# move IPL device to lowcore
 	mvc	__LC_IPLDEV(4),IPL_DEVICE-PARMAREA(%r12)
 #
@@ -51,8 +70,8 @@ startup:basr	%r13,0			# get base
 	a	%r1,__LC_EXT_NEW_PSW+4	# set handler
 	st	%r1,__LC_EXT_NEW_PSW+4
 
-	la	%r4,_pstart-.LPG1(%r13)	# %r4 is our index for sccb stuff
-	la	%r1, .Lsccb-PARMAREA(%r4)	# our sccb
+	l	%r4,.Lsccbaddr-.LPG1(%r13) # %r4 is our index for sccb stuff
+	lr	%r1,%r4			# our sccb
 	.insn	rre,0xb2200000,%r2,%r1	# service call
 	ipm	%r1
 	srl	%r1,28			# get cc code
@@ -63,7 +82,7 @@ startup:basr	%r13,0			# get base
 	be	.Lservicecall-.LPG1(%r13)
 	lpsw	.Lwaitsclp-.LPG1(%r13)
 .Lsclph:
-	lh	%r1,.Lsccbr-PARMAREA(%r4)
+	lh	%r1,.Lsccbr-.Lsccb(%r4)
 	chi	%r1,0x10		# 0x0010 is the sucess code
 	je	.Lprocsccb		# let's process the sccb
 	chi	%r1,0x1f0
@@ -74,7 +93,7 @@ startup:basr	%r13,0			# get base
 	b	.Lservicecall-.LPG1(%r13)
 .Lprocsccb:
 	lhi	%r1,0
-	icm	%r1,3,.Lscpincr1-PARMAREA(%r4)	# use this one if != 0
+	icm	%r1,3,.Lscpincr1-.Lsccb(%r4)	# use this one if != 0
 	jnz	.Lscnd
 	lhi	%r1,0x800		# otherwise report 2GB
 .Lscnd:
@@ -84,10 +103,10 @@ startup:basr	%r13,0			# get base
 	lr	%r1,%r3
 .Lno2gb:
 	xr	%r3,%r3			# same logic
-	ic	%r3,.Lscpa1-PARMAREA(%r4)
+	ic	%r3,.Lscpa1-.Lsccb(%r4)
 	chi	%r3,0x00
 	jne	.Lcompmem
-	l	%r3,.Lscpa2-PARMAREA(%r13)
+	l	%r3,.Lscpa2-.Lsccb(%r4)
 .Lcompmem:
 	mr	%r2,%r1			# mem in MB on 128-bit
 	l	%r1,.Lonemb-.LPG1(%r13)
@@ -95,8 +114,6 @@ startup:basr	%r13,0			# get base
 	b	.Lfchunk-.LPG1(%r13)
 
 	.align 4
-.Lget_ipl_device_addr:
-	.long	.Lget_ipl_device
 .Lpmask:
 	.byte	0
 	.align	8
@@ -242,6 +259,8 @@ startup:basr	%r13,0			# get base
 	.long	0			# cr13: home space segment table
 	.long	0xc0000000		# cr14: machine check handling off
 	.long	0			# cr15: linkage stack operations
+.Lduct:	.long	0,0,0,0,0,0,0,0
+	.long	0,0,0,0,0,0,0,0
 .Lpcmem:.long	0x00080000,0x80000000 + .Lchkmem
 .Lpcfpu:.long	0x00080000,0x80000000 + .Lchkfpu
 .Lpccsp:.long	0x00080000,0x80000000 + .Lchkcsp
@@ -252,25 +271,9 @@ startup:basr	%r13,0			# get base
 .Lmflags:.long	machine_flags
 .Lbss_bgn:  .long __bss_start
 .Lbss_end:  .long _end
+.Lparmaddr: .long PARMAREA
+.Lsccbaddr: .long .Lsccb
 
-	.org	PARMAREA-64
-.Lduct:	.long	0,0,0,0,0,0,0,0
-	.long	0,0,0,0,0,0,0,0
-
-#
-# params at 10400 (setup.h)
-#
-	.org	PARMAREA
-	.global	_pstart
-_pstart:
-	.long	0,0			# IPL_DEVICE
-	.long	0,RAMDISK_ORIGIN	# INITRD_START
-	.long	0,RAMDISK_SIZE		# INITRD_SIZE
-
-	.org	COMMAND_LINE
-	.byte	"root=/dev/ram0 ro"
-	.byte	0
-
-	.org	0x11000
+	.align	4096
 .Lsccb:
 	.hword	0x1000			# length, one page
 	.byte	0x00,0x00,0x00
@@ -287,18 +290,14 @@ _pstart:
 .Lscpincr2:
 	.quad	0x00
 	.fill	3984,1,0
-	.org	0x12000
-	.global	_pend
-_pend:
-
-	GET_IPL_DEVICE
+	.align	4096
 
 #ifdef CONFIG_SHARED_KERNEL
 	.org	0x100000
 #endif
 
 #
-# startup-code, running in virtual mode
+# startup-code, running in absolute addressing mode
 #
 	.globl	_stext
 _stext:	basr	%r13,0			# get base

diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S

@@ -1,7 +1,7 @@
 /*
  *  arch/s390/kernel/head64.S
  *
- * (C) Copyright IBM Corp. 1999,2005
+ * Copyright (C) IBM Corp. 1999,2006
  *
  *   Author(s): Hartmut Penner <hp@de.ibm.com>
  *		Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -15,18 +15,37 @@
 # this is called either by the ipl loader or directly by PSW restart
 # or linload or SALIPL
 #
 	.org	0x10000
 startup:basr	%r13,0			# get base
+.LPG0:	l	%r13,0f-.LPG0(%r13)
+	b	0(%r13)
+0:	.long	startup_continue
+
+#
+# params at 10400 (setup.h)
+#
+	.org	PARMAREA
+	.quad	0			# IPL_DEVICE
+	.quad	RAMDISK_ORIGIN		# INITRD_START
+	.quad	RAMDISK_SIZE		# INITRD_SIZE
+
+	.org	COMMAND_LINE
+	.byte	"root=/dev/ram0 ro"
+	.byte	0
+
+	.org	0x11000
+startup_continue:
+	basr	%r13,0			# get base
 .LPG1:	sll	%r13,1			# remove high order bit
 	srl	%r13,1
-	l	%r1,.Lget_ipl_device_addr-.LPG1(%r13)
-	basr	%r14,%r1
+	GET_IPL_DEVICE
 	lhi	%r1,1			# mode 1 = esame
 	slr	%r0,%r0			# set cpuid to zero
 	sigp	%r1,%r0,0x12		# switch to esame mode
 	sam64				# switch to 64 bit mode
 	lctlg	%c0,%c15,.Lctl-.LPG1(%r13)	# load control registers
-	larl	%r12,_pstart		# pointer to parameter area
+	lg	%r12,.Lparmaddr-.LPG1(%r13)# pointer to parameter area
 	# move IPL device to lowcore
 	mvc	__LC_IPLDEV(4),IPL_DEVICE+4-PARMAREA(%r12)
@@ -55,8 +74,8 @@ startup:basr	%r13,0			# get base
 	larl	%r1,.Lsclph
 	stg	%r1,__LC_EXT_NEW_PSW+8	# set handler
 
-	larl	%r4,_pstart		# %r4 is our index for sccb stuff
-	la	%r1,.Lsccb-PARMAREA(%r4)	# our sccb
+	larl	%r4,.Lsccb		# %r4 is our index for sccb stuff
+	lgr	%r1,%r4			# our sccb
 	.insn	rre,0xb2200000,%r2,%r1	# service call
 	ipm	%r1
 	srl	%r1,28			# get cc code
@@ -67,7 +86,7 @@ startup:basr	%r13,0			# get base
 	be	.Lservicecall-.LPG1(%r13)
 	lpswe	.Lwaitsclp-.LPG1(%r13)
 .Lsclph:
-	lh	%r1,.Lsccbr-PARMAREA(%r4)
+	lh	%r1,.Lsccbr-.Lsccb(%r4)
 	chi	%r1,0x10		# 0x0010 is the sucess code
 	je	.Lprocsccb		# let's process the sccb
 	chi	%r1,0x1f0
@@ -78,15 +97,15 @@ startup:basr	%r13,0			# get base
 	b	.Lservicecall-.LPG1(%r13)
 .Lprocsccb:
 	lghi	%r1,0
-	icm	%r1,3,.Lscpincr1-PARMAREA(%r4)	# use this one if != 0
+	icm	%r1,3,.Lscpincr1-.Lsccb(%r4)	# use this one if != 0
 	jnz	.Lscnd
-	lg	%r1,.Lscpincr2-PARMAREA(%r4)	# otherwise use this one
+	lg	%r1,.Lscpincr2-.Lsccb(%r4)	# otherwise use this one
 .Lscnd:
 	xr	%r3,%r3			# same logic
-	ic	%r3,.Lscpa1-PARMAREA(%r4)
+	ic	%r3,.Lscpa1-.Lsccb(%r4)
 	chi	%r3,0x00
 	jne	.Lcompmem
-	l	%r3,.Lscpa2-PARMAREA(%r13)
+	l	%r3,.Lscpa2-.Lsccb(%r4)
 .Lcompmem:
 	mlgr	%r2,%r1			# mem in MB on 128-bit
 	l	%r1,.Lonemb-.LPG1(%r13)
@@ -94,8 +113,6 @@ startup:basr	%r13,0			# get base
 	b	.Lfchunk-.LPG1(%r13)
 
 	.align 4
-.Lget_ipl_device_addr:
-	.long	.Lget_ipl_device
 .Lpmask:
 	.byte	0
 	.align	8
@@ -242,29 +259,16 @@ startup:basr	%r13,0			# get base
 	.quad	0			# cr13: home space segment table
 	.quad	0xc0000000		# cr14: machine check handling off
 	.quad	0			# cr15: linkage stack operations
+.Lduct:	.long	0,0,0,0,0,0,0,0
+	.long	0,0,0,0,0,0,0,0
 .Lpcmsk:.quad	0x0000000180000000
 .L4malign:.quad 0xffffffffffc00000
 .Lscan2g:.quad	0x80000000 + 0x20000 - 8	# 2GB + 128K - 8
 .Lnop:	.long	0x07000700
+.Lparmaddr:
+	.quad	PARMAREA
 
-	.org	PARMAREA-64
-.Lduct:	.long	0,0,0,0,0,0,0,0
-	.long	0,0,0,0,0,0,0,0
-
-#
-# params at 10400 (setup.h)
-#
-	.org	PARMAREA
-	.global	_pstart
-_pstart:
-	.quad	0			# IPL_DEVICE
-	.quad	RAMDISK_ORIGIN		# INITRD_START
-	.quad	RAMDISK_SIZE		# INITRD_SIZE
-
-	.org	COMMAND_LINE
-	.byte	"root=/dev/ram0 ro"
-	.byte	0
-
-	.org	0x11000
+	.align	4096
 .Lsccb:
 	.hword	0x1000			# length, one page
 	.byte	0x00,0x00,0x00
@@ -281,18 +285,14 @@ _pstart:
 .Lscpincr2:
 	.quad	0x00
 	.fill	3984,1,0
-	.org	0x12000
-	.global	_pend
-_pend:
-
-	GET_IPL_DEVICE
+	.align	4096
 
 #ifdef CONFIG_SHARED_KERNEL
 	.org	0x100000
 #endif
 
 #
-# startup-code, running in virtual mode
+# startup-code, running in absolute addressing mode
 #
 	.globl	_stext
 _stext:	basr	%r13,0			# get base
@@ -326,4 +326,3 @@ _stext:	basr	%r13,0			# get base
 	.align	8
 .Ldw:	.quad	0x0002000180000000,0x0000000000000000
 .Laregs:	.long	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
-

diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c

@@ -46,8 +46,6 @@ EXPORT_SYMBOL(__down_interruptible);
  */
 extern int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs);
 EXPORT_SYMBOL(dump_fpu);
-EXPORT_SYMBOL(overflowuid);
-EXPORT_SYMBOL(overflowgid);
 EXPORT_SYMBOL(empty_zero_page);
 
 /*

diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c

@@ -37,6 +37,7 @@
 #include <linux/seq_file.h>
 #include <linux/kernel_stat.h>
 #include <linux/device.h>
+#include <linux/notifier.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -115,6 +116,7 @@ void __devinit cpu_init (void)
  */
 char vmhalt_cmd[128] = "";
 char vmpoff_cmd[128] = "";
+char vmpanic_cmd[128] = "";
 
 static inline void strncpy_skip_quote(char *dst, char *src, int n)
 {
@@ -146,6 +148,38 @@ static int __init vmpoff_setup(char *str)
 
 __setup("vmpoff=", vmpoff_setup);
 
+static int vmpanic_notify(struct notifier_block *self, unsigned long event,
+			  void *data)
+{
+	if (MACHINE_IS_VM && strlen(vmpanic_cmd) > 0)
+		cpcmd(vmpanic_cmd, NULL, 0, NULL);
+
+	return NOTIFY_OK;
+}
+
+#define PANIC_PRI_VMPANIC	0
+
+static struct notifier_block vmpanic_nb = {
+	.notifier_call = vmpanic_notify,
+	.priority = PANIC_PRI_VMPANIC
+};
+
+static int __init vmpanic_setup(char *str)
+{
+	static int register_done __initdata = 0;
+
+	strncpy_skip_quote(vmpanic_cmd, str, 127);
+	vmpanic_cmd[127] = 0;
+	if (!register_done) {
+		register_done = 1;
+		atomic_notifier_chain_register(&panic_notifier_list,
+					       &vmpanic_nb);
+	}
+	return 1;
+}
+__setup("vmpanic=", vmpanic_setup);
+
 /*
  * condev= and conmode= setup parameter.
  */
@@ -289,19 +323,34 @@ void (*_machine_power_off)(void) = do_machine_power_off_nonsmp;
 
 void machine_restart(char *command)
 {
-	console_unblank();
+	if (!in_interrupt() || oops_in_progress)
+		/*
+		 * Only unblank the console if we are called in enabled
+		 * context or a bust_spinlocks cleared the way for us.
+		 */
+		console_unblank();
 	_machine_restart(command);
 }
 
 void machine_halt(void)
 {
-	console_unblank();
+	if (!in_interrupt() || oops_in_progress)
+		/*
+		 * Only unblank the console if we are called in enabled
+		 * context or a bust_spinlocks cleared the way for us.
+		 */
+		console_unblank();
 	_machine_halt();
 }
 
 void machine_power_off(void)
 {
-	console_unblank();
+	if (!in_interrupt() || oops_in_progress)
+		/*
+		 * Only unblank the console if we are called in enabled
+		 * context or a bust_spinlocks cleared the way for us.
+		 */
+		console_unblank();
 	_machine_power_off();
 }

diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c

@@ -150,13 +150,11 @@ void show_stack(struct task_struct *task, unsigned long *sp)
 	unsigned long *stack;
 	int i;
 
-	// debugging aid: "show_stack(NULL);" prints the
-	// back trace for this cpu.
-
 	if (!sp)
-		sp = task ? (unsigned long *) task->thread.ksp : __r15;
+		stack = task ? (unsigned long *) task->thread.ksp : __r15;
+	else
+		stack = sp;
 
-	stack = sp;
 	for (i = 0; i < kstack_depth_to_print; i++) {
 		if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
 			break;

diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c

@@ -95,7 +95,7 @@ dasd_alloc_device(void)
 	spin_lock_init(&device->mem_lock);
 	spin_lock_init(&device->request_queue_lock);
 	atomic_set (&device->tasklet_scheduled, 0);
 	tasklet_init(&device->tasklet,
 		     (void (*)(unsigned long)) dasd_tasklet,
 		     (unsigned long) device);
 	INIT_LIST_HEAD(&device->ccw_queue);
@@ -128,7 +128,7 @@ dasd_state_new_to_known(struct dasd_device *device)
 	int rc;
 
 	/*
 	 * As long as the device is not in state DASD_STATE_NEW we want to
 	 * keep the reference count > 0.
 	 */
 	dasd_get_device(device);
@@ -336,7 +336,7 @@ dasd_decrease_state(struct dasd_device *device)
 	if (device->state == DASD_STATE_ONLINE &&
 	    device->target <= DASD_STATE_READY)
 		dasd_state_online_to_ready(device);
 
 	if (device->state == DASD_STATE_READY &&
 	    device->target <= DASD_STATE_BASIC)
 		dasd_state_ready_to_basic(device);
@@ -348,7 +348,7 @@ dasd_decrease_state(struct dasd_device *device)
 	if (device->state == DASD_STATE_BASIC &&
 	    device->target <= DASD_STATE_KNOWN)
 		dasd_state_basic_to_known(device);
 
 	if (device->state == DASD_STATE_KNOWN &&
 	    device->target <= DASD_STATE_NEW)
 		dasd_state_known_to_new(device);
@@ -994,7 +994,7 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 			    ((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr);
 
 	/* Find out the appropriate era_action. */
 	if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC)
 		era = dasd_era_fatal;
 	else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
 		 irb->scsw.cstat == 0 &&
@@ -1004,7 +1004,7 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 		era = dasd_era_fatal; /* don't recover this request */
 	else if (irb->esw.esw0.erw.cons)
 		era = device->discipline->examine_error(cqr, irb);
 	else
 		era = dasd_era_recover;
 
 	DBF_DEV_EVENT(DBF_DEBUG, device, "era_code %d", era);
@@ -1287,7 +1287,7 @@ __dasd_start_head(struct dasd_device * device)
 }
 
 /*
 * Remove requests from the ccw queue.
 */
 static void
 dasd_flush_ccw_queue(struct dasd_device * device, int all)
@@ -1450,23 +1450,23 @@ dasd_sleep_on(struct dasd_ccw_req * cqr)
 	wait_queue_head_t wait_q;
 	struct dasd_device *device;
 	int rc;
 
 	device = cqr->device;
 	spin_lock_irq(get_ccwdev_lock(device->cdev));
 
 	init_waitqueue_head (&wait_q);
 	cqr->callback = dasd_wakeup_cb;
 	cqr->callback_data = (void *) &wait_q;
 	cqr->status = DASD_CQR_QUEUED;
 	list_add_tail(&cqr->list, &device->ccw_queue);
 
 	/* let the bh start the request to keep them in order */
 	dasd_schedule_bh(device);
 
 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
 
 	wait_event(wait_q, _wait_for_wakeup(cqr));
 
 	/* Request status is either done or failed. */
 	rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
 	return rc;
@@ -1568,7 +1568,7 @@ dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr)
 	wait_queue_head_t wait_q;
 	struct dasd_device *device;
 	int rc;
 
 	device = cqr->device;
 	spin_lock_irq(get_ccwdev_lock(device->cdev));
 	rc = _dasd_term_running_cqr(device);
@@ -1576,20 +1576,20 @@ dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr)
 		spin_unlock_irq(get_ccwdev_lock(device->cdev));
 		return rc;
 	}
 
 	init_waitqueue_head (&wait_q);
cqr->callback = dasd_wakeup_cb; cqr->callback = dasd_wakeup_cb;
cqr->callback_data = (void *) &wait_q; cqr->callback_data = (void *) &wait_q;
cqr->status = DASD_CQR_QUEUED; cqr->status = DASD_CQR_QUEUED;
list_add(&cqr->list, &device->ccw_queue); list_add(&cqr->list, &device->ccw_queue);
/* let the bh start the request to keep them in order */ /* let the bh start the request to keep them in order */
dasd_schedule_bh(device); dasd_schedule_bh(device);
spin_unlock_irq(get_ccwdev_lock(device->cdev)); spin_unlock_irq(get_ccwdev_lock(device->cdev));
wait_event(wait_q, _wait_for_wakeup(cqr)); wait_event(wait_q, _wait_for_wakeup(cqr));
/* Request status is either done or failed. */ /* Request status is either done or failed. */
rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0; rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
return rc; return rc;
@ -1725,7 +1725,7 @@ dasd_flush_request_queue(struct dasd_device * device)
if (!device->request_queue) if (!device->request_queue)
return; return;
spin_lock_irq(&device->request_queue_lock); spin_lock_irq(&device->request_queue_lock);
while (!list_empty(&device->request_queue->queue_head)) { while (!list_empty(&device->request_queue->queue_head)) {
req = elv_next_request(device->request_queue); req = elv_next_request(device->request_queue);
@ -1855,15 +1855,34 @@ dasd_generic_probe (struct ccw_device *cdev,
 {
 	int ret;
 
+	ret = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
+	if (ret) {
+		printk(KERN_WARNING
+		       "dasd_generic_probe: could not set ccw-device options "
+		       "for %s\n", cdev->dev.bus_id);
+		return ret;
+	}
 	ret = dasd_add_sysfs_files(cdev);
 	if (ret) {
 		printk(KERN_WARNING
 		       "dasd_generic_probe: could not add sysfs entries "
 		       "for %s\n", cdev->dev.bus_id);
-	} else {
-		cdev->handler = &dasd_int_handler;
+		return ret;
 	}
+	cdev->handler = &dasd_int_handler;
+
+	/*
+	 * Automatically online either all dasd devices (dasd_autodetect)
+	 * or all devices specified with dasd= parameters during
+	 * initial probe.
+	 */
+	if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) ||
+	    (dasd_autodetect && dasd_busid_known(cdev->dev.bus_id) != 0))
+		ret = ccw_device_set_online(cdev);
+	if (ret)
+		printk(KERN_WARNING
+		       "dasd_generic_probe: could not initially online "
+		       "ccw-device %s\n", cdev->dev.bus_id);
 	return ret;
 }
@ -1911,6 +1930,8 @@ dasd_generic_set_online (struct ccw_device *cdev,
 	struct dasd_device *device;
 	int rc;
 
+	/* first online clears initial online feature flag */
+	dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
 	device = dasd_create_device(cdev);
 	if (IS_ERR(device))
 		return PTR_ERR(device);
@ -2065,31 +2086,6 @@ dasd_generic_notify(struct ccw_device *cdev, int event)
return ret; return ret;
} }
/*
* Automatically online either all dasd devices (dasd_autodetect) or
* all devices specified with dasd= parameters.
*/
static int
__dasd_auto_online(struct device *dev, void *data)
{
struct ccw_device *cdev;
cdev = to_ccwdev(dev);
if (dasd_autodetect || dasd_busid_known(cdev->dev.bus_id) == 0)
ccw_device_set_online(cdev);
return 0;
}
void
dasd_generic_auto_online (struct ccw_driver *dasd_discipline_driver)
{
struct device_driver *drv;
drv = get_driver(&dasd_discipline_driver->driver);
driver_for_each_device(drv, NULL, NULL, __dasd_auto_online);
put_driver(drv);
}
static int __init static int __init
dasd_init(void) dasd_init(void)
@ -2170,23 +2166,4 @@ EXPORT_SYMBOL_GPL(dasd_generic_remove);
EXPORT_SYMBOL_GPL(dasd_generic_notify); EXPORT_SYMBOL_GPL(dasd_generic_notify);
EXPORT_SYMBOL_GPL(dasd_generic_set_online); EXPORT_SYMBOL_GPL(dasd_generic_set_online);
EXPORT_SYMBOL_GPL(dasd_generic_set_offline); EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
EXPORT_SYMBOL_GPL(dasd_generic_auto_online);
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* Emacs will notice this stuff at the end of the file and automatically
* adjust the settings for this buffer only. This must remain at the end
* of the file.
* ---------------------------------------------------------------------------
* Local variables:
* c-indent-level: 4
* c-brace-imaginary-offset: 0
* c-brace-offset: -4
* c-argdecl-indent: 4
* c-label-offset: -4
* c-continued-statement-offset: 4
* c-continued-brace-offset: 0
* indent-tabs-mode: 1
* tab-width: 8
* End:
*/
@ -1,4 +1,4 @@
/* /*
* File...........: linux/drivers/s390/block/dasd_3370_erp.c * File...........: linux/drivers/s390/block/dasd_3370_erp.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com> * Bugreports.to..: <Linux390@de.ibm.com>
@ -12,10 +12,10 @@
/* /*
* DASD_3370_ERP_EXAMINE * DASD_3370_ERP_EXAMINE
* *
* DESCRIPTION * DESCRIPTION
* Checks only for fatal/no/recover error. * Checks only for fatal/no/recover error.
* A detailed examination of the sense data is done later outside * A detailed examination of the sense data is done later outside
* the interrupt handler. * the interrupt handler.
* *
@ -23,7 +23,7 @@
* 'Chapter 7. 3370 Sense Data'. * 'Chapter 7. 3370 Sense Data'.
* *
* RETURN VALUES * RETURN VALUES
* dasd_era_none no error * dasd_era_none no error
* dasd_era_fatal for all fatal (unrecoverable errors) * dasd_era_fatal for all fatal (unrecoverable errors)
* dasd_era_recover for all others. * dasd_era_recover for all others.
*/ */
@ -82,22 +82,3 @@ dasd_3370_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb)
return dasd_era_recover; return dasd_era_recover;
} /* END dasd_3370_erp_examine */ } /* END dasd_3370_erp_examine */
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* Emacs will notice this stuff at the end of the file and automatically
* adjust the settings for this buffer only. This must remain at the end
* of the file.
* ---------------------------------------------------------------------------
* Local variables:
* c-indent-level: 4
* c-brace-imaginary-offset: 0
* c-brace-offset: -4
* c-argdecl-indent: 4
* c-label-offset: -4
* c-continued-statement-offset: 4
* c-continued-brace-offset: 0
* indent-tabs-mode: 1
* tab-width: 8
* End:
*/
@ -1,6 +1,6 @@
/* /*
* File...........: linux/drivers/s390/block/dasd_3990_erp.c * File...........: linux/drivers/s390/block/dasd_3990_erp.c
* Author(s)......: Horst Hummel <Horst.Hummel@de.ibm.com> * Author(s)......: Horst Hummel <Horst.Hummel@de.ibm.com>
* Holger Smolinski <Holger.Smolinski@de.ibm.com> * Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com> * Bugreports.to..: <Linux390@de.ibm.com>
* (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000, 2001 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000, 2001
@ -25,23 +25,23 @@ struct DCTL_data {
} __attribute__ ((packed)); } __attribute__ ((packed));
/* /*
***************************************************************************** *****************************************************************************
* SECTION ERP EXAMINATION * SECTION ERP EXAMINATION
***************************************************************************** *****************************************************************************
*/ */
/* /*
* DASD_3990_ERP_EXAMINE_24 * DASD_3990_ERP_EXAMINE_24
* *
* DESCRIPTION * DESCRIPTION
* Checks only for fatal (unrecoverable) error. * Checks only for fatal (unrecoverable) error.
* A detailed examination of the sense data is done later outside * A detailed examination of the sense data is done later outside
* the interrupt handler. * the interrupt handler.
* *
* Each bit configuration leading to an action code 2 (Exit with * Each bit configuration leading to an action code 2 (Exit with
* programming error or unusual condition indication) * programming error or unusual condition indication)
* are handled as fatal error´s. * are handled as fatal error´s.
* *
* All other configurations are handled as recoverable errors. * All other configurations are handled as recoverable errors.
* *
* RETURN VALUES * RETURN VALUES
@ -93,15 +93,15 @@ dasd_3990_erp_examine_24(struct dasd_ccw_req * cqr, char *sense)
} /* END dasd_3990_erp_examine_24 */ } /* END dasd_3990_erp_examine_24 */
/* /*
* DASD_3990_ERP_EXAMINE_32 * DASD_3990_ERP_EXAMINE_32
* *
* DESCRIPTION * DESCRIPTION
* Checks only for fatal/no/recoverable error. * Checks only for fatal/no/recoverable error.
* A detailed examination of the sense data is done later outside * A detailed examination of the sense data is done later outside
* the interrupt handler. * the interrupt handler.
* *
* RETURN VALUES * RETURN VALUES
* dasd_era_none no error * dasd_era_none no error
* dasd_era_fatal for all fatal (unrecoverable errors) * dasd_era_fatal for all fatal (unrecoverable errors)
* dasd_era_recover for recoverable others. * dasd_era_recover for recoverable others.
*/ */
@ -128,10 +128,10 @@ dasd_3990_erp_examine_32(struct dasd_ccw_req * cqr, char *sense)
} /* end dasd_3990_erp_examine_32 */ } /* end dasd_3990_erp_examine_32 */
/* /*
* DASD_3990_ERP_EXAMINE * DASD_3990_ERP_EXAMINE
* *
* DESCRIPTION * DESCRIPTION
* Checks only for fatal/no/recover error. * Checks only for fatal/no/recover error.
* A detailed examination of the sense data is done later outside * A detailed examination of the sense data is done later outside
* the interrupt handler. * the interrupt handler.
* *
@ -139,7 +139,7 @@ dasd_3990_erp_examine_32(struct dasd_ccw_req * cqr, char *sense)
* 'Chapter 7. Error Recovery Procedures'. * 'Chapter 7. Error Recovery Procedures'.
* *
* RETURN VALUES * RETURN VALUES
* dasd_era_none no error * dasd_era_none no error
* dasd_era_fatal for all fatal (unrecoverable errors) * dasd_era_fatal for all fatal (unrecoverable errors)
* dasd_era_recover for all others. * dasd_era_recover for all others.
*/ */
@ -178,18 +178,18 @@ dasd_3990_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb)
} /* END dasd_3990_erp_examine */ } /* END dasd_3990_erp_examine */
/* /*
***************************************************************************** *****************************************************************************
* SECTION ERP HANDLING * SECTION ERP HANDLING
***************************************************************************** *****************************************************************************
*/ */
/* /*
***************************************************************************** *****************************************************************************
* 24 and 32 byte sense ERP functions * 24 and 32 byte sense ERP functions
***************************************************************************** *****************************************************************************
*/ */
/* /*
* DASD_3990_ERP_CLEANUP * DASD_3990_ERP_CLEANUP
* *
* DESCRIPTION * DESCRIPTION
* Removes the already build but not necessary ERP request and sets * Removes the already build but not necessary ERP request and sets
@ -197,10 +197,10 @@ dasd_3990_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb)
* *
* PARAMETER * PARAMETER
* erp request to be blocked * erp request to be blocked
* final_status either DASD_CQR_DONE or DASD_CQR_FAILED * final_status either DASD_CQR_DONE or DASD_CQR_FAILED
* *
* RETURN VALUES * RETURN VALUES
* cqr original cqr * cqr original cqr
*/ */
static struct dasd_ccw_req * static struct dasd_ccw_req *
dasd_3990_erp_cleanup(struct dasd_ccw_req * erp, char final_status) dasd_3990_erp_cleanup(struct dasd_ccw_req * erp, char final_status)
@ -214,7 +214,7 @@ dasd_3990_erp_cleanup(struct dasd_ccw_req * erp, char final_status)
} /* end dasd_3990_erp_cleanup */ } /* end dasd_3990_erp_cleanup */
/* /*
* DASD_3990_ERP_BLOCK_QUEUE * DASD_3990_ERP_BLOCK_QUEUE
* *
* DESCRIPTION * DESCRIPTION
* Block the given device request queue to prevent from further * Block the given device request queue to prevent from further
@ -237,7 +237,7 @@ dasd_3990_erp_block_queue(struct dasd_ccw_req * erp, int expires)
} }
/* /*
* DASD_3990_ERP_INT_REQ * DASD_3990_ERP_INT_REQ
* *
* DESCRIPTION * DESCRIPTION
* Handles 'Intervention Required' error. * Handles 'Intervention Required' error.
@ -277,7 +277,7 @@ dasd_3990_erp_int_req(struct dasd_ccw_req * erp)
} /* end dasd_3990_erp_int_req */ } /* end dasd_3990_erp_int_req */
/* /*
* DASD_3990_ERP_ALTERNATE_PATH * DASD_3990_ERP_ALTERNATE_PATH
* *
* DESCRIPTION * DESCRIPTION
* Repeat the operation on a different channel path. * Repeat the operation on a different channel path.
@ -330,15 +330,15 @@ dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp)
* DASD_3990_ERP_DCTL * DASD_3990_ERP_DCTL
* *
* DESCRIPTION * DESCRIPTION
* Setup cqr to do the Diagnostic Control (DCTL) command with an * Setup cqr to do the Diagnostic Control (DCTL) command with an
* Inhibit Write subcommand (0x20) and the given modifier. * Inhibit Write subcommand (0x20) and the given modifier.
* *
* PARAMETER * PARAMETER
* erp pointer to the current (failed) ERP * erp pointer to the current (failed) ERP
* modifier subcommand modifier * modifier subcommand modifier
* *
* RETURN VALUES * RETURN VALUES
* dctl_cqr pointer to NEW dctl_cqr * dctl_cqr pointer to NEW dctl_cqr
* *
*/ */
static struct dasd_ccw_req * static struct dasd_ccw_req *
@ -386,7 +386,7 @@ dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
} /* end dasd_3990_erp_DCTL */ } /* end dasd_3990_erp_DCTL */
/* /*
* DASD_3990_ERP_ACTION_1 * DASD_3990_ERP_ACTION_1
* *
* DESCRIPTION * DESCRIPTION
* Setup ERP to do the ERP action 1 (see Reference manual). * Setup ERP to do the ERP action 1 (see Reference manual).
@ -415,7 +415,7 @@ dasd_3990_erp_action_1(struct dasd_ccw_req * erp)
} /* end dasd_3990_erp_action_1 */ } /* end dasd_3990_erp_action_1 */
/* /*
* DASD_3990_ERP_ACTION_4 * DASD_3990_ERP_ACTION_4
* *
* DESCRIPTION * DESCRIPTION
* Setup ERP to do the ERP action 4 (see Reference manual). * Setup ERP to do the ERP action 4 (see Reference manual).
@ -453,11 +453,11 @@ dasd_3990_erp_action_4(struct dasd_ccw_req * erp, char *sense)
if (sense[25] == 0x1D) { /* state change pending */ if (sense[25] == 0x1D) { /* state change pending */
DEV_MESSAGE(KERN_INFO, device, DEV_MESSAGE(KERN_INFO, device,
"waiting for state change pending " "waiting for state change pending "
"interrupt, %d retries left", "interrupt, %d retries left",
erp->retries); erp->retries);
dasd_3990_erp_block_queue(erp, 30*HZ); dasd_3990_erp_block_queue(erp, 30*HZ);
} else if (sense[25] == 0x1E) { /* busy */ } else if (sense[25] == 0x1E) { /* busy */
@ -469,9 +469,9 @@ dasd_3990_erp_action_4(struct dasd_ccw_req * erp, char *sense)
} else { } else {
/* no state change pending - retry */ /* no state change pending - retry */
DEV_MESSAGE (KERN_INFO, device, DEV_MESSAGE (KERN_INFO, device,
"redriving request immediately, " "redriving request immediately, "
"%d retries left", "%d retries left",
erp->retries); erp->retries);
erp->status = DASD_CQR_QUEUED; erp->status = DASD_CQR_QUEUED;
} }
@ -482,13 +482,13 @@ dasd_3990_erp_action_4(struct dasd_ccw_req * erp, char *sense)
} /* end dasd_3990_erp_action_4 */ } /* end dasd_3990_erp_action_4 */
/* /*
***************************************************************************** *****************************************************************************
* 24 byte sense ERP functions (only) * 24 byte sense ERP functions (only)
***************************************************************************** *****************************************************************************
*/ */
/* /*
* DASD_3990_ERP_ACTION_5 * DASD_3990_ERP_ACTION_5
* *
* DESCRIPTION * DESCRIPTION
* Setup ERP to do the ERP action 5 (see Reference manual). * Setup ERP to do the ERP action 5 (see Reference manual).
@ -523,7 +523,7 @@ dasd_3990_erp_action_5(struct dasd_ccw_req * erp)
* *
* PARAMETER * PARAMETER
* sense current sense data * sense current sense data
* *
* RETURN VALUES * RETURN VALUES
* void * void
*/ */
@ -1150,9 +1150,9 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
* PARAMETER * PARAMETER
* erp current erp_head * erp current erp_head
* sense current sense data * sense current sense data
* *
* RETURN VALUES * RETURN VALUES
* erp 'new' erp_head - pointer to new ERP * erp 'new' erp_head - pointer to new ERP
*/ */
static struct dasd_ccw_req * static struct dasd_ccw_req *
dasd_3990_erp_com_rej(struct dasd_ccw_req * erp, char *sense) dasd_3990_erp_com_rej(struct dasd_ccw_req * erp, char *sense)
@ -1185,7 +1185,7 @@ dasd_3990_erp_com_rej(struct dasd_ccw_req * erp, char *sense)
} /* end dasd_3990_erp_com_rej */ } /* end dasd_3990_erp_com_rej */
/* /*
* DASD_3990_ERP_BUS_OUT * DASD_3990_ERP_BUS_OUT
* *
* DESCRIPTION * DESCRIPTION
* Handles 24 byte 'Bus Out Parity Check' error. * Handles 24 byte 'Bus Out Parity Check' error.
@ -1483,7 +1483,7 @@ dasd_3990_erp_env_data(struct dasd_ccw_req * erp, char *sense)
* *
* PARAMETER * PARAMETER
* erp already added default ERP * erp already added default ERP
* *
* RETURN VALUES * RETURN VALUES
* erp new erp_head - pointer to new ERP * erp new erp_head - pointer to new ERP
*/ */
@ -1527,11 +1527,11 @@ dasd_3990_erp_file_prot(struct dasd_ccw_req * erp)
} /* end dasd_3990_erp_file_prot */ } /* end dasd_3990_erp_file_prot */
/* /*
* DASD_3990_ERP_INSPECT_24 * DASD_3990_ERP_INSPECT_24
* *
* DESCRIPTION * DESCRIPTION
* Does a detailed inspection of the 24 byte sense data * Does a detailed inspection of the 24 byte sense data
* and sets up a related error recovery action. * and sets up a related error recovery action.
* *
* PARAMETER * PARAMETER
* sense sense data of the actual error * sense sense data of the actual error
@ -1602,13 +1602,13 @@ dasd_3990_erp_inspect_24(struct dasd_ccw_req * erp, char *sense)
} /* END dasd_3990_erp_inspect_24 */ } /* END dasd_3990_erp_inspect_24 */
/* /*
***************************************************************************** *****************************************************************************
* 32 byte sense ERP functions (only) * 32 byte sense ERP functions (only)
***************************************************************************** *****************************************************************************
*/ */
/* /*
* DASD_3990_ERPACTION_10_32 * DASD_3990_ERPACTION_10_32
* *
* DESCRIPTION * DESCRIPTION
* Handles 32 byte 'Action 10' of Single Program Action Codes. * Handles 32 byte 'Action 10' of Single Program Action Codes.
@ -1616,7 +1616,7 @@ dasd_3990_erp_inspect_24(struct dasd_ccw_req * erp, char *sense)
* *
* PARAMETER * PARAMETER
* erp current erp_head * erp current erp_head
* sense current sense data * sense current sense data
* RETURN VALUES * RETURN VALUES
* erp modified erp_head * erp modified erp_head
*/ */
@ -1640,18 +1640,18 @@ dasd_3990_erp_action_10_32(struct dasd_ccw_req * erp, char *sense)
* *
* DESCRIPTION * DESCRIPTION
* Handles 32 byte 'Action 1B' of Single Program Action Codes. * Handles 32 byte 'Action 1B' of Single Program Action Codes.
* A write operation could not be finished because of an unexpected * A write operation could not be finished because of an unexpected
* condition. * condition.
* The already created 'default erp' is used to get the link to * The already created 'default erp' is used to get the link to
* the erp chain, but it can not be used for this recovery * the erp chain, but it can not be used for this recovery
* action because it contains no DE/LO data space. * action because it contains no DE/LO data space.
* *
* PARAMETER * PARAMETER
* default_erp already added default erp. * default_erp already added default erp.
* sense current sense data * sense current sense data
* *
* RETURN VALUES * RETURN VALUES
* erp new erp or * erp new erp or
* default_erp in case of imprecise ending or error * default_erp in case of imprecise ending or error
*/ */
static struct dasd_ccw_req * static struct dasd_ccw_req *
@ -1789,16 +1789,16 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
* DASD_3990_UPDATE_1B * DASD_3990_UPDATE_1B
* *
* DESCRIPTION * DESCRIPTION
* Handles the update to the 32 byte 'Action 1B' of Single Program * Handles the update to the 32 byte 'Action 1B' of Single Program
* Action Codes in case the first action was not successful. * Action Codes in case the first action was not successful.
* The already created 'previous_erp' is the currently not successful * The already created 'previous_erp' is the currently not successful
* ERP. * ERP.
* *
* PARAMETER * PARAMETER
* previous_erp already created previous erp. * previous_erp already created previous erp.
* sense current sense data * sense current sense data
* RETURN VALUES * RETURN VALUES
* erp modified erp * erp modified erp
*/ */
static struct dasd_ccw_req * static struct dasd_ccw_req *
dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense) dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
@ -1897,7 +1897,7 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
} /* end dasd_3990_update_1B */ } /* end dasd_3990_update_1B */
/* /*
* DASD_3990_ERP_COMPOUND_RETRY * DASD_3990_ERP_COMPOUND_RETRY
* *
* DESCRIPTION * DESCRIPTION
* Handles the compound ERP action retry code. * Handles the compound ERP action retry code.
@ -1943,7 +1943,7 @@ dasd_3990_erp_compound_retry(struct dasd_ccw_req * erp, char *sense)
} /* end dasd_3990_erp_compound_retry */ } /* end dasd_3990_erp_compound_retry */
/* /*
* DASD_3990_ERP_COMPOUND_PATH * DASD_3990_ERP_COMPOUND_PATH
* *
* DESCRIPTION * DESCRIPTION
* Handles the compound ERP action for retry on alternate * Handles the compound ERP action for retry on alternate
@ -1965,7 +1965,7 @@ dasd_3990_erp_compound_path(struct dasd_ccw_req * erp, char *sense)
dasd_3990_erp_alternate_path(erp); dasd_3990_erp_alternate_path(erp);
if (erp->status == DASD_CQR_FAILED) { if (erp->status == DASD_CQR_FAILED) {
/* reset the lpm and the status to be able to /* reset the lpm and the status to be able to
* try further actions. */ * try further actions. */
erp->lpm = 0; erp->lpm = 0;
@ -1980,7 +1980,7 @@ dasd_3990_erp_compound_path(struct dasd_ccw_req * erp, char *sense)
} /* end dasd_3990_erp_compound_path */ } /* end dasd_3990_erp_compound_path */
/* /*
* DASD_3990_ERP_COMPOUND_CODE * DASD_3990_ERP_COMPOUND_CODE
* *
* DESCRIPTION * DESCRIPTION
* Handles the compound ERP action for retry code. * Handles the compound ERP action for retry code.
@ -2001,18 +2001,18 @@ dasd_3990_erp_compound_code(struct dasd_ccw_req * erp, char *sense)
switch (sense[28]) { switch (sense[28]) {
case 0x17: case 0x17:
/* issue a Diagnostic Control command with an /* issue a Diagnostic Control command with an
* Inhibit Write subcommand and controler modifier */ * Inhibit Write subcommand and controler modifier */
erp = dasd_3990_erp_DCTL(erp, 0x20); erp = dasd_3990_erp_DCTL(erp, 0x20);
break; break;
case 0x25: case 0x25:
/* wait for 5 seconds and retry again */ /* wait for 5 seconds and retry again */
erp->retries = 1; erp->retries = 1;
dasd_3990_erp_block_queue (erp, 5*HZ); dasd_3990_erp_block_queue (erp, 5*HZ);
break; break;
default: default:
/* should not happen - continue */ /* should not happen - continue */
break; break;
@ -2026,7 +2026,7 @@ dasd_3990_erp_compound_code(struct dasd_ccw_req * erp, char *sense)
} /* end dasd_3990_erp_compound_code */ } /* end dasd_3990_erp_compound_code */
/* /*
* DASD_3990_ERP_COMPOUND_CONFIG * DASD_3990_ERP_COMPOUND_CONFIG
* *
* DESCRIPTION * DESCRIPTION
* Handles the compound ERP action for configruation * Handles the compound ERP action for configruation
@ -2063,10 +2063,10 @@ dasd_3990_erp_compound_config(struct dasd_ccw_req * erp, char *sense)
} /* end dasd_3990_erp_compound_config */ } /* end dasd_3990_erp_compound_config */
/* /*
* DASD_3990_ERP_COMPOUND * DASD_3990_ERP_COMPOUND
* *
* DESCRIPTION * DESCRIPTION
* Does the further compound program action if * Does the further compound program action if
* compound retry was not successful. * compound retry was not successful.
* *
* PARAMETER * PARAMETER
@ -2110,11 +2110,11 @@ dasd_3990_erp_compound(struct dasd_ccw_req * erp, char *sense)
} /* end dasd_3990_erp_compound */ } /* end dasd_3990_erp_compound */
/* /*
* DASD_3990_ERP_INSPECT_32 * DASD_3990_ERP_INSPECT_32
* *
* DESCRIPTION * DESCRIPTION
* Does a detailed inspection of the 32 byte sense data * Does a detailed inspection of the 32 byte sense data
* and sets up a related error recovery action. * and sets up a related error recovery action.
* *
* PARAMETER * PARAMETER
* sense sense data of the actual error * sense sense data of the actual error
@ -2228,9 +2228,9 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
} /* end dasd_3990_erp_inspect_32 */ } /* end dasd_3990_erp_inspect_32 */
/* /*
***************************************************************************** *****************************************************************************
* main ERP control fuctions (24 and 32 byte sense) * main ERP control fuctions (24 and 32 byte sense)
***************************************************************************** *****************************************************************************
*/ */
/* /*
@ -2243,7 +2243,7 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
* PARAMETER * PARAMETER
* erp pointer to the currently created default ERP * erp pointer to the currently created default ERP
* RETURN VALUES * RETURN VALUES
* erp_new contens was possibly modified * erp_new contens was possibly modified
*/ */
static struct dasd_ccw_req * static struct dasd_ccw_req *
dasd_3990_erp_inspect(struct dasd_ccw_req * erp) dasd_3990_erp_inspect(struct dasd_ccw_req * erp)
@ -2272,14 +2272,14 @@ dasd_3990_erp_inspect(struct dasd_ccw_req * erp)
/* /*
* DASD_3990_ERP_ADD_ERP * DASD_3990_ERP_ADD_ERP
* *
* DESCRIPTION * DESCRIPTION
* This funtion adds an additional request block (ERP) to the head of * This funtion adds an additional request block (ERP) to the head of
* the given cqr (or erp). * the given cqr (or erp).
* This erp is initialized as an default erp (retry TIC) * This erp is initialized as an default erp (retry TIC)
* *
* PARAMETER * PARAMETER
* cqr head of the current ERP-chain (or single cqr if * cqr head of the current ERP-chain (or single cqr if
* first error) * first error)
* RETURN VALUES * RETURN VALUES
* erp pointer to new ERP-chain head * erp pointer to new ERP-chain head
@ -2332,15 +2332,15 @@ dasd_3990_erp_add_erp(struct dasd_ccw_req * cqr)
} }
/* /*
* DASD_3990_ERP_ADDITIONAL_ERP * DASD_3990_ERP_ADDITIONAL_ERP
* *
* DESCRIPTION * DESCRIPTION
* An additional ERP is needed to handle the current error. * An additional ERP is needed to handle the current error.
* Add ERP to the head of the ERP-chain containing the ERP processing * Add ERP to the head of the ERP-chain containing the ERP processing
* determined based on the sense data. * determined based on the sense data.
* *
* PARAMETER * PARAMETER
* cqr head of the current ERP-chain (or single cqr if * cqr head of the current ERP-chain (or single cqr if
* first error) * first error)
* *
* RETURN VALUES * RETURN VALUES
@ -2376,7 +2376,7 @@ dasd_3990_erp_additional_erp(struct dasd_ccw_req * cqr)
* 24 byte sense byte 25 and 27 is set as well. * 24 byte sense byte 25 and 27 is set as well.
* *
* PARAMETER * PARAMETER
* cqr1 first cqr, which will be compared with the * cqr1 first cqr, which will be compared with the
* cqr2 second cqr. * cqr2 second cqr.
* *
* RETURN VALUES * RETURN VALUES
@ -2415,7 +2415,7 @@ dasd_3990_erp_error_match(struct dasd_ccw_req *cqr1, struct dasd_ccw_req *cqr2)
* cqr failed cqr (either original cqr or already an erp) * cqr failed cqr (either original cqr or already an erp)
* *
* RETURN VALUES * RETURN VALUES
* erp erp-pointer to the already defined error * erp erp-pointer to the already defined error
* recovery procedure OR * recovery procedure OR
* NULL if a 'new' error occurred. * NULL if a 'new' error occurred.
*/ */
@ -2451,10 +2451,10 @@ dasd_3990_erp_in_erp(struct dasd_ccw_req *cqr)
* DASD_3990_ERP_FURTHER_ERP (24 & 32 byte sense) * DASD_3990_ERP_FURTHER_ERP (24 & 32 byte sense)
* *
* DESCRIPTION * DESCRIPTION
* No retry is left for the current ERP. Check what has to be done * No retry is left for the current ERP. Check what has to be done
* with the ERP. * with the ERP.
* - do further defined ERP action or * - do further defined ERP action or
* - wait for interrupt or * - wait for interrupt or
* - exit with permanent error * - exit with permanent error
* *
* PARAMETER * PARAMETER
@ -2485,7 +2485,7 @@ dasd_3990_erp_further_erp(struct dasd_ccw_req *erp)
if (!(sense[2] & DASD_SENSE_BIT_0)) { if (!(sense[2] & DASD_SENSE_BIT_0)) {
/* issue a Diagnostic Control command with an /* issue a Diagnostic Control command with an
* Inhibit Write subcommand */ * Inhibit Write subcommand */
switch (sense[25]) { switch (sense[25]) {
@ -2535,14 +2535,14 @@ dasd_3990_erp_further_erp(struct dasd_ccw_req *erp)
} /* end dasd_3990_erp_further_erp */ } /* end dasd_3990_erp_further_erp */
/* /*
* DASD_3990_ERP_HANDLE_MATCH_ERP * DASD_3990_ERP_HANDLE_MATCH_ERP
* *
* DESCRIPTION * DESCRIPTION
* An error occurred again and an ERP has been detected which is already * An error occurred again and an ERP has been detected which is already
* used to handle this error (e.g. retries). * used to handle this error (e.g. retries).
* All prior ERP's are asumed to be successful and therefore removed * All prior ERP's are asumed to be successful and therefore removed
* from queue. * from queue.
* If retry counter of matching erp is already 0, it is checked if further * If retry counter of matching erp is already 0, it is checked if further
* action is needed (besides retry) or if the ERP has failed. * action is needed (besides retry) or if the ERP has failed.
* *
* PARAMETER * PARAMETER
@ -2631,7 +2631,7 @@ dasd_3990_erp_handle_match_erp(struct dasd_ccw_req *erp_head,
* erp erp-pointer to the head of the ERP action chain. * erp erp-pointer to the head of the ERP action chain.
* This means: * This means:
* - either a ptr to an additional ERP cqr or * - either a ptr to an additional ERP cqr or
* - the original given cqr (which's status might * - the original given cqr (which's status might
* be modified) * be modified)
*/ */
struct dasd_ccw_req * struct dasd_ccw_req *
@ -2723,22 +2723,3 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
return erp; return erp;
} /* end dasd_3990_erp_action */ } /* end dasd_3990_erp_action */
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* Emacs will notice this stuff at the end of the file and automatically
* adjust the settings for this buffer only. This must remain at the end
* of the file.
* ---------------------------------------------------------------------------
* Local variables:
* c-indent-level: 4
* c-brace-imaginary-offset: 0
* c-brace-offset: -4
* c-argdecl-indent: 4
* c-label-offset: -4
* c-continued-statement-offset: 4
* c-continued-brace-offset: 0
* indent-tabs-mode: 1
* tab-width: 8
* End:
*/

View File

@ -1,4 +1,4 @@
/* /*
* File...........: linux/drivers/s390/block/dasd_9336_erp.c * File...........: linux/drivers/s390/block/dasd_9336_erp.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com> * Bugreports.to..: <Linux390@de.ibm.com>
@ -12,10 +12,10 @@
/* /*
* DASD_9336_ERP_EXAMINE * DASD_9336_ERP_EXAMINE
* *
* DESCRIPTION * DESCRIPTION
* Checks only for fatal/no/recover error. * Checks only for fatal/no/recover error.
* A detailed examination of the sense data is done later outside * A detailed examination of the sense data is done later outside
* the interrupt handler. * the interrupt handler.
* *
@ -23,7 +23,7 @@
* 'Chapter 7. 9336 Sense Data'. * 'Chapter 7. 9336 Sense Data'.
* *
* RETURN VALUES * RETURN VALUES
* dasd_era_none no error * dasd_era_none no error
* dasd_era_fatal for all fatal (unrecoverable errors) * dasd_era_fatal for all fatal (unrecoverable errors)
* dasd_era_recover for all others. * dasd_era_recover for all others.
*/ */
@ -39,22 +39,3 @@ dasd_9336_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb)
return dasd_era_recover; return dasd_era_recover;
} /* END dasd_9336_erp_examine */ } /* END dasd_9336_erp_examine */
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* Emacs will notice this stuff at the end of the file and automatically
* adjust the settings for this buffer only. This must remain at the end
* of the file.
* ---------------------------------------------------------------------------
* Local variables:
* c-indent-level: 4
* c-brace-imaginary-offset: 0
* c-brace-offset: -4
* c-argdecl-indent: 4
* c-label-offset: -4
* c-continued-statement-offset: 4
* c-continued-brace-offset: 0
* indent-tabs-mode: 1
* tab-width: 8
* End:
*/
@ -1,4 +1,4 @@
/* /*
* File...........: linux/drivers/s390/block/dasd_9345_erp.c * File...........: linux/drivers/s390/block/dasd_9345_erp.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com> * Bugreports.to..: <Linux390@de.ibm.com>
@ -27,7 +27,7 @@
#include "dasd_int.h" #include "dasd_int.h"
 kmem_cache_t *dasd_page_cache;
-EXPORT_SYMBOL(dasd_page_cache);
+EXPORT_SYMBOL_GPL(dasd_page_cache);
/* /*
* dasd_devmap_t is used to store the features and the relation * dasd_devmap_t is used to store the features and the relation
@ -48,6 +48,20 @@ struct dasd_devmap {
struct dasd_uid uid; struct dasd_uid uid;
}; };
/*
* dasd_servermap is used to store the server_id of all storage servers
* accessed by DASD device driver.
*/
struct dasd_servermap {
struct list_head list;
struct server_id {
char vendor[4];
char serial[15];
} sid;
};
static struct list_head dasd_serverlist;
/* /*
* Parameter parsing functions for dasd= parameter. The syntax is: * Parameter parsing functions for dasd= parameter. The syntax is:
* <devno> : (0x)?[0-9a-fA-F]+ * <devno> : (0x)?[0-9a-fA-F]+
@ -64,6 +78,8 @@ struct dasd_devmap {
int dasd_probeonly = 0; /* is true, when probeonly mode is active */ int dasd_probeonly = 0; /* is true, when probeonly mode is active */
int dasd_autodetect = 0; /* is true, when autodetection is active */ int dasd_autodetect = 0; /* is true, when autodetection is active */
int dasd_nopav = 0; /* is true, when PAV is disabled */
EXPORT_SYMBOL_GPL(dasd_nopav);
/* /*
* char *dasd[] is intended to hold the ranges supplied by the dasd= statement * char *dasd[] is intended to hold the ranges supplied by the dasd= statement
@ -123,7 +139,7 @@ static inline int
dasd_busid(char **str, int *id0, int *id1, int *devno) dasd_busid(char **str, int *id0, int *id1, int *devno)
{ {
int val, old_style; int val, old_style;
/* check for leading '0x' */ /* check for leading '0x' */
old_style = 0; old_style = 0;
if ((*str)[0] == '0' && (*str)[1] == 'x') { if ((*str)[0] == '0' && (*str)[1] == 'x') {
@ -179,7 +195,7 @@ dasd_feature_list(char *str, char **endp)
features = 0; features = 0;
while (1) { while (1) {
for (len = 0; for (len = 0;
str[len] && str[len] != ':' && str[len] != ')'; len++); str[len] && str[len] != ':' && str[len] != ')'; len++);
if (len == 2 && !strncmp(str, "ro", 2)) if (len == 2 && !strncmp(str, "ro", 2))
features |= DASD_FEATURE_READONLY; features |= DASD_FEATURE_READONLY;
@ -228,19 +244,24 @@ dasd_parse_keyword( char *parsestring ) {
length = strlen(parsestring); length = strlen(parsestring);
residual_str = parsestring + length; residual_str = parsestring + length;
} }
if (strncmp ("autodetect", parsestring, length) == 0) { if (strncmp("autodetect", parsestring, length) == 0) {
dasd_autodetect = 1; dasd_autodetect = 1;
MESSAGE (KERN_INFO, "%s", MESSAGE (KERN_INFO, "%s",
"turning to autodetection mode"); "turning to autodetection mode");
return residual_str; return residual_str;
} }
if (strncmp ("probeonly", parsestring, length) == 0) { if (strncmp("probeonly", parsestring, length) == 0) {
dasd_probeonly = 1; dasd_probeonly = 1;
MESSAGE(KERN_INFO, "%s", MESSAGE(KERN_INFO, "%s",
"turning to probeonly mode"); "turning to probeonly mode");
return residual_str; return residual_str;
} }
if (strncmp ("fixedbuffers", parsestring, length) == 0) { if (strncmp("nopav", parsestring, length) == 0) {
dasd_nopav = 1;
MESSAGE(KERN_INFO, "%s", "disable PAV mode");
return residual_str;
}
if (strncmp("fixedbuffers", parsestring, length) == 0) {
if (dasd_page_cache) if (dasd_page_cache)
return residual_str; return residual_str;
dasd_page_cache = dasd_page_cache =
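Illustrative only (not part of this patch): with the new keyword, booting with a parameter such as

	dasd=nopav

sets dasd_nopav before any device is set online; dasd_eckd_validate_server() further down checks the flag and skips the PSF-SSC call on LPAR.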
@ -294,6 +315,8 @@ dasd_parse_range( char *parsestring ) {
features = dasd_feature_list(str, &str); features = dasd_feature_list(str, &str);
if (features < 0) if (features < 0)
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
/* each device in dasd= parameter should be set initially online */
features |= DASD_FEATURE_INITIAL_ONLINE;
while (from <= to) { while (from <= to) {
sprintf(bus_id, "%01x.%01x.%04x", sprintf(bus_id, "%01x.%01x.%04x",
from_id0, from_id1, from++); from_id0, from_id1, from++);
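Illustrative only: since every device named in a dasd= range now carries DASD_FEATURE_INITIAL_ONLINE, a boot parameter like

	dasd=0.0.7000-0.0.7003(ro),0.0.7100

lets dasd_generic_probe() set all five devices online directly, replacing the driver_for_each_device() pass that is removed from dasd.c above.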
@ -359,7 +382,7 @@ dasd_parse(void)
* Add a devmap for the device specified by busid. It is possible that * Add a devmap for the device specified by busid. It is possible that
* the devmap already exists (dasd= parameter). The order of the devices * the devmap already exists (dasd= parameter). The order of the devices
* added through this function will define the kdevs for the individual * added through this function will define the kdevs for the individual
* devices. * devices.
*/ */
static struct dasd_devmap * static struct dasd_devmap *
dasd_add_busid(char *bus_id, int features) dasd_add_busid(char *bus_id, int features)
@ -368,7 +391,7 @@ dasd_add_busid(char *bus_id, int features)
int hash; int hash;
 	new = (struct dasd_devmap *)
-		kmalloc(sizeof(struct dasd_devmap), GFP_KERNEL);
+		kzalloc(sizeof(struct dasd_devmap), GFP_KERNEL);
if (!new) if (!new)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
spin_lock(&dasd_devmap_lock); spin_lock(&dasd_devmap_lock);
@ -630,7 +653,8 @@ dasd_ro_show(struct device *dev, struct device_attribute *attr, char *buf)
} }
static ssize_t static ssize_t
-dasd_ro_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+dasd_ro_store(struct device *dev, struct device_attribute *attr,
+	      const char *buf, size_t count)
{ {
struct dasd_devmap *devmap; struct dasd_devmap *devmap;
int ro_flag; int ro_flag;
@ -658,7 +682,7 @@ static DEVICE_ATTR(readonly, 0644, dasd_ro_show, dasd_ro_store);
* use_diag controls whether the driver should use diag rather than ssch * use_diag controls whether the driver should use diag rather than ssch
* to talk to the device * to talk to the device
*/ */
static ssize_t static ssize_t
dasd_use_diag_show(struct device *dev, struct device_attribute *attr, char *buf) dasd_use_diag_show(struct device *dev, struct device_attribute *attr, char *buf)
{ {
struct dasd_devmap *devmap; struct dasd_devmap *devmap;
@ -673,7 +697,8 @@ dasd_use_diag_show(struct device *dev, struct device_attribute *attr, char *buf)
} }
static ssize_t static ssize_t
-dasd_use_diag_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+dasd_use_diag_store(struct device *dev, struct device_attribute *attr,
+		    const char *buf, size_t count)
{ {
struct dasd_devmap *devmap; struct dasd_devmap *devmap;
ssize_t rc; ssize_t rc;
@ -697,11 +722,11 @@ dasd_use_diag_store(struct device *dev, struct device_attribute *attr, const cha
return rc; return rc;
} }
-static
-DEVICE_ATTR(use_diag, 0644, dasd_use_diag_show, dasd_use_diag_store);
+static DEVICE_ATTR(use_diag, 0644, dasd_use_diag_show, dasd_use_diag_store);
static ssize_t static ssize_t
-dasd_discipline_show(struct device *dev, struct device_attribute *attr, char *buf)
+dasd_discipline_show(struct device *dev, struct device_attribute *attr,
+		     char *buf)
{ {
struct dasd_devmap *devmap; struct dasd_devmap *devmap;
char *dname; char *dname;
@ -834,6 +859,38 @@ static struct attribute_group dasd_attr_group = {
.attrs = dasd_attrs, .attrs = dasd_attrs,
}; };
/*
* Check if the related storage server is already contained in the
* dasd_serverlist. If server is not contained, create new entry.
* Return 0 if server was already in serverlist,
* 1 if the server was added successfully
* <0 in case of error.
*/
static int
dasd_add_server(struct dasd_uid *uid)
{
struct dasd_servermap *new, *tmp;
/* check if server is already contained */
list_for_each_entry(tmp, &dasd_serverlist, list)
		// normal cmp instead?
if (strncmp(tmp->sid.vendor, uid->vendor,
sizeof(tmp->sid.vendor)) == 0
&& strncmp(tmp->sid.serial, uid->serial,
sizeof(tmp->sid.serial)) == 0)
return 0;
new = (struct dasd_servermap *)
kzalloc(sizeof(struct dasd_servermap), GFP_KERNEL);
if (!new)
return -ENOMEM;
strncpy(new->sid.vendor, uid->vendor, sizeof(new->sid.vendor));
strncpy(new->sid.serial, uid->serial, sizeof(new->sid.serial));
list_add(&new->list, &dasd_serverlist);
return 1;
}
/* /*
* Return copy of the device unique identifier. * Return copy of the device unique identifier.
@ -854,21 +911,26 @@ dasd_get_uid(struct ccw_device *cdev, struct dasd_uid *uid)
 /*
  * Register the given device unique identifier into devmap struct.
+ * Return 0 if server was already in serverlist,
+ *	  1 if the server was added successful
+ *	  <0 in case of error.
  */
 int
 dasd_set_uid(struct ccw_device *cdev, struct dasd_uid *uid)
 {
 	struct dasd_devmap *devmap;
+	int rc;
 	devmap = dasd_find_busid(cdev->dev.bus_id);
 	if (IS_ERR(devmap))
 		return PTR_ERR(devmap);
 	spin_lock(&dasd_devmap_lock);
 	devmap->uid = *uid;
+	rc = dasd_add_server(uid);
 	spin_unlock(&dasd_devmap_lock);
-	return 0;
+	return rc;
 }
-EXPORT_SYMBOL(dasd_set_uid);
+EXPORT_SYMBOL_GPL(dasd_set_uid);
/* /*
* Return value of the specified feature. * Return value of the specified feature.
@ -880,7 +942,7 @@ dasd_get_feature(struct ccw_device *cdev, int feature)
 	devmap = dasd_find_busid(cdev->dev.bus_id);
 	if (IS_ERR(devmap))
-		return (int) PTR_ERR(devmap);
+		return PTR_ERR(devmap);
 	return ((devmap->features & feature) != 0);
} }
@ -896,7 +958,7 @@ dasd_set_feature(struct ccw_device *cdev, int feature, int flag)
 	devmap = dasd_find_busid(cdev->dev.bus_id);
 	if (IS_ERR(devmap))
-		return (int) PTR_ERR(devmap);
+		return PTR_ERR(devmap);
spin_lock(&dasd_devmap_lock); spin_lock(&dasd_devmap_lock);
if (flag) if (flag)
@ -932,8 +994,10 @@ dasd_devmap_init(void)
 	dasd_max_devindex = 0;
 	for (i = 0; i < 256; i++)
 		INIT_LIST_HEAD(&dasd_hashlists[i]);
-	return 0;
+
+	/* Initialize servermap structure. */
+	INIT_LIST_HEAD(&dasd_serverlist);
+	return 0;
} }
void void
@ -1,4 +1,4 @@
/* /*
* File...........: linux/drivers/s390/block/dasd_diag.c * File...........: linux/drivers/s390/block/dasd_diag.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Based on.......: linux/drivers/s390/block/mdisk.c * Based on.......: linux/drivers/s390/block/mdisk.c
@ -336,7 +336,7 @@ dasd_diag_check_device(struct dasd_device *device)
private = (struct dasd_diag_private *) device->private; private = (struct dasd_diag_private *) device->private;
 	if (private == NULL) {
-		private = kmalloc(sizeof(struct dasd_diag_private),GFP_KERNEL);
+		private = kzalloc(sizeof(struct dasd_diag_private),GFP_KERNEL);
 		if (private == NULL) {
 			DEV_MESSAGE(KERN_WARNING, device, "%s",
 				"memory allocation failed for private data");
@ -527,7 +527,7 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
datasize, device); datasize, device);
if (IS_ERR(cqr)) if (IS_ERR(cqr))
return cqr; return cqr;
dreq = (struct dasd_diag_req *) cqr->data; dreq = (struct dasd_diag_req *) cqr->data;
dreq->block_count = count; dreq->block_count = count;
dbio = dreq->bio; dbio = dreq->bio;
@ -1,4 +1,4 @@
/* /*
* File...........: linux/drivers/s390/block/dasd_diag.h * File...........: linux/drivers/s390/block/dasd_diag.h
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Based on.......: linux/drivers/s390/block/mdisk.h * Based on.......: linux/drivers/s390/block/mdisk.h
@ -1,7 +1,7 @@
/* /*
* File...........: linux/drivers/s390/block/dasd_eckd.c * File...........: linux/drivers/s390/block/dasd_eckd.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com> * Horst Hummel <Horst.Hummel@de.ibm.com>
* Carsten Otte <Cotte@de.ibm.com> * Carsten Otte <Cotte@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com> * Bugreports.to..: <Linux390@de.ibm.com>
@ -24,6 +24,7 @@
#include <asm/io.h> #include <asm/io.h>
#include <asm/todclk.h> #include <asm/todclk.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/cio.h>
#include <asm/ccwdev.h> #include <asm/ccwdev.h>
#include "dasd_int.h" #include "dasd_int.h"
@ -89,17 +90,22 @@ dasd_eckd_probe (struct ccw_device *cdev)
 {
 	int ret;
 
-	ret = dasd_generic_probe (cdev, &dasd_eckd_discipline);
-	if (ret)
+	/* set ECKD specific ccw-device options */
+	ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE);
+	if (ret) {
+		printk(KERN_WARNING
+		       "dasd_eckd_probe: could not set ccw-device options "
+		       "for %s\n", cdev->dev.bus_id);
 		return ret;
-	ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP | CCWDEV_ALLOW_FORCE);
-	return 0;
+	}
+	ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
+	return ret;
 }
 static int
 dasd_eckd_set_online(struct ccw_device *cdev)
 {
-	return dasd_generic_set_online (cdev, &dasd_eckd_discipline);
+	return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
 }
static struct ccw_driver dasd_eckd_driver = { static struct ccw_driver dasd_eckd_driver = {
@ -210,14 +216,14 @@ check_XRC (struct ccw1 *de_ccw,
/* switch on System Time Stamp - needed for XRC Support */ /* switch on System Time Stamp - needed for XRC Support */
if (private->rdc_data.facilities.XRC_supported) { if (private->rdc_data.facilities.XRC_supported) {
data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */ data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */
data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */ data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
data->ep_sys_time = get_clock (); data->ep_sys_time = get_clock ();
de_ccw->count = sizeof (struct DE_eckd_data); de_ccw->count = sizeof (struct DE_eckd_data);
de_ccw->flags |= CCW_FLAG_SLI; de_ccw->flags |= CCW_FLAG_SLI;
} }
return; return;
@ -296,8 +302,8 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
/* check for sequential prestage - enhance cylinder range */ /* check for sequential prestage - enhance cylinder range */
if (data->attributes.operation == DASD_SEQ_PRESTAGE || if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
data->attributes.operation == DASD_SEQ_ACCESS) { data->attributes.operation == DASD_SEQ_ACCESS) {
if (end.cyl + private->attrib.nr_cyl < geo.cyl) if (end.cyl + private->attrib.nr_cyl < geo.cyl)
end.cyl += private->attrib.nr_cyl; end.cyl += private->attrib.nr_cyl;
else else
end.cyl = (geo.cyl - 1); end.cyl = (geo.cyl - 1);
@ -317,7 +323,7 @@ locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, int trk,
struct dasd_eckd_private *private; struct dasd_eckd_private *private;
int sector; int sector;
int dn, d; int dn, d;
private = (struct dasd_eckd_private *) device->private; private = (struct dasd_eckd_private *) device->private;
DBF_DEV_EVENT(DBF_INFO, device, DBF_DEV_EVENT(DBF_INFO, device,
@ -540,6 +546,86 @@ dasd_eckd_read_conf(struct dasd_device *device)
return 0; return 0;
} }
/*
* Build CP for Perform Subsystem Function - SSC.
*/
struct dasd_ccw_req *
dasd_eckd_build_psf_ssc(struct dasd_device *device)
{
struct dasd_ccw_req *cqr;
struct dasd_psf_ssc_data *psf_ssc_data;
struct ccw1 *ccw;
cqr = dasd_smalloc_request("ECKD", 1 /* PSF */ ,
sizeof(struct dasd_psf_ssc_data),
device);
if (IS_ERR(cqr)) {
DEV_MESSAGE(KERN_WARNING, device, "%s",
"Could not allocate PSF-SSC request");
return cqr;
}
psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
psf_ssc_data->order = PSF_ORDER_SSC;
psf_ssc_data->suborder = 0x08;
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->cda = (__u32)(addr_t)psf_ssc_data;
ccw->count = 66;
cqr->device = device;
cqr->expires = 10*HZ;
cqr->buildclk = get_clock();
cqr->status = DASD_CQR_FILLED;
return cqr;
}
/*
* Perform Subsystem Function.
* It is necessary to trigger CIO for channel revalidation since this
* call might change behaviour of DASD devices.
*/
static int
dasd_eckd_psf_ssc(struct dasd_device *device)
{
struct dasd_ccw_req *cqr;
int rc;
cqr = dasd_eckd_build_psf_ssc(device);
if (IS_ERR(cqr))
return PTR_ERR(cqr);
rc = dasd_sleep_on(cqr);
if (!rc)
/* trigger CIO to reprobe devices */
css_schedule_reprobe();
dasd_sfree_request(cqr, cqr->device);
return rc;
}
/*
* Valide storage server of current device.
*/
static int
dasd_eckd_validate_server(struct dasd_device *device)
{
int rc;
/* Currently PAV is the only reason to 'validate' server on LPAR */
if (dasd_nopav || MACHINE_IS_VM)
return 0;
rc = dasd_eckd_psf_ssc(device);
if (rc)
/* may be requested feature is not available on server,
* therefore just report error and go ahead */
DEV_MESSAGE(KERN_INFO, device,
"Perform Subsystem Function returned rc=%d", rc);
/* RE-Read Configuration Data */
return dasd_eckd_read_conf(device);
}
/* /*
* Check device characteristics. * Check device characteristics.
* If the device is accessible using ECKD discipline, the device is enabled. * If the device is accessible using ECKD discipline, the device is enabled.
@ -554,7 +640,7 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
 	private = (struct dasd_eckd_private *) device->private;
 	if (private == NULL) {
-		private = kmalloc(sizeof(struct dasd_eckd_private),
+		private = kzalloc(sizeof(struct dasd_eckd_private),
 				  GFP_KERNEL | GFP_DMA);
 		if (private == NULL) {
 			DEV_MESSAGE(KERN_WARNING, device, "%s",
@ -562,7 +648,6 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
"data"); "data");
return -ENOMEM; return -ENOMEM;
} }
memset(private, 0, sizeof(struct dasd_eckd_private));
device->private = (void *) private; device->private = (void *) private;
} }
/* Invalidate status of initial analysis. */ /* Invalidate status of initial analysis. */
@ -571,16 +656,29 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
private->attrib.operation = DASD_NORMAL_CACHE; private->attrib.operation = DASD_NORMAL_CACHE;
private->attrib.nr_cyl = 0; private->attrib.nr_cyl = 0;
/* Read Configuration Data */
rc = dasd_eckd_read_conf(device);
if (rc)
return rc;
/* Generate device unique id and register in devmap */
rc = dasd_eckd_generate_uid(device, &uid);
if (rc)
return rc;
rc = dasd_set_uid(device->cdev, &uid);
if (rc == 1) /* new server found */
rc = dasd_eckd_validate_server(device);
if (rc)
return rc;
 	/* Read Device Characteristics */
 	rdc_data = (void *) &(private->rdc_data);
 	memset(rdc_data, 0, sizeof(rdc_data));
 	rc = read_dev_chars(device->cdev, &rdc_data, 64);
-	if (rc) {
+	if (rc)
 		DEV_MESSAGE(KERN_WARNING, device,
-			    "Read device characteristics returned error %d",
-			    rc);
-		return rc;
-	}
+			    "Read device characteristics returned "
+			    "rc=%d", rc);
DEV_MESSAGE(KERN_INFO, device, DEV_MESSAGE(KERN_INFO, device,
"%04X/%02X(CU:%04X/%02X) Cyl:%d Head:%d Sec:%d", "%04X/%02X(CU:%04X/%02X) Cyl:%d Head:%d Sec:%d",
@ -591,19 +689,6 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
private->rdc_data.no_cyl, private->rdc_data.no_cyl,
private->rdc_data.trk_per_cyl, private->rdc_data.trk_per_cyl,
private->rdc_data.sec_per_trk); private->rdc_data.sec_per_trk);
/* Read Configuration Data */
rc = dasd_eckd_read_conf (device);
if (rc)
return rc;
/* Generate device unique id and register in devmap */
rc = dasd_eckd_generate_uid(device, &uid);
if (rc)
return rc;
rc = dasd_set_uid(device->cdev, &uid);
return rc; return rc;
} }
@ -773,7 +858,7 @@ dasd_eckd_end_analysis(struct dasd_device *device)
((private->rdc_data.no_cyl * ((private->rdc_data.no_cyl *
private->rdc_data.trk_per_cyl * private->rdc_data.trk_per_cyl *
blk_per_trk * (device->bp_block >> 9)) >> 1), blk_per_trk * (device->bp_block >> 9)) >> 1),
((blk_per_trk * device->bp_block) >> 10), ((blk_per_trk * device->bp_block) >> 10),
private->uses_cdl ? private->uses_cdl ?
"compatible disk layout" : "linux disk layout"); "compatible disk layout" : "linux disk layout");
@ -970,7 +1055,7 @@ dasd_eckd_format_device(struct dasd_device * device,
if (i < 3) { if (i < 3) {
ect->kl = 4; ect->kl = 4;
ect->dl = sizes_trk0[i] - 4; ect->dl = sizes_trk0[i] - 4;
} }
} }
if ((fdata->intensity & 0x08) && if ((fdata->intensity & 0x08) &&
fdata->start_unit == 1) { fdata->start_unit == 1) {
@ -1270,7 +1355,7 @@ dasd_eckd_fill_info(struct dasd_device * device,
/* /*
* Release device ioctl. * Release device ioctl.
* Builds a channel program to release a prior reserved * Builds a channel program to release a prior reserved
* (see dasd_eckd_reserve) device. * (see dasd_eckd_reserve) device.
*/ */
static int static int
@ -1310,8 +1395,8 @@ dasd_eckd_release(struct dasd_device *device)
/* /*
* Reserve device ioctl. * Reserve device ioctl.
* Options are set to 'synchronous wait for interrupt' and * Options are set to 'synchronous wait for interrupt' and
* 'timeout the request'. This leads to a terminate IO if * 'timeout the request'. This leads to a terminate IO if
* the interrupt is outstanding for a certain time. * the interrupt is outstanding for a certain time.
*/ */
static int static int
dasd_eckd_reserve(struct dasd_device *device) dasd_eckd_reserve(struct dasd_device *device)
@ -1349,7 +1434,7 @@ dasd_eckd_reserve(struct dasd_device *device)
/* /*
* Steal lock ioctl - unconditional reserve device. * Steal lock ioctl - unconditional reserve device.
* Builds a channel program to break a device's reservation. * Builds a channel program to break a device's reservation.
* (unconditional reserve) * (unconditional reserve)
*/ */
static int static int
@ -1521,6 +1606,40 @@ dasd_eckd_ioctl(struct dasd_device *device, unsigned int cmd, void __user *argp)
} }
} }
/*
* Dump the range of CCWs into 'page' buffer
* and return number of printed chars.
*/
static inline int
dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
{
int len, count;
char *datap;
len = 0;
while (from <= to) {
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
" CCW %p: %08X %08X DAT:",
from, ((int *) from)[0], ((int *) from)[1]);
/* get pointer to data (consider IDALs) */
if (from->flags & CCW_FLAG_IDA)
datap = (char *) *((addr_t *) (addr_t) from->cda);
else
datap = (char *) ((addr_t) from->cda);
/* dump data (max 32 bytes) */
for (count = 0; count < from->count && count < 32; count++) {
if (count % 8 == 0) len += sprintf(page + len, " ");
if (count % 4 == 0) len += sprintf(page + len, " ");
len += sprintf(page + len, "%02x", datap[count]);
}
len += sprintf(page + len, "\n");
from++;
}
return len;
}
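For reference, a minimal standalone sketch of the byte grouping this helper produces (a space before every four data bytes, an extra one before every eight, at most 32 bytes per CCW). It is plain userspace C with an invented sample buffer, not driver code:

#include <stdio.h>

static int dump_data(const unsigned char *datap, int count, char *page)
{
	int len = 0, i;

	for (i = 0; i < count && i < 32; i++) {
		if (i % 8 == 0)
			len += sprintf(page + len, " ");
		if (i % 4 == 0)
			len += sprintf(page + len, " ");
		len += sprintf(page + len, "%02x", datap[i]);
	}
	len += sprintf(page + len, "\n");
	return len;
}

int main(void)
{
	unsigned char sample[40];
	char page[256];
	int i;

	for (i = 0; i < (int) sizeof(sample); i++)
		sample[i] = (unsigned char) i;
	dump_data(sample, sizeof(sample), page);
	printf("DAT:%s", page);	/* DAT:  00010203 04050607  08090a0b ... */
	return 0;
}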
/* /*
* Print sense data and related channel program. * Print sense data and related channel program.
* Parts are printed because printk buffer is only 1024 bytes. * Parts are printed because printk buffer is only 1024 bytes.
@ -1530,8 +1649,8 @@ dasd_eckd_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
struct irb *irb) struct irb *irb)
{ {
char *page; char *page;
struct ccw1 *act, *end, *last; struct ccw1 *first, *last, *fail, *from, *to;
int len, sl, sct, count; int len, sl, sct;
page = (char *) get_zeroed_page(GFP_ATOMIC); page = (char *) get_zeroed_page(GFP_ATOMIC);
if (page == NULL) { if (page == NULL) {
@ -1539,7 +1658,8 @@ dasd_eckd_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
"No memory to dump sense data"); "No memory to dump sense data");
return; return;
} }
len = sprintf(page, KERN_ERR PRINTK_HEADER /* dump the sense data */
len = sprintf(page, KERN_ERR PRINTK_HEADER
" I/O status report for device %s:\n", " I/O status report for device %s:\n",
device->cdev->dev.bus_id); device->cdev->dev.bus_id);
len += sprintf(page + len, KERN_ERR PRINTK_HEADER len += sprintf(page + len, KERN_ERR PRINTK_HEADER
@ -1564,87 +1684,55 @@ dasd_eckd_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
if (irb->ecw[27] & DASD_SENSE_BIT_0) { if (irb->ecw[27] & DASD_SENSE_BIT_0) {
/* 24 Byte Sense Data */ /* 24 Byte Sense Data */
len += sprintf(page + len, KERN_ERR PRINTK_HEADER sprintf(page + len, KERN_ERR PRINTK_HEADER
" 24 Byte: %x MSG %x, " " 24 Byte: %x MSG %x, "
"%s MSGb to SYSOP\n", "%s MSGb to SYSOP\n",
irb->ecw[7] >> 4, irb->ecw[7] & 0x0f, irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
irb->ecw[1] & 0x10 ? "" : "no"); irb->ecw[1] & 0x10 ? "" : "no");
} else { } else {
/* 32 Byte Sense Data */ /* 32 Byte Sense Data */
len += sprintf(page + len, KERN_ERR PRINTK_HEADER sprintf(page + len, KERN_ERR PRINTK_HEADER
" 32 Byte: Format: %x " " 32 Byte: Format: %x "
"Exception class %x\n", "Exception class %x\n",
irb->ecw[6] & 0x0f, irb->ecw[22] >> 4); irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
} }
} else { } else {
len += sprintf(page + len, KERN_ERR PRINTK_HEADER sprintf(page + len, KERN_ERR PRINTK_HEADER
" SORRY - NO VALID SENSE AVAILABLE\n"); " SORRY - NO VALID SENSE AVAILABLE\n");
} }
MESSAGE_LOG(KERN_ERR, "%s", printk("%s", page);
page + sizeof(KERN_ERR PRINTK_HEADER));
/* dump the Channel Program */ /* dump the Channel Program (max 140 Bytes per line) */
/* print first CCWs (maximum 8) */ /* Count CCW and print first CCWs (maximum 1024 % 140 = 7) */
act = req->cpaddr; first = req->cpaddr;
for (last = act; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++); for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
end = min(act + 8, last); to = min(first + 6, last);
len = sprintf(page, KERN_ERR PRINTK_HEADER len = sprintf(page, KERN_ERR PRINTK_HEADER
" Related CP in req: %p\n", req); " Related CP in req: %p\n", req);
while (act <= end) { dasd_eckd_dump_ccw_range(first, to, page + len);
len += sprintf(page + len, KERN_ERR PRINTK_HEADER printk("%s", page);
" CCW %p: %08X %08X DAT:",
act, ((int *) act)[0], ((int *) act)[1]);
for (count = 0; count < 32 && count < act->count;
count += sizeof(int))
len += sprintf(page + len, " %08X",
((int *) (addr_t) act->cda)
[(count>>2)]);
len += sprintf(page + len, "\n");
act++;
}
MESSAGE_LOG(KERN_ERR, "%s",
page + sizeof(KERN_ERR PRINTK_HEADER));
/* print failing CCW area */ /* print failing CCW area (maximum 4) */
/* scsw->cda is either valid or zero */
len = 0; len = 0;
if (act < ((struct ccw1 *)(addr_t) irb->scsw.cpa) - 2) { from = ++to;
act = ((struct ccw1 *)(addr_t) irb->scsw.cpa) - 2; fail = (struct ccw1 *)(addr_t) irb->scsw.cpa; /* failing CCW */
len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n"); if (from < fail - 2) {
} from = fail - 2; /* there is a gap - print header */
end = min((struct ccw1 *)(addr_t) irb->scsw.cpa + 2, last); len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n");
while (act <= end) {
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
" CCW %p: %08X %08X DAT:",
act, ((int *) act)[0], ((int *) act)[1]);
for (count = 0; count < 32 && count < act->count;
count += sizeof(int))
len += sprintf(page + len, " %08X",
((int *) (addr_t) act->cda)
[(count>>2)]);
len += sprintf(page + len, "\n");
act++;
} }
to = min(fail + 1, last);
len += dasd_eckd_dump_ccw_range(from, to, page + len);
/* print last CCWs */ /* print last CCWs (maximum 2) */
if (act < last - 2) { from = max(from, ++to);
act = last - 2; if (from < last - 1) {
from = last - 1; /* there is a gap - print header */
len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n"); len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
} }
while (act <= last) { len += dasd_eckd_dump_ccw_range(from, last, page + len);
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
" CCW %p: %08X %08X DAT:",
act, ((int *) act)[0], ((int *) act)[1]);
for (count = 0; count < 32 && count < act->count;
count += sizeof(int))
len += sprintf(page + len, " %08X",
((int *) (addr_t) act->cda)
[(count>>2)]);
len += sprintf(page + len, "\n");
act++;
}
if (len > 0) if (len > 0)
MESSAGE_LOG(KERN_ERR, "%s", printk("%s", page);
page + sizeof(KERN_ERR PRINTK_HEADER));
free_page((unsigned long) page); free_page((unsigned long) page);
} }
@ -1685,14 +1773,8 @@ static struct dasd_discipline dasd_eckd_discipline = {
static int __init static int __init
dasd_eckd_init(void) dasd_eckd_init(void)
{ {
int ret;
ASCEBC(dasd_eckd_discipline.ebcname, 4); ASCEBC(dasd_eckd_discipline.ebcname, 4);
return ccw_driver_register(&dasd_eckd_driver);
ret = ccw_driver_register(&dasd_eckd_driver);
if (!ret)
dasd_generic_auto_online(&dasd_eckd_driver);
return ret;
} }
static void __exit static void __exit
@ -1703,22 +1785,3 @@ dasd_eckd_cleanup(void)
module_init(dasd_eckd_init); module_init(dasd_eckd_init);
module_exit(dasd_eckd_cleanup); module_exit(dasd_eckd_cleanup);
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* Emacs will notice this stuff at the end of the file and automatically
* adjust the settings for this buffer only. This must remain at the end
* of the file.
* ---------------------------------------------------------------------------
* Local variables:
* c-indent-level: 4
* c-brace-imaginary-offset: 0
* c-brace-offset: -4
* c-argdecl-indent: 4
* c-label-offset: -4
* c-continued-statement-offset: 4
* c-continued-brace-offset: 0
* indent-tabs-mode: 1
* tab-width: 8
* End:
*/

View File

@ -1,7 +1,7 @@
/* /*
* File...........: linux/drivers/s390/block/dasd_eckd.h * File...........: linux/drivers/s390/block/dasd_eckd.h
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com> * Horst Hummel <Horst.Hummel@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com> * Bugreports.to..: <Linux390@de.ibm.com>
* (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
* *
@ -41,9 +41,10 @@
#define DASD_ECKD_CCW_RESERVE 0xB4 #define DASD_ECKD_CCW_RESERVE 0xB4
/* /*
*Perform Subsystem Function / Sub-Orders * Perform Subsystem Function / Sub-Orders
*/ */
#define PSF_ORDER_PRSSD 0x18 #define PSF_ORDER_PRSSD 0x18
#define PSF_ORDER_SSC 0x1D
/***************************************************************************** /*****************************************************************************
* SECTION: Type Definitions * SECTION: Type Definitions
@ -155,7 +156,7 @@ struct dasd_eckd_characteristics {
unsigned char reserved2:4; unsigned char reserved2:4;
unsigned char reserved3:8; unsigned char reserved3:8;
unsigned char defect_wr:1; unsigned char defect_wr:1;
unsigned char XRC_supported:1; unsigned char XRC_supported:1;
unsigned char reserved4:1; unsigned char reserved4:1;
unsigned char striping:1; unsigned char striping:1;
unsigned char reserved5:4; unsigned char reserved5:4;
@ -343,7 +344,7 @@ struct dasd_eckd_path {
}; };
/* /*
* Perform Subsystem Function - Prepare for Read Subsystem Data * Perform Subsystem Function - Prepare for Read Subsystem Data
*/ */
struct dasd_psf_prssd_data { struct dasd_psf_prssd_data {
unsigned char order; unsigned char order;
@ -353,4 +354,15 @@ struct dasd_psf_prssd_data {
unsigned char varies[9]; unsigned char varies[9];
} __attribute__ ((packed)); } __attribute__ ((packed));
/*
* Perform Subsystem Function - Set Subsystem Characteristics
*/
struct dasd_psf_ssc_data {
unsigned char order;
unsigned char flags;
unsigned char cu_type[4];
unsigned char suborder;
unsigned char reserved[59];
} __attribute__((packed));
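All members of the new structure are single bytes, so there is no padding to remove and the total size is 1 + 1 + 4 + 1 + 59 = 66 bytes. A throwaway userspace copy of the definition makes that easy to verify; this is a sanity-check sketch, not kernel code:

#include <stdio.h>

struct dasd_psf_ssc_data {
	unsigned char order;
	unsigned char flags;
	unsigned char cu_type[4];
	unsigned char suborder;
	unsigned char reserved[59];
} __attribute__((packed));

int main(void)
{
	/* 1 + 1 + 4 + 1 + 59 = 66 bytes of PSF parameter data */
	printf("sizeof(struct dasd_psf_ssc_data) = %zu\n",
	       sizeof(struct dasd_psf_ssc_data));
	return 0;
}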
#endif /* DASD_ECKD_H */ #endif /* DASD_ECKD_H */

View File

@ -276,7 +276,7 @@ struct dasd_eer_header {
__u64 tv_sec; __u64 tv_sec;
__u64 tv_usec; __u64 tv_usec;
char busid[DASD_EER_BUSID_SIZE]; char busid[DASD_EER_BUSID_SIZE];
}; } __attribute__ ((packed));
/* /*
* The following function can be used for those triggers that have * The following function can be used for those triggers that have
@ -521,6 +521,8 @@ static int dasd_eer_open(struct inode *inp, struct file *filp)
unsigned long flags; unsigned long flags;
eerb = kzalloc(sizeof(struct eerbuffer), GFP_KERNEL); eerb = kzalloc(sizeof(struct eerbuffer), GFP_KERNEL);
if (!eerb)
return -ENOMEM;
eerb->buffer_page_count = eer_pages; eerb->buffer_page_count = eer_pages;
if (eerb->buffer_page_count < 1 || if (eerb->buffer_page_count < 1 ||
eerb->buffer_page_count > INT_MAX / PAGE_SIZE) { eerb->buffer_page_count > INT_MAX / PAGE_SIZE) {

View File

@ -90,7 +90,7 @@ dasd_default_erp_action(struct dasd_ccw_req * cqr)
/* just retry - there is nothing to save ... I got no sense data.... */ /* just retry - there is nothing to save ... I got no sense data.... */
if (cqr->retries > 0) { if (cqr->retries > 0) {
DEV_MESSAGE (KERN_DEBUG, device, DEV_MESSAGE (KERN_DEBUG, device,
"default ERP called (%i retries left)", "default ERP called (%i retries left)",
cqr->retries); cqr->retries);
cqr->lpm = LPM_ANYPATH; cqr->lpm = LPM_ANYPATH;
@ -155,7 +155,7 @@ dasd_default_erp_postaction(struct dasd_ccw_req * cqr)
/* /*
* Print the hex dump of the memory used by a request. This includes * Print the hex dump of the memory used by a request. This includes
* all error recovery ccws that have been chained in front of the * all error recovery ccws that have been chained in front of the
* real request. * real request.
*/ */
static inline void static inline void
@ -227,12 +227,12 @@ dasd_log_ccw(struct dasd_ccw_req * cqr, int caller, __u32 cpa)
/* /*
* Log bytes around the failed CCW but only if we did * Log bytes around the failed CCW but only if we did
* not log the whole CP or the CCW is outside the * not log the whole CP or the CCW is outside the
* logged CP. * logged CP.
*/ */
if (cplength > 40 || if (cplength > 40 ||
((addr_t) cpa < (addr_t) lcqr->cpaddr && ((addr_t) cpa < (addr_t) lcqr->cpaddr &&
(addr_t) cpa > (addr_t) (lcqr->cpaddr + cplength + 4))) { (addr_t) cpa > (addr_t) (lcqr->cpaddr + cplength + 4))) {
DEV_MESSAGE(KERN_ERR, device, DEV_MESSAGE(KERN_ERR, device,
"Failed CCW (%p) (area):", "Failed CCW (%p) (area):",
(void *) (long) cpa); (void *) (long) cpa);

View File

@ -1,4 +1,4 @@
/* /*
* File...........: linux/drivers/s390/block/dasd_fba.c * File...........: linux/drivers/s390/block/dasd_fba.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com> * Bugreports.to..: <Linux390@de.ibm.com>
@ -56,19 +56,13 @@ static struct ccw_driver dasd_fba_driver; /* see below */
static int static int
dasd_fba_probe(struct ccw_device *cdev) dasd_fba_probe(struct ccw_device *cdev)
{ {
int ret; return dasd_generic_probe(cdev, &dasd_fba_discipline);
ret = dasd_generic_probe (cdev, &dasd_fba_discipline);
if (ret)
return ret;
ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
return 0;
} }
static int static int
dasd_fba_set_online(struct ccw_device *cdev) dasd_fba_set_online(struct ccw_device *cdev)
{ {
return dasd_generic_set_online (cdev, &dasd_fba_discipline); return dasd_generic_set_online(cdev, &dasd_fba_discipline);
} }
static struct ccw_driver dasd_fba_driver = { static struct ccw_driver dasd_fba_driver = {
@ -125,13 +119,13 @@ static int
dasd_fba_check_characteristics(struct dasd_device *device) dasd_fba_check_characteristics(struct dasd_device *device)
{ {
struct dasd_fba_private *private; struct dasd_fba_private *private;
struct ccw_device *cdev = device->cdev; struct ccw_device *cdev = device->cdev;
void *rdc_data; void *rdc_data;
int rc; int rc;
private = (struct dasd_fba_private *) device->private; private = (struct dasd_fba_private *) device->private;
if (private == NULL) { if (private == NULL) {
private = kmalloc(sizeof(struct dasd_fba_private), GFP_KERNEL); private = kzalloc(sizeof(struct dasd_fba_private), GFP_KERNEL);
if (private == NULL) { if (private == NULL) {
DEV_MESSAGE(KERN_WARNING, device, "%s", DEV_MESSAGE(KERN_WARNING, device, "%s",
"memory allocation failed for private " "memory allocation failed for private "
@ -204,7 +198,7 @@ dasd_fba_examine_error(struct dasd_ccw_req * cqr, struct irb * irb)
if (irb->scsw.cstat == 0x00 && if (irb->scsw.cstat == 0x00 &&
irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END)) irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
return dasd_era_none; return dasd_era_none;
cdev = device->cdev; cdev = device->cdev;
switch (cdev->id.dev_type) { switch (cdev->id.dev_type) {
case 0x3370: case 0x3370:
@ -539,7 +533,7 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
* 8192 bytes (=2 pages). For 64 bit one dasd_mchunkt_t structure has * 8192 bytes (=2 pages). For 64 bit one dasd_mchunkt_t structure has
* 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use
* up to 16 bytes (8 for the ccw and 8 for the idal pointer). In * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In
* addition we have one define extent ccw + 16 bytes of data and a * addition we have one define extent ccw + 16 bytes of data and a
* locate record ccw for each block (stupid devices!) + 16 bytes of data. * locate record ccw for each block (stupid devices!) + 16 bytes of data.
* That makes: * That makes:
* (8192 - 24 - 136 - 8 - 16) / 40 = 200.2 blocks at maximum. * (8192 - 24 - 136 - 8 - 16) / 40 = 200.2 blocks at maximum.
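The figure in the comment is straightforward arithmetic; a tiny standalone program that recomputes it (constants copied from the comment, variable names invented for illustration):

#include <stdio.h>

int main(void)
{
	int request_mem = 8192;        /* two pages per request */
	int mchunk = 24;               /* dasd_mchunkt_t */
	int cqr = 136;                 /* struct dasd_ccw_req */
	int define_extent = 8 + 16;    /* DE ccw + data */
	int per_block = 16 + 8 + 16;   /* block ccw + idal, LO ccw + data */
	int left = request_mem - mchunk - cqr - define_extent;

	printf("%d / %d = %.1f -> %d blocks per request\n",
	       left, per_block, (double) left / per_block, left / per_block);
	return 0;
}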
@ -569,16 +563,8 @@ static struct dasd_discipline dasd_fba_discipline = {
static int __init static int __init
dasd_fba_init(void) dasd_fba_init(void)
{ {
int ret;
ASCEBC(dasd_fba_discipline.ebcname, 4); ASCEBC(dasd_fba_discipline.ebcname, 4);
return ccw_driver_register(&dasd_fba_driver);
ret = ccw_driver_register(&dasd_fba_driver);
if (ret)
return ret;
dasd_generic_auto_online(&dasd_fba_driver);
return 0;
} }
static void __exit static void __exit
@ -589,22 +575,3 @@ dasd_fba_cleanup(void)
module_init(dasd_fba_init); module_init(dasd_fba_init);
module_exit(dasd_fba_cleanup); module_exit(dasd_fba_cleanup);
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* Emacs will notice this stuff at the end of the file and automatically
* adjust the settings for this buffer only. This must remain at the end
* of the file.
* ---------------------------------------------------------------------------
* Local variables:
* c-indent-level: 4
* c-brace-imaginary-offset: 0
* c-brace-offset: -4
* c-argdecl-indent: 4
* c-label-offset: -4
* c-continued-statement-offset: 4
* c-continued-brace-offset: 0
* indent-tabs-mode: 1
* tab-width: 8
* End:
*/

View File

@ -1,4 +1,4 @@
/* /*
* File...........: linux/drivers/s390/block/dasd_fba.h * File...........: linux/drivers/s390/block/dasd_fba.h
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com> * Bugreports.to..: <Linux390@de.ibm.com>

View File

@ -1,7 +1,7 @@
/* /*
* File...........: linux/drivers/s390/block/dasd_int.h * File...........: linux/drivers/s390/block/dasd_int.h
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com> * Horst Hummel <Horst.Hummel@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com> * Bugreports.to..: <Linux390@de.ibm.com>
* (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
@ -186,7 +186,7 @@ struct dasd_ccw_req {
void *callback_data; void *callback_data;
}; };
/* /*
* dasd_ccw_req -> status can be: * dasd_ccw_req -> status can be:
*/ */
#define DASD_CQR_FILLED 0x00 /* request is ready to be processed */ #define DASD_CQR_FILLED 0x00 /* request is ready to be processed */
@ -248,7 +248,7 @@ struct dasd_discipline {
/* /*
* Error recovery functions. examine_error() returns a value that * Error recovery functions. examine_error() returns a value that
* indicates what to do for an error condition. If examine_error() * indicates what to do for an error condition. If examine_error()
* returns 'dasd_era_recover' erp_action() is called to create a * returns 'dasd_era_recover' erp_action() is called to create a
* special error recovery ccw. erp_postaction() is called after * special error recovery ccw. erp_postaction() is called after
* an error recovery ccw has finished its execution. dump_sense * an error recovery ccw has finished its execution. dump_sense
* is called for every error condition to print the sense data * is called for every error condition to print the sense data
@ -302,11 +302,11 @@ struct dasd_device {
spinlock_t request_queue_lock; spinlock_t request_queue_lock;
struct block_device *bdev; struct block_device *bdev;
unsigned int devindex; unsigned int devindex;
unsigned long blocks; /* size of volume in blocks */ unsigned long blocks; /* size of volume in blocks */
unsigned int bp_block; /* bytes per block */ unsigned int bp_block; /* bytes per block */
unsigned int s2b_shift; /* log2 (bp_block/512) */ unsigned int s2b_shift; /* log2 (bp_block/512) */
unsigned long flags; /* per device flags */ unsigned long flags; /* per device flags */
unsigned short features; /* copy of devmap-features (read-only!) */ unsigned short features; /* copy of devmap-features (read-only!) */
/* extended error reporting stuff (eer) */ /* extended error reporting stuff (eer) */
struct dasd_ccw_req *eer_cqr; struct dasd_ccw_req *eer_cqr;
@ -513,12 +513,12 @@ void dasd_generic_remove (struct ccw_device *cdev);
int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *); int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *);
int dasd_generic_set_offline (struct ccw_device *cdev); int dasd_generic_set_offline (struct ccw_device *cdev);
int dasd_generic_notify(struct ccw_device *, int); int dasd_generic_notify(struct ccw_device *, int);
void dasd_generic_auto_online (struct ccw_driver *);
/* externals in dasd_devmap.c */ /* externals in dasd_devmap.c */
extern int dasd_max_devindex; extern int dasd_max_devindex;
extern int dasd_probeonly; extern int dasd_probeonly;
extern int dasd_autodetect; extern int dasd_autodetect;
extern int dasd_nopav;
int dasd_devmap_init(void); int dasd_devmap_init(void);
void dasd_devmap_exit(void); void dasd_devmap_exit(void);
@ -606,22 +606,3 @@ static inline int dasd_eer_enabled(struct dasd_device *device)
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* DASD_H */ #endif /* DASD_H */
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* Emacs will notice this stuff at the end of the file and automatically
* adjust the settings for this buffer only. This must remain at the end
* of the file.
* ---------------------------------------------------------------------------
* Local variables:
* c-indent-level: 4
* c-brace-imaginary-offset: 0
* c-brace-offset: -4
* c-argdecl-indent: 4
* c-label-offset: -4
* c-continued-statement-offset: 4
* c-continued-brace-offset: 0
* indent-tabs-mode: 1
* tab-width: 8
* End:
*/

View File

@ -90,10 +90,10 @@ static int
dasd_ioctl_quiesce(struct dasd_device *device) dasd_ioctl_quiesce(struct dasd_device *device)
{ {
unsigned long flags; unsigned long flags;
if (!capable (CAP_SYS_ADMIN)) if (!capable (CAP_SYS_ADMIN))
return -EACCES; return -EACCES;
DEV_MESSAGE (KERN_DEBUG, device, "%s", DEV_MESSAGE (KERN_DEBUG, device, "%s",
"Quiesce IO on device"); "Quiesce IO on device");
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
@ -110,13 +110,13 @@ static int
dasd_ioctl_resume(struct dasd_device *device) dasd_ioctl_resume(struct dasd_device *device)
{ {
unsigned long flags; unsigned long flags;
if (!capable (CAP_SYS_ADMIN)) if (!capable (CAP_SYS_ADMIN))
return -EACCES; return -EACCES;
DEV_MESSAGE (KERN_DEBUG, device, "%s", DEV_MESSAGE (KERN_DEBUG, device, "%s",
"resume IO on device"); "resume IO on device");
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
device->stopped &= ~DASD_STOPPED_QUIESCE; device->stopped &= ~DASD_STOPPED_QUIESCE;
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
@ -287,7 +287,7 @@ dasd_ioctl_information(struct dasd_device *device,
dasd_info->open_count = atomic_read(&device->open_count); dasd_info->open_count = atomic_read(&device->open_count);
if (!device->bdev) if (!device->bdev)
dasd_info->open_count++; dasd_info->open_count++;
/* /*
* check if device is really formatted * check if device is really formatted
* LDL / CDL was returned by 'fill_info' * LDL / CDL was returned by 'fill_info'

View File

@ -50,6 +50,9 @@ struct raw3270 {
unsigned char *ascebc; /* ascii -> ebcdic table */ unsigned char *ascebc; /* ascii -> ebcdic table */
struct class_device *clttydev; /* 3270-class tty device ptr */ struct class_device *clttydev; /* 3270-class tty device ptr */
struct class_device *cltubdev; /* 3270-class tub device ptr */ struct class_device *cltubdev; /* 3270-class tub device ptr */
struct raw3270_request init_request;
unsigned char init_data[256];
}; };
/* raw3270->flags */ /* raw3270->flags */
@ -484,8 +487,6 @@ struct raw3270_ua { /* Query Reply structure for Usable Area */
} __attribute__ ((packed)) aua; } __attribute__ ((packed)) aua;
} __attribute__ ((packed)); } __attribute__ ((packed));
static unsigned char raw3270_init_data[256];
static struct raw3270_request raw3270_init_request;
static struct diag210 raw3270_init_diag210; static struct diag210 raw3270_init_diag210;
static DECLARE_MUTEX(raw3270_init_sem); static DECLARE_MUTEX(raw3270_init_sem);
@ -644,17 +645,17 @@ __raw3270_size_device(struct raw3270 *rp)
* required (3270 device switched to 'stand-by') and command * required (3270 device switched to 'stand-by') and command
* rejects (old devices that can't do 'read partition'). * rejects (old devices that can't do 'read partition').
*/ */
memset(&raw3270_init_request, 0, sizeof(raw3270_init_request)); memset(&rp->init_request, 0, sizeof(rp->init_request));
memset(raw3270_init_data, 0, sizeof(raw3270_init_data)); memset(&rp->init_data, 0, 256);
/* Store 'read partition' data stream to raw3270_init_data */ /* Store 'read partition' data stream to init_data */
memcpy(raw3270_init_data, wbuf, sizeof(wbuf)); memcpy(&rp->init_data, wbuf, sizeof(wbuf));
INIT_LIST_HEAD(&raw3270_init_request.list); INIT_LIST_HEAD(&rp->init_request.list);
raw3270_init_request.ccw.cmd_code = TC_WRITESF; rp->init_request.ccw.cmd_code = TC_WRITESF;
raw3270_init_request.ccw.flags = CCW_FLAG_SLI; rp->init_request.ccw.flags = CCW_FLAG_SLI;
raw3270_init_request.ccw.count = sizeof(wbuf); rp->init_request.ccw.count = sizeof(wbuf);
raw3270_init_request.ccw.cda = (__u32) __pa(raw3270_init_data); rp->init_request.ccw.cda = (__u32) __pa(&rp->init_data);
rc = raw3270_start_init(rp, &raw3270_init_view, &raw3270_init_request); rc = raw3270_start_init(rp, &raw3270_init_view, &rp->init_request);
if (rc) if (rc)
/* Check error cases: -ERESTARTSYS, -EIO and -EOPNOTSUPP */ /* Check error cases: -ERESTARTSYS, -EIO and -EOPNOTSUPP */
return rc; return rc;
@ -679,18 +680,18 @@ __raw3270_size_device(struct raw3270 *rp)
* The device accepted the 'read partition' command. Now * The device accepted the 'read partition' command. Now
* set up a read ccw and issue it. * set up a read ccw and issue it.
*/ */
raw3270_init_request.ccw.cmd_code = TC_READMOD; rp->init_request.ccw.cmd_code = TC_READMOD;
raw3270_init_request.ccw.flags = CCW_FLAG_SLI; rp->init_request.ccw.flags = CCW_FLAG_SLI;
raw3270_init_request.ccw.count = sizeof(raw3270_init_data); rp->init_request.ccw.count = sizeof(rp->init_data);
raw3270_init_request.ccw.cda = (__u32) __pa(raw3270_init_data); rp->init_request.ccw.cda = (__u32) __pa(rp->init_data);
rc = raw3270_start_init(rp, &raw3270_init_view, &raw3270_init_request); rc = raw3270_start_init(rp, &raw3270_init_view, &rp->init_request);
if (rc) if (rc)
return rc; return rc;
/* Got a Query Reply */ /* Got a Query Reply */
count = sizeof(raw3270_init_data) - raw3270_init_request.rescnt; count = sizeof(rp->init_data) - rp->init_request.rescnt;
uap = (struct raw3270_ua *) (raw3270_init_data + 1); uap = (struct raw3270_ua *) (rp->init_data + 1);
/* Paranoia check. */ /* Paranoia check. */
if (raw3270_init_data[0] != 0x88 || uap->uab.qcode != 0x81) if (rp->init_data[0] != 0x88 || uap->uab.qcode != 0x81)
return -EOPNOTSUPP; return -EOPNOTSUPP;
/* Copy rows/columns of default Usable Area */ /* Copy rows/columns of default Usable Area */
rp->rows = uap->uab.h; rp->rows = uap->uab.h;
@ -749,18 +750,18 @@ raw3270_reset_device(struct raw3270 *rp)
int rc; int rc;
down(&raw3270_init_sem); down(&raw3270_init_sem);
memset(&raw3270_init_request, 0, sizeof(raw3270_init_request)); memset(&rp->init_request, 0, sizeof(rp->init_request));
memset(raw3270_init_data, 0, sizeof(raw3270_init_data)); memset(&rp->init_data, 0, sizeof(rp->init_data));
/* Store reset data stream to raw3270_init_data/raw3270_init_request */ /* Store reset data stream to init_data/init_request */
raw3270_init_data[0] = TW_KR; rp->init_data[0] = TW_KR;
INIT_LIST_HEAD(&raw3270_init_request.list); INIT_LIST_HEAD(&rp->init_request.list);
raw3270_init_request.ccw.cmd_code = TC_EWRITEA; rp->init_request.ccw.cmd_code = TC_EWRITEA;
raw3270_init_request.ccw.flags = CCW_FLAG_SLI; rp->init_request.ccw.flags = CCW_FLAG_SLI;
raw3270_init_request.ccw.count = 1; rp->init_request.ccw.count = 1;
raw3270_init_request.ccw.cda = (__u32) __pa(raw3270_init_data); rp->init_request.ccw.cda = (__u32) __pa(rp->init_data);
rp->view = &raw3270_init_view; rp->view = &raw3270_init_view;
raw3270_init_view.dev = rp; raw3270_init_view.dev = rp;
rc = raw3270_start_init(rp, &raw3270_init_view, &raw3270_init_request); rc = raw3270_start_init(rp, &raw3270_init_view, &rp->init_request);
raw3270_init_view.dev = 0; raw3270_init_view.dev = 0;
rp->view = 0; rp->view = 0;
up(&raw3270_init_sem); up(&raw3270_init_sem);
@ -854,7 +855,7 @@ raw3270_setup_console(struct ccw_device *cdev)
char *ascebc; char *ascebc;
int rc; int rc;
rp = (struct raw3270 *) alloc_bootmem(sizeof(struct raw3270)); rp = (struct raw3270 *) alloc_bootmem_low(sizeof(struct raw3270));
ascebc = (char *) alloc_bootmem(256); ascebc = (char *) alloc_bootmem(256);
rc = raw3270_setup_device(cdev, rp, ascebc); rc = raw3270_setup_device(cdev, rp, ascebc);
if (rc) if (rc)
@ -895,7 +896,7 @@ raw3270_create_device(struct ccw_device *cdev)
char *ascebc; char *ascebc;
int rc; int rc;
rp = kmalloc(sizeof(struct raw3270), GFP_KERNEL); rp = kmalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA);
if (!rp) if (!rp)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
ascebc = kmalloc(256, GFP_KERNEL); ascebc = kmalloc(256, GFP_KERNEL);

View File

@ -224,39 +224,6 @@ is_blacklisted (int ssid, int devno)
} }
#ifdef CONFIG_PROC_FS #ifdef CONFIG_PROC_FS
static int
__s390_redo_validation(struct subchannel_id schid, void *data)
{
int ret;
struct subchannel *sch;
sch = get_subchannel_by_schid(schid);
if (sch) {
/* Already known. */
put_device(&sch->dev);
return 0;
}
ret = css_probe_device(schid);
if (ret == -ENXIO)
return ret; /* We're through. */
if (ret == -ENOMEM)
/* Stop validation for now. Bad, but no need for a panic. */
return ret;
return 0;
}
/*
* Function: s390_redo_validation
* Look for no longer blacklisted devices
* FIXME: there must be a better way to do this */
static inline void
s390_redo_validation (void)
{
CIO_TRACE_EVENT (0, "redoval");
for_each_subchannel(__s390_redo_validation, NULL);
}
/* /*
* Function: blacklist_parse_proc_parameters * Function: blacklist_parse_proc_parameters
* parse the stuff which is piped to /proc/cio_ignore * parse the stuff which is piped to /proc/cio_ignore
@ -281,7 +248,7 @@ blacklist_parse_proc_parameters (char *buf)
return; return;
} }
s390_redo_validation (); css_schedule_reprobe();
} }
/* Iterator struct for all devices. */ /* Iterator struct for all devices. */

View File

@ -404,21 +404,24 @@ ccwgroup_driver_register (struct ccwgroup_driver *cdriver)
} }
static int static int
__ccwgroup_driver_unregister_device(struct device *dev, void *data) __ccwgroup_match_all(struct device *dev, void *data)
{ {
__ccwgroup_remove_symlinks(to_ccwgroupdev(dev)); return 1;
device_unregister(dev);
put_device(dev);
return 0;
} }
void void
ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver) ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver)
{ {
struct device *dev;
/* We don't want ccwgroup devices to live longer than their driver. */ /* We don't want ccwgroup devices to live longer than their driver. */
get_driver(&cdriver->driver); get_driver(&cdriver->driver);
driver_for_each_device(&cdriver->driver, NULL, NULL, while ((dev = driver_find_device(&cdriver->driver, NULL, NULL,
__ccwgroup_driver_unregister_device); __ccwgroup_match_all))) {
__ccwgroup_remove_symlinks(to_ccwgroupdev(dev));
device_unregister(dev);
put_device(dev);
}
put_driver(&cdriver->driver); put_driver(&cdriver->driver);
driver_unregister(&cdriver->driver); driver_unregister(&cdriver->driver);
} }
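The rewrite above switches from iterating the device list to a "find one, unregister it, search again" loop, since device_unregister() removes the device from the very list being walked. A userspace sketch of that loop shape, with an invented toy list standing in for the driver core:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	struct node *next;
};

static struct node *head;

static struct node *find_first(int (*match)(struct node *))
{
	struct node *n;

	for (n = head; n; n = n->next)
		if (match(n))
			return n;
	return NULL;
}

static void remove_node(struct node *victim)
{
	struct node **p;

	for (p = &head; *p; p = &(*p)->next)
		if (*p == victim) {
			*p = victim->next;
			free(victim);
			return;
		}
}

static int match_all(struct node *n)
{
	return 1;	/* mirrors __ccwgroup_match_all() above */
}

int main(void)
{
	struct node *n;
	int i;

	for (i = 0; i < 3; i++) {
		n = malloc(sizeof(*n));
		if (!n)
			return 1;
		n->id = i;
		n->next = head;
		head = n;
	}
	/* like: while ((dev = driver_find_device(...))) device_unregister(dev); */
	while ((n = find_first(match_all)) != NULL) {
		printf("unregistering node %d\n", n->id);
		remove_node(n);
	}
	return 0;
}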

View File

@ -244,8 +244,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) && if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
(sch->schib.scsw.actl & SCSW_ACTL_SCHACT) && (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
(sch->schib.pmcw.lpum == mask) && (sch->schib.pmcw.lpum == mask)) {
(sch->vpm == 0)) {
int cc; int cc;
cc = cio_clear(sch); cc = cio_clear(sch);
@ -918,12 +917,13 @@ chp_measurement_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
chp = to_channelpath(container_of(kobj, struct device, kobj)); chp = to_channelpath(container_of(kobj, struct device, kobj));
css = to_css(chp->dev.parent); css = to_css(chp->dev.parent);
size = sizeof(struct cmg_chars); size = sizeof(struct cmg_entry);
/* Only allow single reads. */ /* Only allow single reads. */
if (off || count < size) if (off || count < size)
return 0; return 0;
chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->id); chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->id);
count = size;
return count; return count;
} }

View File

@ -3,9 +3,10 @@
* *
* Linux on zSeries Channel Measurement Facility support * Linux on zSeries Channel Measurement Facility support
* *
* Copyright 2000,2003 IBM Corporation * Copyright 2000,2006 IBM Corporation
* *
* Author: Arnd Bergmann <arndb@de.ibm.com> * Authors: Arnd Bergmann <arndb@de.ibm.com>
* Cornelia Huck <cornelia.huck@de.ibm.com>
* *
* original idea from Natarajan Krishnaswami <nkrishna@us.ibm.com> * original idea from Natarajan Krishnaswami <nkrishna@us.ibm.com>
* *
@ -96,9 +97,9 @@ module_param(format, bool, 0444);
/** /**
* struct cmb_operations - functions to use depending on cmb_format * struct cmb_operations - functions to use depending on cmb_format
* *
* all these functions operate on a struct cmf_device. There is only * Most of these functions operate on a struct ccw_device. There is only
* one instance of struct cmb_operations because all cmf_device * one instance of struct cmb_operations because the format of the measurement
* objects are guaranteed to be of the same type. * data is guaranteed to be the same for every ccw_device.
* *
* @alloc: allocate memory for a channel measurement block, * @alloc: allocate memory for a channel measurement block,
* either with the help of a special pool or with kmalloc * either with the help of a special pool or with kmalloc
@ -107,6 +108,7 @@ module_param(format, bool, 0444);
* @readall: read a measurement block in a common format * @readall: read a measurement block in a common format
* @reset: clear the data in the associated measurement block and * @reset: clear the data in the associated measurement block and
* reset its time stamp * reset its time stamp
* @align: align an allocated block so that the hardware can use it
*/ */
struct cmb_operations { struct cmb_operations {
int (*alloc) (struct ccw_device*); int (*alloc) (struct ccw_device*);
@ -115,11 +117,19 @@ struct cmb_operations {
u64 (*read) (struct ccw_device*, int); u64 (*read) (struct ccw_device*, int);
int (*readall)(struct ccw_device*, struct cmbdata *); int (*readall)(struct ccw_device*, struct cmbdata *);
void (*reset) (struct ccw_device*); void (*reset) (struct ccw_device*);
void * (*align) (void *);
struct attribute_group *attr_group; struct attribute_group *attr_group;
}; };
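All format-dependent steps go through this single operations table, and the new align callback follows the same pattern. A toy userspace version of the dispatch; every demo_* name is invented for illustration and does not exist in the driver:

#include <stdio.h>

/* one set of operations, selected once, used everywhere afterwards */
struct demo_cmb_ops {
	int	(*alloc)(int devno);
	void *	(*align)(void *area);
};

static int basic_alloc(int devno)
{
	printf("allocating basic block for device %d\n", devno);
	return 0;
}

static void *basic_align(void *area)
{
	return area;	/* basic blocks need no extra alignment */
}

static const struct demo_cmb_ops demo_ops_basic = {
	.alloc	= basic_alloc,
	.align	= basic_align,
};

static const struct demo_cmb_ops *demo_ops;

int main(void)
{
	demo_ops = &demo_ops_basic;	/* in cmf.c the choice depends on the 'format' parameter */
	demo_ops->alloc(0);
	return 0;
}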
static struct cmb_operations *cmbops; static struct cmb_operations *cmbops;
struct cmb_data {
void *hw_block; /* Pointer to block updated by hardware */
void *last_block; /* Last changed block copied from hardware block */
int size; /* Size of hw_block and last_block */
unsigned long long last_update; /* when last_block was updated */
};
/* our user interface is designed in terms of nanoseconds, /* our user interface is designed in terms of nanoseconds,
* while the hardware measures total times in its own * while the hardware measures total times in its own
* unit.*/ * unit.*/
@ -226,63 +236,229 @@ struct set_schib_struct {
unsigned long address; unsigned long address;
wait_queue_head_t wait; wait_queue_head_t wait;
int ret; int ret;
struct kref kref;
}; };
static void cmf_set_schib_release(struct kref *kref)
{
struct set_schib_struct *set_data;
set_data = container_of(kref, struct set_schib_struct, kref);
kfree(set_data);
}
#define CMF_PENDING 1
static int set_schib_wait(struct ccw_device *cdev, u32 mme, static int set_schib_wait(struct ccw_device *cdev, u32 mme,
int mbfc, unsigned long address) int mbfc, unsigned long address)
{ {
struct set_schib_struct s = { struct set_schib_struct *set_data;
.mme = mme, int ret;
.mbfc = mbfc,
.address = address,
.wait = __WAIT_QUEUE_HEAD_INITIALIZER(s.wait),
};
spin_lock_irq(cdev->ccwlock); spin_lock_irq(cdev->ccwlock);
s.ret = set_schib(cdev, mme, mbfc, address); if (!cdev->private->cmb) {
if (s.ret != -EBUSY) { ret = -ENODEV;
goto out_nowait; goto out;
} }
set_data = kzalloc(sizeof(struct set_schib_struct), GFP_ATOMIC);
if (!set_data) {
ret = -ENOMEM;
goto out;
}
init_waitqueue_head(&set_data->wait);
kref_init(&set_data->kref);
set_data->mme = mme;
set_data->mbfc = mbfc;
set_data->address = address;
ret = set_schib(cdev, mme, mbfc, address);
if (ret != -EBUSY)
goto out_put;
if (cdev->private->state != DEV_STATE_ONLINE) { if (cdev->private->state != DEV_STATE_ONLINE) {
s.ret = -EBUSY;
/* if the device is not online, don't even try again */ /* if the device is not online, don't even try again */
goto out_nowait; ret = -EBUSY;
goto out_put;
} }
cdev->private->state = DEV_STATE_CMFCHANGE; cdev->private->state = DEV_STATE_CMFCHANGE;
cdev->private->cmb_wait = &s; set_data->ret = CMF_PENDING;
s.ret = 1; cdev->private->cmb_wait = set_data;
spin_unlock_irq(cdev->ccwlock); spin_unlock_irq(cdev->ccwlock);
if (wait_event_interruptible(s.wait, s.ret != 1)) { if (wait_event_interruptible(set_data->wait,
set_data->ret != CMF_PENDING)) {
spin_lock_irq(cdev->ccwlock); spin_lock_irq(cdev->ccwlock);
if (s.ret == 1) { if (set_data->ret == CMF_PENDING) {
s.ret = -ERESTARTSYS; set_data->ret = -ERESTARTSYS;
cdev->private->cmb_wait = 0;
if (cdev->private->state == DEV_STATE_CMFCHANGE) if (cdev->private->state == DEV_STATE_CMFCHANGE)
cdev->private->state = DEV_STATE_ONLINE; cdev->private->state = DEV_STATE_ONLINE;
} }
spin_unlock_irq(cdev->ccwlock); spin_unlock_irq(cdev->ccwlock);
} }
return s.ret; spin_lock_irq(cdev->ccwlock);
cdev->private->cmb_wait = NULL;
out_nowait: ret = set_data->ret;
out_put:
kref_put(&set_data->kref, cmf_set_schib_release);
out:
spin_unlock_irq(cdev->ccwlock); spin_unlock_irq(cdev->ccwlock);
return s.ret; return ret;
} }
void retry_set_schib(struct ccw_device *cdev) void retry_set_schib(struct ccw_device *cdev)
{ {
struct set_schib_struct *s; struct set_schib_struct *set_data;
s = cdev->private->cmb_wait; set_data = cdev->private->cmb_wait;
cdev->private->cmb_wait = 0; if (!set_data) {
if (!s) {
WARN_ON(1); WARN_ON(1);
return; return;
} }
s->ret = set_schib(cdev, s->mme, s->mbfc, s->address); kref_get(&set_data->kref);
wake_up(&s->wait); set_data->ret = set_schib(cdev, set_data->mme, set_data->mbfc,
set_data->address);
wake_up(&set_data->wait);
kref_put(&set_data->kref, cmf_set_schib_release);
}
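The waiter structure shared by set_schib_wait() and the interrupt-driven retry is protected by a kref, so it is freed only after both sides drop their reference. A userspace sketch of that discipline using C11 atomics in place of struct kref (all names invented):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct waiter {
	atomic_int ref;
	int ret;
};

static struct waiter *waiter_alloc(void)
{
	struct waiter *w = calloc(1, sizeof(*w));

	if (!w)
		return NULL;
	atomic_init(&w->ref, 1);	/* reference held by the sleeping caller */
	w->ret = 1;			/* corresponds to CMF_PENDING above */
	return w;
}

static void waiter_get(struct waiter *w)
{
	atomic_fetch_add(&w->ref, 1);
}

static void waiter_put(struct waiter *w)
{
	if (atomic_fetch_sub(&w->ref, 1) == 1)
		free(w);		/* last reference dropped */
}

int main(void)
{
	struct waiter *w = waiter_alloc();

	if (!w)
		return 1;

	/* retry path (interrupt context in the driver): pin, update, unpin */
	waiter_get(w);
	w->ret = 0;
	waiter_put(w);

	/* sleeper path: consume the result, then drop the last reference */
	printf("retry finished with ret=%d\n", w->ret);
	waiter_put(w);
	return 0;
}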
static int cmf_copy_block(struct ccw_device *cdev)
{
struct subchannel *sch;
void *reference_buf;
void *hw_block;
struct cmb_data *cmb_data;
sch = to_subchannel(cdev->dev.parent);
if (stsch(sch->schid, &sch->schib))
return -ENODEV;
if (sch->schib.scsw.fctl & SCSW_FCTL_START_FUNC) {
/* Don't copy if a start function is in progress. */
if (!(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED) &&
(sch->schib.scsw.actl &
(SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) &&
!(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS))
return -EBUSY;
}
cmb_data = cdev->private->cmb;
hw_block = cmbops->align(cmb_data->hw_block);
if (!memcmp(cmb_data->last_block, hw_block, cmb_data->size))
/* No need to copy. */
return 0;
reference_buf = kzalloc(cmb_data->size, GFP_ATOMIC);
if (!reference_buf)
return -ENOMEM;
/* Ensure consistency of block copied from hardware. */
do {
memcpy(cmb_data->last_block, hw_block, cmb_data->size);
memcpy(reference_buf, hw_block, cmb_data->size);
} while (memcmp(cmb_data->last_block, reference_buf, cmb_data->size));
cmb_data->last_update = get_clock();
kfree(reference_buf);
return 0;
}
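cmf_copy_block() copes with a block the hardware may update at any moment by copying it twice and retrying until both copies match. A self-contained userspace analogue of that loop (build with -pthread; the updater thread, block size and names merely stand in for the hardware, and the shared array is intentionally racy to mimic a hardware-updated block):

#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define BLOCK_SIZE 64

static volatile unsigned char hw_block[BLOCK_SIZE];	/* stands in for the hardware block */
static volatile int stop;

static void *hw_updater(void *arg)
{
	unsigned char v = 1;
	int i;

	while (!stop) {
		for (i = 0; i < BLOCK_SIZE; i++)
			hw_block[i] = v;
		v++;
		usleep(1000);
	}
	return NULL;
}

int main(void)
{
	unsigned char last_block[BLOCK_SIZE], reference[BLOCK_SIZE];
	pthread_t tid;

	if (pthread_create(&tid, NULL, hw_updater, NULL))
		return 1;

	/* copy twice and retry until both copies agree, as in cmf_copy_block() */
	do {
		memcpy(last_block, (const void *) hw_block, BLOCK_SIZE);
		memcpy(reference, (const void *) hw_block, BLOCK_SIZE);
	} while (memcmp(last_block, reference, BLOCK_SIZE) != 0);

	stop = 1;
	pthread_join(tid, NULL);
	printf("consistent snapshot, value %u\n", last_block[0]);
	return 0;
}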
struct copy_block_struct {
wait_queue_head_t wait;
int ret;
struct kref kref;
};
static void cmf_copy_block_release(struct kref *kref)
{
struct copy_block_struct *copy_block;
copy_block = container_of(kref, struct copy_block_struct, kref);
kfree(copy_block);
}
static int cmf_cmb_copy_wait(struct ccw_device *cdev)
{
struct copy_block_struct *copy_block;
int ret;
unsigned long flags;
spin_lock_irqsave(cdev->ccwlock, flags);
if (!cdev->private->cmb) {
ret = -ENODEV;
goto out;
}
copy_block = kzalloc(sizeof(struct copy_block_struct), GFP_ATOMIC);
if (!copy_block) {
ret = -ENOMEM;
goto out;
}
init_waitqueue_head(&copy_block->wait);
kref_init(&copy_block->kref);
ret = cmf_copy_block(cdev);
if (ret != -EBUSY)
goto out_put;
if (cdev->private->state != DEV_STATE_ONLINE) {
ret = -EBUSY;
goto out_put;
}
cdev->private->state = DEV_STATE_CMFUPDATE;
copy_block->ret = CMF_PENDING;
cdev->private->cmb_wait = copy_block;
spin_unlock_irqrestore(cdev->ccwlock, flags);
if (wait_event_interruptible(copy_block->wait,
copy_block->ret != CMF_PENDING)) {
spin_lock_irqsave(cdev->ccwlock, flags);
if (copy_block->ret == CMF_PENDING) {
copy_block->ret = -ERESTARTSYS;
if (cdev->private->state == DEV_STATE_CMFUPDATE)
cdev->private->state = DEV_STATE_ONLINE;
}
spin_unlock_irqrestore(cdev->ccwlock, flags);
}
spin_lock_irqsave(cdev->ccwlock, flags);
cdev->private->cmb_wait = NULL;
ret = copy_block->ret;
out_put:
kref_put(&copy_block->kref, cmf_copy_block_release);
out:
spin_unlock_irqrestore(cdev->ccwlock, flags);
return ret;
}
void cmf_retry_copy_block(struct ccw_device *cdev)
{
struct copy_block_struct *copy_block;
copy_block = cdev->private->cmb_wait;
if (!copy_block) {
WARN_ON(1);
return;
}
kref_get(&copy_block->kref);
copy_block->ret = cmf_copy_block(cdev);
wake_up(&copy_block->wait);
kref_put(&copy_block->kref, cmf_copy_block_release);
}
static void cmf_generic_reset(struct ccw_device *cdev)
{
struct cmb_data *cmb_data;
spin_lock_irq(cdev->ccwlock);
cmb_data = cdev->private->cmb;
if (cmb_data) {
memset(cmb_data->last_block, 0, cmb_data->size);
/*
* Need to reset hw block as well to make the hardware start
* from 0 again.
*/
memset(cmbops->align(cmb_data->hw_block), 0, cmb_data->size);
cmb_data->last_update = 0;
}
cdev->private->cmb_start_time = get_clock();
spin_unlock_irq(cdev->ccwlock);
} }
/** /**
@ -343,8 +519,8 @@ struct cmb {
/* insert a single device into the cmb_area list /* insert a single device into the cmb_area list
* called with cmb_area.lock held from alloc_cmb * called with cmb_area.lock held from alloc_cmb
*/ */
static inline int static inline int alloc_cmb_single (struct ccw_device *cdev,
alloc_cmb_single (struct ccw_device *cdev) struct cmb_data *cmb_data)
{ {
struct cmb *cmb; struct cmb *cmb;
struct ccw_device_private *node; struct ccw_device_private *node;
@ -358,10 +534,12 @@ alloc_cmb_single (struct ccw_device *cdev)
/* find first unused cmb in cmb_area.mem. /* find first unused cmb in cmb_area.mem.
* this is a little tricky: cmb_area.list * this is a little tricky: cmb_area.list
* remains sorted by ->cmb pointers */ * remains sorted by ->cmb->hw_block pointers */
cmb = cmb_area.mem; cmb = cmb_area.mem;
list_for_each_entry(node, &cmb_area.list, cmb_list) { list_for_each_entry(node, &cmb_area.list, cmb_list) {
if ((struct cmb*)node->cmb > cmb) struct cmb_data *data;
data = node->cmb;
if ((struct cmb*)data->hw_block > cmb)
break; break;
cmb++; cmb++;
} }
@ -372,7 +550,8 @@ alloc_cmb_single (struct ccw_device *cdev)
/* insert new cmb */ /* insert new cmb */
list_add_tail(&cdev->private->cmb_list, &node->cmb_list); list_add_tail(&cdev->private->cmb_list, &node->cmb_list);
cdev->private->cmb = cmb; cmb_data->hw_block = cmb;
cdev->private->cmb = cmb_data;
ret = 0; ret = 0;
out: out:
spin_unlock_irq(cdev->ccwlock); spin_unlock_irq(cdev->ccwlock);
@ -385,7 +564,19 @@ alloc_cmb (struct ccw_device *cdev)
int ret; int ret;
struct cmb *mem; struct cmb *mem;
ssize_t size; ssize_t size;
struct cmb_data *cmb_data;
/* Allocate private cmb_data. */
cmb_data = kzalloc(sizeof(struct cmb_data), GFP_KERNEL);
if (!cmb_data)
return -ENOMEM;
cmb_data->last_block = kzalloc(sizeof(struct cmb), GFP_KERNEL);
if (!cmb_data->last_block) {
kfree(cmb_data);
return -ENOMEM;
}
cmb_data->size = sizeof(struct cmb);
spin_lock(&cmb_area.lock); spin_lock(&cmb_area.lock);
if (!cmb_area.mem) { if (!cmb_area.mem) {
@ -414,29 +605,36 @@ alloc_cmb (struct ccw_device *cdev)
} }
/* do the actual allocation */ /* do the actual allocation */
ret = alloc_cmb_single(cdev); ret = alloc_cmb_single(cdev, cmb_data);
out: out:
spin_unlock(&cmb_area.lock); spin_unlock(&cmb_area.lock);
if (ret) {
kfree(cmb_data->last_block);
kfree(cmb_data);
}
return ret; return ret;
} }
static void static void free_cmb(struct ccw_device *cdev)
free_cmb(struct ccw_device *cdev)
{ {
struct ccw_device_private *priv; struct ccw_device_private *priv;
struct cmb_data *cmb_data;
priv = cdev->private;
spin_lock(&cmb_area.lock); spin_lock(&cmb_area.lock);
spin_lock_irq(cdev->ccwlock); spin_lock_irq(cdev->ccwlock);
priv = cdev->private;
if (list_empty(&priv->cmb_list)) { if (list_empty(&priv->cmb_list)) {
/* already freed */ /* already freed */
goto out; goto out;
} }
cmb_data = priv->cmb;
priv->cmb = NULL; priv->cmb = NULL;
if (cmb_data)
kfree(cmb_data->last_block);
kfree(cmb_data);
list_del_init(&priv->cmb_list); list_del_init(&priv->cmb_list);
if (list_empty(&cmb_area.list)) { if (list_empty(&cmb_area.list)) {
@ -451,83 +649,97 @@ out:
spin_unlock(&cmb_area.lock); spin_unlock(&cmb_area.lock);
} }
static int static int set_cmb(struct ccw_device *cdev, u32 mme)
set_cmb(struct ccw_device *cdev, u32 mme)
{ {
u16 offset; u16 offset;
struct cmb_data *cmb_data;
unsigned long flags;
if (!cdev->private->cmb) spin_lock_irqsave(cdev->ccwlock, flags);
if (!cdev->private->cmb) {
spin_unlock_irqrestore(cdev->ccwlock, flags);
return -EINVAL; return -EINVAL;
}
offset = mme ? (struct cmb *)cdev->private->cmb - cmb_area.mem : 0; cmb_data = cdev->private->cmb;
offset = mme ? (struct cmb *)cmb_data->hw_block - cmb_area.mem : 0;
spin_unlock_irqrestore(cdev->ccwlock, flags);
return set_schib_wait(cdev, mme, 0, offset); return set_schib_wait(cdev, mme, 0, offset);
} }
static u64 static u64 read_cmb (struct ccw_device *cdev, int index)
read_cmb (struct ccw_device *cdev, int index)
{ {
/* yes, we have to put it on the stack struct cmb *cmb;
* because the cmb must only be accessed
* atomically, e.g. with mvc */
struct cmb cmb;
unsigned long flags;
u32 val; u32 val;
int ret;
unsigned long flags;
ret = cmf_cmb_copy_wait(cdev);
if (ret < 0)
return 0;
spin_lock_irqsave(cdev->ccwlock, flags); spin_lock_irqsave(cdev->ccwlock, flags);
if (!cdev->private->cmb) { if (!cdev->private->cmb) {
spin_unlock_irqrestore(cdev->ccwlock, flags); ret = 0;
return 0; goto out;
} }
cmb = ((struct cmb_data *)cdev->private->cmb)->last_block;
cmb = *(struct cmb*)cdev->private->cmb;
spin_unlock_irqrestore(cdev->ccwlock, flags);
switch (index) { switch (index) {
case cmb_ssch_rsch_count: case cmb_ssch_rsch_count:
return cmb.ssch_rsch_count; ret = cmb->ssch_rsch_count;
goto out;
case cmb_sample_count: case cmb_sample_count:
return cmb.sample_count; ret = cmb->sample_count;
goto out;
case cmb_device_connect_time: case cmb_device_connect_time:
val = cmb.device_connect_time; val = cmb->device_connect_time;
break; break;
case cmb_function_pending_time: case cmb_function_pending_time:
val = cmb.function_pending_time; val = cmb->function_pending_time;
break; break;
case cmb_device_disconnect_time: case cmb_device_disconnect_time:
val = cmb.device_disconnect_time; val = cmb->device_disconnect_time;
break; break;
case cmb_control_unit_queuing_time: case cmb_control_unit_queuing_time:
val = cmb.control_unit_queuing_time; val = cmb->control_unit_queuing_time;
break; break;
case cmb_device_active_only_time: case cmb_device_active_only_time:
val = cmb.device_active_only_time; val = cmb->device_active_only_time;
break; break;
default: default:
return 0; ret = 0;
goto out;
} }
return time_to_avg_nsec(val, cmb.sample_count); ret = time_to_avg_nsec(val, cmb->sample_count);
out:
spin_unlock_irqrestore(cdev->ccwlock, flags);
return ret;
} }
static int static int readall_cmb (struct ccw_device *cdev, struct cmbdata *data)
readall_cmb (struct ccw_device *cdev, struct cmbdata *data)
{ {
/* yes, we have to put it on the stack struct cmb *cmb;
* because the cmb must only be accessed struct cmb_data *cmb_data;
* atomically, e.g. with mvc */
struct cmb cmb;
unsigned long flags;
u64 time; u64 time;
unsigned long flags;
int ret;
ret = cmf_cmb_copy_wait(cdev);
if (ret < 0)
return ret;
spin_lock_irqsave(cdev->ccwlock, flags); spin_lock_irqsave(cdev->ccwlock, flags);
if (!cdev->private->cmb) { cmb_data = cdev->private->cmb;
spin_unlock_irqrestore(cdev->ccwlock, flags); if (!cmb_data) {
return -ENODEV; ret = -ENODEV;
goto out;
} }
if (cmb_data->last_update == 0) {
cmb = *(struct cmb*)cdev->private->cmb; ret = -EAGAIN;
time = get_clock() - cdev->private->cmb_start_time; goto out;
spin_unlock_irqrestore(cdev->ccwlock, flags); }
cmb = cmb_data->last_block;
time = cmb_data->last_update - cdev->private->cmb_start_time;
memset(data, 0, sizeof(struct cmbdata)); memset(data, 0, sizeof(struct cmbdata));
@ -538,31 +750,32 @@ readall_cmb (struct ccw_device *cdev, struct cmbdata *data)
data->elapsed_time = (time * 1000) >> 12; data->elapsed_time = (time * 1000) >> 12;
/* copy data to new structure */ /* copy data to new structure */
data->ssch_rsch_count = cmb.ssch_rsch_count; data->ssch_rsch_count = cmb->ssch_rsch_count;
data->sample_count = cmb.sample_count; data->sample_count = cmb->sample_count;
/* time fields are converted to nanoseconds while copying */ /* time fields are converted to nanoseconds while copying */
data->device_connect_time = time_to_nsec(cmb.device_connect_time); data->device_connect_time = time_to_nsec(cmb->device_connect_time);
data->function_pending_time = time_to_nsec(cmb.function_pending_time); data->function_pending_time = time_to_nsec(cmb->function_pending_time);
data->device_disconnect_time = time_to_nsec(cmb.device_disconnect_time); data->device_disconnect_time =
time_to_nsec(cmb->device_disconnect_time);
data->control_unit_queuing_time data->control_unit_queuing_time
= time_to_nsec(cmb.control_unit_queuing_time); = time_to_nsec(cmb->control_unit_queuing_time);
data->device_active_only_time data->device_active_only_time
= time_to_nsec(cmb.device_active_only_time); = time_to_nsec(cmb->device_active_only_time);
ret = 0;
return 0; out:
spin_unlock_irqrestore(cdev->ccwlock, flags);
return ret;
} }
static void static void reset_cmb(struct ccw_device *cdev)
reset_cmb(struct ccw_device *cdev)
{ {
struct cmb *cmb; cmf_generic_reset(cdev);
spin_lock_irq(cdev->ccwlock); }
cmb = cdev->private->cmb;
if (cmb) static void * align_cmb(void *area)
memset (cmb, 0, sizeof (*cmb)); {
cdev->private->cmb_start_time = get_clock(); return area;
spin_unlock_irq(cdev->ccwlock);
} }
static struct attribute_group cmf_attr_group; static struct attribute_group cmf_attr_group;
@ -574,6 +787,7 @@ static struct cmb_operations cmbops_basic = {
.read = read_cmb, .read = read_cmb,
.readall = readall_cmb, .readall = readall_cmb,
.reset = reset_cmb, .reset = reset_cmb,
.align = align_cmb,
.attr_group = &cmf_attr_group, .attr_group = &cmf_attr_group,
}; };
@ -610,22 +824,34 @@ static inline struct cmbe* cmbe_align(struct cmbe *c)
return (struct cmbe*)addr; return (struct cmbe*)addr;
} }
static int static int alloc_cmbe (struct ccw_device *cdev)
alloc_cmbe (struct ccw_device *cdev)
{ {
struct cmbe *cmbe; struct cmbe *cmbe;
cmbe = kmalloc (sizeof (*cmbe) * 2, GFP_KERNEL); struct cmb_data *cmb_data;
int ret;
cmbe = kzalloc (sizeof (*cmbe) * 2, GFP_KERNEL);
if (!cmbe) if (!cmbe)
return -ENOMEM; return -ENOMEM;
+	cmb_data = kzalloc(sizeof(struct cmb_data), GFP_KERNEL);
+	if (!cmb_data) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	cmb_data->last_block = kzalloc(sizeof(struct cmbe), GFP_KERNEL);
+	if (!cmb_data->last_block) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	cmb_data->size = sizeof(struct cmbe);
 	spin_lock_irq(cdev->ccwlock);
 	if (cdev->private->cmb) {
-		kfree(cmbe);
 		spin_unlock_irq(cdev->ccwlock);
-		return -EBUSY;
+		ret = -EBUSY;
+		goto out_free;
 	}
-	cdev->private->cmb = cmbe;
+	cmb_data->hw_block = cmbe;
+	cdev->private->cmb = cmb_data;
 	spin_unlock_irq(cdev->ccwlock);

 	/* activate global measurement if this is the first channel */
@@ -636,14 +862,24 @@ alloc_cmbe (struct ccw_device *cdev)
 	spin_unlock(&cmb_area.lock);
 	return 0;
+out_free:
+	if (cmb_data)
+		kfree(cmb_data->last_block);
+	kfree(cmb_data);
+	kfree(cmbe);
+	return ret;
 }

-static void
-free_cmbe (struct ccw_device *cdev)
+static void free_cmbe (struct ccw_device *cdev)
 {
+	struct cmb_data *cmb_data;
+
 	spin_lock_irq(cdev->ccwlock);
-	kfree(cdev->private->cmb);
+	cmb_data = cdev->private->cmb;
 	cdev->private->cmb = NULL;
+	if (cmb_data)
+		kfree(cmb_data->last_block);
+	kfree(cmb_data);
 	spin_unlock_irq(cdev->ccwlock);

 	/* deactivate global measurement if this is the last channel */
@@ -654,89 +890,105 @@ free_cmbe (struct ccw_device *cdev)
 	spin_unlock(&cmb_area.lock);
 }

-static int
-set_cmbe(struct ccw_device *cdev, u32 mme)
+static int set_cmbe(struct ccw_device *cdev, u32 mme)
 {
 	unsigned long mba;
+	struct cmb_data *cmb_data;
+	unsigned long flags;

-	if (!cdev->private->cmb)
+	spin_lock_irqsave(cdev->ccwlock, flags);
+	if (!cdev->private->cmb) {
+		spin_unlock_irqrestore(cdev->ccwlock, flags);
 		return -EINVAL;
-	mba = mme ? (unsigned long) cmbe_align(cdev->private->cmb) : 0;
+	}
+	cmb_data = cdev->private->cmb;
+	mba = mme ? (unsigned long) cmbe_align(cmb_data->hw_block) : 0;
+	spin_unlock_irqrestore(cdev->ccwlock, flags);

 	return set_schib_wait(cdev, mme, 1, mba);
 }

-u64
-read_cmbe (struct ccw_device *cdev, int index)
+static u64 read_cmbe (struct ccw_device *cdev, int index)
 {
-	/* yes, we have to put it on the stack
-	 * because the cmb must only be accessed
-	 * atomically, e.g. with mvc */
-	struct cmbe cmb;
-	unsigned long flags;
+	struct cmbe *cmb;
+	struct cmb_data *cmb_data;
 	u32 val;
+	int ret;
+	unsigned long flags;
+
+	ret = cmf_cmb_copy_wait(cdev);
+	if (ret < 0)
+		return 0;

 	spin_lock_irqsave(cdev->ccwlock, flags);
-	if (!cdev->private->cmb) {
-		spin_unlock_irqrestore(cdev->ccwlock, flags);
-		return 0;
+	cmb_data = cdev->private->cmb;
+	if (!cmb_data) {
+		ret = 0;
+		goto out;
 	}
-	cmb = *cmbe_align(cdev->private->cmb);
-	spin_unlock_irqrestore(cdev->ccwlock, flags);
+	cmb = cmb_data->last_block;
 	switch (index) {
 	case cmb_ssch_rsch_count:
-		return cmb.ssch_rsch_count;
+		ret = cmb->ssch_rsch_count;
+		goto out;
 	case cmb_sample_count:
-		return cmb.sample_count;
+		ret = cmb->sample_count;
+		goto out;
 	case cmb_device_connect_time:
-		val = cmb.device_connect_time;
+		val = cmb->device_connect_time;
 		break;
 	case cmb_function_pending_time:
-		val = cmb.function_pending_time;
+		val = cmb->function_pending_time;
 		break;
 	case cmb_device_disconnect_time:
-		val = cmb.device_disconnect_time;
+		val = cmb->device_disconnect_time;
 		break;
 	case cmb_control_unit_queuing_time:
-		val = cmb.control_unit_queuing_time;
+		val = cmb->control_unit_queuing_time;
 		break;
 	case cmb_device_active_only_time:
-		val = cmb.device_active_only_time;
+		val = cmb->device_active_only_time;
 		break;
 	case cmb_device_busy_time:
-		val = cmb.device_busy_time;
+		val = cmb->device_busy_time;
 		break;
 	case cmb_initial_command_response_time:
-		val = cmb.initial_command_response_time;
+		val = cmb->initial_command_response_time;
 		break;
 	default:
-		return 0;
+		ret = 0;
+		goto out;
 	}
-	return time_to_avg_nsec(val, cmb.sample_count);
+	ret = time_to_avg_nsec(val, cmb->sample_count);
+out:
+	spin_unlock_irqrestore(cdev->ccwlock, flags);
+	return ret;
 }

-static int
-readall_cmbe (struct ccw_device *cdev, struct cmbdata *data)
+static int readall_cmbe (struct ccw_device *cdev, struct cmbdata *data)
 {
-	/* yes, we have to put it on the stack
-	 * because the cmb must only be accessed
-	 * atomically, e.g. with mvc */
-	struct cmbe cmb;
-	unsigned long flags;
+	struct cmbe *cmb;
+	struct cmb_data *cmb_data;
 	u64 time;
+	unsigned long flags;
+	int ret;
+
+	ret = cmf_cmb_copy_wait(cdev);
+	if (ret < 0)
+		return ret;

 	spin_lock_irqsave(cdev->ccwlock, flags);
-	if (!cdev->private->cmb) {
-		spin_unlock_irqrestore(cdev->ccwlock, flags);
-		return -ENODEV;
+	cmb_data = cdev->private->cmb;
+	if (!cmb_data) {
+		ret = -ENODEV;
+		goto out;
 	}
-
-	cmb = *cmbe_align(cdev->private->cmb);
-	time = get_clock() - cdev->private->cmb_start_time;
-	spin_unlock_irqrestore(cdev->ccwlock, flags);
+	if (cmb_data->last_update == 0) {
+		ret = -EAGAIN;
+		goto out;
+	}
+	time = cmb_data->last_update - cdev->private->cmb_start_time;

 	memset (data, 0, sizeof(struct cmbdata));

@@ -746,35 +998,38 @@ readall_cmbe (struct ccw_device *cdev, struct cmbdata *data)
 	/* conver to nanoseconds */
 	data->elapsed_time = (time * 1000) >> 12;

+	cmb = cmb_data->last_block;
 	/* copy data to new structure */
-	data->ssch_rsch_count = cmb.ssch_rsch_count;
-	data->sample_count = cmb.sample_count;
+	data->ssch_rsch_count = cmb->ssch_rsch_count;
+	data->sample_count = cmb->sample_count;

 	/* time fields are converted to nanoseconds while copying */
-	data->device_connect_time = time_to_nsec(cmb.device_connect_time);
-	data->function_pending_time = time_to_nsec(cmb.function_pending_time);
-	data->device_disconnect_time = time_to_nsec(cmb.device_disconnect_time);
+	data->device_connect_time = time_to_nsec(cmb->device_connect_time);
+	data->function_pending_time = time_to_nsec(cmb->function_pending_time);
+	data->device_disconnect_time =
+		time_to_nsec(cmb->device_disconnect_time);
 	data->control_unit_queuing_time
-		= time_to_nsec(cmb.control_unit_queuing_time);
+		= time_to_nsec(cmb->control_unit_queuing_time);
 	data->device_active_only_time
-		= time_to_nsec(cmb.device_active_only_time);
+		= time_to_nsec(cmb->device_active_only_time);
-	data->device_busy_time = time_to_nsec(cmb.device_busy_time);
+	data->device_busy_time = time_to_nsec(cmb->device_busy_time);
 	data->initial_command_response_time
-		= time_to_nsec(cmb.initial_command_response_time);
+		= time_to_nsec(cmb->initial_command_response_time);

-	return 0;
+	ret = 0;
+out:
+	spin_unlock_irqrestore(cdev->ccwlock, flags);
+	return ret;
 }

-static void
-reset_cmbe(struct ccw_device *cdev)
+static void reset_cmbe(struct ccw_device *cdev)
 {
-	struct cmbe *cmb;
-	spin_lock_irq(cdev->ccwlock);
-	cmb = cmbe_align(cdev->private->cmb);
-	if (cmb)
-		memset (cmb, 0, sizeof (*cmb));
-	cdev->private->cmb_start_time = get_clock();
-	spin_unlock_irq(cdev->ccwlock);
+	cmf_generic_reset(cdev);
+}
+
+static void * align_cmbe(void *area)
+{
+	return cmbe_align(area);
 }

 static struct attribute_group cmf_attr_group_ext;
@@ -786,6 +1041,7 @@ static struct cmb_operations cmbops_extended = {
 	.read       = read_cmbe,
 	.readall    = readall_cmbe,
 	.reset      = reset_cmbe,
+	.align      = align_cmbe,
 	.attr_group = &cmf_attr_group_ext,
 };

@@ -803,14 +1059,19 @@ cmb_show_avg_sample_interval(struct device *dev, struct device_attribute *attr,
 	struct ccw_device *cdev;
 	long interval;
 	unsigned long count;
+	struct cmb_data *cmb_data;

 	cdev = to_ccwdev(dev);
-	interval = get_clock() - cdev->private->cmb_start_time;
 	count = cmf_read(cdev, cmb_sample_count);
-	if (count)
+	spin_lock_irq(cdev->ccwlock);
+	cmb_data = cdev->private->cmb;
+	if (count) {
+		interval = cmb_data->last_update -
+			cdev->private->cmb_start_time;
 		interval /= count;
-	else
+	} else
 		interval = -1;
+	spin_unlock_irq(cdev->ccwlock);
 	return sprintf(buf, "%ld\n", interval);
 }

@@ -823,7 +1084,10 @@ cmb_show_avg_utilization(struct device *dev, struct device_attribute *attr, char
 	int ret;

 	ret = cmf_readall(to_ccwdev(dev), &data);
-	if (ret)
+	if (ret == -EAGAIN || ret == -ENODEV)
+		/* No data (yet/currently) available to use for calculation. */
+		return sprintf(buf, "n/a\n");
+	else if (ret)
 		return ret;

 	utilization = data.device_connect_time +
@@ -982,6 +1246,13 @@ cmf_readall(struct ccw_device *cdev, struct cmbdata *data)
 	return cmbops->readall(cdev, data);
 }

+/* Reenable cmf when a disconnected device becomes available again. */
+int cmf_reenable(struct ccw_device *cdev)
+{
+	cmbops->reset(cdev);
+	return cmbops->set(cdev, 2);
+}
+
 static int __init
 init_cmf(void)
 {
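The cmf.c changes above stop readers from touching the hardware measurement block directly: reads are served from a cached copy (cmb_data->last_block), and readall_cmbe() reports -EAGAIN until the first copy has landed (last_update != 0). A minimal stand-alone sketch of that bookkeeping, in plain user-space C with mocked types — nothing here is the kernel API itself:

/*
 * User-space mock of the cmb_data caching scheme introduced by the patch.
 * struct names mirror the patch; everything else is simplified stand-ins.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

struct cmbe { unsigned int sample_count; };	/* stand-in for the real block */

struct cmb_data {
	struct cmbe *hw_block;			/* updated by the hardware */
	struct cmbe *last_block;		/* snapshot handed to readers */
	int size;
	unsigned long long last_update;		/* 0 until the first copy */
};

/* Copy the hardware block and timestamp the snapshot. */
static void copy_block(struct cmb_data *d, unsigned long long now)
{
	memcpy(d->last_block, d->hw_block, sizeof(*d->last_block));
	d->last_update = now;
}

/* Mirrors the new readall path: refuse to report before the first update. */
static int read_sample_count(struct cmb_data *d, unsigned int *out)
{
	if (d->last_update == 0)
		return -EAGAIN;
	*out = d->last_block->sample_count;
	return 0;
}

int main(void)
{
	struct cmbe hw = { 42 }, snap = { 0 };
	struct cmb_data d = { &hw, &snap, sizeof(struct cmbe), 0 };
	unsigned int count;

	if (read_sample_count(&d, &count) == -EAGAIN)
		puts("no data yet (-EAGAIN)");
	copy_block(&d, 1);
	if (read_sample_count(&d, &count) == 0)
		printf("sample_count from cached block: %u\n", count);
	return 0;
}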
@@ -19,9 +19,11 @@
 #include "cio_debug.h"
 #include "ioasm.h"
 #include "chsc.h"
+#include "device.h"

 int need_rescan = 0;
 int css_init_done = 0;
+static int need_reprobe = 0;
 static int max_ssid = 0;

 struct channel_subsystem *css[__MAX_CSSID + 1];
@@ -339,6 +341,67 @@ typedef void (*workfunc)(void *);
 DECLARE_WORK(slow_path_work, (workfunc)css_trigger_slow_path, NULL);
 struct workqueue_struct *slow_path_wq;

+/* Reprobe subchannel if unregistered. */
+static int reprobe_subchannel(struct subchannel_id schid, void *data)
+{
+	struct subchannel *sch;
+	int ret;
+
+	CIO_DEBUG(KERN_INFO, 6, "cio: reprobe 0.%x.%04x\n",
+		  schid.ssid, schid.sch_no);
+	if (need_reprobe)
+		return -EAGAIN;
+
+	sch = get_subchannel_by_schid(schid);
+	if (sch) {
+		/* Already known. */
+		put_device(&sch->dev);
+		return 0;
+	}
+
+	ret = css_probe_device(schid);
+	switch (ret) {
+	case 0:
+		break;
+	case -ENXIO:
+	case -ENOMEM:
+		/* These should abort looping */
+		break;
+	default:
+		ret = 0;
+	}
+
+	return ret;
+}
+
+/* Work function used to reprobe all unregistered subchannels. */
+static void reprobe_all(void *data)
+{
+	int ret;
+
+	CIO_MSG_EVENT(2, "reprobe start\n");
+
+	need_reprobe = 0;
+	/* Make sure initial subchannel scan is done. */
+	wait_event(ccw_device_init_wq,
+		   atomic_read(&ccw_device_init_count) == 0);
+	ret = for_each_subchannel(reprobe_subchannel, NULL);
+
+	CIO_MSG_EVENT(2, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
+		      need_reprobe);
+}
+
+DECLARE_WORK(css_reprobe_work, reprobe_all, NULL);
+
+/* Schedule reprobing of all unregistered subchannels. */
+void css_schedule_reprobe(void)
+{
+	need_reprobe = 1;
+	queue_work(ccw_device_work, &css_reprobe_work);
+}
+
+EXPORT_SYMBOL_GPL(css_schedule_reprobe);
+
 /*
  * Rescan for new devices. FIXME: This is slow.
  * This function is called when we have lost CRWs due to overflows and we have
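The new reprobe path above is built around a simple restart protocol: reprobe_subchannel() bails out with -EAGAIN as soon as need_reprobe is set again, so a css_schedule_reprobe() call racing with an in-flight walk causes the whole scan to be redone. A stand-alone sketch of that pattern follows (plain C mock; for_each_entry() and the entry count are invented for illustration):

/* Mock of the abort-and-requeue walk used by reprobe_all(). */
#include <errno.h>
#include <stdio.h>

static int need_reprobe;

/* Stand-in for for_each_subchannel(): walk until the callback stops us. */
static int for_each_entry(int (*fn)(int id, void *data), void *data, int n)
{
	int id, ret = 0;

	for (id = 0; id < n; id++) {
		ret = fn(id, data);
		if (ret)
			break;
	}
	return ret;
}

static int reprobe_entry(int id, void *data)
{
	if (need_reprobe)	/* a new request arrived; abort this walk */
		return -EAGAIN;
	printf("probing entry %d\n", id);
	return 0;		/* keep walking */
}

int main(void)
{
	int ret;

	need_reprobe = 0;
	ret = for_each_entry(reprobe_entry, NULL, 3);
	printf("first walk rc=%d\n", ret);

	need_reprobe = 1;	/* simulate a racing schedule request */
	ret = for_each_entry(reprobe_entry, NULL, 3);
	printf("second walk rc=%d (would be requeued)\n", ret);
	return 0;
}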
@@ -133,8 +133,8 @@ struct css_driver io_subchannel_driver = {

 struct workqueue_struct *ccw_device_work;
 struct workqueue_struct *ccw_device_notify_work;
-static wait_queue_head_t ccw_device_init_wq;
-static atomic_t ccw_device_init_count;
+wait_queue_head_t ccw_device_init_wq;
+atomic_t ccw_device_init_count;

 static int __init
 init_ccw_bus_type (void)
@@ -1,6 +1,10 @@
 #ifndef S390_DEVICE_H
 #define S390_DEVICE_H

+#include <asm/ccwdev.h>
+#include <asm/atomic.h>
+#include <linux/wait.h>
+
 /*
  * states of the device statemachine
  */
@@ -23,6 +27,7 @@ enum dev_state {
 	DEV_STATE_DISCONNECTED,
 	DEV_STATE_DISCONNECTED_SENSE_ID,
 	DEV_STATE_CMFCHANGE,
+	DEV_STATE_CMFUPDATE,
 	/* last element! */
 	NR_DEV_STATES
 };
@@ -67,6 +72,8 @@ dev_fsm_final_state(struct ccw_device *cdev)

 extern struct workqueue_struct *ccw_device_work;
 extern struct workqueue_struct *ccw_device_notify_work;
+extern wait_queue_head_t ccw_device_init_wq;
+extern atomic_t ccw_device_init_count;

 void io_subchannel_recog_done(struct ccw_device *cdev);

@@ -112,5 +119,8 @@ int ccw_device_stlck(struct ccw_device *);
 void ccw_device_set_timeout(struct ccw_device *, int);
 extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *);

+/* Channel measurement facility related */
 void retry_set_schib(struct ccw_device *cdev);
+void cmf_retry_copy_block(struct ccw_device *);
+int cmf_reenable(struct ccw_device *);
 #endif
@@ -336,8 +336,11 @@ ccw_device_oper_notify(void *data)
 	if (!ret)
 		/* Driver doesn't want device back. */
 		ccw_device_do_unreg_rereg((void *)cdev);
-	else
+	else {
+		/* Reenable channel measurements, if needed. */
+		cmf_reenable(cdev);
 		wake_up(&cdev->private->wait_q);
+	}
 }

 /*
@@ -861,6 +864,8 @@ ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
 	irb = (struct irb *) __LC_IRB;
 	/* Accumulate status. We don't do basic sense. */
 	ccw_device_accumulate_irb(cdev, irb);
+	/* Remember to clear irb to avoid residuals. */
+	memset(&cdev->private->irb, 0, sizeof(struct irb));
 	/* Try to start delayed device verification. */
 	ccw_device_online_verify(cdev, 0);
 	/* Note: Don't call handler for cio initiated clear! */
@@ -1093,6 +1098,13 @@ ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
 	dev_fsm_event(cdev, dev_event);
 }

+static void ccw_device_update_cmfblock(struct ccw_device *cdev,
+				       enum dev_event dev_event)
+{
+	cmf_retry_copy_block(cdev);
+	cdev->private->state = DEV_STATE_ONLINE;
+	dev_fsm_event(cdev, dev_event);
+}
+
 static void
 ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
@@ -1247,6 +1259,12 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
 		[DEV_EVENT_TIMEOUT]	= ccw_device_change_cmfstate,
 		[DEV_EVENT_VERIFY]	= ccw_device_change_cmfstate,
 	},
+	[DEV_STATE_CMFUPDATE] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_update_cmfblock,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_update_cmfblock,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_update_cmfblock,
+		[DEV_EVENT_VERIFY]	= ccw_device_update_cmfblock,
+	},
 };

 /*
@@ -78,7 +78,8 @@ ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
 		return -ENODEV;
 	if (cdev->private->state == DEV_STATE_NOT_OPER)
 		return -ENODEV;
-	if (cdev->private->state == DEV_STATE_VERIFY) {
+	if (cdev->private->state == DEV_STATE_VERIFY ||
+	    cdev->private->state == DEV_STATE_CLEAR_VERIFY) {
 		/* Remember to fake irb when finished. */
 		if (!cdev->private->flags.fake_irb) {
 			cdev->private->flags.fake_irb = 1;
@@ -270,7 +271,8 @@ ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb)
 	 * We didn't get channel end / device end. Check if path
 	 * verification has been started; we can retry after it has
 	 * finished. We also retry unit checks except for command reject
-	 * or intervention required.
+	 * or intervention required. Also check for long busy
+	 * conditions.
 	 */
 	if (cdev->private->flags.doverify ||
 	    cdev->private->state == DEV_STATE_VERIFY)
@@ -279,6 +281,10 @@ ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb)
 		 !(irb->ecw[0] &
 		   (SNS0_CMD_REJECT | SNS0_INTERVENTION_REQ)))
 		cdev->private->intparm = -EAGAIN;
+	else if ((irb->scsw.dstat & DEV_STAT_ATTENTION) &&
+		 (irb->scsw.dstat & DEV_STAT_DEV_END) &&
+		 (irb->scsw.dstat & DEV_STAT_UNIT_EXCEP))
+		cdev->private->intparm = -EAGAIN;
 	else
 		cdev->private->intparm = -EIO;
@@ -14,6 +14,7 @@
 #include <linux/errno.h>
 #include <linux/workqueue.h>
 #include <linux/time.h>
+#include <linux/kthread.h>

 #include <asm/lowcore.h>

@@ -56,8 +57,6 @@ s390_collect_crw_info(void *param)
 	unsigned int chain;

 	sem = (struct semaphore *)param;
-	/* Set a nice name. */
-	daemonize("kmcheck");
 repeat:
 	down_interruptible(sem);
 	slow = 0;
@@ -516,7 +515,7 @@ arch_initcall(machine_check_init);
 static int __init
 machine_check_crw_init (void)
 {
-	kernel_thread(s390_collect_crw_info, &m_sem, CLONE_FS|CLONE_FILES);
+	kthread_run(s390_collect_crw_info, &m_sem, "kmcheck");
 	ctl_set_bit(14, 28);	/* enable channel report MCH */
 	return 0;
 }
@@ -12,6 +12,9 @@
  * Copyright (C) 1992, Linus Torvalds
  *
  */

+#ifdef __KERNEL__
+
 #include <linux/compiler.h>

 /*
@@ -50,19 +53,6 @@
  * with operation of the form "set_bit(bitnr, flags)".
  */

-/* set ALIGN_CS to 1 if the SMP safe bit operations should
- * align the address to 4 byte boundary. It seems to work
- * without the alignment.
- */
-#ifdef __KERNEL__
-#define ALIGN_CS 0
-#else
-#define ALIGN_CS 1
-#ifndef CONFIG_SMP
-#error "bitops won't work without CONFIG_SMP"
-#endif
-#endif
-
 /* bitmap tables from arch/S390/kernel/bitmap.S */
 extern const char _oi_bitmap[];
 extern const char _ni_bitmap[];
@@ -121,10 +111,6 @@ static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
 	unsigned long addr, old, new, mask;

 	addr = (unsigned long) ptr;
-#if ALIGN_CS == 1
-	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
-	addr ^= addr & __BITOPS_ALIGN;		/* align address to 8 */
-#endif
 	/* calculate address for CS */
 	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
 	/* make OR mask */
@@ -141,10 +127,6 @@ static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
 	unsigned long addr, old, new, mask;

 	addr = (unsigned long) ptr;
-#if ALIGN_CS == 1
-	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
-	addr ^= addr & __BITOPS_ALIGN;		/* align address to 8 */
-#endif
 	/* calculate address for CS */
 	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
 	/* make AND mask */
@@ -161,10 +143,6 @@ static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
 	unsigned long addr, old, new, mask;

 	addr = (unsigned long) ptr;
-#if ALIGN_CS == 1
-	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
-	addr ^= addr & __BITOPS_ALIGN;		/* align address to 8 */
-#endif
 	/* calculate address for CS */
 	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
 	/* make XOR mask */
@@ -182,10 +160,6 @@ test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
 	unsigned long addr, old, new, mask;

 	addr = (unsigned long) ptr;
-#if ALIGN_CS == 1
-	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
-	addr ^= addr & __BITOPS_ALIGN;		/* align address to 8 */
-#endif
 	/* calculate address for CS */
 	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
 	/* make OR/test mask */
@@ -205,10 +179,6 @@ test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
 	unsigned long addr, old, new, mask;

 	addr = (unsigned long) ptr;
-#if ALIGN_CS == 1
-	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
-	addr ^= addr & __BITOPS_ALIGN;		/* align address to 8 */
-#endif
 	/* calculate address for CS */
 	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
 	/* make AND/test mask */
@@ -228,10 +198,6 @@ test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
 	unsigned long addr, old, new, mask;

 	addr = (unsigned long) ptr;
-#if ALIGN_CS == 1
-	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
-	addr ^= addr & __BITOPS_ALIGN;		/* align address to 8 */
-#endif
 	/* calculate address for CS */
 	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
 	/* make XOR/test mask */
@@ -834,8 +800,6 @@ static inline int sched_find_first_bit(unsigned long *b)
 #include <asm-generic/bitops/hweight.h>

-#ifdef __KERNEL__
-
 /*
  * ATTENTION: intel byte ordering convention for ext2 and minix !!
  * bit 0 is the LSB of addr; bit 31 is the MSB of addr;
@@ -276,6 +276,8 @@ extern void wait_cons_dev(void);

 extern void clear_all_subchannels(void);

+extern void css_schedule_reprobe(void);
+
 #endif

 #endif
@@ -44,10 +44,6 @@ struct cmbdata {
 #define BIODASDCMFENABLE	_IO(DASD_IOCTL_LETTER,32)
 /* enable channel measurement */
 #define BIODASDCMFDISABLE	_IO(DASD_IOCTL_LETTER,33)
-/* reset channel measurement block */
-#define BIODASDRESETCMB		_IO(DASD_IOCTL_LETTER,34)
-/* read channel measurement data */
-#define BIODASDREADCMB		_IOWR(DASD_IOCTL_LETTER,32,__u64)
 /* read channel measurement data */
 #define BIODASDREADALLCMB	_IOWR(DASD_IOCTL_LETTER,33,struct cmbdata)
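With BIODASDRESETCMB and BIODASDREADCMB removed, user space is left with BIODASDREADALLCMB to fetch a struct cmbdata snapshot. A hedged user-space sketch follows; the include path and device node are assumptions for the example, not taken from the patch:

/* Read all channel measurement data for a DASD via BIODASDREADALLCMB. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/cmb.h>	/* struct cmbdata, BIODASDREADALLCMB (assumed install path) */

int main(void)
{
	struct cmbdata data;
	int fd = open("/dev/dasda", O_RDONLY);	/* example device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, BIODASDREADALLCMB, &data) < 0) {
		perror("BIODASDREADALLCMB");
		close(fd);
		return 1;
	}
	printf("ssch/rsch count: %llu, elapsed: %llu ns\n",
	       (unsigned long long) data.ssch_rsch_count,
	       (unsigned long long) data.elapsed_time);
	close(fd);
	return 0;
}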
@@ -68,10 +68,12 @@ typedef struct dasd_information2_t {
  * 0x00: default features
  * 0x01: readonly (ro)
  * 0x02: use diag discipline (diag)
+ * 0x04: set the device initially online (internal use only)
  */
-#define DASD_FEATURE_DEFAULT  0
-#define DASD_FEATURE_READONLY 1
-#define DASD_FEATURE_USEDIAG  2
+#define DASD_FEATURE_DEFAULT	    0x00
+#define DASD_FEATURE_READONLY	    0x01
+#define DASD_FEATURE_USEDIAG	    0x02
+#define DASD_FEATURE_INITIAL_ONLINE 0x04

 #define DASD_PARTN_BITS 2
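The switch from 0/1/2 to 0x00/0x01/0x02/0x04 makes the DASD feature constants explicit bit masks that can be OR-ed together and tested individually. A small illustrative sketch (the helper and the sample value are examples only; the constants mirror the header above):

#include <stdio.h>

#define DASD_FEATURE_DEFAULT		0x00
#define DASD_FEATURE_READONLY		0x01
#define DASD_FEATURE_USEDIAG		0x02
#define DASD_FEATURE_INITIAL_ONLINE	0x04

/* Print which feature bits are set in a device's feature word. */
static void show_features(unsigned int features)
{
	printf("ro=%d diag=%d initial_online=%d\n",
	       !!(features & DASD_FEATURE_READONLY),
	       !!(features & DASD_FEATURE_USEDIAG),
	       !!(features & DASD_FEATURE_INITIAL_ONLINE));
}

int main(void)
{
	/* e.g. a read-only device that should come online immediately */
	show_features(DASD_FEATURE_READONLY | DASD_FEATURE_INITIAL_ONLINE);
	return 0;
}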
@@ -63,6 +63,7 @@ struct thread_info {
 	.exec_domain	= &default_exec_domain,	\
 	.flags		= 0,			\
 	.cpu		= 0,			\
+	.preempt_count	= 1,			\
 	.restart_block	= {			\
 		.fn = do_no_restart_syscall,	\
 	},					\
@@ -394,11 +394,9 @@

 #ifdef __KERNEL__

-/* user-visible error numbers are in the range -1 - -122: see <asm-s390/errno.h> */
-
 #define __syscall_return(type, res)			\
 do {							\
-	if ((unsigned long)(res) >= (unsigned long)(-125)) { \
+	if ((unsigned long)(res) >= (unsigned long)(-4095)) {\
 		errno = -(res);				\
 		res = -1;				\
 	}						\