Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Martin Schwidefsky:

 - The remaining patches for the z13 machine support: kernel build
   option for z13, the cache synonym avoidance, SMT support,
   compare-and-delay for spinloops and the CEX5S crypto adapter.

 - The ftrace support for function tracing with the gcc hotpatch option.
   This touches common code Makefiles, Steven is ok with the changes.

 - The hypfs file system gets an extension to access diagnose 0x0c data
   in user space for performance analysis for Linux running under z/VM.

 - The iucv hvc console gets wildcard support for the user id filtering.

 - The cacheinfo code is converted to use the generic infrastructure.

 - Cleanup and bug fixes.

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (42 commits)
  s390/process: free vx save area when releasing tasks
  s390/hypfs: Eliminate hypfs interval
  s390/hypfs: Add diagnose 0c support
  s390/cacheinfo: don't use smp_processor_id() in preemptible context
  s390/zcrypt: fixed domain scanning problem (again)
  s390/smp: increase maximum value of NR_CPUS to 512
  s390/jump label: use different nop instruction
  s390/jump label: add sanity checks
  s390/mm: correct missing space when reporting user process faults
  s390/dasd: cleanup profiling
  s390/dasd: add locking for global_profile access
  s390/ftrace: hotpatch support for function tracing
  ftrace: let notrace function attribute disable hotpatching if necessary
  ftrace: allow architectures to specify ftrace compile options
  s390: reintroduce diag 44 calls for cpu_relax()
  s390/zcrypt: Add support for new crypto express (CEX5S) adapter.
  s390/zcrypt: Number of supported ap domains is not retrievable.
  s390/spinlock: add compare-and-delay to lock wait loops
  s390/tape: remove redundant if statement
  s390/hvc_iucv: add simple wildcard matches to the iucv allow filter
  ...
This commit is contained in: commit b3d6524ff7
@ -1,14 +1,14 @@
Debugging on Linux for s/390 & z/Architecture
by
Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
Copyright (C) 2000-2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
Best viewed with fixed width fonts

Overview of Document:
=====================
This document is intended to give a good overview of how to debug
Linux for s/390 & z/Architecture. It isn't intended as a complete reference & not a
This document is intended to give a good overview of how to debug Linux for
s/390 and z/Architecture. It is not intended as a complete reference and not a
tutorial on the fundamentals of C & assembly. It doesn't go into
390 IO in any detail. It is intended to complement the documents in the
reference section below & any other worthwhile references you get.
@ -35,7 +35,6 @@ Examining core dumps
|
||||
ldd
|
||||
Debugging modules
|
||||
The proc file system
|
||||
Starting points for debugging scripting languages etc.
|
||||
SysRq
|
||||
References
|
||||
Special Thanks
|
||||
@ -44,18 +43,20 @@ Register Set
|
||||
============
|
||||
The current architectures have the following registers.
|
||||
|
||||
16 General propose registers, 32 bit on s/390 64 bit on z/Architecture, r0-r15 or gpr0-gpr15 used for arithmetic & addressing.
|
||||
16 General propose registers, 32 bit on s/390 and 64 bit on z/Architecture,
|
||||
r0-r15 (or gpr0-gpr15), used for arithmetic and addressing.
|
||||
|
||||
16 Control registers, 32 bit on s/390 64 bit on z/Architecture, ( cr0-cr15 kernel usage only ) used for memory management,
|
||||
interrupt control,debugging control etc.
|
||||
16 Control registers, 32 bit on s/390 and 64 bit on z/Architecture, cr0-cr15,
|
||||
kernel usage only, used for memory management, interrupt control, debugging
|
||||
control etc.
|
||||
|
||||
16 Access registers ( ar0-ar15 ) 32 bit on s/390 & z/Architecture
|
||||
not used by normal programs but potentially could
|
||||
be used as temporary storage. Their main purpose is their 1 to 1
|
||||
association with general purpose registers and are used in
|
||||
the kernel for copying data between kernel & user address spaces.
|
||||
Access register 0 ( & access register 1 on z/Architecture ( needs 64 bit
|
||||
pointer ) ) is currently used by the pthread library as a pointer to
|
||||
16 Access registers (ar0-ar15), 32 bit on both s/390 and z/Architecture,
|
||||
normally not used by normal programs but potentially could be used as
|
||||
temporary storage. These registers have a 1:1 association with general
|
||||
purpose registers and are designed to be used in the so-called access
|
||||
register mode to select different address spaces.
|
||||
Access register 0 (and access register 1 on z/Architecture, which needs a
|
||||
64 bit pointer) is currently used by the pthread library as a pointer to
|
||||
the current running threads private area.
|
||||
|
||||
16 64 bit floating point registers (fp0-fp15 ) IEEE & HFP floating
|
||||
@ -90,18 +91,19 @@ s/390 z/Architecture
|
||||
|
||||
6 6 Input/Output interrupt Mask
|
||||
|
||||
7 7 External interrupt Mask used primarily for interprocessor signalling &
|
||||
clock interrupts.
|
||||
7 7 External interrupt Mask used primarily for interprocessor
|
||||
signalling and clock interrupts.
|
||||
|
||||
8-11 8-11 PSW Key used for complex memory protection mechanism not used under linux
|
||||
8-11 8-11 PSW Key used for complex memory protection mechanism
|
||||
(not used under linux)
|
||||
|
||||
12 12 1 on s/390 0 on z/Architecture
|
||||
|
||||
13 13 Machine Check Mask 1=enable machine check interrupts
|
||||
|
||||
14 14 Wait State set this to 1 to stop the processor except for interrupts & give
|
||||
time to other LPARS used in CPU idle in the kernel to increase overall
|
||||
usage of processor resources.
|
||||
14 14 Wait State. Set this to 1 to stop the processor except for
|
||||
interrupts and give time to other LPARS. Used in CPU idle in
|
||||
the kernel to increase overall usage of processor resources.
|
||||
|
||||
15 15 Problem state ( if set to 1 certain instructions are disabled )
|
||||
all linux user programs run with this bit 1
|
||||
@ -165,21 +167,23 @@ s/390 z/Architecture
|
||||
when loading the address with LPSWE otherwise a
|
||||
specification exception occurs, LPSW is fully backward
|
||||
compatible.
Prefix Page(s)
|
||||
--------------
|
||||
--------------
|
||||
This per cpu memory area is too intimately tied to the processor not to mention.
|
||||
It exists between the real addresses 0-4096 on s/390 & 0-8192 z/Architecture & is exchanged
|
||||
with a 1 page on s/390 or 2 pages on z/Architecture in absolute storage by the set
|
||||
prefix instruction in linux'es startup.
|
||||
This page is mapped to a different prefix for each processor in an SMP configuration
|
||||
( assuming the os designer is sane of course :-) ).
|
||||
Bytes 0-512 ( 200 hex ) on s/390 & 0-512,4096-4544,4604-5119 currently on z/Architecture
|
||||
are used by the processor itself for holding such information as exception indications &
|
||||
entry points for exceptions.
|
||||
Bytes after 0xc00 hex are used by linux for per processor globals on s/390 & z/Architecture
|
||||
( there is a gap on z/Architecture too currently between 0xc00 & 1000 which linux uses ).
|
||||
It exists between the real addresses 0-4096 on s/390 and between 0-8192 on
|
||||
z/Architecture and is exchanged with one page on s/390 or two pages on
|
||||
z/Architecture in absolute storage by the set prefix instruction during Linux
|
||||
startup.
|
||||
This page is mapped to a different prefix for each processor in an SMP
|
||||
configuration (assuming the OS designer is sane of course).
|
||||
Bytes 0-512 (200 hex) on s/390 and 0-512, 4096-4544, 4604-5119 currently on
|
||||
z/Architecture are used by the processor itself for holding such information
|
||||
as exception indications and entry points for exceptions.
|
||||
Bytes after 0xc00 hex are used by linux for per processor globals on s/390 and
|
||||
z/Architecture (there is a gap on z/Architecture currently between 0xc00 and
|
||||
0x1000, too, which is used by Linux).
|
||||
The closest thing to this on traditional architectures is the interrupt
|
||||
vector table. This is a good thing & does simplify some of the kernel coding
|
||||
however it means that we now cannot catch stray NULL pointers in the
|
||||
@ -192,26 +196,26 @@ Address Spaces on Intel Linux
|
||||
|
||||
The traditional Intel Linux is approximately mapped as follows forgive
|
||||
the ascii art.
|
||||
0xFFFFFFFF 4GB Himem *****************
|
||||
* *
|
||||
* Kernel Space *
|
||||
* *
|
||||
***************** ****************
|
||||
User Space Himem (typically 0xC0000000 3GB )* User Stack * * *
|
||||
***************** * *
|
||||
* Shared Libs * * Next Process *
|
||||
***************** * to *
|
||||
* * <== * Run * <==
|
||||
* User Program * * *
|
||||
* Data BSS * * *
|
||||
* Text * * *
|
||||
* Sections * * *
|
||||
0x00000000 ***************** ****************
|
||||
0xFFFFFFFF 4GB Himem *****************
|
||||
* *
|
||||
* Kernel Space *
|
||||
* *
|
||||
***************** ****************
|
||||
User Space Himem * User Stack * * *
|
||||
(typically 0xC0000000 3GB ) ***************** * *
|
||||
* Shared Libs * * Next Process *
|
||||
***************** * to *
|
||||
* * <== * Run * <==
|
||||
* User Program * * *
|
||||
* Data BSS * * *
|
||||
* Text * * *
|
||||
* Sections * * *
|
||||
0x00000000 ***************** ****************
|
||||
|
||||
Now it is easy to see that on Intel it is quite easy to recognise a kernel address
|
||||
as being one greater than user space himem ( in this case 0xC0000000).
|
||||
& addresses of less than this are the ones in the current running program on this
|
||||
processor ( if an smp box ).
|
||||
Now it is easy to see that on Intel it is quite easy to recognise a kernel
|
||||
address as being one greater than user space himem (in this case 0xC0000000),
|
||||
and addresses of less than this are the ones in the current running program on
|
||||
this processor (if an smp box).
|
||||
If using the virtual machine ( VM ) as a debugger it is quite difficult to
|
||||
know which user process is running as the address space you are looking at
|
||||
could be from any process in the run queue.
|
||||
@ -247,8 +251,8 @@ Our addressing scheme is basically as follows:
|
||||
Himem 0x7fffffff 2GB on s/390 ***************** ****************
|
||||
currently 0x3ffffffffff (2^42)-1 * User Stack * * *
|
||||
on z/Architecture. ***************** * *
|
||||
* Shared Libs * * *
|
||||
***************** * *
|
||||
* Shared Libs * * *
|
||||
***************** * *
|
||||
* * * Kernel *
|
||||
* User Program * * *
|
||||
* Data BSS * * *
|
||||
@ -301,10 +305,10 @@ Virtual Addresses on s/390 & z/Architecture
|
||||
===========================================
|
||||
|
||||
A virtual address on s/390 is made up of 3 parts
|
||||
The SX ( segment index, roughly corresponding to the PGD & PMD in linux terminology )
|
||||
being bits 1-11.
|
||||
The PX ( page index, corresponding to the page table entry (pte) in linux terminology )
|
||||
being bits 12-19.
|
||||
The SX (segment index, roughly corresponding to the PGD & PMD in Linux
|
||||
terminology) being bits 1-11.
|
||||
The PX (page index, corresponding to the page table entry (pte) in Linux
|
||||
terminology) being bits 12-19.
|
||||
The remaining bits, BX (the byte index), are the offset in the page,
|
||||
i.e. bits 20 to 31.
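As a purely illustrative sketch (hypothetical helper, not part of the original
text), the three fields of a 31 bit s/390 virtual address can be picked apart
following the bit ranges just given:

	#include <stdio.h>

	/* illustrative helper: split a 31 bit s/390 virtual address */
	static void split_vaddr(unsigned long addr)
	{
		unsigned long sx = (addr >> 20) & 0x7ff; /* segment index, bits 1-11  */
		unsigned long px = (addr >> 12) & 0xff;  /* page index,    bits 12-19 */
		unsigned long bx = addr & 0xfff;         /* byte index,    bits 20-31 */

		printf("SX=%03lx PX=%02lx BX=%03lx\n", sx, px, bx);
	}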
|
||||
|
||||
@ -368,9 +372,9 @@ each processor as follows.
|
||||
* ( 8K ) *
|
||||
16K aligned ************************
|
||||
|
||||
What this means is that we don't need to dedicate any register or global variable
|
||||
to point to the current running process & can retrieve it with the following
|
||||
very simple construct for s/390 & one very similar for z/Architecture.
|
||||
What this means is that we don't need to dedicate any register or global
|
||||
variable to point to the current running process & can retrieve it with the
|
||||
following very simple construct for s/390 & one very similar for z/Architecture.
|
||||
|
||||
static inline struct task_struct * get_current(void)
|
||||
{
|
||||
@ -403,8 +407,8 @@ Note: To follow stackframes requires a knowledge of C or Pascal &
|
||||
limited knowledge of one assembly language.
|
||||
|
||||
It should be noted that there are some differences between the
|
||||
s/390 & z/Architecture stack layouts as the z/Architecture stack layout didn't have
|
||||
to maintain compatibility with older linkage formats.
|
||||
s/390 and z/Architecture stack layouts as the z/Architecture stack layout
|
||||
didn't have to maintain compatibility with older linkage formats.
|
||||
|
||||
Glossary:
|
||||
---------
|
||||
@ -440,7 +444,7 @@ The code generated by the compiler to return to the caller.
|
||||
|
||||
frameless-function
|
||||
A frameless function in Linux for s390 & z/Architecture is one which doesn't
|
||||
need more than the register save area ( 96 bytes on s/390, 160 on z/Architecture )
|
||||
need more than the register save area (96 bytes on s/390, 160 on z/Architecture)
|
||||
given to it by the caller.
|
||||
A frameless function never:
|
||||
1) Sets up a back chain.
|
||||
@ -588,8 +592,8 @@ A sample program with comments.
|
||||
|
||||
Comments on the function test
|
||||
-----------------------------
|
||||
1) It didn't need to set up a pointer to the constant pool gpr13 as it isn't used
|
||||
( :-( ).
|
||||
1) It didn't need to set up a pointer to the constant pool gpr13 as it is not
|
||||
used ( :-( ).
|
||||
2) This is a frameless function & no stack is bought.
|
||||
3) The compiler was clever enough to recognise that it could return the
|
||||
value in r2 as well as use it for the passed in parameter ( :-) ).
|
||||
@ -743,35 +747,34 @@ Debugging under VM
|
||||
Notes
|
||||
-----
|
||||
Addresses & values in the VM debugger are always hex never decimal
|
||||
Address ranges are of the format <HexValue1>-<HexValue2> or <HexValue1>.<HexValue2>
|
||||
e.g. The address range 0x2000 to 0x3000 can be described as 2000-3000 or 2000.1000
|
||||
Address ranges are of the format <HexValue1>-<HexValue2> or
|
||||
<HexValue1>.<HexValue2>
|
||||
For example, the address range 0x2000 to 0x3000 can be described as 2000-3000
|
||||
or 2000.1000
|
||||
|
||||
The VM Debugger is case insensitive.
|
||||
|
||||
VM's strengths are usually other debuggers weaknesses you can get at any resource
|
||||
no matter how sensitive e.g. memory management resources,change address translation
|
||||
in the PSW. For kernel hacking you will reap dividends if you get good at it.
|
||||
VM's strengths are usually other debuggers weaknesses you can get at any
|
||||
resource no matter how sensitive e.g. memory management resources, change
|
||||
address translation in the PSW. For kernel hacking you will reap dividends if
|
||||
you get good at it.
|
||||
|
||||
The VM Debugger displays operators but not operands, probably because some
|
||||
of it was written when memory was expensive & the programmer was probably proud that
|
||||
it fitted into 2k of memory & the programmers & didn't want to shock hardcore VM'ers by
|
||||
changing the interface :-), also the debugger displays useful information on the same line &
|
||||
the author of the code probably felt that it was a good idea not to go over
|
||||
the 80 columns on the screen.
|
||||
|
||||
As some of you are probably in a panic now this isn't as unintuitive as it may seem
|
||||
as the 390 instructions are easy to decode mentally & you can make a good guess at a lot
|
||||
of them as all the operands are nibble ( half byte aligned ) & if you have an objdump listing
|
||||
also it is quite easy to follow, if you don't have an objdump listing keep a copy of
|
||||
the s/390 Reference Summary & look at between pages 2 & 7 or alternatively the
|
||||
s/390 principles of operation.
|
||||
The VM Debugger displays operators but not operands, and also the debugger
|
||||
displays useful information on the same line as the author of the code probably
|
||||
felt that it was a good idea not to go over the 80 columns on the screen.
|
||||
This isn't as unintuitive as it may seem as the s/390 instructions are easy to
|
||||
decode mentally and you can make a good guess at a lot of them as all the
|
||||
operands are nibble (half byte aligned).
|
||||
So if you have an objdump listing by hand, it is quite easy to follow, and if
|
||||
you don't have an objdump listing keep a copy of the s/390 Reference Summary
|
||||
or alternatively the s/390 principles of operation next to you.
|
||||
e.g. even I can guess that
|
||||
0001AFF8' LR 180F CC 0
|
||||
is a ( load register ) lr r0,r15
|
||||
|
||||
Also it is very easy to tell the length of a 390 instruction from the 2 most significant
|
||||
bits in the instruction ( not that this info is really useful except if you are trying to
|
||||
make sense of a hexdump of code ).
|
||||
Also it is very easy to tell the length of a 390 instruction from the 2 most
|
||||
significant bits in the instruction (not that this info is really useful except
|
||||
if you are trying to make sense of a hexdump of code).
|
||||
Here is a table
|
||||
Bits Instruction Length
|
||||
------------------------------------------
|
||||
@ -780,9 +783,6 @@ Bits Instruction Length
|
||||
10 4 Bytes
|
||||
11 6 Bytes
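As an aside (this helper is illustrative, not from the original document), the
full table (00 gives 2 bytes, 01 and 10 give 4 bytes, 11 gives 6 bytes) boils
down to a small function on the first instruction byte:

	/* illustrative helper: 390 instruction length from the two most
	   significant bits of the first instruction byte */
	static int insn_length(unsigned char first_byte)
	{
		switch (first_byte >> 6) {
		case 0:
			return 2;	/* 00 -> 2 bytes */
		case 1:
		case 2:
			return 4;	/* 01, 10 -> 4 bytes */
		default:
			return 6;	/* 11 -> 6 bytes */
		}
	}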
The debugger also displays other useful info on the same line such as the
|
||||
addresses being operated on destination addresses of branches & condition codes.
|
||||
e.g.
|
||||
@ -853,8 +853,8 @@ Displaying & modifying Registers
|
||||
--------------------------------
|
||||
D G will display all the gprs
|
||||
Adding a extra G to all the commands is necessary to access the full 64 bit
|
||||
content in VM on z/Architecture obviously this isn't required for access registers
|
||||
as these are still 32 bit.
|
||||
content in VM on z/Architecture. Obviously this isn't required for access
|
||||
registers as these are still 32 bit.
|
||||
e.g. DGG instead of DG
|
||||
D X will display all the control registers
|
||||
D AR will display all the access registers
|
||||
@ -870,10 +870,11 @@ Displaying Memory
|
||||
-----------------
|
||||
To display memory mapped using the current PSW's mapping try
|
||||
D <range>
|
||||
To make VM display a message each time it hits a particular address & continue try
|
||||
To make VM display a message each time it hits a particular address and
|
||||
continue try
|
||||
D I<range> will disassemble/display a range of instructions.
|
||||
ST addr 32 bit word will store a 32 bit aligned address
|
||||
D T<range> will display the EBCDIC in an address ( if you are that way inclined )
|
||||
D T<range> will display the EBCDIC in an address (if you are that way inclined)
|
||||
D R<range> will display real addresses ( without DAT ) but with prefixing.
|
||||
There are other complex options to display if you need to get at say home space
|
||||
but are in primary space the easiest thing to do is to temporarily
|
||||
@ -884,8 +885,8 @@ restore it.
|
||||
|
||||
Hints
|
||||
-----
|
||||
If you want to issue a debugger command without halting your virtual machine with the
|
||||
PA1 key try prefixing the command with #CP e.g.
|
||||
If you want to issue a debugger command without halting your virtual machine
|
||||
with the PA1 key try prefixing the command with #CP e.g.
|
||||
#cp tr i pswa 2000
|
||||
also suffixing most debugger commands with RUN will cause them not
|
||||
to stop just display the mnemonic at the current instruction on the console.
|
||||
@ -903,9 +904,10 @@ This sends a message to your own console each time do_signal is entered.
|
||||
script with breakpoints on every kernel procedure, this isn't a good idea
|
||||
because there are thousands of these routines & VM can only set 255 breakpoints
|
||||
at a time so you nearly had to spend as long pruning the file down as you would
|
||||
entering the msg's by hand ),however, the trick might be useful for a single object file.
|
||||
On linux'es 3270 emulator x3270 there is a very useful option under the file ment
|
||||
Save Screens In File this is very good of keeping a copy of traces.
|
||||
entering the msgs by hand), however, the trick might be useful for a single
|
||||
object file. In the 3270 terminal emulator x3270 there is a very useful option
|
||||
in the file menu called "Save Screen In File" - this is very good for keeping a
|
||||
copy of traces.
|
||||
|
||||
From CMS help <command name> will give you online help on a particular command.
|
||||
e.g.
|
||||
@ -920,7 +922,8 @@ SET PF9 IMM B
|
||||
This does a single step in VM on pressing F8.
|
||||
SET PF10 ^
|
||||
This sets up the ^ key.
|
||||
which can be used for ^c (ctrl-c),^z (ctrl-z) which can't be typed directly into some 3270 consoles.
|
||||
which can be used for ^c (ctrl-c),^z (ctrl-z) which can't be typed directly
|
||||
into some 3270 consoles.
|
||||
SET PF11 ^-
|
||||
This types the starting keystrokes for a sysrq see SysRq below.
|
||||
SET PF12 RETRIEVE
|
||||
@ -1014,8 +1017,8 @@ Tracing Program Exceptions
|
||||
--------------------------
|
||||
If you get a crash which says something like
|
||||
illegal operation or specification exception followed by a register dump
|
||||
You can restart linux & trace these using the tr prog <range or value> trace option.
|
||||
|
||||
You can restart linux & trace these using the tr prog <range or value> trace
|
||||
option.
The most common ones you will normally be tracing for are
|
||||
@ -1057,9 +1060,10 @@ TR GOTO INITIAL
|
||||
|
||||
Tracing linux syscalls under VM
|
||||
-------------------------------
|
||||
Syscalls are implemented on Linux for S390 by the Supervisor call instruction (SVC) there 256
|
||||
possibilities of these as the instruction is made up of a 0xA opcode & the second byte being
|
||||
the syscall number. They are traced using the simple command.
|
||||
Syscalls are implemented on Linux for S390 by the Supervisor call instruction
|
||||
(SVC). There are 256 possibilities of these as the instruction is made up of a 0xA
|
||||
opcode and the second byte being the syscall number. They are traced using the
|
||||
simple command:
|
||||
TR SVC <Optional value or range>
|
||||
the syscalls are defined in linux/arch/s390/include/asm/unistd.h
|
||||
e.g. to trace all file opens just do
|
||||
@ -1070,12 +1074,12 @@ SMP Specific commands
|
||||
---------------------
|
||||
To find out how many cpus you have
|
||||
Q CPUS displays all the CPU's available to your virtual machine
|
||||
To find the cpu that the current cpu VM debugger commands are being directed at do
|
||||
Q CPU to change the current cpu VM debugger commands are being directed at do
|
||||
To find the cpu that the current VM debugger commands are being directed at do
Q CPU. To change the cpu that VM debugger commands are being directed at do
|
||||
CPU <desired cpu no>
|
||||
|
||||
On a SMP guest issue a command to all CPUs try prefixing the command with cpu all.
|
||||
To issue a command to a particular cpu try cpu <cpu number> e.g.
|
||||
On a SMP guest issue a command to all CPUs try prefixing the command with cpu
|
||||
all. To issue a command to a particular cpu try cpu <cpu number> e.g.
|
||||
CPU 01 TR I R 2000.3000
|
||||
If you are running on a guest with several cpus & you have a IO related problem
|
||||
& cannot follow the flow of code but you know it isn't smp related.
|
||||
@ -1101,10 +1105,10 @@ D TX0.100
|
||||
|
||||
Alternatively
|
||||
=============
|
||||
Under older VM debuggers ( I love EBDIC too ) you can use this little program I wrote which
|
||||
will convert a command line of hex digits to ascii text which can be compiled under linux &
|
||||
you can copy the hex digits from your x3270 terminal to your xterm if you are debugging
|
||||
from a linuxbox.
|
||||
Under older VM debuggers (I love EBCDIC too) you can use the following little
|
||||
program which converts a command line of hex digits to ascii text. It can be
|
||||
compiled under linux and you can copy the hex digits from your x3270 terminal
|
||||
to your xterm if you are debugging from a linuxbox.
|
||||
|
||||
This is quite useful when looking at a parameter passed in as a text string
|
||||
under VM ( unless you are good at decoding ASCII in your head ).
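The document's own program is not reproduced in this hunk; a minimal sketch of
the idea (hypothetical code) could look like this:

	#include <stdio.h>
	#include <stdlib.h>

	/* hypothetical sketch: print the ascii characters for the hex byte
	   values given on the command line,
	   e.g. ./hex2ascii 68 65 6c 6c 6f  ->  hello */
	int main(int argc, char *argv[])
	{
		int i;

		for (i = 1; i < argc; i++)
			putchar((int)strtol(argv[i], NULL, 16));
		putchar('\n');
		return 0;
	}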
|
||||
@ -1114,14 +1118,14 @@ TR SVC 5
|
||||
We have stopped at a breakpoint
|
||||
000151B0' SVC 0A05 -> 0001909A' CC 0
|
||||
|
||||
D 20.8 to check the SVC old psw in the prefix area & see was it from userspace
|
||||
( for the layout of the prefix area consult P18 of the s/390 390 Reference Summary
|
||||
if you have it available ).
|
||||
D 20.8 to check the SVC old psw in the prefix area and see was it from userspace
|
||||
(for the layout of the prefix area consult the "Fixed Storage Locations"
|
||||
chapter of the s/390 Reference Summary if you have it available).
|
||||
V00000020 070C2000 800151B2
|
||||
The problem state bit wasn't set & it's also too early in the boot sequence
for it to be a userspace SVC. If it was, we would have to temporarily switch the
|
||||
psw to user space addressing so we could get at the first parameter of the open in
|
||||
gpr2.
|
||||
psw to user space addressing so we could get at the first parameter of the open
|
||||
in gpr2.
|
||||
Next do a
|
||||
D G2
|
||||
GPR 2 = 00014CB4
|
||||
@ -1208,9 +1212,9 @@ Here are the tricks I use 9 out of 10 times it works pretty well,
|
||||
|
||||
When your backchain reaches a dead end
|
||||
--------------------------------------
|
||||
This can happen when an exception happens in the kernel & the kernel is entered twice
|
||||
if you reach the NULL pointer at the end of the back chain you should be
|
||||
able to sniff further back if you follow the following tricks.
|
||||
This can happen when an exception happens in the kernel and the kernel is
|
||||
entered twice. If you reach the NULL pointer at the end of the back chain you
|
||||
should be able to sniff further back if you follow the following tricks.
|
||||
1) A kernel address should be easy to recognise since it is in
|
||||
primary space & the problem state bit isn't set & also
|
||||
The Hi bit of the address is set.
|
||||
@ -1260,8 +1264,8 @@ V000FFFD0 00010400 80010802 8001085A 000FFFA0
|
||||
|
||||
our 3rd return address is 8001085A
|
||||
|
||||
as the 04B52002 looks suspiciously like rubbish it is fair to assume that the kernel entry routines
|
||||
for the sake of optimisation don't set up a backchain.
|
||||
as the 04B52002 looks suspiciously like rubbish it is fair to assume that the
|
||||
kernel entry routines for the sake of optimisation don't set up a backchain.
|
||||
|
||||
now look at System.map to see if the addresses make any sense.
|
||||
|
||||
@ -1289,67 +1293,75 @@ Congrats you've done your first backchain.
|
||||
s/390 & z/Architecture IO Overview
|
||||
==================================
|
||||
|
||||
I am not going to give a course in 390 IO architecture as this would take me quite a
|
||||
while & I'm no expert. Instead I'll give a 390 IO architecture summary for Dummies if you have
|
||||
the s/390 principles of operation available read this instead. If nothing else you may find a few
|
||||
useful keywords in here & be able to use them on a web search engine like altavista to find
|
||||
more useful information.
|
||||
I am not going to give a course in 390 IO architecture as this would take me
|
||||
quite a while and I'm no expert. Instead I'll give a 390 IO architecture
|
||||
summary for Dummies. If you have the s/390 principles of operation available
|
||||
read this instead. If nothing else you may find a few useful keywords in here
|
||||
and be able to use them on a web search engine to find more useful information.
|
||||
|
||||
Unlike other bus architectures modern 390 systems do their IO using mostly
|
||||
fibre optics & devices such as tapes & disks can be shared between several mainframes,
|
||||
also S390 can support up to 65536 devices while a high end PC based system might be choking
|
||||
with around 64. Here is some of the common IO terminology
|
||||
fibre optics and devices such as tapes and disks can be shared between several
|
||||
mainframes. Also S390 can support up to 65536 devices while a high end PC based
|
||||
system might be choking with around 64.
|
||||
|
||||
Here is some of the common IO terminology:
|
||||
|
||||
Subchannel:
|
||||
This is the logical number most IO commands use to talk to an IO device there can be up to
|
||||
0x10000 (65536) of these in a configuration typically there is a few hundred. Under VM
|
||||
for simplicity they are allocated contiguously, however on the native hardware they are not
|
||||
they typically stay consistent between boots provided no new hardware is inserted or removed.
|
||||
Under Linux for 390 we use these as IRQ's & also when issuing an IO command (CLEAR SUBCHANNEL,
|
||||
HALT SUBCHANNEL,MODIFY SUBCHANNEL,RESUME SUBCHANNEL,START SUBCHANNEL,STORE SUBCHANNEL &
|
||||
TEST SUBCHANNEL ) we use this as the ID of the device we wish to talk to, the most
|
||||
important of these instructions are START SUBCHANNEL ( to start IO ), TEST SUBCHANNEL ( to check
|
||||
whether the IO completed successfully ), & HALT SUBCHANNEL ( to kill IO ), a subchannel
|
||||
can have up to 8 channel paths to a device this offers redundancy if one is not available.
|
||||
|
||||
This is the logical number most IO commands use to talk to an IO device. There
|
||||
can be up to 0x10000 (65536) of these in a configuration, typically there are a
|
||||
few hundred. Under VM for simplicity they are allocated contiguously, however
|
||||
on the native hardware they are not. They typically stay consistent between
|
||||
boots provided no new hardware is inserted or removed.
|
||||
Under Linux for s390 we use these as IRQ's and also when issuing an IO command
|
||||
(CLEAR SUBCHANNEL, HALT SUBCHANNEL, MODIFY SUBCHANNEL, RESUME SUBCHANNEL,
|
||||
START SUBCHANNEL, STORE SUBCHANNEL and TEST SUBCHANNEL). We use this as the ID
|
||||
of the device we wish to talk to. The most important of these instructions are
|
||||
START SUBCHANNEL (to start IO), TEST SUBCHANNEL (to check whether the IO
|
||||
completed successfully) and HALT SUBCHANNEL (to kill IO). A subchannel can have
|
||||
up to 8 channel paths to a device, this offers redundancy if one is not
|
||||
available.
|
||||
|
||||
Device Number:
|
||||
This number remains static & Is closely tied to the hardware, there are 65536 of these
|
||||
also they are made up of a CHPID ( Channel Path ID, the most significant 8 bits )
|
||||
& another lsb 8 bits. These remain static even if more devices are inserted or removed
|
||||
from the hardware, there is a 1 to 1 mapping between Subchannels & Device Numbers provided
|
||||
devices aren't inserted or removed.
|
||||
This number remains static and is closely tied to the hardware. There are 65536
|
||||
of these, made up of a CHPID (Channel Path ID, the most significant 8 bits) and
|
||||
another lsb 8 bits. These remain static even if more devices are inserted or
|
||||
removed from the hardware. There is a 1 to 1 mapping between subchannels and
|
||||
device numbers, provided devices aren't inserted or removed.
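For illustration only (a hypothetical helper, not from the original document),
splitting a device number as just described amounts to:

	/* hypothetical helper: split a 16 bit device number as described above */
	static void split_devno(unsigned int devno, unsigned int *chpid, unsigned int *lsb)
	{
		*chpid = (devno >> 8) & 0xff;	/* most significant 8 bits */
		*lsb   = devno & 0xff;		/* remaining lsb 8 bits */
	}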
|
||||
|
||||
Channel Control Words:
|
||||
CCWS are linked lists of instructions initially pointed to by an operation request block (ORB),
|
||||
which is initially given to Start Subchannel (SSCH) command along with the subchannel number
|
||||
for the IO subsystem to process while the CPU continues executing normal code.
|
||||
These come in two flavours, Format 0 ( 24 bit for backward )
|
||||
compatibility & Format 1 ( 31 bit ). These are typically used to issue read & write
|
||||
( & many other instructions ) they consist of a length field & an absolute address field.
|
||||
For each IO typically get 1 or 2 interrupts one for channel end ( primary status ) when the
|
||||
channel is idle & the second for device end ( secondary status ) sometimes you get both
|
||||
concurrently, you check how the IO went on by issuing a TEST SUBCHANNEL at each interrupt,
|
||||
from which you receive an Interruption response block (IRB). If you get channel & device end
|
||||
status in the IRB without channel checks etc. your IO probably went okay. If you didn't you
|
||||
probably need a doctor to examine the IRB & extended status word etc.
|
||||
CCWs are linked lists of instructions initially pointed to by an operation
|
||||
request block (ORB), which is initially given to Start Subchannel (SSCH)
|
||||
command along with the subchannel number for the IO subsystem to process
|
||||
while the CPU continues executing normal code.
|
||||
CCWs come in two flavours, Format 0 (24 bit for backward compatibility) and
|
||||
Format 1 (31 bit). These are typically used to issue read and write (and many
|
||||
other) instructions. They consist of a length field and an absolute address
|
||||
field.
|
||||
Each IO typically gets 1 or 2 interrupts, one for channel end (primary status)
|
||||
when the channel is idle, and the second for device end (secondary status).
|
||||
Sometimes you get both concurrently. You check how the IO went on by issuing a
|
||||
TEST SUBCHANNEL at each interrupt, from which you receive an Interruption
|
||||
response block (IRB). If you get channel and device end status in the IRB
|
||||
without channel checks etc. your IO probably went okay. If you didn't you
|
||||
probably need to examine the IRB, extended status word etc.
|
||||
If an error occurs, more sophisticated control units have a facility known as
|
||||
concurrent sense this means that if an error occurs Extended sense information will
|
||||
be presented in the Extended status word in the IRB if not you have to issue a
|
||||
subsequent SENSE CCW command after the test subchannel.
|
||||
concurrent sense. This means that if an error occurs Extended sense information
|
||||
will be presented in the Extended status word in the IRB. If not you have to
|
||||
issue a subsequent SENSE CCW command after the test subchannel.
|
||||
|
||||
|
||||
TPI( Test pending interrupt) can also be used for polled IO but in multitasking multiprocessor
|
||||
systems it isn't recommended except for checking special cases ( i.e. non looping checks for
|
||||
pending IO etc. ).
|
||||
TPI (Test pending interrupt) can also be used for polled IO, but in
|
||||
multitasking multiprocessor systems it isn't recommended except for
|
||||
checking special cases (i.e. non looping checks for pending IO etc.).
|
||||
|
||||
Store Subchannel & Modify Subchannel can be used to examine & modify operating characteristics
|
||||
of a subchannel ( e.g. channel paths ).
|
||||
Store Subchannel and Modify Subchannel can be used to examine and modify
|
||||
operating characteristics of a subchannel (e.g. channel paths).
|
||||
|
||||
Other IO related Terms:
|
||||
Sysplex: S390's Clustering Technology
|
||||
QDIO: S390's new high speed IO architecture to support devices such as gigabit ethernet,
|
||||
this architecture is also designed to be forward compatible with up & coming 64 bit machines.
|
||||
QDIO: S390's new high speed IO architecture to support devices such as gigabit
|
||||
ethernet, this architecture is also designed to be forward compatible with
|
||||
upcoming 64 bit machines.
|
||||
|
||||
|
||||
General Concepts
|
||||
@ -1406,37 +1418,40 @@ sometimes called Bus-and Tag & sometimes Original Equipment Manufacturers
|
||||
Interface (OEMI).
|
||||
|
||||
This byte wide Parallel channel path/bus has parity & data on the "Bus" cable
|
||||
& control lines on the "Tag" cable. These can operate in byte multiplex mode for
|
||||
sharing between several slow devices or burst mode & monopolize the channel for the
|
||||
whole burst. Up to 256 devices can be addressed on one of these cables. These cables are
|
||||
about one inch in diameter. The maximum unextended length supported by these cables is
|
||||
125 Meters but this can be extended up to 2km with a fibre optic channel extended
|
||||
such as a 3044. The maximum burst speed supported is 4.5 megabytes per second however
|
||||
some really old processors support only transfer rates of 3.0, 2.0 & 1.0 MB/sec.
|
||||
and control lines on the "Tag" cable. These can operate in byte multiplex mode
|
||||
for sharing between several slow devices or burst mode and monopolize the
|
||||
channel for the whole burst. Up to 256 devices can be addressed on one of these
|
||||
cables. These cables are about one inch in diameter. The maximum unextended
|
||||
length supported by these cables is 125 Meters but this can be extended up to
|
||||
2km with a fibre optic channel extended such as a 3044. The maximum burst speed
|
||||
supported is 4.5 megabytes per second. However, some really old processors
|
||||
support only transfer rates of 3.0, 2.0 & 1.0 MB/sec.
|
||||
One of these paths can be daisy chained to up to 8 control units.
|
||||
|
||||
|
||||
ESCON if fibre optic it is also called FICON
|
||||
Was introduced by IBM in 1990. Has 2 fibre optic cables & uses either leds or lasers
|
||||
for communication at a signaling rate of up to 200 megabits/sec. As 10bits are transferred
|
||||
for every 8 bits info this drops to 160 megabits/sec & to 18.6 Megabytes/sec once
|
||||
control info & CRC are added. ESCON only operates in burst mode.
|
||||
Was introduced by IBM in 1990. Has 2 fibre optic cables and uses either leds or
|
||||
lasers for communication at a signaling rate of up to 200 megabits/sec. As
|
||||
10bits are transferred for every 8 bits info this drops to 160 megabits/sec
|
||||
and to 18.6 Megabytes/sec once control info and CRC are added. ESCON only
|
||||
operates in burst mode.
|
||||
|
||||
ESCONs typical max cable length is 3km for the led version & 20km for the laser version
|
||||
known as XDF ( extended distance facility ). This can be further extended by using an
|
||||
ESCON director which triples the above mentioned ranges. Unlike Bus & Tag as ESCON is
|
||||
serial it uses a packet switching architecture the standard Bus & Tag control protocol
|
||||
is however present within the packets. Up to 256 devices can be attached to each control
|
||||
unit that uses one of these interfaces.
|
||||
ESCONs typical max cable length is 3km for the led version and 20km for the
|
||||
laser version known as XDF (extended distance facility). This can be further
|
||||
extended by using an ESCON director which triples the above mentioned ranges.
|
||||
Unlike Bus & Tag as ESCON is serial it uses a packet switching architecture,
|
||||
the standard Bus & Tag control protocol is however present within the packets.
|
||||
Up to 256 devices can be attached to each control unit that uses one of these
|
||||
interfaces.
|
||||
|
||||
Common 390 Devices include:
|
||||
Network adapters typically OSA2,3172's,2116's & OSA-E gigabit ethernet adapters,
|
||||
Consoles 3270 & 3215 ( a teletype emulated under linux for a line mode console ).
|
||||
Consoles 3270 & 3215 (a teletype emulated under linux for a line mode console).
|
||||
DASD's direct access storage devices ( otherwise known as hard disks ).
|
||||
Tape Drives.
|
||||
CTC ( Channel to Channel Adapters ),
|
||||
ESCON or Parallel Cables used as a very high speed serial link
|
||||
between 2 machines. We use 2 cables under linux to do a bi-directional serial link.
|
||||
between 2 machines.
|
||||
|
||||
|
||||
Debugging IO on s/390 & z/Architecture under VM
|
||||
@ -1475,9 +1490,9 @@ or the halt subchannels
|
||||
or TR HSCH 7C08-7C09
|
||||
MSCH's ,STSCH's I think you can guess the rest
|
||||
|
||||
Ingo's favourite trick is tracing all the IO's & CCWS & spooling them into the reader of another
|
||||
VM guest so he can ftp the logfile back to his own machine.I'll do a small bit of this & give you
|
||||
a look at the output.
|
||||
A good trick is tracing all the IO's and CCWS and spooling them into the reader
|
||||
of another VM guest so you can ftp the logfile back to your own machine. I'll do
|
||||
a small bit of this and give you a look at the output.
|
||||
|
||||
1) Spool stdout to VM reader
|
||||
SP PRT TO (another vm guest ) or * for the local vm guest
|
||||
@ -1593,8 +1608,8 @@ undisplay : undo's display's
|
||||
|
||||
info breakpoints: shows all current breakpoints
|
||||
|
||||
info stack: shows stack back trace ( if this doesn't work too well, I'll show you the
|
||||
stacktrace by hand below ).
|
||||
info stack: shows stack back trace (if this doesn't work too well, I'll show
|
||||
you the stacktrace by hand below).
|
||||
|
||||
info locals: displays local variables.
|
||||
|
||||
@ -1619,7 +1634,8 @@ next: like step except this will not step into subroutines
|
||||
stepi: steps a single machine code instruction.
|
||||
e.g. stepi 100
|
||||
|
||||
nexti: steps a single machine code instruction but will not step into subroutines.
|
||||
nexti: steps a single machine code instruction but will not step into
|
||||
subroutines.
|
||||
|
||||
finish: will run until exit of the current routine
|
||||
|
||||
@ -1721,7 +1737,8 @@ e.g.
|
||||
outputs:
|
||||
$1 = 11
|
||||
|
||||
You might now be thinking that the line above didn't work, something extra had to be done.
|
||||
You might now be thinking that the line above didn't work, something extra had
|
||||
to be done.
|
||||
(gdb) call fflush(stdout)
|
||||
hello world$2 = 0
|
||||
As an aside the debugger also calls malloc & free under the hood
|
||||
@ -1804,26 +1821,17 @@ man gdb or info gdb.
|
||||
core dumps
|
||||
----------
|
||||
What a core dump ?,
|
||||
A core dump is a file generated by the kernel ( if allowed ) which contains the registers,
|
||||
& all active pages of the program which has crashed.
|
||||
From this file gdb will allow you to look at the registers & stack trace & memory of the
|
||||
program as if it just crashed on your system, it is usually called core & created in the
|
||||
current working directory.
|
||||
This is very useful in that a customer can mail a core dump to a technical support department
|
||||
& the technical support department can reconstruct what happened.
|
||||
Provided they have an identical copy of this program with debugging symbols compiled in &
|
||||
the source base of this build is available.
|
||||
In short it is far more useful than something like a crash log could ever hope to be.
|
||||
|
||||
In theory all that is missing to restart a core dumped program is a kernel patch which
|
||||
will do the following.
|
||||
1) Make a new kernel task structure
|
||||
2) Reload all the dumped pages back into the kernel's memory management structures.
|
||||
3) Do the required clock fixups
|
||||
4) Get all files & network connections for the process back into an identical state ( really difficult ).
|
||||
5) A few more difficult things I haven't thought of.
A core dump is a file generated by the kernel (if allowed) which contains the
|
||||
registers and all active pages of the program which has crashed.
|
||||
From this file gdb will allow you to look at the registers, stack trace and
|
||||
memory of the program as if it just crashed on your system. It is usually
|
||||
called core and created in the current working directory.
|
||||
This is very useful in that a customer can mail a core dump to a technical
|
||||
support department and the technical support department can reconstruct what
|
||||
happened. Provided they have an identical copy of this program with debugging
|
||||
symbols compiled in and the source base of this build is available.
|
||||
In short it is far more useful than something like a crash log could ever hope
|
||||
to be.
|
||||
|
||||
Why have I never seen one ?.
|
||||
Probably because you haven't used the command
|
||||
@ -1868,7 +1876,7 @@ Breakpoint 2 at 0x4d87a4: file top.c, line 2609.
|
||||
#3 0x5167e6 in readline_internal_char () at readline.c:454
|
||||
#4 0x5168ee in readline_internal_charloop () at readline.c:507
|
||||
#5 0x51692c in readline_internal () at readline.c:521
|
||||
#6 0x5164fe in readline (prompt=0x7ffff810 "\177ÿøx\177ÿ÷Ø\177ÿøxÀ")
|
||||
#6 0x5164fe in readline (prompt=0x7ffff810)
|
||||
at readline.c:349
|
||||
#7 0x4d7a8a in command_line_input (prompt=0x564420 "(gdb) ", repeat=1,
|
||||
annotation_suffix=0x4d6b44 "prompt") at top.c:2091
|
||||
@ -1929,8 +1937,8 @@ cat /proc/sys/net/ipv4/ip_forward
|
||||
On my machine now outputs
|
||||
1
|
||||
IP forwarding is on.
|
||||
There is a lot of useful info in here best found by going in & having a look around,
|
||||
so I'll take you through some entries I consider important.
|
||||
There is a lot of useful info in here best found by going in and having a look
|
||||
around, so I'll take you through some entries I consider important.
|
||||
|
||||
All the processes running on the machine have their own entry defined by
|
||||
/proc/<pid>
|
||||
@ -2060,7 +2068,8 @@ if the device doesn't say up
|
||||
try
|
||||
/etc/rc.d/init.d/network start
|
||||
( this starts the network stack & hopefully calls ifconfig tr0 up ).
|
||||
ifconfig looks at the output of /proc/net/dev & presents it in a more presentable form
|
||||
ifconfig looks at the output of /proc/net/dev and presents it in a more
|
||||
presentable form.
|
||||
Now ping the device from a machine in the same subnet.
|
||||
if the RX packets count & TX packets counts don't increment you probably
|
||||
have problems.
|
||||
@ -2086,34 +2095,6 @@ of the device.
|
||||
See the manpage chandev.8 & type cat /proc/chandev for more info.
Starting points for debugging scripting languages etc.
|
||||
======================================================
|
||||
|
||||
bash/sh
|
||||
|
||||
bash -x <scriptname>
|
||||
e.g. bash -x /usr/bin/bashbug
|
||||
displays the following lines as it executes them.
|
||||
+ MACHINE=i586
|
||||
+ OS=linux-gnu
|
||||
+ CC=gcc
|
||||
+ CFLAGS= -DPROGRAM='bash' -DHOSTTYPE='i586' -DOSTYPE='linux-gnu' -DMACHTYPE='i586-pc-linux-gnu' -DSHELL -DHAVE_CONFIG_H -I. -I. -I./lib -O2 -pipe
|
||||
+ RELEASE=2.01
|
||||
+ PATCHLEVEL=1
|
||||
+ RELSTATUS=release
|
||||
+ MACHTYPE=i586-pc-linux-gnu
|
||||
|
||||
perl -d <scriptname> runs the perlscript in a fully interactive debugger
|
||||
<like gdb>.
|
||||
Type 'h' in the debugger for help.
|
||||
|
||||
for debugging java type
|
||||
jdb <filename> another fully interactive gdb style debugger.
|
||||
& type ? in the debugger for help.
|
SysRq
|
||||
=====
|
||||
This is now supported by linux for s/390 & z/Architecture.
|
||||
|
Makefile (6 changed lines)
@ -726,10 +726,14 @@ KBUILD_CFLAGS += $(call cc-option, -femit-struct-debug-baseonly) \
|
||||
endif
|
||||
|
||||
ifdef CONFIG_FUNCTION_TRACER
|
||||
ifndef CC_FLAGS_FTRACE
|
||||
CC_FLAGS_FTRACE := -pg
|
||||
endif
|
||||
export CC_FLAGS_FTRACE
|
||||
ifdef CONFIG_HAVE_FENTRY
|
||||
CC_USING_FENTRY := $(call cc-option, -mfentry -DCC_USING_FENTRY)
|
||||
endif
|
||||
KBUILD_CFLAGS += -pg $(CC_USING_FENTRY)
|
||||
KBUILD_CFLAGS += $(CC_FLAGS_FTRACE) $(CC_USING_FENTRY)
|
||||
KBUILD_AFLAGS += $(CC_USING_FENTRY)
|
||||
ifdef CONFIG_DYNAMIC_FTRACE
|
||||
ifdef CONFIG_HAVE_C_RECORDMCOUNT
|
||||
|
@ -66,6 +66,7 @@ config S390
|
||||
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
|
||||
select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
|
||||
select ARCH_HAS_GCOV_PROFILE_ALL
|
||||
select ARCH_HAS_SG_CHAIN
|
||||
select ARCH_HAVE_NMI_SAFE_CMPXCHG
|
||||
select ARCH_INLINE_READ_LOCK
|
||||
select ARCH_INLINE_READ_LOCK_BH
|
||||
@ -116,7 +117,6 @@ config S390
|
||||
select HAVE_BPF_JIT if 64BIT && PACK_STACK
|
||||
select HAVE_CMPXCHG_DOUBLE
|
||||
select HAVE_CMPXCHG_LOCAL
|
||||
select HAVE_C_RECORDMCOUNT
|
||||
select HAVE_DEBUG_KMEMLEAK
|
||||
select HAVE_DYNAMIC_FTRACE if 64BIT
|
||||
select HAVE_DYNAMIC_FTRACE_WITH_REGS if 64BIT
|
||||
@ -151,7 +151,6 @@ config S390
|
||||
select TTY
|
||||
select VIRT_CPU_ACCOUNTING
|
||||
select VIRT_TO_BUS
|
||||
select ARCH_HAS_SG_CHAIN
|
||||
|
||||
config SCHED_OMIT_FRAME_POINTER
|
||||
def_bool y
|
||||
@ -185,6 +184,10 @@ config HAVE_MARCH_ZEC12_FEATURES
|
||||
def_bool n
|
||||
select HAVE_MARCH_Z196_FEATURES
|
||||
|
||||
config HAVE_MARCH_Z13_FEATURES
|
||||
def_bool n
|
||||
select HAVE_MARCH_ZEC12_FEATURES
|
||||
|
||||
choice
|
||||
prompt "Processor type"
|
||||
default MARCH_G5
|
||||
@ -244,6 +247,14 @@ config MARCH_ZEC12
|
||||
2827 series). The kernel will be slightly faster but will not work on
|
||||
older machines.
|
||||
|
||||
config MARCH_Z13
|
||||
bool "IBM z13"
|
||||
select HAVE_MARCH_Z13_FEATURES if 64BIT
|
||||
help
|
||||
Select this to enable optimizations for IBM z13 (2964 series).
|
||||
The kernel will be slightly faster but will not work on older
|
||||
machines.
|
||||
|
||||
endchoice
|
||||
|
||||
config MARCH_G5_TUNE
|
||||
@ -267,6 +278,9 @@ config MARCH_Z196_TUNE
|
||||
config MARCH_ZEC12_TUNE
|
||||
def_bool TUNE_ZEC12 || MARCH_ZEC12 && TUNE_DEFAULT
|
||||
|
||||
config MARCH_Z13_TUNE
|
||||
def_bool TUNE_Z13 || MARCH_Z13 && TUNE_DEFAULT
|
||||
|
||||
choice
|
||||
prompt "Tune code generation"
|
||||
default TUNE_DEFAULT
|
||||
@ -305,6 +319,9 @@ config TUNE_Z196
|
||||
config TUNE_ZEC12
|
||||
bool "IBM zBC12 and zEC12"
|
||||
|
||||
config TUNE_Z13
|
||||
bool "IBM z13"
|
||||
|
||||
endchoice
|
||||
|
||||
config 64BIT
|
||||
@ -356,14 +373,14 @@ config SMP
|
||||
Even if you don't know what to do here, say Y.
|
||||
|
||||
config NR_CPUS
|
||||
int "Maximum number of CPUs (2-256)"
|
||||
range 2 256
|
||||
int "Maximum number of CPUs (2-512)"
|
||||
range 2 512
|
||||
depends on SMP
|
||||
default "32" if !64BIT
|
||||
default "64" if 64BIT
|
||||
help
|
||||
This allows you to specify the maximum number of CPUs which this
|
||||
kernel will support. The maximum supported value is 256 and the
|
||||
kernel will support. The maximum supported value is 512 and the
|
||||
minimum value which makes sense is 2.
|
||||
|
||||
This is purely to save memory - each supported CPU adds
|
||||
@ -378,17 +395,26 @@ config HOTPLUG_CPU
|
||||
can be controlled through /sys/devices/system/cpu/cpu#.
|
||||
Say N if you want to disable CPU hotplug.
|
||||
|
||||
config SCHED_SMT
|
||||
def_bool n
|
||||
|
||||
config SCHED_MC
|
||||
def_bool n
|
||||
|
||||
config SCHED_BOOK
|
||||
def_bool n
|
||||
|
||||
config SCHED_TOPOLOGY
|
||||
def_bool y
|
||||
prompt "Book scheduler support"
|
||||
prompt "Topology scheduler support"
|
||||
depends on SMP
|
||||
select SCHED_SMT
|
||||
select SCHED_MC
|
||||
select SCHED_BOOK
|
||||
help
|
||||
Book scheduler support improves the CPU scheduler's decision making
|
||||
when dealing with machines that have several books.
|
||||
Topology scheduler support improves the CPU scheduler's decision
|
||||
making when dealing with machines that have multi-threading,
|
||||
multiple cores or multiple books.
|
||||
|
||||
source kernel/Kconfig.preempt
|
||||
|
||||
|
@ -42,6 +42,7 @@ mflags-$(CONFIG_MARCH_Z9_109) := -march=z9-109
|
||||
mflags-$(CONFIG_MARCH_Z10) := -march=z10
|
||||
mflags-$(CONFIG_MARCH_Z196) := -march=z196
|
||||
mflags-$(CONFIG_MARCH_ZEC12) := -march=zEC12
|
||||
mflags-$(CONFIG_MARCH_Z13) := -march=z13
|
||||
|
||||
aflags-y += $(mflags-y)
|
||||
cflags-y += $(mflags-y)
|
||||
@ -53,6 +54,7 @@ cflags-$(CONFIG_MARCH_Z9_109_TUNE) += -mtune=z9-109
|
||||
cflags-$(CONFIG_MARCH_Z10_TUNE) += -mtune=z10
|
||||
cflags-$(CONFIG_MARCH_Z196_TUNE) += -mtune=z196
|
||||
cflags-$(CONFIG_MARCH_ZEC12_TUNE) += -mtune=zEC12
|
||||
cflags-$(CONFIG_MARCH_Z13_TUNE) += -mtune=z13
|
||||
|
||||
#KBUILD_IMAGE is necessary for make rpm
|
||||
KBUILD_IMAGE :=arch/s390/boot/image
|
||||
@ -85,6 +87,16 @@ ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y)
|
||||
cflags-$(CONFIG_WARN_DYNAMIC_STACK) += -mwarn-dynamicstack
|
||||
endif
|
||||
|
||||
ifdef CONFIG_FUNCTION_TRACER
|
||||
# make use of hotpatch feature if the compiler supports it
|
||||
cc_hotpatch := -mhotpatch=0,3
|
||||
ifeq ($(call cc-option-yn,$(cc_hotpatch)),y)
|
||||
CC_FLAGS_FTRACE := $(cc_hotpatch)
|
||||
KBUILD_AFLAGS += -DCC_USING_HOTPATCH
|
||||
KBUILD_CFLAGS += -DCC_USING_HOTPATCH
|
||||
endif
|
||||
endif
|
||||
|
||||
KBUILD_CFLAGS += -mbackchain -msoft-float $(cflags-y)
|
||||
KBUILD_CFLAGS += -pipe -fno-strength-reduce -Wno-sign-compare
|
||||
KBUILD_AFLAGS += $(aflags-y)
|
||||
|
@ -8,6 +8,7 @@
|
||||
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/sclp.h>
|
||||
#include <asm/ipl.h>
|
||||
#include "sizes.h"
|
||||
|
||||
@ -63,8 +64,6 @@ static unsigned long free_mem_end_ptr;
|
||||
#include "../../../../lib/decompress_unxz.c"
|
||||
#endif
|
||||
|
||||
extern _sclp_print_early(const char *);
|
||||
|
||||
static int puts(const char *s)
|
||||
{
|
||||
_sclp_print_early(s);
|
||||
|
@ -555,7 +555,6 @@ CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
|
||||
CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
|
||||
CONFIG_SLUB_DEBUG_ON=y
|
||||
CONFIG_SLUB_STATS=y
|
||||
CONFIG_DEBUG_KMEMLEAK=y
|
||||
CONFIG_DEBUG_STACK_USAGE=y
|
||||
CONFIG_DEBUG_VM=y
|
||||
CONFIG_DEBUG_VM_RB=y
|
||||
@ -563,6 +562,7 @@ CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
|
||||
CONFIG_DEBUG_PER_CPU_MAPS=y
|
||||
CONFIG_DEBUG_SHIRQ=y
|
||||
CONFIG_DETECT_HUNG_TASK=y
|
||||
CONFIG_PANIC_ON_OOPS=y
|
||||
CONFIG_TIMER_STATS=y
|
||||
CONFIG_DEBUG_RT_MUTEXES=y
|
||||
CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
|
||||
|
@ -540,6 +540,7 @@ CONFIG_UNUSED_SYMBOLS=y
|
||||
CONFIG_MAGIC_SYSRQ=y
|
||||
CONFIG_DEBUG_KERNEL=y
|
||||
CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
|
||||
CONFIG_PANIC_ON_OOPS=y
|
||||
CONFIG_TIMER_STATS=y
|
||||
CONFIG_RCU_TORTURE_TEST=m
|
||||
CONFIG_RCU_CPU_STALL_TIMEOUT=60
|
||||
|
@ -537,6 +537,7 @@ CONFIG_FRAME_WARN=1024
|
||||
CONFIG_UNUSED_SYMBOLS=y
|
||||
CONFIG_MAGIC_SYSRQ=y
|
||||
CONFIG_DEBUG_KERNEL=y
|
||||
CONFIG_PANIC_ON_OOPS=y
|
||||
CONFIG_TIMER_STATS=y
|
||||
CONFIG_RCU_TORTURE_TEST=m
|
||||
CONFIG_RCU_CPU_STALL_TIMEOUT=60
|
||||
|
@ -71,6 +71,7 @@ CONFIG_PRINTK_TIME=y
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_FS=y
|
||||
CONFIG_DEBUG_KERNEL=y
|
||||
CONFIG_PANIC_ON_OOPS=y
|
||||
# CONFIG_SCHED_DEBUG is not set
|
||||
CONFIG_RCU_CPU_STALL_TIMEOUT=60
|
||||
# CONFIG_FTRACE is not set
|
||||
|
@ -134,7 +134,7 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
|
||||
|
||||
static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
|
||||
{
|
||||
const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
|
||||
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
if (unlikely(need_fallback(sctx->key_len))) {
|
||||
crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
|
||||
@ -159,7 +159,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
|
||||
|
||||
static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
|
||||
{
|
||||
const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
|
||||
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
if (unlikely(need_fallback(sctx->key_len))) {
|
||||
crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
|
||||
|
@ -14,7 +14,6 @@ CONFIG_IKCONFIG_PROC=y
|
||||
CONFIG_CGROUPS=y
|
||||
CONFIG_CPUSETS=y
|
||||
CONFIG_CGROUP_CPUACCT=y
|
||||
CONFIG_RESOURCE_COUNTERS=y
|
||||
CONFIG_MEMCG=y
|
||||
CONFIG_MEMCG_SWAP=y
|
||||
CONFIG_CGROUP_SCHED=y
|
||||
@ -22,12 +21,8 @@ CONFIG_RT_GROUP_SCHED=y
|
||||
CONFIG_BLK_CGROUP=y
|
||||
CONFIG_NAMESPACES=y
|
||||
CONFIG_BLK_DEV_INITRD=y
|
||||
CONFIG_RD_BZIP2=y
|
||||
CONFIG_RD_LZMA=y
|
||||
CONFIG_RD_XZ=y
|
||||
CONFIG_RD_LZO=y
|
||||
CONFIG_RD_LZ4=y
|
||||
CONFIG_EXPERT=y
|
||||
CONFIG_BPF_SYSCALL=y
|
||||
# CONFIG_COMPAT_BRK is not set
|
||||
CONFIG_PROFILING=y
|
||||
CONFIG_OPROFILE=y
|
||||
|
@ -5,3 +5,4 @@
|
||||
obj-$(CONFIG_S390_HYPFS_FS) += s390_hypfs.o
|
||||
|
||||
s390_hypfs-objs := inode.o hypfs_diag.o hypfs_vm.o hypfs_dbfs.o hypfs_sprp.o
|
||||
s390_hypfs-objs += hypfs_diag0c.o
|
||||
|
@@ -37,6 +37,10 @@ extern int hypfs_vm_init(void);
extern void hypfs_vm_exit(void);
extern int hypfs_vm_create_files(struct dentry *root);

/* VM diagnose 0c */
int hypfs_diag0c_init(void);
void hypfs_diag0c_exit(void);

/* Set Partition-Resource Parameter */
int hypfs_sprp_init(void);
void hypfs_sprp_exit(void);
@@ -49,7 +53,6 @@ struct hypfs_dbfs_data {
	void *buf_free_ptr;
	size_t size;
	struct hypfs_dbfs_file *dbfs_file;
	struct kref kref;
};

struct hypfs_dbfs_file {
@@ -61,8 +64,6 @@ struct hypfs_dbfs_file {
			    unsigned long);

	/* Private data for hypfs_dbfs.c */
	struct hypfs_dbfs_data *data;
	struct delayed_work data_free_work;
	struct mutex lock;
	struct dentry *dentry;
};

@ -17,33 +17,16 @@ static struct hypfs_dbfs_data *hypfs_dbfs_data_alloc(struct hypfs_dbfs_file *f)
|
||||
data = kmalloc(sizeof(*data), GFP_KERNEL);
|
||||
if (!data)
|
||||
return NULL;
|
||||
kref_init(&data->kref);
|
||||
data->dbfs_file = f;
|
||||
return data;
|
||||
}
|
||||
|
||||
static void hypfs_dbfs_data_free(struct kref *kref)
|
||||
static void hypfs_dbfs_data_free(struct hypfs_dbfs_data *data)
|
||||
{
|
||||
struct hypfs_dbfs_data *data;
|
||||
|
||||
data = container_of(kref, struct hypfs_dbfs_data, kref);
|
||||
data->dbfs_file->data_free(data->buf_free_ptr);
|
||||
kfree(data);
|
||||
}
|
||||
|
||||
static void data_free_delayed(struct work_struct *work)
|
||||
{
|
||||
struct hypfs_dbfs_data *data;
|
||||
struct hypfs_dbfs_file *df;
|
||||
|
||||
df = container_of(work, struct hypfs_dbfs_file, data_free_work.work);
|
||||
mutex_lock(&df->lock);
|
||||
data = df->data;
|
||||
df->data = NULL;
|
||||
mutex_unlock(&df->lock);
|
||||
kref_put(&data->kref, hypfs_dbfs_data_free);
|
||||
}
|
||||
|
||||
static ssize_t dbfs_read(struct file *file, char __user *buf,
|
||||
size_t size, loff_t *ppos)
|
||||
{
|
||||
@ -56,28 +39,21 @@ static ssize_t dbfs_read(struct file *file, char __user *buf,
|
||||
|
||||
df = file_inode(file)->i_private;
|
||||
mutex_lock(&df->lock);
|
||||
if (!df->data) {
|
||||
data = hypfs_dbfs_data_alloc(df);
|
||||
if (!data) {
|
||||
mutex_unlock(&df->lock);
|
||||
return -ENOMEM;
|
||||
}
|
||||
rc = df->data_create(&data->buf, &data->buf_free_ptr,
|
||||
&data->size);
|
||||
if (rc) {
|
||||
mutex_unlock(&df->lock);
|
||||
kfree(data);
|
||||
return rc;
|
||||
}
|
||||
df->data = data;
|
||||
schedule_delayed_work(&df->data_free_work, HZ);
|
||||
data = hypfs_dbfs_data_alloc(df);
|
||||
if (!data) {
|
||||
mutex_unlock(&df->lock);
|
||||
return -ENOMEM;
|
||||
}
|
||||
rc = df->data_create(&data->buf, &data->buf_free_ptr, &data->size);
|
||||
if (rc) {
|
||||
mutex_unlock(&df->lock);
|
||||
kfree(data);
|
||||
return rc;
|
||||
}
|
||||
data = df->data;
|
||||
kref_get(&data->kref);
|
||||
mutex_unlock(&df->lock);
|
||||
|
||||
rc = simple_read_from_buffer(buf, size, ppos, data->buf, data->size);
|
||||
kref_put(&data->kref, hypfs_dbfs_data_free);
|
||||
hypfs_dbfs_data_free(data);
|
||||
return rc;
|
||||
}
|
||||
|
||||
@ -108,7 +84,6 @@ int hypfs_dbfs_create_file(struct hypfs_dbfs_file *df)
|
||||
if (IS_ERR(df->dentry))
|
||||
return PTR_ERR(df->dentry);
|
||||
mutex_init(&df->lock);
|
||||
INIT_DELAYED_WORK(&df->data_free_work, data_free_delayed);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
arch/s390/hypfs/hypfs_diag0c.c (new file, 139 lines)
@@ -0,0 +1,139 @@
|
||||
/*
|
||||
* Hypervisor filesystem for Linux on s390
|
||||
*
|
||||
* Diag 0C implementation
|
||||
*
|
||||
* Copyright IBM Corp. 2014
|
||||
*/
|
||||
|
||||
#include <linux/slab.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <asm/hypfs.h>
|
||||
#include "hypfs.h"
|
||||
|
||||
#define DBFS_D0C_HDR_VERSION 0
|
||||
|
||||
/*
|
||||
* Execute diagnose 0c in 31 bit mode
|
||||
*/
|
||||
static void diag0c(struct hypfs_diag0c_entry *entry)
|
||||
{
|
||||
asm volatile (
|
||||
#ifdef CONFIG_64BIT
|
||||
" sam31\n"
|
||||
" diag %0,%0,0x0c\n"
|
||||
" sam64\n"
|
||||
#else
|
||||
" diag %0,%0,0x0c\n"
|
||||
#endif
|
||||
: /* no output register */
|
||||
: "a" (entry)
|
||||
: "memory");
|
||||
}
|
||||
|
||||
/*
|
||||
* Get hypfs_diag0c_entry from CPU vector and store diag0c data
|
||||
*/
|
||||
static void diag0c_fn(void *data)
|
||||
{
|
||||
diag0c(((void **) data)[smp_processor_id()]);
|
||||
}
|
||||
|
||||
/*
|
||||
* Allocate buffer and store diag 0c data
|
||||
*/
|
||||
static void *diag0c_store(unsigned int *count)
|
||||
{
|
||||
struct hypfs_diag0c_data *diag0c_data;
|
||||
unsigned int cpu_count, cpu, i;
|
||||
void **cpu_vec;
|
||||
|
||||
get_online_cpus();
|
||||
cpu_count = num_online_cpus();
|
||||
cpu_vec = kmalloc(sizeof(*cpu_vec) * num_possible_cpus(), GFP_KERNEL);
|
||||
if (!cpu_vec)
|
||||
goto fail_put_online_cpus;
|
||||
/* Note: Diag 0c needs 8 byte alignment and real storage */
|
||||
diag0c_data = kzalloc(sizeof(struct hypfs_diag0c_hdr) +
|
||||
cpu_count * sizeof(struct hypfs_diag0c_entry),
|
||||
GFP_KERNEL | GFP_DMA);
|
||||
if (!diag0c_data)
|
||||
goto fail_kfree_cpu_vec;
|
||||
i = 0;
|
||||
/* Fill CPU vector for each online CPU */
|
||||
for_each_online_cpu(cpu) {
|
||||
diag0c_data->entry[i].cpu = cpu;
|
||||
cpu_vec[cpu] = &diag0c_data->entry[i++];
|
||||
}
|
||||
/* Collect data all CPUs */
|
||||
on_each_cpu(diag0c_fn, cpu_vec, 1);
|
||||
*count = cpu_count;
|
||||
kfree(cpu_vec);
|
||||
put_online_cpus();
|
||||
return diag0c_data;
|
||||
|
||||
fail_kfree_cpu_vec:
|
||||
kfree(cpu_vec);
|
||||
fail_put_online_cpus:
|
||||
put_online_cpus();
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
/*
|
||||
* Hypfs DBFS callback: Free diag 0c data
|
||||
*/
|
||||
static void dbfs_diag0c_free(const void *data)
|
||||
{
|
||||
kfree(data);
|
||||
}
|
||||
|
||||
/*
|
||||
* Hypfs DBFS callback: Create diag 0c data
|
||||
*/
|
||||
static int dbfs_diag0c_create(void **data, void **data_free_ptr, size_t *size)
|
||||
{
|
||||
struct hypfs_diag0c_data *diag0c_data;
|
||||
unsigned int count;
|
||||
|
||||
diag0c_data = diag0c_store(&count);
|
||||
if (IS_ERR(diag0c_data))
|
||||
return PTR_ERR(diag0c_data);
|
||||
memset(&diag0c_data->hdr, 0, sizeof(diag0c_data->hdr));
|
||||
get_tod_clock_ext(diag0c_data->hdr.tod_ext);
|
||||
diag0c_data->hdr.len = count * sizeof(struct hypfs_diag0c_entry);
|
||||
diag0c_data->hdr.version = DBFS_D0C_HDR_VERSION;
|
||||
diag0c_data->hdr.count = count;
|
||||
*data = diag0c_data;
|
||||
*data_free_ptr = diag0c_data;
|
||||
*size = diag0c_data->hdr.len + sizeof(struct hypfs_diag0c_hdr);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Hypfs DBFS file structure
|
||||
*/
|
||||
static struct hypfs_dbfs_file dbfs_file_0c = {
|
||||
.name = "diag_0c",
|
||||
.data_create = dbfs_diag0c_create,
|
||||
.data_free = dbfs_diag0c_free,
|
||||
};
|
||||
|
||||
/*
|
||||
* Initialize diag 0c interface for z/VM
|
||||
*/
|
||||
int __init hypfs_diag0c_init(void)
|
||||
{
|
||||
if (!MACHINE_IS_VM)
|
||||
return 0;
|
||||
return hypfs_dbfs_create_file(&dbfs_file_0c);
|
||||
}
|
||||
|
||||
/*
|
||||
* Shutdown diag 0c interface for z/VM
|
||||
*/
|
||||
void hypfs_diag0c_exit(void)
|
||||
{
|
||||
if (!MACHINE_IS_VM)
|
||||
return;
|
||||
hypfs_dbfs_remove_file(&dbfs_file_0c);
|
||||
}
|
@ -482,10 +482,14 @@ static int __init hypfs_init(void)
|
||||
rc = -ENODATA;
|
||||
goto fail_hypfs_vm_exit;
|
||||
}
|
||||
if (hypfs_diag0c_init()) {
|
||||
rc = -ENODATA;
|
||||
goto fail_hypfs_sprp_exit;
|
||||
}
|
||||
s390_kobj = kobject_create_and_add("s390", hypervisor_kobj);
|
||||
if (!s390_kobj) {
|
||||
rc = -ENOMEM;
|
||||
goto fail_hypfs_sprp_exit;
|
||||
goto fail_hypfs_diag0c_exit;
|
||||
}
|
||||
rc = register_filesystem(&hypfs_type);
|
||||
if (rc)
|
||||
@ -494,6 +498,8 @@ static int __init hypfs_init(void)
|
||||
|
||||
fail_filesystem:
|
||||
kobject_put(s390_kobj);
|
||||
fail_hypfs_diag0c_exit:
|
||||
hypfs_diag0c_exit();
|
||||
fail_hypfs_sprp_exit:
|
||||
hypfs_sprp_exit();
|
||||
fail_hypfs_vm_exit:
|
||||
@ -510,6 +516,7 @@ static void __exit hypfs_exit(void)
|
||||
{
|
||||
unregister_filesystem(&hypfs_type);
|
||||
kobject_put(s390_kobj);
|
||||
hypfs_diag0c_exit();
|
||||
hypfs_sprp_exit();
|
||||
hypfs_vm_exit();
|
||||
hypfs_diag_exit();
|
||||
|
@ -189,6 +189,20 @@ static inline int ecctr(u64 ctr, u64 *val)
|
||||
return cc;
|
||||
}
|
||||
|
||||
/* Store CPU counter multiple for the MT utilization counter set */
|
||||
static inline int stcctm5(u64 num, u64 *val)
|
||||
{
|
||||
typedef struct { u64 _[num]; } addrtype;
|
||||
int cc;
|
||||
|
||||
asm volatile (
|
||||
" .insn rsy,0xeb0000000017,%2,5,%1\n"
|
||||
" ipm %0\n"
|
||||
" srl %0,28\n"
|
||||
: "=d" (cc), "=Q" (*(addrtype *) val) : "d" (num) : "cc");
|
||||
return cc;
|
||||
}
|
||||
|
||||
/* Query sampling information */
|
||||
static inline int qsi(struct hws_qsi_info_block *info)
|
||||
{
|
||||
|
@ -163,8 +163,8 @@ extern unsigned int vdso_enabled;
|
||||
the loader. We need to make sure that it is out of the way of the program
|
||||
that it will "exec", and that there is sufficient room for the brk. */
|
||||
|
||||
extern unsigned long randomize_et_dyn(unsigned long base);
|
||||
#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
|
||||
extern unsigned long randomize_et_dyn(void);
|
||||
#define ELF_ET_DYN_BASE randomize_et_dyn()
|
||||
|
||||
/* This yields a mask that user programs can use to figure out what
|
||||
instruction set this CPU supports. */
|
||||
@ -209,7 +209,9 @@ do { \
|
||||
} while (0)
|
||||
#endif /* CONFIG_COMPAT */
|
||||
|
||||
#define STACK_RND_MASK 0x7ffUL
|
||||
extern unsigned long mmap_rnd_mask;
|
||||
|
||||
#define STACK_RND_MASK (mmap_rnd_mask)
|
||||
|
||||
#define ARCH_DLINFO \
|
||||
do { \
|
||||
|
@ -3,8 +3,12 @@
|
||||
|
||||
#define ARCH_SUPPORTS_FTRACE_OPS 1
|
||||
|
||||
#ifdef CC_USING_HOTPATCH
|
||||
#define MCOUNT_INSN_SIZE 6
|
||||
#else
|
||||
#define MCOUNT_INSN_SIZE 24
|
||||
#define MCOUNT_RETURN_FIXUP 18
|
||||
#endif
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
@ -37,17 +41,28 @@ struct ftrace_insn {
|
||||
static inline void ftrace_generate_nop_insn(struct ftrace_insn *insn)
|
||||
{
|
||||
#ifdef CONFIG_FUNCTION_TRACER
|
||||
#ifdef CC_USING_HOTPATCH
|
||||
/* brcl 0,0 */
|
||||
insn->opc = 0xc004;
|
||||
insn->disp = 0;
|
||||
#else
|
||||
/* jg .+24 */
|
||||
insn->opc = 0xc0f4;
|
||||
insn->disp = MCOUNT_INSN_SIZE / 2;
|
||||
#endif
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline int is_ftrace_nop(struct ftrace_insn *insn)
|
||||
{
|
||||
#ifdef CONFIG_FUNCTION_TRACER
|
||||
#ifdef CC_USING_HOTPATCH
|
||||
if (insn->disp == 0)
|
||||
return 1;
|
||||
#else
|
||||
if (insn->disp == MCOUNT_INSN_SIZE / 2)
|
||||
return 1;
|
||||
#endif
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
@ -4,6 +4,7 @@
|
||||
#include <linux/types.h>
|
||||
|
||||
#define JUMP_LABEL_NOP_SIZE 6
|
||||
#define JUMP_LABEL_NOP_OFFSET 2
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
#define ASM_PTR ".quad"
|
||||
@ -13,9 +14,13 @@
|
||||
#define ASM_ALIGN ".balign 4"
|
||||
#endif
|
||||
|
||||
/*
|
||||
* We use a brcl 0,2 instruction for jump labels at compile time so it
|
||||
* can be easily distinguished from a hotpatch generated instruction.
|
||||
*/
|
||||
static __always_inline bool arch_static_branch(struct static_key *key)
|
||||
{
|
||||
asm_volatile_goto("0: brcl 0,0\n"
|
||||
asm_volatile_goto("0: brcl 0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n"
|
||||
".pushsection __jump_table, \"aw\"\n"
|
||||
ASM_ALIGN "\n"
|
||||
ASM_PTR " 0b, %l[label], %0\n"
|
||||
|
@ -1758,6 +1758,10 @@ extern int s390_enable_sie(void);
|
||||
extern int s390_enable_skey(void);
|
||||
extern void s390_reset_cmma(struct mm_struct *mm);
|
||||
|
||||
/* s390 has a private copy of get unmapped area to deal with cache synonyms */
|
||||
#define HAVE_ARCH_UNMAPPED_AREA
|
||||
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
|
||||
|
||||
/*
|
||||
* No page table caches to initialise
|
||||
*/
|
||||
|
@ -215,10 +215,7 @@ static inline unsigned short stap(void)
|
||||
/*
|
||||
* Give up the time slice of the virtual PU.
|
||||
*/
|
||||
static inline void cpu_relax(void)
|
||||
{
|
||||
barrier();
|
||||
}
|
||||
void cpu_relax(void);
|
||||
|
||||
#define cpu_relax_lowlatency() barrier()
|
||||
|
||||
|
@ -15,5 +15,6 @@ struct reset_call {
|
||||
|
||||
extern void register_reset_call(struct reset_call *reset);
|
||||
extern void unregister_reset_call(struct reset_call *reset);
|
||||
extern void s390_reset_system(void (*func)(void *), void *data);
|
||||
extern void s390_reset_system(void (*fn_pre)(void),
|
||||
void (*fn_post)(void *), void *data);
|
||||
#endif /* _ASM_S390_RESET_H */
|
||||
|
@ -27,7 +27,7 @@ struct sclp_ipl_info {
|
||||
};
|
||||
|
||||
struct sclp_cpu_entry {
|
||||
u8 address;
|
||||
u8 core_id;
|
||||
u8 reserved0[2];
|
||||
u8 : 3;
|
||||
u8 siif : 1;
|
||||
@ -51,6 +51,9 @@ int sclp_cpu_deconfigure(u8 cpu);
|
||||
unsigned long long sclp_get_rnmax(void);
|
||||
unsigned long long sclp_get_rzm(void);
|
||||
unsigned int sclp_get_max_cpu(void);
|
||||
unsigned int sclp_get_mtid(u8 cpu_type);
|
||||
unsigned int sclp_get_mtid_max(void);
|
||||
unsigned int sclp_get_mtid_prev(void);
|
||||
int sclp_sdias_blk_count(void);
|
||||
int sclp_sdias_copy(void *dest, int blk_num, int nr_blks);
|
||||
int sclp_chp_configure(struct chp_id chpid);
|
||||
@ -68,4 +71,6 @@ void sclp_early_detect(void);
|
||||
int sclp_has_siif(void);
|
||||
unsigned int sclp_get_ibc(void);
|
||||
|
||||
long _sclp_print_early(const char *);
|
||||
|
||||
#endif /* _ASM_S390_SCLP_H */
|
||||
|
@ -57,6 +57,7 @@ extern void detect_memory_memblock(void);
|
||||
#define MACHINE_FLAG_TE (1UL << 15)
|
||||
#define MACHINE_FLAG_TLB_LC (1UL << 17)
|
||||
#define MACHINE_FLAG_VX (1UL << 18)
|
||||
#define MACHINE_FLAG_CAD (1UL << 19)
|
||||
|
||||
#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
|
||||
#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
|
||||
@ -80,6 +81,7 @@ extern void detect_memory_memblock(void);
|
||||
#define MACHINE_HAS_TE (0)
|
||||
#define MACHINE_HAS_TLB_LC (0)
|
||||
#define MACHINE_HAS_VX (0)
|
||||
#define MACHINE_HAS_CAD (0)
|
||||
#else /* CONFIG_64BIT */
|
||||
#define MACHINE_HAS_IEEE (1)
|
||||
#define MACHINE_HAS_CSP (1)
|
||||
@ -93,6 +95,7 @@ extern void detect_memory_memblock(void);
|
||||
#define MACHINE_HAS_TE (S390_lowcore.machine_flags & MACHINE_FLAG_TE)
|
||||
#define MACHINE_HAS_TLB_LC (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC)
|
||||
#define MACHINE_HAS_VX (S390_lowcore.machine_flags & MACHINE_FLAG_VX)
|
||||
#define MACHINE_HAS_CAD (S390_lowcore.machine_flags & MACHINE_FLAG_CAD)
|
||||
#endif /* CONFIG_64BIT */
|
||||
|
||||
/*
|
||||
|
@ -16,6 +16,7 @@
|
||||
#define SIGP_SET_ARCHITECTURE 18
|
||||
#define SIGP_COND_EMERGENCY_SIGNAL 19
|
||||
#define SIGP_SENSE_RUNNING 21
|
||||
#define SIGP_SET_MULTI_THREADING 22
|
||||
#define SIGP_STORE_ADDITIONAL_STATUS 23
|
||||
|
||||
/* SIGP condition codes */
|
||||
|
@ -16,6 +16,8 @@
|
||||
#define raw_smp_processor_id() (S390_lowcore.cpu_nr)
|
||||
|
||||
extern struct mutex smp_cpu_state_mutex;
|
||||
extern unsigned int smp_cpu_mt_shift;
|
||||
extern unsigned int smp_cpu_mtid;
|
||||
|
||||
extern int __cpu_up(unsigned int cpu, struct task_struct *tidle);
|
||||
|
||||
@ -35,6 +37,8 @@ extern void smp_fill_possible_mask(void);
|
||||
|
||||
#else /* CONFIG_SMP */
|
||||
|
||||
#define smp_cpu_mtid 0
|
||||
|
||||
static inline void smp_call_ipl_cpu(void (*func)(void *), void *data)
|
||||
{
|
||||
func(data);
|
||||
|
@ -90,7 +90,11 @@ struct sysinfo_2_2_2 {
|
||||
unsigned short cpus_reserved;
|
||||
char name[8];
|
||||
unsigned int caf;
|
||||
char reserved_2[16];
|
||||
char reserved_2[8];
|
||||
unsigned char mt_installed;
|
||||
unsigned char mt_general;
|
||||
unsigned char mt_psmtid;
|
||||
char reserved_3[5];
|
||||
unsigned short cpus_dedicated;
|
||||
unsigned short cpus_shared;
|
||||
};
|
||||
@ -120,26 +124,28 @@ struct sysinfo_3_2_2 {
|
||||
|
||||
extern int topology_max_mnest;
|
||||
|
||||
#define TOPOLOGY_CPU_BITS 64
|
||||
#define TOPOLOGY_CORE_BITS 64
|
||||
#define TOPOLOGY_NR_MAG 6
|
||||
|
||||
struct topology_cpu {
|
||||
unsigned char reserved0[4];
|
||||
struct topology_core {
|
||||
unsigned char nl;
|
||||
unsigned char reserved0[3];
|
||||
unsigned char :6;
|
||||
unsigned char pp:2;
|
||||
unsigned char reserved1;
|
||||
unsigned short origin;
|
||||
unsigned long mask[TOPOLOGY_CPU_BITS / BITS_PER_LONG];
|
||||
unsigned long mask[TOPOLOGY_CORE_BITS / BITS_PER_LONG];
|
||||
};
|
||||
|
||||
struct topology_container {
|
||||
unsigned char reserved[7];
|
||||
unsigned char nl;
|
||||
unsigned char reserved[6];
|
||||
unsigned char id;
|
||||
};
|
||||
|
||||
union topology_entry {
|
||||
unsigned char nl;
|
||||
struct topology_cpu cpu;
|
||||
struct topology_core cpu;
|
||||
struct topology_container container;
|
||||
};
|
||||
|
||||
|
@ -9,9 +9,11 @@ struct cpu;
|
||||
#ifdef CONFIG_SCHED_BOOK
|
||||
|
||||
struct cpu_topology_s390 {
|
||||
unsigned short thread_id;
|
||||
unsigned short core_id;
|
||||
unsigned short socket_id;
|
||||
unsigned short book_id;
|
||||
cpumask_t thread_mask;
|
||||
cpumask_t core_mask;
|
||||
cpumask_t book_mask;
|
||||
};
|
||||
@ -19,6 +21,8 @@ struct cpu_topology_s390 {
|
||||
extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
|
||||
|
||||
#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
|
||||
#define topology_thread_id(cpu) (cpu_topology[cpu].thread_id)
|
||||
#define topology_thread_cpumask(cpu) (&cpu_topology[cpu].thread_mask)
|
||||
#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
|
||||
#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_mask)
|
||||
#define topology_book_id(cpu) (cpu_topology[cpu].book_id)
|
||||
|
@ -1,16 +1,19 @@
|
||||
/*
|
||||
* IOCTL interface for hypfs
|
||||
* Structures for hypfs interface
|
||||
*
|
||||
* Copyright IBM Corp. 2013
|
||||
*
|
||||
* Author: Martin Schwidefsky <schwidefsky@de.ibm.com>
|
||||
*/
|
||||
|
||||
#ifndef _ASM_HYPFS_CTL_H
|
||||
#define _ASM_HYPFS_CTL_H
|
||||
#ifndef _ASM_HYPFS_H
|
||||
#define _ASM_HYPFS_H
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
/*
|
||||
* IOCTL for binary interface /sys/kernel/debug/diag_304
|
||||
*/
|
||||
struct hypfs_diag304 {
|
||||
__u32 args[2];
|
||||
__u64 data;
|
||||
@ -22,4 +25,30 @@ struct hypfs_diag304 {
|
||||
#define HYPFS_DIAG304 \
|
||||
_IOWR(HYPFS_IOCTL_MAGIC, 0x20, struct hypfs_diag304)
|
||||
|
||||
/*
|
||||
* Structures for binary interface /sys/kernel/debug/diag_0c
|
||||
*/
|
||||
struct hypfs_diag0c_hdr {
|
||||
__u64 len; /* Length of diag0c buffer without header */
|
||||
__u16 version; /* Version of header */
|
||||
char reserved1[6]; /* Reserved */
|
||||
char tod_ext[16]; /* TOD clock for diag0c */
|
||||
__u64 count; /* Number of entries (CPUs) in diag0c array */
|
||||
char reserved2[24]; /* Reserved */
|
||||
};
|
||||
|
||||
struct hypfs_diag0c_entry {
|
||||
char date[8]; /* MM/DD/YY in EBCDIC */
|
||||
char time[8]; /* HH:MM:SS in EBCDIC */
|
||||
__u64 virtcpu; /* Virtual time consumed by the virt CPU (us) */
|
||||
__u64 totalproc; /* Total of virtual and simulation time (us) */
|
||||
__u32 cpu; /* Linux logical CPU number */
|
||||
__u32 reserved; /* Align to 8 byte */
|
||||
};
|
||||
|
||||
struct hypfs_diag0c_data {
|
||||
struct hypfs_diag0c_hdr hdr; /* 64 byte header */
|
||||
struct hypfs_diag0c_entry entry[]; /* diag0c entry array */
|
||||
};
|
||||
|
||||
#endif
|
||||
|
@@ -4,8 +4,8 @@

ifdef CONFIG_FUNCTION_TRACER
# Don't trace early setup code and tracing code
CFLAGS_REMOVE_early.o = -pg
CFLAGS_REMOVE_ftrace.o = -pg
CFLAGS_REMOVE_early.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
endif

#

@@ -97,7 +97,8 @@ ENTRY(diag308_reset)
	lg	%r4,0(%r4)	# Save PSW
	sturg	%r4,%r3		# Use sturg, because of large pages
	lghi	%r1,1
	diag	%r1,%r1,0x308
	lghi	%r0,0
	diag	%r0,%r1,0x308
.Lrestart_part2:
	lhi	%r0,0		# Load r0 with zero
	lhi	%r1,2		# Use mode 2 = ESAME (dump)

@ -5,37 +5,11 @@
|
||||
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
|
||||
*/
|
||||
|
||||
#include <linux/notifier.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/cacheinfo.h>
|
||||
#include <asm/facility.h>
|
||||
|
||||
struct cache {
|
||||
unsigned long size;
|
||||
unsigned int line_size;
|
||||
unsigned int associativity;
|
||||
unsigned int nr_sets;
|
||||
unsigned int level : 3;
|
||||
unsigned int type : 2;
|
||||
unsigned int private : 1;
|
||||
struct list_head list;
|
||||
};
|
||||
|
||||
struct cache_dir {
|
||||
struct kobject *kobj;
|
||||
struct cache_index_dir *index;
|
||||
};
|
||||
|
||||
struct cache_index_dir {
|
||||
struct kobject kobj;
|
||||
int cpu;
|
||||
struct cache *cache;
|
||||
struct cache_index_dir *next;
|
||||
};
|
||||
|
||||
enum {
|
||||
CACHE_SCOPE_NOTEXISTS,
|
||||
CACHE_SCOPE_PRIVATE,
|
||||
@ -44,10 +18,10 @@ enum {
|
||||
};
|
||||
|
||||
enum {
|
||||
CACHE_TYPE_SEPARATE,
|
||||
CACHE_TYPE_DATA,
|
||||
CACHE_TYPE_INSTRUCTION,
|
||||
CACHE_TYPE_UNIFIED,
|
||||
CTYPE_SEPARATE,
|
||||
CTYPE_DATA,
|
||||
CTYPE_INSTRUCTION,
|
||||
CTYPE_UNIFIED,
|
||||
};
|
||||
|
||||
enum {
|
||||
@ -70,37 +44,60 @@ struct cache_info {
|
||||
};
|
||||
|
||||
#define CACHE_MAX_LEVEL 8
|
||||
|
||||
union cache_topology {
|
||||
struct cache_info ci[CACHE_MAX_LEVEL];
|
||||
unsigned long long raw;
|
||||
};
|
||||
|
||||
static const char * const cache_type_string[] = {
|
||||
"Data",
|
||||
"",
|
||||
"Instruction",
|
||||
"Data",
|
||||
"",
|
||||
"Unified",
|
||||
};
|
||||
|
||||
static struct cache_dir *cache_dir_cpu[NR_CPUS];
|
||||
static LIST_HEAD(cache_list);
|
||||
static const enum cache_type cache_type_map[] = {
|
||||
[CTYPE_SEPARATE] = CACHE_TYPE_SEPARATE,
|
||||
[CTYPE_DATA] = CACHE_TYPE_DATA,
|
||||
[CTYPE_INSTRUCTION] = CACHE_TYPE_INST,
|
||||
[CTYPE_UNIFIED] = CACHE_TYPE_UNIFIED,
|
||||
};
|
||||
|
||||
void show_cacheinfo(struct seq_file *m)
|
||||
{
|
||||
struct cache *cache;
|
||||
int index = 0;
|
||||
struct cpu_cacheinfo *this_cpu_ci;
|
||||
struct cacheinfo *cache;
|
||||
int idx;
|
||||
|
||||
list_for_each_entry(cache, &cache_list, list) {
|
||||
seq_printf(m, "cache%-11d: ", index);
|
||||
get_online_cpus();
|
||||
this_cpu_ci = get_cpu_cacheinfo(cpumask_any(cpu_online_mask));
|
||||
for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
|
||||
cache = this_cpu_ci->info_list + idx;
|
||||
seq_printf(m, "cache%-11d: ", idx);
|
||||
seq_printf(m, "level=%d ", cache->level);
|
||||
seq_printf(m, "type=%s ", cache_type_string[cache->type]);
|
||||
seq_printf(m, "scope=%s ", cache->private ? "Private" : "Shared");
|
||||
seq_printf(m, "size=%luK ", cache->size >> 10);
|
||||
seq_printf(m, "line_size=%u ", cache->line_size);
|
||||
seq_printf(m, "associativity=%d", cache->associativity);
|
||||
seq_printf(m, "scope=%s ",
|
||||
cache->disable_sysfs ? "Shared" : "Private");
|
||||
seq_printf(m, "size=%dK ", cache->size >> 10);
|
||||
seq_printf(m, "line_size=%u ", cache->coherency_line_size);
|
||||
seq_printf(m, "associativity=%d", cache->ways_of_associativity);
|
||||
seq_puts(m, "\n");
|
||||
index++;
|
||||
}
|
||||
put_online_cpus();
|
||||
}
|
||||
|
||||
static inline enum cache_type get_cache_type(struct cache_info *ci, int level)
|
||||
{
|
||||
if (level >= CACHE_MAX_LEVEL)
|
||||
return CACHE_TYPE_NOCACHE;
|
||||
|
||||
ci += level;
|
||||
|
||||
if (ci->scope != CACHE_SCOPE_SHARED && ci->scope != CACHE_SCOPE_PRIVATE)
|
||||
return CACHE_TYPE_NOCACHE;
|
||||
|
||||
return cache_type_map[ci->type];
|
||||
}
|
||||
|
||||
static inline unsigned long ecag(int ai, int li, int ti)
|
||||
@ -113,277 +110,79 @@ static inline unsigned long ecag(int ai, int li, int ti)
|
||||
return val;
|
||||
}
|
||||
|
||||
static int __init cache_add(int level, int private, int type)
|
||||
static void ci_leaf_init(struct cacheinfo *this_leaf, int private,
|
||||
enum cache_type type, unsigned int level)
|
||||
{
|
||||
struct cache *cache;
|
||||
int ti;
|
||||
int ti, num_sets;
|
||||
int cpu = smp_processor_id();
|
||||
|
||||
cache = kzalloc(sizeof(*cache), GFP_KERNEL);
|
||||
if (!cache)
|
||||
return -ENOMEM;
|
||||
if (type == CACHE_TYPE_INSTRUCTION)
|
||||
if (type == CACHE_TYPE_INST)
|
||||
ti = CACHE_TI_INSTRUCTION;
|
||||
else
|
||||
ti = CACHE_TI_UNIFIED;
|
||||
cache->size = ecag(EXTRACT_SIZE, level, ti);
|
||||
cache->line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
|
||||
cache->associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
|
||||
cache->nr_sets = cache->size / cache->associativity;
|
||||
cache->nr_sets /= cache->line_size;
|
||||
cache->private = private;
|
||||
cache->level = level + 1;
|
||||
cache->type = type - 1;
|
||||
list_add_tail(&cache->list, &cache_list);
|
||||
return 0;
|
||||
|
||||
this_leaf->level = level + 1;
|
||||
this_leaf->type = type;
|
||||
this_leaf->coherency_line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
|
||||
this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY,
|
||||
level, ti);
|
||||
this_leaf->size = ecag(EXTRACT_SIZE, level, ti);
|
||||
|
||||
num_sets = this_leaf->size / this_leaf->coherency_line_size;
|
||||
num_sets /= this_leaf->ways_of_associativity;
|
||||
this_leaf->number_of_sets = num_sets;
|
||||
cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
|
||||
if (!private)
|
||||
this_leaf->disable_sysfs = true;
|
||||
}
|
||||
|
||||
static void __init cache_build_info(void)
|
||||
int init_cache_level(unsigned int cpu)
|
||||
{
|
||||
struct cache *cache, *next;
|
||||
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
|
||||
unsigned int level = 0, leaves = 0;
|
||||
union cache_topology ct;
|
||||
int level, private, rc;
|
||||
enum cache_type ctype;
|
||||
|
||||
if (!this_cpu_ci)
|
||||
return -EINVAL;
|
||||
|
||||
ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
|
||||
for (level = 0; level < CACHE_MAX_LEVEL; level++) {
|
||||
switch (ct.ci[level].scope) {
|
||||
case CACHE_SCOPE_SHARED:
|
||||
private = 0;
|
||||
do {
|
||||
ctype = get_cache_type(&ct.ci[0], level);
|
||||
if (ctype == CACHE_TYPE_NOCACHE)
|
||||
break;
|
||||
case CACHE_SCOPE_PRIVATE:
|
||||
private = 1;
|
||||
break;
|
||||
default:
|
||||
return;
|
||||
}
|
||||
if (ct.ci[level].type == CACHE_TYPE_SEPARATE) {
|
||||
rc = cache_add(level, private, CACHE_TYPE_DATA);
|
||||
rc |= cache_add(level, private, CACHE_TYPE_INSTRUCTION);
|
||||
/* Separate instruction and data caches */
|
||||
leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1;
|
||||
} while (++level < CACHE_MAX_LEVEL);
|
||||
|
||||
this_cpu_ci->num_levels = level;
|
||||
this_cpu_ci->num_leaves = leaves;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int populate_cache_leaves(unsigned int cpu)
|
||||
{
|
||||
unsigned int level, idx, pvt;
|
||||
union cache_topology ct;
|
||||
enum cache_type ctype;
|
||||
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
|
||||
struct cacheinfo *this_leaf = this_cpu_ci->info_list;
|
||||
|
||||
ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
|
||||
for (idx = 0, level = 0; level < this_cpu_ci->num_levels &&
|
||||
idx < this_cpu_ci->num_leaves; idx++, level++) {
|
||||
if (!this_leaf)
|
||||
return -EINVAL;
|
||||
|
||||
pvt = (ct.ci[level].scope == CACHE_SCOPE_PRIVATE) ? 1 : 0;
|
||||
ctype = get_cache_type(&ct.ci[0], level);
|
||||
if (ctype == CACHE_TYPE_SEPARATE) {
|
||||
ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level);
|
||||
ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level);
|
||||
} else {
|
||||
rc = cache_add(level, private, ct.ci[level].type);
|
||||
ci_leaf_init(this_leaf++, pvt, ctype, level);
|
||||
}
|
||||
if (rc)
|
||||
goto error;
|
||||
}
|
||||
return;
|
||||
error:
|
||||
list_for_each_entry_safe(cache, next, &cache_list, list) {
|
||||
list_del(&cache->list);
|
||||
kfree(cache);
|
||||
}
|
||||
}
|
||||
|
||||
static struct cache_dir *cache_create_cache_dir(int cpu)
|
||||
{
|
||||
struct cache_dir *cache_dir;
|
||||
struct kobject *kobj = NULL;
|
||||
struct device *dev;
|
||||
|
||||
dev = get_cpu_device(cpu);
|
||||
if (!dev)
|
||||
goto out;
|
||||
kobj = kobject_create_and_add("cache", &dev->kobj);
|
||||
if (!kobj)
|
||||
goto out;
|
||||
cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
|
||||
if (!cache_dir)
|
||||
goto out;
|
||||
cache_dir->kobj = kobj;
|
||||
cache_dir_cpu[cpu] = cache_dir;
|
||||
return cache_dir;
|
||||
out:
|
||||
kobject_put(kobj);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *kobj)
|
||||
{
|
||||
return container_of(kobj, struct cache_index_dir, kobj);
|
||||
}
|
||||
|
||||
static void cache_index_release(struct kobject *kobj)
|
||||
{
|
||||
struct cache_index_dir *index;
|
||||
|
||||
index = kobj_to_cache_index_dir(kobj);
|
||||
kfree(index);
|
||||
}
|
||||
|
||||
static ssize_t cache_index_show(struct kobject *kobj,
|
||||
struct attribute *attr, char *buf)
|
||||
{
|
||||
struct kobj_attribute *kobj_attr;
|
||||
|
||||
kobj_attr = container_of(attr, struct kobj_attribute, attr);
|
||||
return kobj_attr->show(kobj, kobj_attr, buf);
|
||||
}
|
||||
|
||||
#define DEFINE_CACHE_ATTR(_name, _format, _value) \
|
||||
static ssize_t cache_##_name##_show(struct kobject *kobj, \
|
||||
struct kobj_attribute *attr, \
|
||||
char *buf) \
|
||||
{ \
|
||||
struct cache_index_dir *index; \
|
||||
\
|
||||
index = kobj_to_cache_index_dir(kobj); \
|
||||
return sprintf(buf, _format, _value); \
|
||||
} \
|
||||
static struct kobj_attribute cache_##_name##_attr = \
|
||||
__ATTR(_name, 0444, cache_##_name##_show, NULL);
|
||||
|
||||
DEFINE_CACHE_ATTR(size, "%luK\n", index->cache->size >> 10);
|
||||
DEFINE_CACHE_ATTR(coherency_line_size, "%u\n", index->cache->line_size);
|
||||
DEFINE_CACHE_ATTR(number_of_sets, "%u\n", index->cache->nr_sets);
|
||||
DEFINE_CACHE_ATTR(ways_of_associativity, "%u\n", index->cache->associativity);
|
||||
DEFINE_CACHE_ATTR(type, "%s\n", cache_type_string[index->cache->type]);
|
||||
DEFINE_CACHE_ATTR(level, "%d\n", index->cache->level);
|
||||
|
||||
static ssize_t shared_cpu_map_func(struct kobject *kobj, int type, char *buf)
|
||||
{
|
||||
struct cache_index_dir *index;
|
||||
int len;
|
||||
|
||||
index = kobj_to_cache_index_dir(kobj);
|
||||
len = type ?
|
||||
cpulist_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu)) :
|
||||
cpumask_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu));
|
||||
len += sprintf(&buf[len], "\n");
|
||||
return len;
|
||||
}
|
||||
|
||||
static ssize_t shared_cpu_map_show(struct kobject *kobj,
|
||||
struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
return shared_cpu_map_func(kobj, 0, buf);
|
||||
}
|
||||
static struct kobj_attribute cache_shared_cpu_map_attr =
|
||||
__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);
|
||||
|
||||
static ssize_t shared_cpu_list_show(struct kobject *kobj,
|
||||
struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
return shared_cpu_map_func(kobj, 1, buf);
|
||||
}
|
||||
static struct kobj_attribute cache_shared_cpu_list_attr =
|
||||
__ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL);
|
||||
|
||||
static struct attribute *cache_index_default_attrs[] = {
|
||||
&cache_type_attr.attr,
|
||||
&cache_size_attr.attr,
|
||||
&cache_number_of_sets_attr.attr,
|
||||
&cache_ways_of_associativity_attr.attr,
|
||||
&cache_level_attr.attr,
|
||||
&cache_coherency_line_size_attr.attr,
|
||||
&cache_shared_cpu_map_attr.attr,
|
||||
&cache_shared_cpu_list_attr.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static const struct sysfs_ops cache_index_ops = {
|
||||
.show = cache_index_show,
|
||||
};
|
||||
|
||||
static struct kobj_type cache_index_type = {
|
||||
.sysfs_ops = &cache_index_ops,
|
||||
.release = cache_index_release,
|
||||
.default_attrs = cache_index_default_attrs,
|
||||
};
|
||||
|
||||
static int cache_create_index_dir(struct cache_dir *cache_dir,
|
||||
struct cache *cache, int index, int cpu)
|
||||
{
|
||||
struct cache_index_dir *index_dir;
|
||||
int rc;
|
||||
|
||||
index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
|
||||
if (!index_dir)
|
||||
return -ENOMEM;
|
||||
index_dir->cache = cache;
|
||||
index_dir->cpu = cpu;
|
||||
rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
|
||||
cache_dir->kobj, "index%d", index);
|
||||
if (rc)
|
||||
goto out;
|
||||
index_dir->next = cache_dir->index;
|
||||
cache_dir->index = index_dir;
|
||||
return 0;
|
||||
out:
|
||||
kfree(index_dir);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int cache_add_cpu(int cpu)
|
||||
{
|
||||
struct cache_dir *cache_dir;
|
||||
struct cache *cache;
|
||||
int rc, index = 0;
|
||||
|
||||
if (list_empty(&cache_list))
|
||||
return 0;
|
||||
cache_dir = cache_create_cache_dir(cpu);
|
||||
if (!cache_dir)
|
||||
return -ENOMEM;
|
||||
list_for_each_entry(cache, &cache_list, list) {
|
||||
if (!cache->private)
|
||||
break;
|
||||
rc = cache_create_index_dir(cache_dir, cache, index, cpu);
|
||||
if (rc)
|
||||
return rc;
|
||||
index++;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void cache_remove_cpu(int cpu)
|
||||
{
|
||||
struct cache_index_dir *index, *next;
|
||||
struct cache_dir *cache_dir;
|
||||
|
||||
cache_dir = cache_dir_cpu[cpu];
|
||||
if (!cache_dir)
|
||||
return;
|
||||
index = cache_dir->index;
|
||||
while (index) {
|
||||
next = index->next;
|
||||
kobject_put(&index->kobj);
|
||||
index = next;
|
||||
}
|
||||
kobject_put(cache_dir->kobj);
|
||||
kfree(cache_dir);
|
||||
cache_dir_cpu[cpu] = NULL;
|
||||
}
|
||||
|
||||
static int cache_hotplug(struct notifier_block *nfb, unsigned long action,
|
||||
void *hcpu)
|
||||
{
|
||||
int cpu = (long)hcpu;
|
||||
int rc = 0;
|
||||
|
||||
switch (action & ~CPU_TASKS_FROZEN) {
|
||||
case CPU_ONLINE:
|
||||
rc = cache_add_cpu(cpu);
|
||||
if (rc)
|
||||
cache_remove_cpu(cpu);
|
||||
break;
|
||||
case CPU_DEAD:
|
||||
cache_remove_cpu(cpu);
|
||||
break;
|
||||
}
|
||||
return rc ? NOTIFY_BAD : NOTIFY_OK;
|
||||
}
|
||||
|
||||
static int __init cache_init(void)
|
||||
{
|
||||
int cpu;
|
||||
|
||||
if (!test_facility(34))
|
||||
return 0;
|
||||
cache_build_info();
|
||||
|
||||
cpu_notifier_register_begin();
|
||||
for_each_online_cpu(cpu)
|
||||
cache_add_cpu(cpu);
|
||||
__hotcpu_notifier(cache_hotplug, 0);
|
||||
cpu_notifier_register_done();
|
||||
return 0;
|
||||
}
|
||||
device_initcall(cache_init);
|
||||
|
@ -137,7 +137,7 @@ enum {
|
||||
INSTR_RSI_RRP,
|
||||
INSTR_RSL_LRDFU, INSTR_RSL_R0RD,
|
||||
INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, INSTR_RSY_RURD,
|
||||
INSTR_RSY_RDRM,
|
||||
INSTR_RSY_RDRM, INSTR_RSY_RMRD,
|
||||
INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RS_RRRD,
|
||||
INSTR_RS_RURD,
|
||||
INSTR_RXE_FRRD, INSTR_RXE_RRRD, INSTR_RXE_RRRDM,
|
||||
@ -226,7 +226,6 @@ static const struct s390_operand operands[] =
|
||||
[U16_32] = { 16, 32, 0 },
|
||||
[J16_16] = { 16, 16, OPERAND_PCREL },
|
||||
[J16_32] = { 16, 32, OPERAND_PCREL },
|
||||
[I16_32] = { 16, 32, OPERAND_SIGNED },
|
||||
[I24_24] = { 24, 24, OPERAND_SIGNED },
|
||||
[J32_16] = { 32, 16, OPERAND_PCREL },
|
||||
[I32_16] = { 32, 16, OPERAND_SIGNED },
|
||||
@ -308,6 +307,7 @@ static const unsigned char formats[][7] = {
|
||||
[INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 },
|
||||
[INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 },
|
||||
[INSTR_RSY_RDRM] = { 0xff, R_8,D20_20,B_16,U4_12,0,0 },
|
||||
[INSTR_RSY_RMRD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 },
|
||||
[INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 },
|
||||
[INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 },
|
||||
[INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 },
|
||||
@ -451,7 +451,8 @@ enum {
|
||||
LONG_INSN_VERLLV,
|
||||
LONG_INSN_VESRAV,
|
||||
LONG_INSN_VESRLV,
|
||||
LONG_INSN_VSBCBI
|
||||
LONG_INSN_VSBCBI,
|
||||
LONG_INSN_STCCTM
|
||||
};
|
||||
|
||||
static char *long_insn_name[] = {
|
||||
@ -531,6 +532,7 @@ static char *long_insn_name[] = {
|
||||
[LONG_INSN_VESRAV] = "vesrav",
|
||||
[LONG_INSN_VESRLV] = "vesrlv",
|
||||
[LONG_INSN_VSBCBI] = "vsbcbi",
|
||||
[LONG_INSN_STCCTM] = "stcctm",
|
||||
};
|
||||
|
||||
static struct s390_insn opcode[] = {
|
||||
@ -1656,6 +1658,7 @@ static struct s390_insn opcode_eb[] = {
|
||||
{ "lric", 0x60, INSTR_RSY_RDRM },
|
||||
{ "stric", 0x61, INSTR_RSY_RDRM },
|
||||
{ "mric", 0x62, INSTR_RSY_RDRM },
|
||||
{ { 0, LONG_INSN_STCCTM }, 0x17, INSTR_RSY_RMRD },
|
||||
#endif
|
||||
{ "rll", 0x1d, INSTR_RSY_RRRD },
|
||||
{ "mvclu", 0x8e, INSTR_RSY_RRRD },
|
||||
|
@ -393,9 +393,27 @@ static __init void detect_machine_facilities(void)
|
||||
S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
|
||||
if (test_facility(129))
|
||||
S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
|
||||
if (test_facility(128))
|
||||
S390_lowcore.machine_flags |= MACHINE_FLAG_CAD;
|
||||
#endif
|
||||
}
|
||||
|
||||
static int __init nocad_setup(char *str)
|
||||
{
|
||||
S390_lowcore.machine_flags &= ~MACHINE_FLAG_CAD;
|
||||
return 0;
|
||||
}
|
||||
early_param("nocad", nocad_setup);
|
||||
|
||||
static int __init cad_init(void)
|
||||
{
|
||||
if (MACHINE_HAS_CAD)
|
||||
/* Enable problem state CAD. */
|
||||
__ctl_set_bit(2, 3);
|
||||
return 0;
|
||||
}
|
||||
early_initcall(cad_init);
|
||||
|
||||
static __init void rescue_initrd(void)
|
||||
{
|
||||
#ifdef CONFIG_BLK_DEV_INITRD
|
||||
|
@ -71,9 +71,11 @@ struct s390_mmap_arg_struct;
|
||||
struct fadvise64_64_args;
|
||||
struct old_sigaction;
|
||||
|
||||
long sys_rt_sigreturn(void);
|
||||
long sys_sigreturn(void);
|
||||
|
||||
long sys_s390_personality(unsigned int personality);
|
||||
long sys_s390_runtime_instr(int command, int signum);
|
||||
|
||||
long sys_s390_pci_mmio_write(unsigned long, const void __user *, size_t);
|
||||
long sys_s390_pci_mmio_read(unsigned long, void __user *, size_t);
|
||||
#endif /* _ENTRY_H */
|
||||
|
@ -46,6 +46,13 @@
|
||||
* lg %r14,8(%r15) # offset 18
|
||||
* The jg instruction branches to offset 24 to skip as many instructions
|
||||
* as possible.
|
||||
* In case we use gcc's hotpatch feature the original and also the disabled
|
||||
* function prologue contains only a single six byte instruction and looks
|
||||
* like this:
|
||||
* > brcl 0,0 # offset 0
|
||||
* To enable ftrace the code gets patched like above and afterwards looks
|
||||
* like this:
|
||||
* > brasl %r0,ftrace_caller # offset 0
|
||||
*/
|
||||
|
||||
unsigned long ftrace_plt;
|
||||
@ -59,62 +66,71 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
|
||||
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
|
||||
unsigned long addr)
|
||||
{
|
||||
struct ftrace_insn insn;
|
||||
unsigned short op;
|
||||
void *from, *to;
|
||||
size_t size;
|
||||
struct ftrace_insn orig, new, old;
|
||||
|
||||
ftrace_generate_nop_insn(&insn);
|
||||
size = sizeof(insn);
|
||||
from = &insn;
|
||||
to = (void *) rec->ip;
|
||||
if (probe_kernel_read(&op, (void *) rec->ip, sizeof(op)))
|
||||
if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
|
||||
return -EFAULT;
|
||||
/*
|
||||
* If we find a breakpoint instruction, a kprobe has been placed
|
||||
* at the beginning of the function. We write the constant
|
||||
* KPROBE_ON_FTRACE_NOP into the remaining four bytes of the original
|
||||
* instruction so that the kprobes handler can execute a nop, if it
|
||||
* reaches this breakpoint.
|
||||
*/
|
||||
if (op == BREAKPOINT_INSTRUCTION) {
|
||||
size -= 2;
|
||||
from += 2;
|
||||
to += 2;
|
||||
insn.disp = KPROBE_ON_FTRACE_NOP;
|
||||
if (addr == MCOUNT_ADDR) {
|
||||
/* Initial code replacement */
|
||||
#ifdef CC_USING_HOTPATCH
|
||||
/* We expect to see brcl 0,0 */
|
||||
ftrace_generate_nop_insn(&orig);
|
||||
#else
|
||||
/* We expect to see stg r14,8(r15) */
|
||||
orig.opc = 0xe3e0;
|
||||
orig.disp = 0xf0080024;
|
||||
#endif
|
||||
ftrace_generate_nop_insn(&new);
|
||||
} else if (old.opc == BREAKPOINT_INSTRUCTION) {
|
||||
/*
|
||||
* If we find a breakpoint instruction, a kprobe has been
|
||||
* placed at the beginning of the function. We write the
|
||||
* constant KPROBE_ON_FTRACE_NOP into the remaining four
|
||||
* bytes of the original instruction so that the kprobes
|
||||
* handler can execute a nop, if it reaches this breakpoint.
|
||||
*/
|
||||
new.opc = orig.opc = BREAKPOINT_INSTRUCTION;
|
||||
orig.disp = KPROBE_ON_FTRACE_CALL;
|
||||
new.disp = KPROBE_ON_FTRACE_NOP;
|
||||
} else {
|
||||
/* Replace ftrace call with a nop. */
|
||||
ftrace_generate_call_insn(&orig, rec->ip);
|
||||
ftrace_generate_nop_insn(&new);
|
||||
}
|
||||
if (probe_kernel_write(to, from, size))
|
||||
/* Verify that the to be replaced code matches what we expect. */
|
||||
if (memcmp(&orig, &old, sizeof(old)))
|
||||
return -EINVAL;
|
||||
if (probe_kernel_write((void *) rec->ip, &new, sizeof(new)))
|
||||
return -EPERM;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
|
||||
{
|
||||
struct ftrace_insn insn;
|
||||
unsigned short op;
|
||||
void *from, *to;
|
||||
size_t size;
|
||||
struct ftrace_insn orig, new, old;
|
||||
|
||||
ftrace_generate_call_insn(&insn, rec->ip);
|
||||
size = sizeof(insn);
|
||||
from = &insn;
|
||||
to = (void *) rec->ip;
|
||||
if (probe_kernel_read(&op, (void *) rec->ip, sizeof(op)))
|
||||
if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
|
||||
return -EFAULT;
|
||||
/*
|
||||
* If we find a breakpoint instruction, a kprobe has been placed
|
||||
* at the beginning of the function. We write the constant
|
||||
* KPROBE_ON_FTRACE_CALL into the remaining four bytes of the original
|
||||
* instruction so that the kprobes handler can execute a brasl if it
|
||||
* reaches this breakpoint.
|
||||
*/
|
||||
if (op == BREAKPOINT_INSTRUCTION) {
|
||||
size -= 2;
|
||||
from += 2;
|
||||
to += 2;
|
||||
insn.disp = KPROBE_ON_FTRACE_CALL;
|
||||
if (old.opc == BREAKPOINT_INSTRUCTION) {
|
||||
/*
|
||||
* If we find a breakpoint instruction, a kprobe has been
|
||||
* placed at the beginning of the function. We write the
|
||||
* constant KPROBE_ON_FTRACE_CALL into the remaining four
|
||||
* bytes of the original instruction so that the kprobes
|
||||
* handler can execute a brasl if it reaches this breakpoint.
|
||||
*/
|
||||
new.opc = orig.opc = BREAKPOINT_INSTRUCTION;
|
||||
orig.disp = KPROBE_ON_FTRACE_NOP;
|
||||
new.disp = KPROBE_ON_FTRACE_CALL;
|
||||
} else {
|
||||
/* Replace nop with an ftrace call. */
|
||||
ftrace_generate_nop_insn(&orig);
|
||||
ftrace_generate_call_insn(&new, rec->ip);
|
||||
}
|
||||
if (probe_kernel_write(to, from, size))
|
||||
/* Verify that the to be replaced code matches what we expect. */
|
||||
if (memcmp(&orig, &old, sizeof(old)))
|
||||
return -EINVAL;
|
||||
if (probe_kernel_write((void *) rec->ip, &new, sizeof(new)))
|
||||
return -EPERM;
|
||||
return 0;
|
||||
}
|
||||
|
@ -436,7 +436,9 @@ ENTRY(startup_kdump)
|
||||
# followed by the facility words.
|
||||
|
||||
#if defined(CONFIG_64BIT)
|
||||
#if defined(CONFIG_MARCH_ZEC12)
|
||||
#if defined(CONFIG_MARCH_Z13)
|
||||
.long 3, 0xc100eff2, 0xf46ce800, 0x00400000
|
||||
#elif defined(CONFIG_MARCH_ZEC12)
|
||||
.long 3, 0xc100eff2, 0xf46ce800, 0x00400000
|
||||
#elif defined(CONFIG_MARCH_Z196)
|
||||
.long 2, 0xc100eff2, 0xf46c0000
|
||||
|
@ -2074,7 +2074,8 @@ static void do_reset_calls(void)
|
||||
|
||||
u32 dump_prefix_page;
|
||||
|
||||
void s390_reset_system(void (*func)(void *), void *data)
|
||||
void s390_reset_system(void (*fn_pre)(void),
|
||||
void (*fn_post)(void *), void *data)
|
||||
{
|
||||
struct _lowcore *lc;
|
||||
|
||||
@ -2112,7 +2113,11 @@ void s390_reset_system(void (*func)(void *), void *data)
|
||||
/* Store status at absolute zero */
|
||||
store_status();
|
||||
|
||||
/* Call function before reset */
|
||||
if (fn_pre)
|
||||
fn_pre();
|
||||
do_reset_calls();
|
||||
if (func)
|
||||
func(data);
|
||||
/* Call function after reset */
|
||||
if (fn_post)
|
||||
fn_post(data);
|
||||
}
|
||||
|
@ -22,31 +22,66 @@ struct insn_args {
|
||||
enum jump_label_type type;
|
||||
};
|
||||
|
||||
static void __jump_label_transform(struct jump_entry *entry,
|
||||
enum jump_label_type type)
|
||||
static void jump_label_make_nop(struct jump_entry *entry, struct insn *insn)
|
||||
{
|
||||
struct insn insn;
|
||||
int rc;
|
||||
/* brcl 0,0 */
|
||||
insn->opcode = 0xc004;
|
||||
insn->offset = 0;
|
||||
}
|
||||
|
||||
static void jump_label_make_branch(struct jump_entry *entry, struct insn *insn)
|
||||
{
|
||||
/* brcl 15,offset */
|
||||
insn->opcode = 0xc0f4;
|
||||
insn->offset = (entry->target - entry->code) >> 1;
|
||||
}
|
||||
|
||||
static void jump_label_bug(struct jump_entry *entry, struct insn *insn)
|
||||
{
|
||||
unsigned char *ipc = (unsigned char *)entry->code;
|
||||
unsigned char *ipe = (unsigned char *)insn;
|
||||
|
||||
pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc);
|
||||
pr_emerg("Found: %02x %02x %02x %02x %02x %02x\n",
|
||||
ipc[0], ipc[1], ipc[2], ipc[3], ipc[4], ipc[5]);
|
||||
pr_emerg("Expected: %02x %02x %02x %02x %02x %02x\n",
|
||||
ipe[0], ipe[1], ipe[2], ipe[3], ipe[4], ipe[5]);
|
||||
panic("Corrupted kernel text");
|
||||
}
|
||||
|
||||
static struct insn orignop = {
|
||||
.opcode = 0xc004,
|
||||
.offset = JUMP_LABEL_NOP_OFFSET >> 1,
|
||||
};
|
||||
|
||||
static void __jump_label_transform(struct jump_entry *entry,
|
||||
enum jump_label_type type,
|
||||
int init)
|
||||
{
|
||||
struct insn old, new;
|
||||
|
||||
if (type == JUMP_LABEL_ENABLE) {
|
||||
/* brcl 15,offset */
|
||||
insn.opcode = 0xc0f4;
|
||||
insn.offset = (entry->target - entry->code) >> 1;
|
||||
jump_label_make_nop(entry, &old);
|
||||
jump_label_make_branch(entry, &new);
|
||||
} else {
|
||||
/* brcl 0,0 */
|
||||
insn.opcode = 0xc004;
|
||||
insn.offset = 0;
|
||||
jump_label_make_branch(entry, &old);
|
||||
jump_label_make_nop(entry, &new);
|
||||
}
|
||||
|
||||
rc = probe_kernel_write((void *)entry->code, &insn, JUMP_LABEL_NOP_SIZE);
|
||||
WARN_ON_ONCE(rc < 0);
|
||||
if (init) {
|
||||
if (memcmp((void *)entry->code, &orignop, sizeof(orignop)))
|
||||
jump_label_bug(entry, &old);
|
||||
} else {
|
||||
if (memcmp((void *)entry->code, &old, sizeof(old)))
|
||||
jump_label_bug(entry, &old);
|
||||
}
|
||||
probe_kernel_write((void *)entry->code, &new, sizeof(new));
|
||||
}
|
||||
|
||||
static int __sm_arch_jump_label_transform(void *data)
|
||||
{
|
||||
struct insn_args *args = data;
|
||||
|
||||
__jump_label_transform(args->entry, args->type);
|
||||
__jump_label_transform(args->entry, args->type, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -64,7 +99,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
|
||||
void arch_jump_label_transform_static(struct jump_entry *entry,
|
||||
enum jump_label_type type)
|
||||
{
|
||||
__jump_label_transform(entry, type);
|
||||
__jump_label_transform(entry, type, 1);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -69,7 +69,8 @@ static void copy_instruction(struct kprobe *p)
|
||||
/*
|
||||
* If kprobes patches the instruction that is morphed by
|
||||
* ftrace make sure that kprobes always sees the branch
|
||||
* "jg .+24" that skips the mcount block
|
||||
* "jg .+24" that skips the mcount block or the "brcl 0,0"
|
||||
* in case of hotpatch.
|
||||
*/
|
||||
ftrace_generate_nop_insn((struct ftrace_insn *)p->ainsn.insn);
|
||||
p->ainsn.is_ftrace_insn = 1;
|
||||
|
@ -103,21 +103,18 @@ static int __init machine_kdump_pm_init(void)
|
||||
return 0;
|
||||
}
|
||||
arch_initcall(machine_kdump_pm_init);
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Start kdump: We expect here that a store status has been done on our CPU
|
||||
*/
|
||||
static void __do_machine_kdump(void *image)
|
||||
{
|
||||
#ifdef CONFIG_CRASH_DUMP
|
||||
int (*start_kdump)(int) = (void *)((struct kimage *) image)->start;
|
||||
|
||||
setup_regs();
|
||||
__load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
|
||||
start_kdump(1);
|
||||
#endif
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Check if kdump checksums are valid: We call purgatory with parameter "0"
|
||||
@ -249,18 +246,18 @@ static void __do_machine_kexec(void *data)
|
||||
*/
|
||||
static void __machine_kexec(void *data)
|
||||
{
|
||||
struct kimage *image = data;
|
||||
|
||||
__arch_local_irq_stosm(0x04); /* enable DAT */
|
||||
pfault_fini();
|
||||
tracing_off();
|
||||
debug_locks_off();
|
||||
if (image->type == KEXEC_TYPE_CRASH) {
|
||||
#ifdef CONFIG_CRASH_DUMP
|
||||
if (((struct kimage *) data)->type == KEXEC_TYPE_CRASH) {
|
||||
|
||||
lgr_info_log();
|
||||
s390_reset_system(__do_machine_kdump, data);
|
||||
} else {
|
||||
s390_reset_system(__do_machine_kexec, data);
|
||||
}
|
||||
s390_reset_system(setup_regs, __do_machine_kdump, data);
|
||||
} else
|
||||
#endif
|
||||
s390_reset_system(NULL, __do_machine_kexec, data);
|
||||
disabled_wait((unsigned long) __builtin_return_address(0));
|
||||
}
|
||||
|
||||
|
@ -27,7 +27,9 @@ ENTRY(ftrace_caller)
|
||||
.globl ftrace_regs_caller
|
||||
.set ftrace_regs_caller,ftrace_caller
|
||||
lgr %r1,%r15
|
||||
#ifndef CC_USING_HOTPATCH
|
||||
aghi %r0,MCOUNT_RETURN_FIXUP
|
||||
#endif
|
||||
aghi %r15,-STACK_FRAME_SIZE
|
||||
stg %r1,__SF_BACKCHAIN(%r15)
|
||||
stg %r1,(STACK_PTREGS_GPRS+15*8)(%r15)
|
||||
|
@ -79,6 +79,14 @@ void release_thread(struct task_struct *dead_task)
|
||||
{
|
||||
}
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
void arch_release_task_struct(struct task_struct *tsk)
|
||||
{
|
||||
if (tsk->thread.vxrs)
|
||||
kfree(tsk->thread.vxrs);
|
||||
}
|
||||
#endif
|
||||
|
||||
int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
|
||||
unsigned long arg, struct task_struct *p)
|
||||
{
|
||||
@ -243,13 +251,3 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
|
||||
ret = PAGE_ALIGN(mm->brk + brk_rnd());
|
||||
return (ret > mm->brk) ? ret : mm->brk;
|
||||
}
|
||||
|
||||
unsigned long randomize_et_dyn(unsigned long base)
|
||||
{
|
||||
unsigned long ret;
|
||||
|
||||
if (!(current->flags & PF_RANDOMIZE))
|
||||
return base;
|
||||
ret = PAGE_ALIGN(base + brk_rnd());
|
||||
return (ret > base) ? ret : base;
|
||||
}
|
||||
|
@ -8,16 +8,24 @@
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <asm/elf.h>
|
||||
#include <asm/lowcore.h>
|
||||
#include <asm/param.h>
|
||||
#include <asm/smp.h>
|
||||
|
||||
static DEFINE_PER_CPU(struct cpuid, cpu_id);
|
||||
|
||||
void cpu_relax(void)
|
||||
{
|
||||
if (!smp_cpu_mtid && MACHINE_HAS_DIAG44)
|
||||
asm volatile("diag 0,0,0x44");
|
||||
barrier();
|
||||
}
|
||||
EXPORT_SYMBOL(cpu_relax);
|
||||
|
||||
/*
|
||||
* cpu_init - initializes state that is per-CPU.
|
||||
*/
|
||||
|
@ -294,7 +294,8 @@ ENTRY(_sclp_print_early)
|
||||
#ifdef CONFIG_64BIT
|
||||
tm LC_AR_MODE_ID,1
|
||||
jno .Lesa3
|
||||
lmh %r6,%r15,96(%r15) # store upper register halves
|
||||
lgfr %r2,%r2 # sign extend return value
|
||||
lmh %r6,%r15,96(%r15) # restore upper register halves
|
||||
ahi %r15,80
|
||||
.Lesa3:
|
||||
#endif
|
||||
|
@ -810,6 +810,9 @@ static void __init setup_hwcaps(void)
|
||||
case 0x2828:
|
||||
strcpy(elf_platform, "zEC12");
|
||||
break;
|
||||
case 0x2964:
|
||||
strcpy(elf_platform, "z13");
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -71,9 +71,30 @@ struct pcpu {
|
||||
};
|
||||
|
||||
static u8 boot_cpu_type;
|
||||
static u16 boot_cpu_address;
|
||||
static struct pcpu pcpu_devices[NR_CPUS];
|
||||
|
||||
unsigned int smp_cpu_mt_shift;
|
||||
EXPORT_SYMBOL(smp_cpu_mt_shift);
|
||||
|
||||
unsigned int smp_cpu_mtid;
|
||||
EXPORT_SYMBOL(smp_cpu_mtid);
|
||||
|
||||
static unsigned int smp_max_threads __initdata = -1U;
|
||||
|
||||
static int __init early_nosmt(char *s)
|
||||
{
|
||||
smp_max_threads = 1;
|
||||
return 0;
|
||||
}
|
||||
early_param("nosmt", early_nosmt);
|
||||
|
||||
static int __init early_smt(char *s)
|
||||
{
|
||||
get_option(&s, &smp_max_threads);
|
||||
return 0;
|
||||
}
|
||||
early_param("smt", early_smt);
|
||||
|
||||
/*
|
||||
* The smp_cpu_state_mutex must be held when changing the state or polarization
|
||||
* member of a pcpu data structure within the pcpu_devices arreay.
|
||||
@ -132,7 +153,7 @@ static inline int pcpu_running(struct pcpu *pcpu)
|
||||
/*
|
||||
* Find struct pcpu by cpu address.
|
||||
*/
|
||||
static struct pcpu *pcpu_find_address(const struct cpumask *mask, int address)
|
||||
static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
|
||||
{
|
||||
int cpu;
|
||||
|
||||
@ -298,6 +319,32 @@ static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
|
||||
for (;;) ;
|
||||
}
|
||||
|
||||
/*
|
||||
* Enable additional logical cpus for multi-threading.
|
||||
*/
|
||||
static int pcpu_set_smt(unsigned int mtid)
|
||||
{
|
||||
register unsigned long reg1 asm ("1") = (unsigned long) mtid;
|
||||
int cc;
|
||||
|
||||
if (smp_cpu_mtid == mtid)
|
||||
return 0;
|
||||
asm volatile(
|
||||
" sigp %1,0,%2 # sigp set multi-threading\n"
|
||||
" ipm %0\n"
|
||||
" srl %0,28\n"
|
||||
: "=d" (cc) : "d" (reg1), "K" (SIGP_SET_MULTI_THREADING)
|
||||
: "cc");
|
||||
if (cc == 0) {
|
||||
smp_cpu_mtid = mtid;
|
||||
smp_cpu_mt_shift = 0;
|
||||
while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
|
||||
smp_cpu_mt_shift++;
|
||||
pcpu_devices[0].address = stap();
|
||||
}
|
||||
return cc;
|
||||
}
|
||||
|
||||
/*
|
||||
* Call function on an online CPU.
|
||||
*/
|
||||
@ -512,22 +559,17 @@ EXPORT_SYMBOL(smp_ctl_clear_bit);
|
||||
|
||||
#ifdef CONFIG_CRASH_DUMP
|
||||
|
||||
static void __init smp_get_save_area(int cpu, u16 address)
|
||||
static inline void __smp_store_cpu_state(int cpu, u16 address, int is_boot_cpu)
|
||||
{
|
||||
void *lc = pcpu_devices[0].lowcore;
|
||||
struct save_area_ext *sa_ext;
|
||||
unsigned long vx_sa;
|
||||
|
||||
if (is_kdump_kernel())
|
||||
return;
|
||||
if (!OLDMEM_BASE && (address == boot_cpu_address ||
|
||||
ipl_info.type != IPL_TYPE_FCP_DUMP))
|
||||
return;
|
||||
sa_ext = dump_save_area_create(cpu);
|
||||
if (!sa_ext)
|
||||
panic("could not allocate memory for save area\n");
|
||||
if (address == boot_cpu_address) {
|
||||
/* Copy the registers of the boot cpu. */
|
||||
if (is_boot_cpu) {
|
||||
/* Copy the registers of the boot CPU. */
|
||||
copy_oldmem_page(1, (void *) &sa_ext->sa, sizeof(sa_ext->sa),
|
||||
SAVE_AREA_BASE - PAGE_SIZE, 0);
|
||||
if (MACHINE_HAS_VX)
|
||||
@ -548,6 +590,64 @@ static void __init smp_get_save_area(int cpu, u16 address)
|
||||
free_page(vx_sa);
|
||||
}
|
||||

/*
 * Collect CPU state of the previous, crashed system.
 * There are four cases:
 * 1) standard zfcp dump
 *    condition: OLDMEM_BASE == NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The boot CPU state is located in
 *    the absolute lowcore of the memory stored in the HSA. The zcore code
 *    will allocate the save area and copy the boot CPU state from the HSA.
 * 2) stand-alone kdump for SCSI (zfcp dump with swapped memory)
 *    condition: OLDMEM_BASE != NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The firmware or the boot-loader
 *    stored the registers of the boot CPU in the absolute lowcore in the
 *    memory of the old system.
 * 3) kdump and the old kernel did not store the CPU state,
 *    or stand-alone kdump for DASD
 *    condition: OLDMEM_BASE != NULL && !is_kdump_kernel()
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The kexec code or the boot-loader
 *    stored the registers of the boot CPU in the memory of the old system.
 * 4) kdump and the old kernel stored the CPU state
 *    condition: OLDMEM_BASE != NULL && is_kdump_kernel()
 *    The state of all CPUs is stored in ELF sections in the memory of the
 *    old system. The ELF sections are picked up by the crash_dump code
 *    via elfcorehdr_addr.
 */
static void __init smp_store_cpu_states(struct sclp_cpu_info *info)
{
unsigned int cpu, address, i, j;
int is_boot_cpu;

if (is_kdump_kernel())
/* Previous system stored the CPU states. Nothing to do. */
return;
if (!(OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP))
/* No previous system present, normal boot. */
return;
/* Set multi-threading state to the previous system. */
pcpu_set_smt(sclp_get_mtid_prev());
/* Collect CPU states. */
cpu = 0;
for (i = 0; i < info->configured; i++) {
/* Skip CPUs with different CPU type. */
if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
continue;
for (j = 0; j <= smp_cpu_mtid; j++, cpu++) {
address = (info->cpu[i].core_id << smp_cpu_mt_shift) + j;
is_boot_cpu = (address == pcpu_devices[0].address);
if (is_boot_cpu && !OLDMEM_BASE)
/* Skip boot CPU for standard zfcp dump. */
continue;
/* Get state for this CPU. */
__smp_store_cpu_state(cpu, address, is_boot_cpu);
}
}
}

int smp_store_status(int cpu)
{
unsigned long vx_sa;
@@ -565,10 +665,6 @@ int smp_store_status(int cpu)
return 0;
}

#else /* CONFIG_CRASH_DUMP */

static inline void smp_get_save_area(int cpu, u16 address) { }

#endif /* CONFIG_CRASH_DUMP */
|
||||
void smp_cpu_set_polarization(int cpu, int val)
|
||||
@@ -590,11 +686,13 @@ static struct sclp_cpu_info *smp_get_cpu_info(void)
|
||||
info = kzalloc(sizeof(*info), GFP_KERNEL);
|
||||
if (info && (use_sigp_detection || sclp_get_cpu_info(info))) {
|
||||
use_sigp_detection = 1;
|
||||
for (address = 0; address <= MAX_CPU_ADDRESS; address++) {
|
||||
for (address = 0; address <= MAX_CPU_ADDRESS;
|
||||
address += (1U << smp_cpu_mt_shift)) {
|
||||
if (__pcpu_sigp_relax(address, SIGP_SENSE, 0, NULL) ==
|
||||
SIGP_CC_NOT_OPERATIONAL)
|
||||
continue;
|
||||
info->cpu[info->configured].address = address;
|
||||
info->cpu[info->configured].core_id =
|
||||
address >> smp_cpu_mt_shift;
|
||||
info->configured++;
|
||||
}
|
||||
info->combined = info->configured;
|
||||
@ -608,7 +706,8 @@ static int __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add)
|
||||
{
|
||||
struct pcpu *pcpu;
|
||||
cpumask_t avail;
|
||||
int cpu, nr, i;
|
||||
int cpu, nr, i, j;
|
||||
u16 address;
|
||||
|
||||
nr = 0;
|
||||
cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
|
||||
@@ -616,51 +715,76 @@ static int __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add)
|
||||
for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
|
||||
if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
|
||||
continue;
|
||||
if (pcpu_find_address(cpu_present_mask, info->cpu[i].address))
|
||||
continue;
|
||||
pcpu = pcpu_devices + cpu;
|
||||
pcpu->address = info->cpu[i].address;
|
||||
pcpu->state = (i >= info->configured) ?
|
||||
CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
|
||||
smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
|
||||
set_cpu_present(cpu, true);
|
||||
if (sysfs_add && smp_add_present_cpu(cpu) != 0)
|
||||
set_cpu_present(cpu, false);
|
||||
else
|
||||
nr++;
|
||||
cpu = cpumask_next(cpu, &avail);
|
||||
address = info->cpu[i].core_id << smp_cpu_mt_shift;
|
||||
for (j = 0; j <= smp_cpu_mtid; j++) {
|
||||
if (pcpu_find_address(cpu_present_mask, address + j))
|
||||
continue;
|
||||
pcpu = pcpu_devices + cpu;
|
||||
pcpu->address = address + j;
|
||||
pcpu->state =
|
||||
(cpu >= info->configured*(smp_cpu_mtid + 1)) ?
|
||||
CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
|
||||
smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
|
||||
set_cpu_present(cpu, true);
|
||||
if (sysfs_add && smp_add_present_cpu(cpu) != 0)
|
||||
set_cpu_present(cpu, false);
|
||||
else
|
||||
nr++;
|
||||
cpu = cpumask_next(cpu, &avail);
|
||||
if (cpu >= nr_cpu_ids)
|
||||
break;
|
||||
}
|
||||
}
|
||||
return nr;
|
||||
}
|
||||
|
||||
static void __init smp_detect_cpus(void)
|
||||
{
|
||||
unsigned int cpu, c_cpus, s_cpus;
|
||||
unsigned int cpu, mtid, c_cpus, s_cpus;
|
||||
struct sclp_cpu_info *info;
|
||||
u16 address;
|
||||
|
||||
/* Get CPU information */
|
||||
info = smp_get_cpu_info();
|
||||
if (!info)
|
||||
panic("smp_detect_cpus failed to allocate memory\n");
|
||||
|
||||
/* Find boot CPU type */
|
||||
if (info->has_cpu_type) {
|
||||
for (cpu = 0; cpu < info->combined; cpu++) {
|
||||
if (info->cpu[cpu].address != boot_cpu_address)
|
||||
continue;
|
||||
/* The boot cpu dictates the cpu type. */
|
||||
boot_cpu_type = info->cpu[cpu].type;
|
||||
break;
|
||||
}
|
||||
address = stap();
|
||||
for (cpu = 0; cpu < info->combined; cpu++)
|
||||
if (info->cpu[cpu].core_id == address) {
|
||||
/* The boot cpu dictates the cpu type. */
|
||||
boot_cpu_type = info->cpu[cpu].type;
|
||||
break;
|
||||
}
|
||||
if (cpu >= info->combined)
|
||||
panic("Could not find boot CPU type");
|
||||
}
|
||||
|
||||
#ifdef CONFIG_CRASH_DUMP
|
||||
/* Collect CPU state of previous system */
|
||||
smp_store_cpu_states(info);
|
||||
#endif
|
||||
|
||||
/* Set multi-threading state for the current system */
|
||||
mtid = sclp_get_mtid(boot_cpu_type);
|
||||
mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
|
||||
pcpu_set_smt(mtid);
|
||||
|
||||
/* Print number of CPUs */
|
||||
c_cpus = s_cpus = 0;
|
||||
for (cpu = 0; cpu < info->combined; cpu++) {
|
||||
if (info->has_cpu_type && info->cpu[cpu].type != boot_cpu_type)
|
||||
continue;
|
||||
if (cpu < info->configured) {
|
||||
smp_get_save_area(c_cpus, info->cpu[cpu].address);
|
||||
c_cpus++;
|
||||
} else
|
||||
s_cpus++;
|
||||
if (cpu < info->configured)
|
||||
c_cpus += smp_cpu_mtid + 1;
|
||||
else
|
||||
s_cpus += smp_cpu_mtid + 1;
|
||||
}
|
||||
pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
|
||||
|
||||
/* Add CPUs present at boot */
|
||||
get_online_cpus();
|
||||
__smp_rescan_cpus(info, 0);
|
||||
put_online_cpus();
|
||||
@@ -696,12 +820,23 @@ static void smp_start_secondary(void *cpuvoid)
|
||||
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
|
||||
{
|
||||
struct pcpu *pcpu;
|
||||
int rc;
|
||||
int base, i, rc;
|
||||
|
||||
pcpu = pcpu_devices + cpu;
|
||||
if (pcpu->state != CPU_STATE_CONFIGURED)
|
||||
return -EIO;
|
||||
if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) !=
|
||||
base = cpu - (cpu % (smp_cpu_mtid + 1));
|
||||
for (i = 0; i <= smp_cpu_mtid; i++) {
|
||||
if (base + i < nr_cpu_ids)
|
||||
if (cpu_online(base + i))
|
||||
break;
|
||||
}
|
||||
/*
|
||||
* If this is the first CPU of the core to get online
|
||||
* do an initial CPU reset.
|
||||
*/
|
||||
if (i > smp_cpu_mtid &&
|
||||
pcpu_sigp_retry(pcpu_devices + base, SIGP_INITIAL_CPU_RESET, 0) !=
|
||||
SIGP_CC_ORDER_CODE_ACCEPTED)
|
||||
return -EIO;
|
||||
|
||||
@ -774,7 +909,8 @@ void __init smp_fill_possible_mask(void)
|
||||
{
|
||||
unsigned int possible, sclp, cpu;
|
||||
|
||||
sclp = sclp_get_max_cpu() ?: nr_cpu_ids;
|
||||
sclp = min(smp_max_threads, sclp_get_mtid_max() + 1);
|
||||
sclp = sclp_get_max_cpu()*sclp ?: nr_cpu_ids;
|
||||
possible = setup_possible_cpus ?: nr_cpu_ids;
|
||||
possible = min(possible, sclp);
|
||||
for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
|
||||
@ -796,9 +932,8 @@ void __init smp_prepare_boot_cpu(void)
|
||||
{
|
||||
struct pcpu *pcpu = pcpu_devices;
|
||||
|
||||
boot_cpu_address = stap();
|
||||
pcpu->state = CPU_STATE_CONFIGURED;
|
||||
pcpu->address = boot_cpu_address;
|
||||
pcpu->address = stap();
|
||||
pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
|
||||
pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE
|
||||
+ STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
|
||||
@ -848,7 +983,7 @@ static ssize_t cpu_configure_store(struct device *dev,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct pcpu *pcpu;
|
||||
int cpu, val, rc;
|
||||
int cpu, val, rc, i;
|
||||
char delim;
|
||||
|
||||
if (sscanf(buf, "%d %c", &val, &delim) != 1)
|
||||
@ -860,29 +995,43 @@ static ssize_t cpu_configure_store(struct device *dev,
|
||||
rc = -EBUSY;
|
||||
/* disallow configuration changes of online cpus and cpu 0 */
|
||||
cpu = dev->id;
|
||||
if (cpu_online(cpu) || cpu == 0)
|
||||
cpu -= cpu % (smp_cpu_mtid + 1);
|
||||
if (cpu == 0)
|
||||
goto out;
|
||||
for (i = 0; i <= smp_cpu_mtid; i++)
|
||||
if (cpu_online(cpu + i))
|
||||
goto out;
|
||||
pcpu = pcpu_devices + cpu;
|
||||
rc = 0;
|
||||
switch (val) {
|
||||
case 0:
|
||||
if (pcpu->state != CPU_STATE_CONFIGURED)
|
||||
break;
|
||||
rc = sclp_cpu_deconfigure(pcpu->address);
|
||||
rc = sclp_cpu_deconfigure(pcpu->address >> smp_cpu_mt_shift);
|
||||
if (rc)
|
||||
break;
|
||||
pcpu->state = CPU_STATE_STANDBY;
|
||||
smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
|
||||
for (i = 0; i <= smp_cpu_mtid; i++) {
|
||||
if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
|
||||
continue;
|
||||
pcpu[i].state = CPU_STATE_STANDBY;
|
||||
smp_cpu_set_polarization(cpu + i,
|
||||
POLARIZATION_UNKNOWN);
|
||||
}
|
||||
topology_expect_change();
|
||||
break;
|
||||
case 1:
|
||||
if (pcpu->state != CPU_STATE_STANDBY)
|
||||
break;
|
||||
rc = sclp_cpu_configure(pcpu->address);
|
||||
rc = sclp_cpu_configure(pcpu->address >> smp_cpu_mt_shift);
|
||||
if (rc)
|
||||
break;
|
||||
pcpu->state = CPU_STATE_CONFIGURED;
|
||||
smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
|
||||
for (i = 0; i <= smp_cpu_mtid; i++) {
|
||||
if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
|
||||
continue;
|
||||
pcpu[i].state = CPU_STATE_CONFIGURED;
|
||||
smp_cpu_set_polarization(cpu + i,
|
||||
POLARIZATION_UNKNOWN);
|
||||
}
|
||||
topology_expect_change();
|
||||
break;
|
||||
default:
|
||||
|
@@ -194,6 +194,14 @@ static void stsi_2_2_2(struct seq_file *m, struct sysinfo_2_2_2 *info)
|
||||
seq_printf(m, "LPAR CPUs Reserved: %d\n", info->cpus_reserved);
|
||||
seq_printf(m, "LPAR CPUs Dedicated: %d\n", info->cpus_dedicated);
|
||||
seq_printf(m, "LPAR CPUs Shared: %d\n", info->cpus_shared);
|
||||
if (info->mt_installed & 0x80) {
|
||||
seq_printf(m, "LPAR CPUs G-MTID: %d\n",
|
||||
info->mt_general & 0x1f);
|
||||
seq_printf(m, "LPAR CPUs S-MTID: %d\n",
|
||||
info->mt_installed & 0x1f);
|
||||
seq_printf(m, "LPAR CPUs PS-MTID: %d\n",
|
||||
info->mt_psmtid & 0x1f);
|
||||
}
|
||||
}
|
||||
|
||||
static void stsi_3_2_2(struct seq_file *m, struct sysinfo_3_2_2 *info)
|
||||
|
@@ -59,32 +59,50 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
|
||||
return mask;
|
||||
}
|
||||
|
||||
static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
|
||||
static cpumask_t cpu_thread_map(unsigned int cpu)
|
||||
{
|
||||
cpumask_t mask;
|
||||
int i;
|
||||
|
||||
cpumask_copy(&mask, cpumask_of(cpu));
|
||||
if (!topology_enabled || !MACHINE_HAS_TOPOLOGY)
|
||||
return mask;
|
||||
cpu -= cpu % (smp_cpu_mtid + 1);
|
||||
for (i = 0; i <= smp_cpu_mtid; i++)
|
||||
if (cpu_present(cpu + i))
|
||||
cpumask_set_cpu(cpu + i, &mask);
|
||||
return mask;
|
||||
}
|
||||
|
||||
static struct mask_info *add_cpus_to_mask(struct topology_core *tl_core,
|
||||
struct mask_info *book,
|
||||
struct mask_info *socket,
|
||||
int one_socket_per_cpu)
|
||||
{
|
||||
unsigned int cpu;
|
||||
unsigned int core;
|
||||
|
||||
for_each_set_bit(cpu, &tl_cpu->mask[0], TOPOLOGY_CPU_BITS) {
|
||||
unsigned int rcpu;
|
||||
int lcpu;
|
||||
for_each_set_bit(core, &tl_core->mask[0], TOPOLOGY_CORE_BITS) {
|
||||
unsigned int rcore;
|
||||
int lcpu, i;
|
||||
|
||||
rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin;
|
||||
lcpu = smp_find_processor_id(rcpu);
|
||||
rcore = TOPOLOGY_CORE_BITS - 1 - core + tl_core->origin;
|
||||
lcpu = smp_find_processor_id(rcore << smp_cpu_mt_shift);
|
||||
if (lcpu < 0)
|
||||
continue;
|
||||
cpumask_set_cpu(lcpu, &book->mask);
|
||||
cpu_topology[lcpu].book_id = book->id;
|
||||
cpumask_set_cpu(lcpu, &socket->mask);
|
||||
cpu_topology[lcpu].core_id = rcpu;
|
||||
if (one_socket_per_cpu) {
|
||||
cpu_topology[lcpu].socket_id = rcpu;
|
||||
socket = socket->next;
|
||||
} else {
|
||||
cpu_topology[lcpu].socket_id = socket->id;
|
||||
for (i = 0; i <= smp_cpu_mtid; i++) {
|
||||
cpu_topology[lcpu + i].book_id = book->id;
|
||||
cpu_topology[lcpu + i].core_id = rcore;
|
||||
cpu_topology[lcpu + i].thread_id = lcpu + i;
|
||||
cpumask_set_cpu(lcpu + i, &book->mask);
|
||||
cpumask_set_cpu(lcpu + i, &socket->mask);
|
||||
if (one_socket_per_cpu)
|
||||
cpu_topology[lcpu + i].socket_id = rcore;
|
||||
else
|
||||
cpu_topology[lcpu + i].socket_id = socket->id;
|
||||
smp_cpu_set_polarization(lcpu + i, tl_core->pp);
|
||||
}
|
||||
smp_cpu_set_polarization(lcpu, tl_cpu->pp);
|
||||
if (one_socket_per_cpu)
|
||||
socket = socket->next;
|
||||
}
|
||||
return socket;
|
||||
}
|
||||
@ -108,7 +126,7 @@ static void clear_masks(void)
|
||||
static union topology_entry *next_tle(union topology_entry *tle)
|
||||
{
|
||||
if (!tle->nl)
|
||||
return (union topology_entry *)((struct topology_cpu *)tle + 1);
|
||||
return (union topology_entry *)((struct topology_core *)tle + 1);
|
||||
return (union topology_entry *)((struct topology_container *)tle + 1);
|
||||
}
|
||||
|
||||
@ -231,9 +249,11 @@ static void update_cpu_masks(void)
|
||||
|
||||
spin_lock_irqsave(&topology_lock, flags);
|
||||
for_each_possible_cpu(cpu) {
|
||||
cpu_topology[cpu].thread_mask = cpu_thread_map(cpu);
|
||||
cpu_topology[cpu].core_mask = cpu_group_map(&socket_info, cpu);
|
||||
cpu_topology[cpu].book_mask = cpu_group_map(&book_info, cpu);
|
||||
if (!MACHINE_HAS_TOPOLOGY) {
|
||||
cpu_topology[cpu].thread_id = cpu;
|
||||
cpu_topology[cpu].core_id = cpu;
|
||||
cpu_topology[cpu].socket_id = cpu;
|
||||
cpu_topology[cpu].book_id = cpu;
|
||||
@ -445,6 +465,12 @@ int topology_cpu_init(struct cpu *cpu)
|
||||
return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
|
||||
}
|
||||
|
||||
const struct cpumask *cpu_thread_mask(int cpu)
|
||||
{
|
||||
return &cpu_topology[cpu].thread_mask;
|
||||
}
|
||||
|
||||
|
||||
const struct cpumask *cpu_coregroup_mask(int cpu)
|
||||
{
|
||||
return &cpu_topology[cpu].core_mask;
|
||||
@ -456,6 +482,7 @@ static const struct cpumask *cpu_book_mask(int cpu)
|
||||
}
|
||||
|
||||
static struct sched_domain_topology_level s390_topology[] = {
|
||||
{ cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
|
||||
{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
|
||||
{ cpu_book_mask, SD_INIT_NAME(BOOK) },
|
||||
{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
|
||||
|
@ -15,6 +15,8 @@
|
||||
#include <asm/cputime.h>
|
||||
#include <asm/vtimer.h>
|
||||
#include <asm/vtime.h>
|
||||
#include <asm/cpu_mf.h>
|
||||
#include <asm/smp.h>
|
||||
|
||||
static void virt_timer_expire(void);
|
||||
|
||||
@ -23,6 +25,10 @@ static DEFINE_SPINLOCK(virt_timer_lock);
|
||||
static atomic64_t virt_timer_current;
|
||||
static atomic64_t virt_timer_elapsed;
|
||||
|
||||
static DEFINE_PER_CPU(u64, mt_cycles[32]);
|
||||
static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 };
|
||||
static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 };
|
||||
|
||||
static inline u64 get_vtimer(void)
|
||||
{
|
||||
u64 timer;
|
||||
@ -61,6 +67,8 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
|
||||
{
|
||||
struct thread_info *ti = task_thread_info(tsk);
|
||||
u64 timer, clock, user, system, steal;
|
||||
u64 user_scaled, system_scaled;
|
||||
int i;
|
||||
|
||||
timer = S390_lowcore.last_update_timer;
|
||||
clock = S390_lowcore.last_update_clock;
|
||||
@@ -76,15 +84,49 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
|
||||
S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
|
||||
S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;
|
||||
|
||||
/* Do MT utilization calculation */
|
||||
if (smp_cpu_mtid) {
|
||||
u64 cycles_new[32], *cycles_old;
|
||||
u64 delta, mult, div;
|
||||
|
||||
cycles_old = this_cpu_ptr(mt_cycles);
|
||||
if (stcctm5(smp_cpu_mtid + 1, cycles_new) < 2) {
|
||||
mult = div = 0;
|
||||
for (i = 0; i <= smp_cpu_mtid; i++) {
|
||||
delta = cycles_new[i] - cycles_old[i];
|
||||
mult += delta;
|
||||
div += (i + 1) * delta;
|
||||
}
|
||||
if (mult > 0) {
|
||||
/* Update scaling factor */
|
||||
__this_cpu_write(mt_scaling_mult, mult);
|
||||
__this_cpu_write(mt_scaling_div, div);
|
||||
memcpy(cycles_old, cycles_new,
|
||||
sizeof(u64) * (smp_cpu_mtid + 1));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
user = S390_lowcore.user_timer - ti->user_timer;
|
||||
S390_lowcore.steal_timer -= user;
|
||||
ti->user_timer = S390_lowcore.user_timer;
|
||||
account_user_time(tsk, user, user);
|
||||
|
||||
system = S390_lowcore.system_timer - ti->system_timer;
|
||||
S390_lowcore.steal_timer -= system;
|
||||
ti->system_timer = S390_lowcore.system_timer;
|
||||
account_system_time(tsk, hardirq_offset, system, system);
|
||||
|
||||
user_scaled = user;
|
||||
system_scaled = system;
|
||||
/* Do MT utilization scaling */
|
||||
if (smp_cpu_mtid) {
|
||||
u64 mult = __this_cpu_read(mt_scaling_mult);
|
||||
u64 div = __this_cpu_read(mt_scaling_div);
|
||||
|
||||
user_scaled = (user_scaled * mult) / div;
|
||||
system_scaled = (system_scaled * mult) / div;
|
||||
}
|
||||
account_user_time(tsk, user, user_scaled);
|
||||
account_system_time(tsk, hardirq_offset, system, system_scaled);
|
||||
|
||||
steal = S390_lowcore.steal_timer;
|
||||
if ((s64) steal > 0) {
|
||||
@ -126,7 +168,7 @@ void vtime_account_user(struct task_struct *tsk)
|
||||
void vtime_account_irq_enter(struct task_struct *tsk)
|
||||
{
|
||||
struct thread_info *ti = task_thread_info(tsk);
|
||||
u64 timer, system;
|
||||
u64 timer, system, system_scaled;
|
||||
|
||||
timer = S390_lowcore.last_update_timer;
|
||||
S390_lowcore.last_update_timer = get_vtimer();
|
||||
@ -135,7 +177,15 @@ void vtime_account_irq_enter(struct task_struct *tsk)
|
||||
system = S390_lowcore.system_timer - ti->system_timer;
|
||||
S390_lowcore.steal_timer -= system;
|
||||
ti->system_timer = S390_lowcore.system_timer;
|
||||
account_system_time(tsk, 0, system, system);
|
||||
system_scaled = system;
|
||||
/* Do MT utilization scaling */
|
||||
if (smp_cpu_mtid) {
|
||||
u64 mult = __this_cpu_read(mt_scaling_mult);
|
||||
u64 div = __this_cpu_read(mt_scaling_div);
|
||||
|
||||
system_scaled = (system_scaled * mult) / div;
|
||||
}
|
||||
account_system_time(tsk, 0, system, system_scaled);
|
||||
|
||||
virt_timer_forward(system);
|
||||
}
|
||||

@@ -12,7 +12,15 @@
#include <linux/smp.h>
#include <asm/io.h>

int spin_retry = 1000;
int spin_retry = -1;

static int __init spin_retry_init(void)
{
if (spin_retry < 0)
spin_retry = MACHINE_HAS_CAD ? 10 : 1000;
return 0;
}
early_initcall(spin_retry_init);

/**
 * spin_retry= parameter
@@ -24,6 +32,11 @@ static int __init spin_retry_setup(char *str)
}
__setup("spin_retry=", spin_retry_setup);

static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
{
asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock));
}

void arch_spin_lock_wait(arch_spinlock_t *lp)
|
||||
{
|
||||
unsigned int cpu = SPINLOCK_LOCKVAL;
|
||||
@ -46,6 +59,8 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
|
||||
/* Loop for a while on the lock value. */
|
||||
count = spin_retry;
|
||||
do {
|
||||
if (MACHINE_HAS_CAD)
|
||||
_raw_compare_and_delay(&lp->lock, owner);
|
||||
owner = ACCESS_ONCE(lp->lock);
|
||||
} while (owner && count-- > 0);
|
||||
if (!owner)
|
||||
@ -84,6 +99,8 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
|
||||
/* Loop for a while on the lock value. */
|
||||
count = spin_retry;
|
||||
do {
|
||||
if (MACHINE_HAS_CAD)
|
||||
_raw_compare_and_delay(&lp->lock, owner);
|
||||
owner = ACCESS_ONCE(lp->lock);
|
||||
} while (owner && count-- > 0);
|
||||
if (!owner)
|
||||
@ -100,11 +117,19 @@ EXPORT_SYMBOL(arch_spin_lock_wait_flags);
|
||||
|
||||
int arch_spin_trylock_retry(arch_spinlock_t *lp)
|
||||
{
|
||||
unsigned int cpu = SPINLOCK_LOCKVAL;
|
||||
unsigned int owner;
|
||||
int count;
|
||||
|
||||
for (count = spin_retry; count > 0; count--)
|
||||
if (arch_spin_trylock_once(lp))
|
||||
return 1;
|
||||
for (count = spin_retry; count > 0; count--) {
|
||||
owner = ACCESS_ONCE(lp->lock);
|
||||
/* Try to get the lock if it is free. */
|
||||
if (!owner) {
|
||||
if (_raw_compare_and_swap(&lp->lock, 0, cpu))
|
||||
return 1;
|
||||
} else if (MACHINE_HAS_CAD)
|
||||
_raw_compare_and_delay(&lp->lock, owner);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(arch_spin_trylock_retry);
|
||||
@ -126,8 +151,11 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
|
||||
}
|
||||
old = ACCESS_ONCE(rw->lock);
|
||||
owner = ACCESS_ONCE(rw->owner);
|
||||
if ((int) old < 0)
|
||||
if ((int) old < 0) {
|
||||
if (MACHINE_HAS_CAD)
|
||||
_raw_compare_and_delay(&rw->lock, old);
|
||||
continue;
|
||||
}
|
||||
if (_raw_compare_and_swap(&rw->lock, old, old + 1))
|
||||
return;
|
||||
}
|
||||
@ -141,8 +169,11 @@ int _raw_read_trylock_retry(arch_rwlock_t *rw)
|
||||
|
||||
while (count-- > 0) {
|
||||
old = ACCESS_ONCE(rw->lock);
|
||||
if ((int) old < 0)
|
||||
if ((int) old < 0) {
|
||||
if (MACHINE_HAS_CAD)
|
||||
_raw_compare_and_delay(&rw->lock, old);
|
||||
continue;
|
||||
}
|
||||
if (_raw_compare_and_swap(&rw->lock, old, old + 1))
|
||||
return 1;
|
||||
}
|
||||
@ -173,6 +204,8 @@ void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
|
||||
}
|
||||
if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
|
||||
break;
|
||||
if (MACHINE_HAS_CAD)
|
||||
_raw_compare_and_delay(&rw->lock, old);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(_raw_write_lock_wait);
|
||||
@ -201,6 +234,8 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
|
||||
smp_rmb();
|
||||
if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
|
||||
break;
|
||||
if (MACHINE_HAS_CAD)
|
||||
_raw_compare_and_delay(&rw->lock, old);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(_raw_write_lock_wait);
|
||||
@ -214,8 +249,11 @@ int _raw_write_trylock_retry(arch_rwlock_t *rw)
|
||||
|
||||
while (count-- > 0) {
|
||||
old = ACCESS_ONCE(rw->lock);
|
||||
if (old)
|
||||
if (old) {
|
||||
if (MACHINE_HAS_CAD)
|
||||
_raw_compare_and_delay(&rw->lock, old);
|
||||
continue;
|
||||
}
|
||||
if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
|
||||
return 1;
|
||||
}
|
||||
|
@@ -171,7 +171,7 @@ static void dump_pagetable(unsigned long asce, unsigned long address)
|
||||
table = table + ((address >> 20) & 0x7ff);
|
||||
if (bad_address(table))
|
||||
goto bad;
|
||||
pr_cont(KERN_CONT "S:%016lx ", *table);
|
||||
pr_cont("S:%016lx ", *table);
|
||||
if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
|
||||
goto out;
|
||||
table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
|
||||
@ -261,7 +261,7 @@ static inline void report_user_fault(struct pt_regs *regs, long signr)
|
||||
return;
|
||||
if (!printk_ratelimit())
|
||||
return;
|
||||
printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d",
|
||||
printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
|
||||
regs->int_code & 0xffff, regs->int_code >> 17);
|
||||
print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN);
|
||||
printk(KERN_CONT "\n");
|
||||
|
@ -71,13 +71,16 @@ static void __init setup_zero_pages(void)
|
||||
break;
|
||||
case 0x2827: /* zEC12 */
|
||||
case 0x2828: /* zEC12 */
|
||||
default:
|
||||
order = 5;
|
||||
break;
|
||||
case 0x2964: /* z13 */
|
||||
default:
|
||||
order = 7;
|
||||
break;
|
||||
}
|
||||
/* Limit number of empty zero pages for small memory sizes */
|
||||
if (order > 2 && totalram_pages <= 16384)
|
||||
order = 2;
|
||||
while (order > 2 && (totalram_pages >> 10) < (1UL << order))
|
||||
order--;
|
||||
|
||||
empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
|
||||
if (!empty_zero_page)
|
||||
|
@ -28,8 +28,12 @@
|
||||
#include <linux/module.h>
|
||||
#include <linux/random.h>
|
||||
#include <linux/compat.h>
|
||||
#include <linux/security.h>
|
||||
#include <asm/pgalloc.h>
|
||||
|
||||
unsigned long mmap_rnd_mask;
|
||||
unsigned long mmap_align_mask;
|
||||
|
||||
static unsigned long stack_maxrandom_size(void)
|
||||
{
|
||||
if (!(current->flags & PF_RANDOMIZE))
|
||||
@ -60,8 +64,10 @@ static unsigned long mmap_rnd(void)
|
||||
{
|
||||
if (!(current->flags & PF_RANDOMIZE))
|
||||
return 0;
|
||||
/* 8MB randomization for mmap_base */
|
||||
return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
|
||||
if (is_32bit_task())
|
||||
return (get_random_int() & 0x7ff) << PAGE_SHIFT;
|
||||
else
|
||||
return (get_random_int() & mmap_rnd_mask) << PAGE_SHIFT;
|
||||
}
|
||||
|
||||
static unsigned long mmap_base_legacy(void)
|
||||
@@ -81,6 +87,106 @@ static inline unsigned long mmap_base(void)
|
||||
return STACK_TOP - stack_maxrandom_size() - mmap_rnd() - gap;
|
||||
}
|
||||
|
||||
unsigned long
|
||||
arch_get_unmapped_area(struct file *filp, unsigned long addr,
|
||||
unsigned long len, unsigned long pgoff, unsigned long flags)
|
||||
{
|
||||
struct mm_struct *mm = current->mm;
|
||||
struct vm_area_struct *vma;
|
||||
struct vm_unmapped_area_info info;
|
||||
int do_color_align;
|
||||
|
||||
if (len > TASK_SIZE - mmap_min_addr)
|
||||
return -ENOMEM;
|
||||
|
||||
if (flags & MAP_FIXED)
|
||||
return addr;
|
||||
|
||||
if (addr) {
|
||||
addr = PAGE_ALIGN(addr);
|
||||
vma = find_vma(mm, addr);
|
||||
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
|
||||
(!vma || addr + len <= vma->vm_start))
|
||||
return addr;
|
||||
}
|
||||
|
||||
do_color_align = 0;
|
||||
if (filp || (flags & MAP_SHARED))
|
||||
do_color_align = !is_32bit_task();
|
||||
|
||||
info.flags = 0;
|
||||
info.length = len;
|
||||
info.low_limit = mm->mmap_base;
|
||||
info.high_limit = TASK_SIZE;
|
||||
info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
|
||||
info.align_offset = pgoff << PAGE_SHIFT;
|
||||
return vm_unmapped_area(&info);
|
||||
}
|
||||
|
||||
unsigned long
|
||||
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
|
||||
const unsigned long len, const unsigned long pgoff,
|
||||
const unsigned long flags)
|
||||
{
|
||||
struct vm_area_struct *vma;
|
||||
struct mm_struct *mm = current->mm;
|
||||
unsigned long addr = addr0;
|
||||
struct vm_unmapped_area_info info;
|
||||
int do_color_align;
|
||||
|
||||
/* requested length too big for entire address space */
|
||||
if (len > TASK_SIZE - mmap_min_addr)
|
||||
return -ENOMEM;
|
||||
|
||||
if (flags & MAP_FIXED)
|
||||
return addr;
|
||||
|
||||
/* requesting a specific address */
|
||||
if (addr) {
|
||||
addr = PAGE_ALIGN(addr);
|
||||
vma = find_vma(mm, addr);
|
||||
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
|
||||
(!vma || addr + len <= vma->vm_start))
|
||||
return addr;
|
||||
}
|
||||
|
||||
do_color_align = 0;
|
||||
if (filp || (flags & MAP_SHARED))
|
||||
do_color_align = !is_32bit_task();
|
||||
|
||||
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
|
||||
info.length = len;
|
||||
info.low_limit = max(PAGE_SIZE, mmap_min_addr);
|
||||
info.high_limit = mm->mmap_base;
|
||||
info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
|
||||
info.align_offset = pgoff << PAGE_SHIFT;
|
||||
addr = vm_unmapped_area(&info);
|
||||
|
||||
/*
|
||||
* A failed mmap() very likely causes application failure,
|
||||
* so fall back to the bottom-up function here. This scenario
|
||||
* can happen with large stack limits and large mmap()
|
||||
* allocations.
|
||||
*/
|
||||
if (addr & ~PAGE_MASK) {
|
||||
VM_BUG_ON(addr != -ENOMEM);
|
||||
info.flags = 0;
|
||||
info.low_limit = TASK_UNMAPPED_BASE;
|
||||
info.high_limit = TASK_SIZE;
|
||||
addr = vm_unmapped_area(&info);
|
||||
}
|
||||
|
||||
return addr;
|
||||
}
|
||||
|
||||
unsigned long randomize_et_dyn(void)
|
||||
{
|
||||
unsigned long base;
|
||||
|
||||
base = (STACK_TOP / 3 * 2) & (~mmap_align_mask << PAGE_SHIFT);
|
||||
return base + mmap_rnd();
|
||||
}
|
||||
|
||||
#ifndef CONFIG_64BIT
|
||||
|
||||
/*
|
||||
@ -177,4 +283,36 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
|
||||
}
|
||||
}
|
||||
|
||||
static int __init setup_mmap_rnd(void)
|
||||
{
|
||||
struct cpuid cpu_id;
|
||||
|
||||
get_cpu_id(&cpu_id);
|
||||
switch (cpu_id.machine) {
|
||||
case 0x9672:
|
||||
case 0x2064:
|
||||
case 0x2066:
|
||||
case 0x2084:
|
||||
case 0x2086:
|
||||
case 0x2094:
|
||||
case 0x2096:
|
||||
case 0x2097:
|
||||
case 0x2098:
|
||||
case 0x2817:
|
||||
case 0x2818:
|
||||
case 0x2827:
|
||||
case 0x2828:
|
||||
mmap_rnd_mask = 0x7ffUL;
|
||||
mmap_align_mask = 0UL;
|
||||
break;
|
||||
case 0x2964: /* z13 */
|
||||
default:
|
||||
mmap_rnd_mask = 0x3ff80UL;
|
||||
mmap_align_mask = 0x7fUL;
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
early_initcall(setup_mmap_rnd);
|
||||
|
||||
#endif
|
||||
|
@@ -527,7 +527,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
|
||||
table += (gaddr >> 53) & 0x7ff;
|
||||
if ((*table & _REGION_ENTRY_INVALID) &&
|
||||
gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
|
||||
gaddr & 0xffe0000000000000))
|
||||
gaddr & 0xffe0000000000000UL))
|
||||
return -ENOMEM;
|
||||
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
|
||||
}
|
||||
@ -535,7 +535,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
|
||||
table += (gaddr >> 42) & 0x7ff;
|
||||
if ((*table & _REGION_ENTRY_INVALID) &&
|
||||
gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
|
||||
gaddr & 0xfffffc0000000000))
|
||||
gaddr & 0xfffffc0000000000UL))
|
||||
return -ENOMEM;
|
||||
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
|
||||
}
|
||||
@ -543,7 +543,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
|
||||
table += (gaddr >> 31) & 0x7ff;
|
||||
if ((*table & _REGION_ENTRY_INVALID) &&
|
||||
gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
|
||||
gaddr & 0xffffffff80000000))
|
||||
gaddr & 0xffffffff80000000UL))
|
||||
return -ENOMEM;
|
||||
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
|
||||
}
|
||||
|
@ -55,7 +55,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
|
||||
ret = get_pfn(mmio_addr, VM_WRITE, &pfn);
|
||||
if (ret)
|
||||
goto out;
|
||||
io_addr = (void *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
|
||||
io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
|
||||
|
||||
ret = -EFAULT;
|
||||
if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
|
||||
@ -96,7 +96,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
|
||||
ret = get_pfn(mmio_addr, VM_READ, &pfn);
|
||||
if (ret)
|
||||
goto out;
|
||||
io_addr = (void *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
|
||||
io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
|
||||
|
||||
ret = -EFAULT;
|
||||
if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
|
||||
|
@@ -674,8 +674,9 @@ EXPORT_SYMBOL(dasd_enable_device);
|
||||
unsigned int dasd_global_profile_level = DASD_PROFILE_OFF;
|
||||
|
||||
#ifdef CONFIG_DASD_PROFILE
|
||||
struct dasd_profile_info dasd_global_profile_data;
|
||||
static struct dentry *dasd_global_profile_dentry;
|
||||
struct dasd_profile dasd_global_profile = {
|
||||
.lock = __SPIN_LOCK_UNLOCKED(dasd_global_profile.lock),
|
||||
};
|
||||
static struct dentry *dasd_debugfs_global_entry;
|
||||
|
||||
/*
|
||||
@ -696,11 +697,13 @@ static void dasd_profile_start(struct dasd_block *block,
|
||||
if (++counter >= 31)
|
||||
break;
|
||||
|
||||
if (dasd_global_profile_level) {
|
||||
dasd_global_profile_data.dasd_io_nr_req[counter]++;
|
||||
spin_lock(&dasd_global_profile.lock);
|
||||
if (dasd_global_profile.data) {
|
||||
dasd_global_profile.data->dasd_io_nr_req[counter]++;
|
||||
if (rq_data_dir(req) == READ)
|
||||
dasd_global_profile_data.dasd_read_nr_req[counter]++;
|
||||
dasd_global_profile.data->dasd_read_nr_req[counter]++;
|
||||
}
|
||||
spin_unlock(&dasd_global_profile.lock);
|
||||
|
||||
spin_lock(&block->profile.lock);
|
||||
if (block->profile.data) {
|
||||
@ -825,8 +828,9 @@ static void dasd_profile_end(struct dasd_block *block,
|
||||
dasd_profile_counter(irqtime / sectors, irqtimeps_ind);
|
||||
dasd_profile_counter(endtime, endtime_ind);
|
||||
|
||||
if (dasd_global_profile_level) {
|
||||
dasd_profile_end_add_data(&dasd_global_profile_data,
|
||||
spin_lock(&dasd_global_profile.lock);
|
||||
if (dasd_global_profile.data) {
|
||||
dasd_profile_end_add_data(dasd_global_profile.data,
|
||||
cqr->startdev != block->base,
|
||||
cqr->cpmode == 1,
|
||||
rq_data_dir(req) == READ,
|
||||
@ -835,6 +839,7 @@ static void dasd_profile_end(struct dasd_block *block,
|
||||
irqtime_ind, irqtimeps_ind,
|
||||
endtime_ind);
|
||||
}
|
||||
spin_unlock(&dasd_global_profile.lock);
|
||||
|
||||
spin_lock(&block->profile.lock);
|
||||
if (block->profile.data)
|
||||
@ -876,12 +881,6 @@ void dasd_profile_reset(struct dasd_profile *profile)
|
||||
spin_unlock_bh(&profile->lock);
|
||||
}
|
||||
|
||||
void dasd_global_profile_reset(void)
|
||||
{
|
||||
memset(&dasd_global_profile_data, 0, sizeof(dasd_global_profile_data));
|
||||
getnstimeofday(&dasd_global_profile_data.starttod);
|
||||
}
|
||||
|
||||
int dasd_profile_on(struct dasd_profile *profile)
|
||||
{
|
||||
struct dasd_profile_info *data;
|
||||
@ -949,12 +948,20 @@ static ssize_t dasd_stats_write(struct file *file,
|
||||
dasd_profile_reset(prof);
|
||||
} else if (strncmp(str, "on", 2) == 0) {
|
||||
rc = dasd_profile_on(prof);
|
||||
if (!rc)
|
||||
rc = user_len;
|
||||
if (rc)
|
||||
goto out;
|
||||
rc = user_len;
|
||||
if (prof == &dasd_global_profile) {
|
||||
dasd_profile_reset(prof);
|
||||
dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
|
||||
}
|
||||
} else if (strncmp(str, "off", 3) == 0) {
|
||||
if (prof == &dasd_global_profile)
|
||||
dasd_global_profile_level = DASD_PROFILE_OFF;
|
||||
dasd_profile_off(prof);
|
||||
} else
|
||||
rc = -EINVAL;
|
||||
out:
|
||||
vfree(buffer);
|
||||
return rc;
|
||||
}
|
||||
@ -1044,57 +1051,6 @@ static const struct file_operations dasd_stats_raw_fops = {
|
||||
.write = dasd_stats_write,
|
||||
};
|
||||
|
||||
static ssize_t dasd_stats_global_write(struct file *file,
|
||||
const char __user *user_buf,
|
||||
size_t user_len, loff_t *pos)
|
||||
{
|
||||
char *buffer, *str;
|
||||
ssize_t rc;
|
||||
|
||||
if (user_len > 65536)
|
||||
user_len = 65536;
|
||||
buffer = dasd_get_user_string(user_buf, user_len);
|
||||
if (IS_ERR(buffer))
|
||||
return PTR_ERR(buffer);
|
||||
str = skip_spaces(buffer);
|
||||
rc = user_len;
|
||||
if (strncmp(str, "reset", 5) == 0) {
|
||||
dasd_global_profile_reset();
|
||||
} else if (strncmp(str, "on", 2) == 0) {
|
||||
dasd_global_profile_reset();
|
||||
dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
|
||||
} else if (strncmp(str, "off", 3) == 0) {
|
||||
dasd_global_profile_level = DASD_PROFILE_OFF;
|
||||
} else
|
||||
rc = -EINVAL;
|
||||
vfree(buffer);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int dasd_stats_global_show(struct seq_file *m, void *v)
|
||||
{
|
||||
if (!dasd_global_profile_level) {
|
||||
seq_puts(m, "disabled\n");
|
||||
return 0;
|
||||
}
|
||||
dasd_stats_seq_print(m, &dasd_global_profile_data);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int dasd_stats_global_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return single_open(file, dasd_stats_global_show, NULL);
|
||||
}
|
||||
|
||||
static const struct file_operations dasd_stats_global_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = dasd_stats_global_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = single_release,
|
||||
.write = dasd_stats_global_write,
|
||||
};
|
||||
|
||||
static void dasd_profile_init(struct dasd_profile *profile,
|
||||
struct dentry *base_dentry)
|
||||
{
|
||||
@ -1123,20 +1079,16 @@ static void dasd_profile_exit(struct dasd_profile *profile)
|
||||
static void dasd_statistics_removeroot(void)
|
||||
{
|
||||
dasd_global_profile_level = DASD_PROFILE_OFF;
|
||||
debugfs_remove(dasd_global_profile_dentry);
|
||||
dasd_global_profile_dentry = NULL;
|
||||
dasd_profile_exit(&dasd_global_profile);
|
||||
debugfs_remove(dasd_debugfs_global_entry);
|
||||
debugfs_remove(dasd_debugfs_root_entry);
|
||||
}
|
||||
|
||||
static void dasd_statistics_createroot(void)
|
||||
{
|
||||
umode_t mode;
|
||||
struct dentry *pde;
|
||||
|
||||
dasd_debugfs_root_entry = NULL;
|
||||
dasd_debugfs_global_entry = NULL;
|
||||
dasd_global_profile_dentry = NULL;
|
||||
pde = debugfs_create_dir("dasd", NULL);
|
||||
if (!pde || IS_ERR(pde))
|
||||
goto error;
|
||||
@ -1145,13 +1097,7 @@ static void dasd_statistics_createroot(void)
|
||||
if (!pde || IS_ERR(pde))
|
||||
goto error;
|
||||
dasd_debugfs_global_entry = pde;
|
||||
|
||||
mode = (S_IRUSR | S_IWUSR | S_IFREG);
|
||||
pde = debugfs_create_file("statistics", mode, dasd_debugfs_global_entry,
|
||||
NULL, &dasd_stats_global_fops);
|
||||
if (!pde || IS_ERR(pde))
|
||||
goto error;
|
||||
dasd_global_profile_dentry = pde;
|
||||
dasd_profile_init(&dasd_global_profile, dasd_debugfs_global_entry);
|
||||
return;
|
||||
|
||||
error:
|
||||
|
@ -651,7 +651,7 @@ dasd_check_blocksize(int bsize)
|
||||
#define DASD_PROFILE_GLOBAL_ONLY 2
|
||||
|
||||
extern debug_info_t *dasd_debug_area;
|
||||
extern struct dasd_profile_info dasd_global_profile_data;
|
||||
extern struct dasd_profile dasd_global_profile;
|
||||
extern unsigned int dasd_global_profile_level;
|
||||
extern const struct block_device_operations dasd_device_operations;
|
||||
|
||||
@ -728,7 +728,6 @@ int dasd_device_is_ro(struct dasd_device *);
|
||||
void dasd_profile_reset(struct dasd_profile *);
|
||||
int dasd_profile_on(struct dasd_profile *);
|
||||
void dasd_profile_off(struct dasd_profile *);
|
||||
void dasd_global_profile_reset(void);
|
||||
char *dasd_get_user_string(const char __user *, size_t);
|
||||
|
||||
/* externals in dasd_devmap.c */
|
||||
|
@ -212,14 +212,15 @@ static int dasd_stats_proc_show(struct seq_file *m, void *v)
|
||||
struct dasd_profile_info *prof;
|
||||
int factor;
|
||||
|
||||
/* check for active profiling */
|
||||
if (!dasd_global_profile_level) {
|
||||
spin_lock_bh(&dasd_global_profile.lock);
|
||||
prof = dasd_global_profile.data;
|
||||
if (!prof) {
|
||||
spin_unlock_bh(&dasd_global_profile.lock);
|
||||
seq_printf(m, "Statistics are off - they might be "
|
||||
"switched on using 'echo set on > "
|
||||
"/proc/dasd/statistics'\n");
|
||||
return 0;
|
||||
}
|
||||
prof = &dasd_global_profile_data;
|
||||
|
||||
/* prevent counter 'overflow' on output */
|
||||
for (factor = 1; (prof->dasd_io_reqs / factor) > 9999999;
|
||||
@ -255,6 +256,7 @@ static int dasd_stats_proc_show(struct seq_file *m, void *v)
|
||||
dasd_statistics_array(m, prof->dasd_io_time3, factor);
|
||||
seq_printf(m, "# of req in chanq at enqueuing (1..32) \n");
|
||||
dasd_statistics_array(m, prof->dasd_io_nr_req, factor);
|
||||
spin_unlock_bh(&dasd_global_profile.lock);
|
||||
#else
|
||||
seq_printf(m, "Statistics are not activated in this kernel\n");
|
||||
#endif
|
||||
@ -291,14 +293,19 @@ static ssize_t dasd_stats_proc_write(struct file *file,
|
||||
dasd_stats_all_block_off();
|
||||
goto out_error;
|
||||
}
|
||||
dasd_global_profile_reset();
|
||||
rc = dasd_profile_on(&dasd_global_profile);
|
||||
if (rc) {
|
||||
dasd_stats_all_block_off();
|
||||
goto out_error;
|
||||
}
|
||||
dasd_profile_reset(&dasd_global_profile);
|
||||
dasd_global_profile_level = DASD_PROFILE_ON;
|
||||
pr_info("The statistics feature has been switched "
|
||||
"on\n");
|
||||
} else if (strcmp(str, "off") == 0) {
|
||||
/* switch off and reset statistics profiling */
|
||||
/* switch off statistics profiling */
|
||||
dasd_global_profile_level = DASD_PROFILE_OFF;
|
||||
dasd_global_profile_reset();
|
||||
dasd_profile_off(&dasd_global_profile);
|
||||
dasd_stats_all_block_off();
|
||||
pr_info("The statistics feature has been switched "
|
||||
"off\n");
|
||||
@ -306,7 +313,7 @@ static ssize_t dasd_stats_proc_write(struct file *file,
|
||||
goto out_parse_error;
|
||||
} else if (strncmp(str, "reset", 5) == 0) {
|
||||
/* reset the statistics */
|
||||
dasd_global_profile_reset();
|
||||
dasd_profile_reset(&dasd_global_profile);
|
||||
dasd_stats_all_block_reset();
|
||||
pr_info("The statistics have been reset\n");
|
||||
} else
|
||||
|
@ -438,7 +438,13 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char
|
||||
pr_info("All DCSSs that map to device %s are "
|
||||
"saved\n", dev_info->segment_name);
|
||||
list_for_each_entry(entry, &dev_info->seg_list, lh) {
|
||||
segment_save(entry->segment_name);
|
||||
if (entry->segment_type == SEG_TYPE_EN ||
|
||||
entry->segment_type == SEG_TYPE_SN)
|
||||
pr_warn("DCSS %s is of type SN or EN"
|
||||
" and cannot be saved\n",
|
||||
entry->segment_name);
|
||||
else
|
||||
segment_save(entry->segment_name);
|
||||
}
|
||||
} else {
|
||||
// device is busy => we save it when it becomes
|
||||
@ -797,7 +803,12 @@ dcssblk_release(struct gendisk *disk, fmode_t mode)
|
||||
pr_info("Device %s has become idle and is being saved "
|
||||
"now\n", dev_info->segment_name);
|
||||
list_for_each_entry(entry, &dev_info->seg_list, lh) {
|
||||
segment_save(entry->segment_name);
|
||||
if (entry->segment_type == SEG_TYPE_EN ||
|
||||
entry->segment_type == SEG_TYPE_SN)
|
||||
pr_warn("DCSS %s is of type SN or EN and cannot"
|
||||
" be saved\n", entry->segment_name);
|
||||
else
|
||||
segment_save(entry->segment_name);
|
||||
}
|
||||
dev_info->save_pending = 0;
|
||||
}
|
||||
|
@ -200,10 +200,9 @@ int hmcdrv_ftp_probe(void)
|
||||
rc = hmcdrv_ftp_startup();
|
||||
|
||||
if (rc)
|
||||
return rc;
|
||||
goto out;
|
||||
|
||||
rc = hmcdrv_ftp_do(&ftp);
|
||||
free_page((unsigned long) ftp.buf);
|
||||
hmcdrv_ftp_shutdown();
|
||||
|
||||
switch (rc) {
|
||||
@ -216,7 +215,8 @@ int hmcdrv_ftp_probe(void)
|
||||
rc = 0; /* clear length (success) */
|
||||
break;
|
||||
} /* switch */
|
||||
|
||||
out:
|
||||
free_page((unsigned long) ftp.buf);
|
||||
return rc;
|
||||
}
|
||||
EXPORT_SYMBOL(hmcdrv_ftp_probe);
|
||||
|
@ -11,7 +11,6 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/moduleparam.h>
|
||||
#include <linux/version.h>
|
||||
#include <linux/stat.h>
|
||||
|
||||
#include "hmcdrv_ftp.h"
|
||||
|
@@ -20,26 +20,31 @@ struct read_info_sccb {
|
||||
struct sccb_header header; /* 0-7 */
|
||||
u16 rnmax; /* 8-9 */
|
||||
u8 rnsize; /* 10 */
|
||||
u8 _reserved0[16 - 11]; /* 11-15 */
|
||||
u8 _pad_11[16 - 11]; /* 11-15 */
|
||||
u16 ncpurl; /* 16-17 */
|
||||
u16 cpuoff; /* 18-19 */
|
||||
u8 _reserved7[24 - 20]; /* 20-23 */
|
||||
u8 _pad_20[24 - 20]; /* 20-23 */
|
||||
u8 loadparm[8]; /* 24-31 */
|
||||
u8 _reserved1[48 - 32]; /* 32-47 */
|
||||
u8 _pad_32[42 - 32]; /* 32-41 */
|
||||
u8 fac42; /* 42 */
|
||||
u8 fac43; /* 43 */
|
||||
u8 _pad_44[48 - 44]; /* 44-47 */
|
||||
u64 facilities; /* 48-55 */
|
||||
u8 _reserved2a[76 - 56]; /* 56-75 */
|
||||
u8 _pad_56[66 - 56]; /* 56-65 */
|
||||
u8 fac66; /* 66 */
|
||||
u8 _pad_67[76 - 67]; /* 67-83 */
|
||||
u32 ibc; /* 76-79 */
|
||||
u8 _reserved2b[84 - 80]; /* 80-83 */
|
||||
u8 _pad80[84 - 80]; /* 80-83 */
|
||||
u8 fac84; /* 84 */
|
||||
u8 fac85; /* 85 */
|
||||
u8 _reserved3[91 - 86]; /* 86-90 */
|
||||
u8 _pad_86[91 - 86]; /* 86-90 */
|
||||
u8 flags; /* 91 */
|
||||
u8 _reserved4[100 - 92]; /* 92-99 */
|
||||
u8 _pad_92[100 - 92]; /* 92-99 */
|
||||
u32 rnsize2; /* 100-103 */
|
||||
u64 rnmax2; /* 104-111 */
|
||||
u8 _reserved5[120 - 112]; /* 112-119 */
|
||||
u8 _pad_112[120 - 112]; /* 112-119 */
|
||||
u16 hcpua; /* 120-121 */
|
||||
u8 _reserved6[4096 - 122]; /* 122-4095 */
|
||||
u8 _pad_122[4096 - 122]; /* 122-4095 */
|
||||
} __packed __aligned(PAGE_SIZE);
|
||||
|
||||
static char sccb_early[PAGE_SIZE] __aligned(PAGE_SIZE) __initdata;
|
||||
@ -50,6 +55,10 @@ static unsigned int sclp_max_cpu;
|
||||
static struct sclp_ipl_info sclp_ipl_info;
|
||||
static unsigned char sclp_siif;
|
||||
static u32 sclp_ibc;
|
||||
static unsigned int sclp_mtid;
|
||||
static unsigned int sclp_mtid_cp;
|
||||
static unsigned int sclp_mtid_max;
|
||||
static unsigned int sclp_mtid_prev;
|
||||
|
||||
u64 sclp_facilities;
|
||||
u8 sclp_fac84;
|
||||
@ -128,7 +137,7 @@ static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
|
||||
boot_cpu_address = stap();
|
||||
cpue = (void *)sccb + sccb->cpuoff;
|
||||
for (cpu = 0; cpu < sccb->ncpurl; cpue++, cpu++) {
|
||||
if (boot_cpu_address != cpue->address)
|
||||
if (boot_cpu_address != cpue->core_id)
|
||||
continue;
|
||||
sclp_siif = cpue->siif;
|
||||
break;
|
||||
@ -139,6 +148,11 @@ static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
|
||||
if (sccb->flags & 0x2)
|
||||
sclp_ipl_info.has_dump = 1;
|
||||
memcpy(&sclp_ipl_info.loadparm, &sccb->loadparm, LOADPARM_LEN);
|
||||
|
||||
sclp_mtid = (sccb->fac42 & 0x80) ? (sccb->fac42 & 31) : 0;
|
||||
sclp_mtid_cp = (sccb->fac42 & 0x80) ? (sccb->fac43 & 31) : 0;
|
||||
sclp_mtid_max = max(sclp_mtid, sclp_mtid_cp);
|
||||
sclp_mtid_prev = (sccb->fac42 & 0x80) ? (sccb->fac66 & 31) : 0;
|
||||
}
|
||||
|
||||
bool __init sclp_has_linemode(void)
|
||||
@ -178,6 +192,21 @@ unsigned int sclp_get_ibc(void)
|
||||
}
|
||||
EXPORT_SYMBOL(sclp_get_ibc);
|
||||
|
||||
unsigned int sclp_get_mtid(u8 cpu_type)
|
||||
{
|
||||
return cpu_type ? sclp_mtid : sclp_mtid_cp;
|
||||
}
|
||||
|
||||
unsigned int sclp_get_mtid_max(void)
|
||||
{
|
||||
return sclp_mtid_max;
|
||||
}
|
||||
|
||||
unsigned int sclp_get_mtid_prev(void)
|
||||
{
|
||||
return sclp_mtid_prev;
|
||||
}
|
||||
|
||||
/*
|
||||
* This function will be called after sclp_facilities_detect(), which gets
|
||||
* called from early.c code. The sclp_facilities_detect() function retrieves
|
||||

@@ -773,13 +773,11 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
"occurred\n");
return tape_34xx_erp_failed(request, -EIO);
case 0x57:
if (device->cdev->id.driver_info == tape_3480) {
/* Attention intercept. */
return tape_34xx_erp_retry(request);
} else {
/* Global status intercept. */
return tape_34xx_erp_retry(request);
}
/*
 * 3480: Attention intercept.
 * 3490: Global status intercept.
 */
return tape_34xx_erp_retry(request);
case 0x5a:
/*
 * Tape length incompatible. The tape inserted is too long,
|
@ -938,7 +938,7 @@ void reipl_ccw_dev(struct ccw_dev_id *devid)
|
||||
{
|
||||
struct subchannel_id uninitialized_var(schid);
|
||||
|
||||
s390_reset_system(NULL, NULL);
|
||||
s390_reset_system(NULL, NULL, NULL);
|
||||
if (reipl_find_schid(devid, &schid) != 0)
|
||||
panic("IPL Device not found\n");
|
||||
do_reipl_asm(*((__u32*)&schid));
|
||||
|
@ -38,11 +38,6 @@ void idset_free(struct idset *set)
|
||||
vfree(set);
|
||||
}
|
||||
|
||||
void idset_clear(struct idset *set)
|
||||
{
|
||||
memset(set->bitmap, 0, bitmap_size(set->num_ssid, set->num_id));
|
||||
}
|
||||
|
||||
void idset_fill(struct idset *set)
|
||||
{
|
||||
memset(set->bitmap, 0xff, bitmap_size(set->num_ssid, set->num_id));
|
||||
@ -103,21 +98,6 @@ int idset_sch_contains(struct idset *set, struct subchannel_id schid)
|
||||
return idset_contains(set, schid.ssid, schid.sch_no);
|
||||
}
|
||||
|
||||
int idset_sch_get_first(struct idset *set, struct subchannel_id *schid)
|
||||
{
|
||||
int ssid = 0;
|
||||
int id = 0;
|
||||
int rc;
|
||||
|
||||
rc = idset_get_first(set, &ssid, &id);
|
||||
if (rc) {
|
||||
init_subchannel_id(schid);
|
||||
schid->ssid = ssid;
|
||||
schid->sch_no = id;
|
||||
}
|
||||
return rc;
|
||||
}
|
||||
|
||||
int idset_is_empty(struct idset *set)
|
||||
{
|
||||
return bitmap_empty(set->bitmap, set->num_ssid * set->num_id);
|
||||
|
@ -11,7 +11,6 @@
|
||||
struct idset;
|
||||
|
||||
void idset_free(struct idset *set);
|
||||
void idset_clear(struct idset *set);
|
||||
void idset_fill(struct idset *set);
|
||||
|
||||
struct idset *idset_sch_new(void);
|
||||
@ -19,7 +18,6 @@ void idset_sch_add(struct idset *set, struct subchannel_id id);
|
||||
void idset_sch_del(struct idset *set, struct subchannel_id id);
|
||||
void idset_sch_del_subseq(struct idset *set, struct subchannel_id schid);
|
||||
int idset_sch_contains(struct idset *set, struct subchannel_id id);
|
||||
int idset_sch_get_first(struct idset *set, struct subchannel_id *id);
|
||||
int idset_is_empty(struct idset *set);
|
||||
void idset_add_set(struct idset *to, struct idset *from);
|
||||
|
||||

@@ -203,6 +203,24 @@ ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type)
return reg1;
}

/**
 * ap_query_facilities(): PQAP(TAPQ) query facilities.
 * @qid: The AP queue number
 *
 * Returns content of general register 2 after the PQAP(TAPQ)
 * instruction was called.
 */
static inline unsigned long ap_query_facilities(ap_qid_t qid)
{
register unsigned long reg0 asm ("0") = qid | 0x00800000UL;
register unsigned long reg1 asm ("1");
register unsigned long reg2 asm ("2") = 0UL;

asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */
: "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
return reg2;
}

/**
 * ap_reset_queue(): Reset adjunct processor queue.
 * @qid: The AP queue number
@@ -1006,6 +1024,51 @@ void ap_bus_force_rescan(void)
}
EXPORT_SYMBOL(ap_bus_force_rescan);

/*
 * ap_test_config(): helper function to extract the nrth bit
 * within the unsigned int array field.
 */
static inline int ap_test_config(unsigned int *field, unsigned int nr)
{
if (nr > 0xFFu)
return 0;
return ap_test_bit((field + (nr >> 5)), (nr & 0x1f));
}

/*
 * ap_test_config_card_id(): Test, whether an AP card ID is configured.
 * @id AP card ID
 *
 * Returns 0 if the card is not configured
 * 1 if the card is configured or
 * if the configuration information is not available
 */
static inline int ap_test_config_card_id(unsigned int id)
{
if (!ap_configuration)
return 1;
return ap_test_config(ap_configuration->apm, id);
}

/*
 * ap_test_config_domain(): Test, whether an AP usage domain is configured.
 * @domain AP usage domain ID
 *
 * Returns 0 if the usage domain is not configured
 * 1 if the usage domain is configured or
 * if the configuration information is not available
 */
static inline int ap_test_config_domain(unsigned int domain)
{
if (!ap_configuration) /* QCI not supported */
if (domain < 16)
return 1; /* then domains 0...15 are configured */
else
return 0;
else
return ap_test_config(ap_configuration->aqm, domain);
}

/*
 * AP bus attributes.
 */
@@ -1121,6 +1184,42 @@ static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
|
||||
|
||||
static BUS_ATTR(poll_timeout, 0644, poll_timeout_show, poll_timeout_store);
|
||||
|
||||
static ssize_t ap_max_domain_id_show(struct bus_type *bus, char *buf)
|
||||
{
|
||||
ap_qid_t qid;
|
||||
int i, nd, max_domain_id = -1;
|
||||
unsigned long fbits;
|
||||
|
||||
if (ap_configuration) {
|
||||
if (ap_domain_index >= 0 && ap_domain_index < AP_DOMAINS) {
|
||||
for (i = 0; i < AP_DEVICES; i++) {
|
||||
if (!ap_test_config_card_id(i))
|
||||
continue;
|
||||
qid = AP_MKQID(i, ap_domain_index);
|
||||
fbits = ap_query_facilities(qid);
|
||||
if (fbits & (1UL << 57)) {
|
||||
/* the N bit is 0, Nd field is filled */
|
||||
nd = (int)((fbits & 0x00FF0000UL)>>16);
|
||||
if (nd > 0)
|
||||
max_domain_id = nd;
|
||||
else
|
||||
max_domain_id = 15;
|
||||
} else {
|
||||
/* N bit is 1, max 16 domains */
|
||||
max_domain_id = 15;
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
/* no APXA support, older machines with max 16 domains */
|
||||
max_domain_id = 15;
|
||||
}
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n", max_domain_id);
|
||||
}
|
||||
|
||||
static BUS_ATTR(ap_max_domain_id, 0444, ap_max_domain_id_show, NULL);
|
||||
|
||||
static struct bus_attribute *const ap_bus_attrs[] = {
|
||||
&bus_attr_ap_domain,
|
||||
&bus_attr_ap_control_domain_mask,
|
||||
@ -1128,50 +1227,10 @@ static struct bus_attribute *const ap_bus_attrs[] = {
|
||||
&bus_attr_poll_thread,
|
||||
&bus_attr_ap_interrupts,
|
||||
&bus_attr_poll_timeout,
|
||||
&bus_attr_ap_max_domain_id,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static inline int ap_test_config(unsigned int *field, unsigned int nr)
|
||||
{
|
||||
if (nr > 0xFFu)
|
||||
return 0;
|
||||
return ap_test_bit((field + (nr >> 5)), (nr & 0x1f));
|
||||
}
|
||||
|
||||
/*
|
||||
* ap_test_config_card_id(): Test, whether an AP card ID is configured.
|
||||
* @id AP card ID
|
||||
*
|
||||
* Returns 0 if the card is not configured
|
||||
* 1 if the card is configured or
|
||||
* if the configuration information is not available
|
||||
*/
|
||||
static inline int ap_test_config_card_id(unsigned int id)
|
||||
{
|
||||
if (!ap_configuration)
|
||||
return 1;
|
||||
return ap_test_config(ap_configuration->apm, id);
|
||||
}
|
||||
|
||||
/*
|
||||
* ap_test_config_domain(): Test, whether an AP usage domain is configured.
|
||||
* @domain AP usage domain ID
|
||||
*
|
||||
* Returns 0 if the usage domain is not configured
|
||||
* 1 if the usage domain is configured or
|
||||
* if the configuration information is not available
|
||||
*/
|
||||
static inline int ap_test_config_domain(unsigned int domain)
|
||||
{
|
||||
if (!ap_configuration) /* QCI not supported */
|
||||
if (domain < 16)
|
||||
return 1; /* then domains 0...15 are configured */
|
||||
else
|
||||
return 0;
|
||||
else
|
||||
return ap_test_config(ap_configuration->aqm, domain);
|
||||
}
|
||||
|
||||
/**
|
||||
* ap_query_configuration(): Query AP configuration information.
|
||||
*
|
||||
@ -1434,9 +1493,6 @@ static void ap_scan_bus(struct work_struct *unused)
|
||||
continue;
|
||||
}
|
||||
break;
|
||||
case 11:
|
||||
ap_dev->device_type = 10;
|
||||
break;
|
||||
default:
|
||||
ap_dev->device_type = device_type;
|
||||
}
|
||||
|
@ -117,6 +117,7 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
|
||||
#define AP_DEVICE_TYPE_CEX3A 8
|
||||
#define AP_DEVICE_TYPE_CEX3C 9
|
||||
#define AP_DEVICE_TYPE_CEX4 10
|
||||
#define AP_DEVICE_TYPE_CEX5 11
|
||||
|
||||
/*
|
||||
* Known function facilities
|
||||
|
@ -75,6 +75,7 @@ struct ica_z90_status {
|
||||
#define ZCRYPT_CEX3C 7
|
||||
#define ZCRYPT_CEX3A 8
|
||||
#define ZCRYPT_CEX4 10
|
||||
#define ZCRYPT_CEX5 11
|
||||
|
||||
/**
|
||||
* Large random numbers are pulled in 4096 byte chunks from the crypto cards
|
||||
|
@ -26,6 +26,10 @@
|
||||
|
||||
#define CEX4A_SPEED_RATING 900 /* TODO new card, new speed rating */
|
||||
#define CEX4C_SPEED_RATING 6500 /* TODO new card, new speed rating */
|
||||
#define CEX4P_SPEED_RATING 7000 /* TODO new card, new speed rating */
|
||||
#define CEX5A_SPEED_RATING 450 /* TODO new card, new speed rating */
|
||||
#define CEX5C_SPEED_RATING 3250 /* TODO new card, new speed rating */
|
||||
#define CEX5P_SPEED_RATING 3500 /* TODO new card, new speed rating */
|
||||
|
||||
#define CEX4A_MAX_MESSAGE_SIZE MSGTYPE50_CRB3_MAX_MSG_SIZE
|
||||
#define CEX4C_MAX_MESSAGE_SIZE MSGTYPE06_MAX_MSG_SIZE
|
||||
@ -39,6 +43,7 @@
|
||||
|
||||
static struct ap_device_id zcrypt_cex4_ids[] = {
|
||||
{ AP_DEVICE(AP_DEVICE_TYPE_CEX4) },
|
||||
{ AP_DEVICE(AP_DEVICE_TYPE_CEX5) },
|
||||
{ /* end of list */ },
|
||||
};
|
||||
|
||||
@@ -70,11 +75,18 @@ static int zcrypt_cex4_probe(struct ap_device *ap_dev)
|
||||
|
||||
switch (ap_dev->device_type) {
|
||||
case AP_DEVICE_TYPE_CEX4:
|
||||
case AP_DEVICE_TYPE_CEX5:
|
||||
if (ap_test_bit(&ap_dev->functions, AP_FUNC_ACCEL)) {
|
||||
zdev = zcrypt_device_alloc(CEX4A_MAX_MESSAGE_SIZE);
|
||||
if (!zdev)
|
||||
return -ENOMEM;
|
||||
zdev->type_string = "CEX4A";
|
||||
if (ap_dev->device_type == AP_DEVICE_TYPE_CEX4) {
|
||||
zdev->type_string = "CEX4A";
|
||||
zdev->speed_rating = CEX4A_SPEED_RATING;
|
||||
} else {
|
||||
zdev->type_string = "CEX5A";
|
||||
zdev->speed_rating = CEX5A_SPEED_RATING;
|
||||
}
|
||||
zdev->user_space_type = ZCRYPT_CEX3A;
|
||||
zdev->min_mod_size = CEX4A_MIN_MOD_SIZE;
|
||||
if (ap_test_bit(&ap_dev->functions, AP_FUNC_MEX4K) &&
|
||||
@ -90,33 +102,42 @@ static int zcrypt_cex4_probe(struct ap_device *ap_dev)
|
||||
CEX4A_MAX_MOD_SIZE_2K;
|
||||
}
|
||||
zdev->short_crt = 1;
|
||||
zdev->speed_rating = CEX4A_SPEED_RATING;
|
||||
zdev->ops = zcrypt_msgtype_request(MSGTYPE50_NAME,
|
||||
MSGTYPE50_VARIANT_DEFAULT);
|
||||
} else if (ap_test_bit(&ap_dev->functions, AP_FUNC_COPRO)) {
|
||||
zdev = zcrypt_device_alloc(CEX4C_MAX_MESSAGE_SIZE);
|
||||
if (!zdev)
|
||||
return -ENOMEM;
|
||||
zdev->type_string = "CEX4C";
|
||||
if (ap_dev->device_type == AP_DEVICE_TYPE_CEX4) {
|
||||
zdev->type_string = "CEX4C";
|
||||
zdev->speed_rating = CEX4C_SPEED_RATING;
|
||||
} else {
|
||||
zdev->type_string = "CEX5C";
|
||||
zdev->speed_rating = CEX5C_SPEED_RATING;
|
||||
}
|
||||
zdev->user_space_type = ZCRYPT_CEX3C;
|
||||
zdev->min_mod_size = CEX4C_MIN_MOD_SIZE;
|
||||
zdev->max_mod_size = CEX4C_MAX_MOD_SIZE;
|
||||
zdev->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
|
||||
zdev->short_crt = 0;
|
||||
zdev->speed_rating = CEX4C_SPEED_RATING;
|
||||
zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
|
||||
MSGTYPE06_VARIANT_DEFAULT);
|
||||
} else if (ap_test_bit(&ap_dev->functions, AP_FUNC_EP11)) {
|
||||
zdev = zcrypt_device_alloc(CEX4C_MAX_MESSAGE_SIZE);
|
||||
if (!zdev)
|
||||
return -ENOMEM;
|
||||
zdev->type_string = "CEX4P";
|
||||
if (ap_dev->device_type == AP_DEVICE_TYPE_CEX4) {
|
||||
zdev->type_string = "CEX4P";
|
||||
zdev->speed_rating = CEX4P_SPEED_RATING;
|
||||
} else {
|
||||
zdev->type_string = "CEX5P";
|
||||
zdev->speed_rating = CEX5P_SPEED_RATING;
|
||||
}
|
||||
zdev->user_space_type = ZCRYPT_CEX4;
|
||||
zdev->min_mod_size = CEX4C_MIN_MOD_SIZE;
|
||||
zdev->max_mod_size = CEX4C_MAX_MOD_SIZE;
|
||||
zdev->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
|
||||
zdev->short_crt = 0;
|
||||
zdev->speed_rating = CEX4C_SPEED_RATING;
|
||||
zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
|
||||
MSGTYPE06_VARIANT_EP11);
|
||||
}
|
||||
|
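Throughout the probe function the AP function facility bits keep deciding which kind of zcrypt device gets registered, and the device type now only selects the user-visible type string and speed rating. A compact summary of the three branches above (the table form and the role comments are editorial; the names themselves come from the hunk):

	/* Summary sketch: function facility tested -> type strings chosen. */
	static const struct {
		const char *facility;	/* AP function bit tested              */
		const char *cex4;	/* type string for AP_DEVICE_TYPE_CEX4 */
		const char *cex5;	/* type string for AP_DEVICE_TYPE_CEX5 */
	} cex_types[] = {
		{ "AP_FUNC_ACCEL", "CEX4A", "CEX5A" },	/* accelerator      */
		{ "AP_FUNC_COPRO", "CEX4C", "CEX5C" },	/* coprocessor      */
		{ "AP_FUNC_EP11",  "CEX4P", "CEX5P" },	/* EP11 coprocessor */
	};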
@@ -1,10 +1,10 @@
/*
 * hvc_iucv.c - z/VM IUCV hypervisor console (HVC) device driver
 * z/VM IUCV hypervisor console (HVC) device driver
 *
 * This HVC device driver provides terminal access using
 * z/VM IUCV communication paths.
 *
 * Copyright IBM Corp. 2008, 2009
 * Copyright IBM Corp. 2008, 2013
 *
 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 */

@@ -102,6 +102,7 @@ static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES];
#define IUCV_HVC_CON_IDX	(0)
/* List of z/VM user ID filter entries (struct iucv_vmid_filter) */
#define MAX_VMID_FILTER		(500)
#define FILTER_WILDCARD_CHAR	'*'
static size_t hvc_iucv_filter_size;
static void *hvc_iucv_filter;
static const char *hvc_iucv_filter_string;

@@ -734,20 +735,31 @@ static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
 * hvc_iucv_filter_connreq() - Filter connection request based on z/VM user ID
 * @ipvmid: Originating z/VM user ID (right padded with blanks)
 *
 * Returns 0 if the z/VM user ID @ipvmid is allowed to connection, otherwise
 * non-zero.
 * Returns 0 if the z/VM user ID that is specified with @ipvmid is permitted to
 * connect, otherwise non-zero.
 */
static int hvc_iucv_filter_connreq(u8 ipvmid[8])
{
	size_t i;
	const char *wildcard, *filter_entry;
	size_t i, len;

	/* Note: default policy is ACCEPT if no filter is set */
	if (!hvc_iucv_filter_size)
		return 0;

	for (i = 0; i < hvc_iucv_filter_size; i++)
		if (0 == memcmp(ipvmid, hvc_iucv_filter + (8 * i), 8))
	for (i = 0; i < hvc_iucv_filter_size; i++) {
		filter_entry = hvc_iucv_filter + (8 * i);

		/* If a filter entry contains the filter wildcard character,
		 * reduce the length to match the leading portion of the user
		 * ID only (wildcard match). Characters following the wildcard
		 * are ignored.
		 */
		wildcard = strnchr(filter_entry, 8, FILTER_WILDCARD_CHAR);
		len = (wildcard) ? wildcard - filter_entry : 8;
		if (0 == memcmp(ipvmid, filter_entry, len))
			return 0;
	}
	return 1;
}

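With the wildcard in place, a filter entry is compared only up to the character before the '*', so an entry such as "LNX*" admits every z/VM user ID that begins with "LNX". A small self-contained sketch of that comparison outside the driver (the sample IDs are invented, and memchr stands in for the kernel's strnchr):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		/* One 8-character filter entry and one incoming user ID (both illustrative). */
		const char entry[8]  = { 'L', 'N', 'X', '*', ' ', ' ', ' ', ' ' };
		const char ipvmid[8] = { 'L', 'N', 'X', 'T', 'E', 'S', 'T', '1' };

		const char *wc = memchr(entry, '*', 8);		/* wildcard position, if any */
		size_t len = wc ? (size_t)(wc - entry) : 8;	/* compare just "LNX" here   */

		printf("%s\n", memcmp(ipvmid, entry, len) == 0 ? "allowed" : "rejected");
		return 0;
	}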
@@ -1166,6 +1178,7 @@ static void __init hvc_iucv_destroy(struct hvc_iucv_private *priv)
/**
 * hvc_iucv_parse_filter() - Parse filter for a single z/VM user ID
 * @filter: String containing a comma-separated list of z/VM user IDs
 * @dest: Location where to store the parsed z/VM user ID
 */
static const char *hvc_iucv_parse_filter(const char *filter, char *dest)
{

@@ -1188,6 +1201,10 @@ static const char *hvc_iucv_parse_filter(const char *filter, char *dest)
	if (filter[len - 1] == '\n')
		len--;

	/* prohibit filter entries containing the wildcard character only */
	if (len == 1 && *filter == FILTER_WILDCARD_CHAR)
		return ERR_PTR(-EINVAL);

	if (len > 8)
		return ERR_PTR(-EINVAL);

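The added check closes an obvious hole: an entry consisting of the wildcard character alone would have matched every originator and quietly turned the allow list into an accept-all rule. Combined with the existing length test, parsing of a comma-separated allow list now behaves roughly as follows (the entries are illustrative):

	"lnxsys*"      accepted; later matches any user ID beginning with "lnxsys"
	"*"            rejected with -EINVAL (wildcard-only entry)
	"operator01x"  rejected with -EINVAL (longer than eight characters)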
@@ -54,7 +54,11 @@ extern void __chk_io_ptr(const volatile void __iomem *);
#include <linux/compiler-gcc.h>
#endif

#ifdef CC_USING_HOTPATCH
#define notrace __attribute__((hotpatch(0,0)))
#else
#define notrace __attribute__((no_instrument_function))
#endif

/* Intel compiler defines __GNUC__. So we will overwrite implementations
 * coming from above header files here

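With the gcc hotpatch option a function is made traceable by a patchable prologue rather than by an mcount call, so notrace has to suppress that prologue instead; hotpatch(0,0) asks for zero halfwords of patch area before and at the function entry (that reading of the attribute arguments is background knowledge, not something this hunk states). A sketch of what the two variants amount to for a hypothetical function:

	/* Hypothetical function that must never be traced. */
	#ifdef CC_USING_HOTPATCH
	static void __attribute__((hotpatch(0,0))) quiet_helper(void)
	{
		/* built with the hotpatch option: no patchable NOP area is emitted */
	}
	#else
	static void __attribute__((no_instrument_function)) quiet_helper(void)
	{
		/* built with -pg: no mcount profiling call is emitted */
	}
	#endif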
@@ -13,8 +13,8 @@ obj-y = fork.o exec_domain.o panic.o \

ifdef CONFIG_FUNCTION_TRACER
# Do not trace debug files and internal ftrace files
CFLAGS_REMOVE_cgroup-debug.o = -pg
CFLAGS_REMOVE_irq_work.o = -pg
CFLAGS_REMOVE_cgroup-debug.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_irq_work.o = $(CC_FLAGS_FTRACE)
endif

# cond_syscall is currently not LTO compatible

@@ -1,5 +1,5 @@
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_core.o = -pg
CFLAGS_REMOVE_core.o = $(CC_FLAGS_FTRACE)
endif

obj-y := core.o ring_buffer.o callchain.o

@@ -2,10 +2,10 @@
obj-y += mutex.o semaphore.o rwsem.o

ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_lockdep.o = -pg
CFLAGS_REMOVE_lockdep_proc.o = -pg
CFLAGS_REMOVE_mutex-debug.o = -pg
CFLAGS_REMOVE_rtmutex-debug.o = -pg
CFLAGS_REMOVE_lockdep.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_lockdep_proc.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_mutex-debug.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_rtmutex-debug.o = $(CC_FLAGS_FTRACE)
endif

obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o

@@ -1,5 +1,5 @@
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_clock.o = -pg
CFLAGS_REMOVE_clock.o = $(CC_FLAGS_FTRACE)
endif

ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)

@@ -3,11 +3,11 @@

ifdef CONFIG_FUNCTION_TRACER
ORIG_CFLAGS := $(KBUILD_CFLAGS)
KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS))
KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS))

ifdef CONFIG_FTRACE_SELFTEST
# selftest needs instrumentation
CFLAGS_trace_selftest_dynamic.o = -pg
CFLAGS_trace_selftest_dynamic.o = $(CC_FLAGS_FTRACE)
obj-y += trace_selftest_dynamic.o
endif
endif

@@ -4,7 +4,7 @@

ifdef CONFIG_FUNCTION_TRACER
ORIG_CFLAGS := $(KBUILD_CFLAGS)
KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS))
KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS))
endif

lib-y := ctype.o string.o vsprintf.o cmdline.o \

@@ -234,8 +234,9 @@ sub_cmd_record_mcount = set -e ; perl $(srctree)/scripts/recordmcount.pl "$(ARCH
	"$(if $(part-of-module),1,0)" "$(@)";
recordmcount_source := $(srctree)/scripts/recordmcount.pl
endif
cmd_record_mcount = \
	if [ "$(findstring -pg,$(_c_flags))" = "-pg" ]; then \
cmd_record_mcount = \
	if [ "$(findstring $(CC_FLAGS_FTRACE),$(_c_flags))" = \
	     "$(CC_FLAGS_FTRACE)" ]; then \
		$(sub_cmd_record_mcount) \
	fi;
endif

@@ -242,8 +242,13 @@ if ($arch eq "x86_64") {
    $cc .= " -m32";

} elsif ($arch eq "s390" && $bits == 64) {
    $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_(PC|PLT)32DBL\\s+_mcount\\+0x2\$";
    $mcount_adjust = -14;
    if ($cc =~ /-DCC_USING_HOTPATCH/) {
	$mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*c0 04 00 00 00 00\\s*brcl\\s*0,[0-9a-f]+ <([^\+]*)>\$";
	$mcount_adjust = 0;
    } else {
	$mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_(PC|PLT)32DBL\\s+_mcount\\+0x2\$";
	$mcount_adjust = -14;
    }
    $alignment = 8;
    $type = ".quad";
    $ld .= " -m elf64_s390";

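Read alongside the ftrace changes above, the new branch stops looking for an _mcount relocation altogether: when the kernel is compiled with -DCC_USING_HOTPATCH, recordmcount.pl instead matches the hotpatch prologue itself, a six-byte brcl 0 (opcode bytes c0 04 00 00 00 00) at the start of each function, which is why $mcount_adjust drops from -14 to 0. A disassembly line of the shape the new expression accepts would look roughly like this (address and symbol name are invented for illustration):

	0000000000001234 <hypothetical_function>:
	    1234:	c0 04 00 00 00 00	brcl	0,1234 <hypothetical_function>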