Merge branch 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6
* 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6: (65 commits)
  [S390] prevent unneccesary loops_per_jiffy recalculation
  [S390] cpuinfo: use get_online_cpus() instead of preempt_disable()
  [S390] smp: remove cpu hotplug messages
  [S390] mutex: enable spinning mutex on s390
  [S390] mutex: Introduce arch_mutex_cpu_relax()
  [S390] cio: fix ccwgroup unregistration race condition
  [S390] perf: add DWARF register lookup for s390
  [S390] cleanup ftrace backend functions
  [S390] ptrace cleanup
  [S390] smp/idle: call init_idle() before starting a new cpu
  [S390] smp: delay idle task creation
  [S390] dasd: Correct retry counter for terminated I/O.
  [S390] dasd: Add support for raw ECKD access.
  [S390] dasd: Prevent deadlock during suspend/resume.
  [S390] dasd: Improve handling of stolen DASD reservation
  [S390] dasd: do path verification for paths added at runtime
  [S390] dasd: add High Performance FICON multitrack support
  [S390] cio: reduce memory consumption of itcw structures
  [S390] nmi: enable machine checks early
  [S390] qeth: buffer count imbalance
  ...
This commit is contained in:
commit fb5131e188
95 changed files with 2996 additions and 1804 deletions
arch/s390/Kconfig
@@ -1,13 +1,8 @@
config SCHED_MC
    def_bool y
    depends on SMP

config MMU
    def_bool y

config ZONE_DMA
    def_bool y
    depends on 64BIT
    def_bool y if 64BIT

config LOCKDEP_SUPPORT
    def_bool y
@@ -25,12 +20,10 @@ config RWSEM_XCHGADD_ALGORITHM
    def_bool y

config ARCH_HAS_ILOG2_U32
    bool
    default n
    def_bool n

config ARCH_HAS_ILOG2_U64
    bool
    default n
    def_bool n

config GENERIC_HWEIGHT
    def_bool y
@@ -42,9 +35,7 @@ config GENERIC_CLOCKEVENTS
    def_bool y

config GENERIC_BUG
    bool
    depends on BUG
    default y
    def_bool y if BUG

config GENERIC_BUG_RELATIVE_POINTERS
    def_bool y
@@ -59,13 +50,10 @@ config ARCH_DMA_ADDR_T_64BIT
    def_bool 64BIT

config GENERIC_LOCKBREAK
    bool
    default y
    depends on SMP && PREEMPT
    def_bool y if SMP && PREEMPT

config PGSTE
    bool
    default y if KVM
    def_bool y if KVM

config VIRT_CPU_ACCOUNTING
    def_bool y
@@ -85,7 +73,6 @@ config S390
    select HAVE_DYNAMIC_FTRACE
    select HAVE_FUNCTION_GRAPH_TRACER
    select HAVE_REGS_AND_STACK_ACCESS_API
    select HAVE_DEFAULT_NO_SPIN_MUTEXES
    select HAVE_OPROFILE
    select HAVE_KPROBES
    select HAVE_KRETPROBES
@@ -130,8 +117,7 @@ config S390
    select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE

config SCHED_OMIT_FRAME_POINTER
    bool
    default y
    def_bool y

source "init/Kconfig"

@@ -144,20 +130,21 @@ comment "Processor type and features"
source "kernel/time/Kconfig"

config 64BIT
    bool "64 bit kernel"
    def_bool y
    prompt "64 bit kernel"
    help
      Select this option if you have an IBM z/Architecture machine
      and want to use the 64 bit addressing mode.

config 32BIT
    bool
    default y if !64BIT
    def_bool y if !64BIT

config KTIME_SCALAR
    def_bool 32BIT

config SMP
    bool "Symmetric multi-processing support"
    def_bool y
    prompt "Symmetric multi-processing support"
    ---help---
      This enables support for systems with more than one CPU. If you have
      a system with only one CPU, like most personal computers, say N. If
@@ -189,10 +176,10 @@ config NR_CPUS
      approximately sixteen kilobytes to the kernel image.

config HOTPLUG_CPU
    bool "Support for hot-pluggable CPUs"
    def_bool y
    prompt "Support for hot-pluggable CPUs"
    depends on SMP
    select HOTPLUG
    default n
    help
      Say Y here to be able to turn CPUs off and on. CPUs
      can be controlled through /sys/devices/system/cpu/cpu#.
@@ -208,14 +195,16 @@ config SCHED_MC
      increased overhead in some places.

config SCHED_BOOK
    bool "Book scheduler support"
    def_bool y
    prompt "Book scheduler support"
    depends on SMP && SCHED_MC
    help
      Book scheduler support improves the CPU scheduler's decision making
      when dealing with machines that have several books.

config MATHEMU
    bool "IEEE FPU emulation"
    def_bool y
    prompt "IEEE FPU emulation"
    depends on MARCH_G5
    help
      This option is required for IEEE compliant floating point arithmetic
@@ -223,7 +212,8 @@ config MATHEMU
      need this.

config COMPAT
    bool "Kernel support for 31 bit emulation"
    def_bool y
    prompt "Kernel support for 31 bit emulation"
    depends on 64BIT
    select COMPAT_BINFMT_ELF
    help
@@ -233,16 +223,14 @@ config COMPAT
      executing 31 bit applications. It is safe to say "Y".

config SYSVIPC_COMPAT
    bool
    depends on COMPAT && SYSVIPC
    default y
    def_bool y if COMPAT && SYSVIPC

config AUDIT_ARCH
    bool
    default y
    def_bool y

config S390_EXEC_PROTECT
    bool "Data execute protection"
    def_bool y
    prompt "Data execute protection"
    help
      This option allows to enable a buffer overflow protection for user
      space programs and it also selects the addressing mode option above.
@@ -302,7 +290,8 @@ config MARCH_Z196
endchoice

config PACK_STACK
    bool "Pack kernel stack"
    def_bool y
    prompt "Pack kernel stack"
    help
      This option enables the compiler option -mkernel-backchain if it
      is available. If the option is available the compiler supports
@@ -315,7 +304,8 @@ config PACK_STACK
      Say Y if you are unsure.

config SMALL_STACK
    bool "Use 8kb for kernel stack instead of 16kb"
    def_bool n
    prompt "Use 8kb for kernel stack instead of 16kb"
    depends on PACK_STACK && 64BIT && !LOCKDEP
    help
      If you say Y here and the compiler supports the -mkernel-backchain
@@ -327,7 +317,8 @@ config SMALL_STACK
      Say N if you are unsure.

config CHECK_STACK
    bool "Detect kernel stack overflow"
    def_bool y
    prompt "Detect kernel stack overflow"
    help
      This option enables the compiler option -mstack-guard and
      -mstack-size if they are available. If the compiler supports them
@@ -351,7 +342,8 @@ config STACK_GUARD
      512 for 64 bit.

config WARN_STACK
    bool "Emit compiler warnings for function with broken stack usage"
    def_bool n
    prompt "Emit compiler warnings for function with broken stack usage"
    help
      This option enables the compiler options -mwarn-framesize and
      -mwarn-dynamicstack. If the compiler supports these options it
@@ -386,24 +378,24 @@ config ARCH_SPARSEMEM_DEFAULT
    def_bool y

config ARCH_SELECT_MEMORY_MODEL
    def_bool y
    def_bool y

config ARCH_ENABLE_MEMORY_HOTPLUG
    def_bool y
    depends on SPARSEMEM
    def_bool y if SPARSEMEM

config ARCH_ENABLE_MEMORY_HOTREMOVE
    def_bool y

config ARCH_HIBERNATION_POSSIBLE
    def_bool y if 64BIT
    def_bool y if 64BIT

source "mm/Kconfig"

comment "I/O subsystem configuration"

config QDIO
    tristate "QDIO support"
    def_tristate y
    prompt "QDIO support"
    ---help---
      This driver provides the Queued Direct I/O base support for
      IBM System z.
@@ -414,7 +406,8 @@ config QDIO
      If unsure, say Y.

config CHSC_SCH
    tristate "Support for CHSC subchannels"
    def_tristate y
    prompt "Support for CHSC subchannels"
    help
      This driver allows usage of CHSC subchannels. A CHSC subchannel
      is usually present on LPAR only.
@@ -432,7 +425,8 @@ config CHSC_SCH
comment "Misc"

config IPL
    bool "Builtin IPL record support"
    def_bool y
    prompt "Builtin IPL record support"
    help
      If you want to use the produced kernel to IPL directly from a
      device, you have to merge a bootsector specific to the device
@@ -464,7 +458,8 @@ config FORCE_MAX_ZONEORDER
    default "9"

config PFAULT
    bool "Pseudo page fault support"
    def_bool y
    prompt "Pseudo page fault support"
    help
      Select this option, if you want to use PFAULT pseudo page fault
      handling under VM. If running native or in LPAR, this option
@@ -476,7 +471,8 @@ config PFAULT
      this option.

config SHARED_KERNEL
    bool "VM shared kernel support"
    def_bool y
    prompt "VM shared kernel support"
    help
      Select this option, if you want to share the text segment of the
      Linux kernel between different VM guests. This reduces memory
@@ -487,7 +483,8 @@ config SHARED_KERNEL
      doing and want to exploit this feature.

config CMM
    tristate "Cooperative memory management"
    def_tristate n
    prompt "Cooperative memory management"
    help
      Select this option, if you want to enable the kernel interface
      to reduce the memory size of the system. This is accomplished
@@ -499,14 +496,16 @@ config CMM
      option.

config CMM_IUCV
    bool "IUCV special message interface to cooperative memory management"
    def_bool y
    prompt "IUCV special message interface to cooperative memory management"
    depends on CMM && (SMSGIUCV=y || CMM=SMSGIUCV)
    help
      Select this option to enable the special message interface to
      the cooperative memory management.

config APPLDATA_BASE
    bool "Linux - VM Monitor Stream, base infrastructure"
    def_bool n
    prompt "Linux - VM Monitor Stream, base infrastructure"
    depends on PROC_FS
    help
      This provides a kernel interface for creating and updating z/VM APPLDATA
@@ -521,7 +520,8 @@ config APPLDATA_BASE
      The /proc entries can also be read from, showing the current settings.

config APPLDATA_MEM
    tristate "Monitor memory management statistics"
    def_tristate m
    prompt "Monitor memory management statistics"
    depends on APPLDATA_BASE && VM_EVENT_COUNTERS
    help
      This provides memory management related data to the Linux - VM Monitor
@@ -537,7 +537,8 @@ config APPLDATA_MEM
      appldata_mem.o.

config APPLDATA_OS
    tristate "Monitor OS statistics"
    def_tristate m
    prompt "Monitor OS statistics"
    depends on APPLDATA_BASE
    help
      This provides OS related data to the Linux - VM Monitor Stream, like
@@ -551,7 +552,8 @@ config APPLDATA_OS
      appldata_os.o.

config APPLDATA_NET_SUM
    tristate "Monitor overall network statistics"
    def_tristate m
    prompt "Monitor overall network statistics"
    depends on APPLDATA_BASE && NET
    help
      This provides network related data to the Linux - VM Monitor Stream,
@@ -568,30 +570,32 @@ config APPLDATA_NET_SUM
source kernel/Kconfig.hz

config S390_HYPFS_FS
    bool "s390 hypervisor file system support"
    def_bool y
    prompt "s390 hypervisor file system support"
    select SYS_HYPERVISOR
    default y
    help
      This is a virtual file system intended to provide accounting
      information in an s390 hypervisor environment.

config KEXEC
    bool "kexec system call"
    def_bool n
    prompt "kexec system call"
    help
      kexec is a system call that implements the ability to shutdown your
      current kernel, and to start another kernel. It is like a reboot
      but is independent of hardware/microcode support.

config ZFCPDUMP
    bool "zfcpdump support"
    def_bool n
    prompt "zfcpdump support"
    select SMP
    default n
    help
      Select this option if you want to build an zfcpdump enabled kernel.
      Refer to <file:Documentation/s390/zfcpdump.txt> for more details on this.

config S390_GUEST
    bool "s390 guest support for KVM (EXPERIMENTAL)"
    def_bool y
    prompt "s390 guest support for KVM (EXPERIMENTAL)"
    depends on 64BIT && EXPERIMENTAL
    select VIRTIO
    select VIRTIO_RING
@@ -603,9 +607,9 @@ bool "s390 guest support for KVM (EXPERIMENTAL)"
      the default console.

config SECCOMP
    bool "Enable seccomp to safely compute untrusted bytecode"
    def_bool y
    prompt "Enable seccomp to safely compute untrusted bytecode"
    depends on PROC_FS
    default y
    help
      This kernel feature is useful for number crunching applications
      that may need to compute untrusted bytecode during their
arch/s390/Kconfig.debug
@@ -1,8 +1,7 @@
menu "Kernel hacking"

config TRACE_IRQFLAGS_SUPPORT
    bool
    default y
    def_bool y

source "lib/Kconfig.debug"

@@ -19,7 +18,8 @@ config STRICT_DEVMEM
      If you are unsure, say Y.

config DEBUG_STRICT_USER_COPY_CHECKS
    bool "Strict user copy size checks"
    def_bool n
    prompt "Strict user copy size checks"
    ---help---
      Enabling this option turns a certain set of sanity checks for user
      copy operations into compile time warnings.
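Both Kconfig files above apply the same mechanical conversion: an interactive bool "..." or tristate "..." line plus a separate default line becomes def_bool/def_tristate plus prompt, and bare bool/default/depends-on triples collapse into a single def_bool ... if .... Schematically (the FOO option is illustrative, not from this patch):

# before
config FOO
    bool "Foo support"
    default y

# after, equivalent but shorter
config FOO
    def_bool y
    prompt "Foo support"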
arch/s390/defconfig
@@ -2,16 +2,12 @@ CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_AUDIT=y
CONFIG_RCU_TRACE=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_CGROUPS=y
CONFIG_CGROUP_NS=y
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_UTS_NS=y
CONFIG_IPC_NS=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_PERF_EVENTS=y
CONFIG_SLAB=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
@@ -20,24 +16,12 @@ CONFIG_MODVERSIONS=y
CONFIG_DEFAULT_DEADLINE=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_64BIT=y
CONFIG_SMP=y
CONFIG_NR_CPUS=32
CONFIG_COMPAT=y
CONFIG_S390_EXEC_PROTECT=y
CONFIG_PACK_STACK=y
CONFIG_CHECK_STACK=y
CONFIG_PREEMPT=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_QDIO=y
CONFIG_CHSC_SCH=m
CONFIG_IPL=y
CONFIG_BINFMT_MISC=m
CONFIG_PFAULT=y
CONFIG_HZ_100=y
CONFIG_KEXEC=y
CONFIG_S390_GUEST=y
CONFIG_PM=y
CONFIG_HIBERNATION=y
CONFIG_PACKET=y
@@ -46,16 +30,15 @@ CONFIG_NET_KEY=y
CONFIG_AFIUCV=m
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
# CONFIG_INET_LRO is not set
CONFIG_IPV6=y
CONFIG_NETFILTER=y
CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NETFILTER_NETLINK_LOG=m
CONFIG_NF_CONNTRACK=m
# CONFIG_NF_CT_PROTO_SCTP is not set
CONFIG_NET_SCTPPROBE=m
CONFIG_L2TP=m
CONFIG_L2TP_DEBUGFS=m
CONFIG_VLAN_8021Q=y
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_PRIO=m
CONFIG_NET_SCH_MULTIQ=y
CONFIG_NET_SCH_RED=m
CONFIG_NET_SCH_SFQ=m
CONFIG_NET_SCH_TEQL=m
@@ -69,28 +52,14 @@ CONFIG_NET_CLS_U32=m
CONFIG_CLS_U32_MARK=y
CONFIG_NET_CLS_RSVP=m
CONFIG_NET_CLS_RSVP6=m
CONFIG_NET_CLS_FLOW=m
CONFIG_NET_CLS_ACT=y
CONFIG_NET_ACT_POLICE=y
CONFIG_NET_ACT_NAT=m
CONFIG_CAN=m
CONFIG_CAN_RAW=m
CONFIG_CAN_BCM=m
CONFIG_CAN_VCAN=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_XIP=y
CONFIG_BLK_DEV_XPRAM=m
CONFIG_DASD=y
CONFIG_DASD_PROFILE=y
CONFIG_DASD_ECKD=y
CONFIG_DASD_FBA=y
CONFIG_DASD_DIAG=y
CONFIG_DASD_EER=y
CONFIG_VIRTIO_BLK=m
CONFIG_VIRTIO_BLK=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
@@ -102,101 +71,92 @@ CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_ZFCP=y
CONFIG_SCSI_DH=m
CONFIG_SCSI_DH_RDAC=m
CONFIG_SCSI_DH_HP_SW=m
CONFIG_SCSI_DH_EMC=m
CONFIG_SCSI_DH_ALUA=m
CONFIG_SCSI_OSD_INITIATOR=m
CONFIG_SCSI_OSD_ULD=m
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
CONFIG_MD_LINEAR=m
CONFIG_MD_RAID0=m
CONFIG_MD_RAID1=m
CONFIG_MD_MULTIPATH=m
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_SNAPSHOT=y
CONFIG_DM_MIRROR=y
CONFIG_DM_ZERO=y
CONFIG_DM_MULTIPATH=m
CONFIG_ZFCP_DIF=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_BONDING=m
CONFIG_EQUALIZER=m
CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_NET_ETHERNET=y
CONFIG_LCS=m
CONFIG_CTCM=m
CONFIG_QETH=y
CONFIG_QETH_L2=y
CONFIG_QETH_L3=y
CONFIG_VIRTIO_NET=m
CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_VIRTIO_NET=y
CONFIG_RAW_DRIVER=m
CONFIG_TN3270=y
CONFIG_TN3270_TTY=y
CONFIG_TN3270_FS=m
CONFIG_TN3270_CONSOLE=y
CONFIG_TN3215=y
CONFIG_TN3215_CONSOLE=y
CONFIG_SCLP_TTY=y
CONFIG_SCLP_CONSOLE=y
CONFIG_SCLP_VT220_TTY=y
CONFIG_SCLP_VT220_CONSOLE=y
CONFIG_SCLP_CPI=m
CONFIG_SCLP_ASYNC=m
CONFIG_S390_TAPE=m
CONFIG_S390_TAPE_BLOCK=y
CONFIG_S390_TAPE_34XX=m
CONFIG_ACCESSIBILITY=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
CONFIG_NFSD=y
CONFIG_NFSD_V3=y
# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_TIMER_STATS=y
CONFIG_PROVE_LOCKING=y
CONFIG_PROVE_RCU=y
CONFIG_LOCK_STAT=y
CONFIG_DEBUG_LOCKDEP=y
CONFIG_DEBUG_SPINLOCK_SLEEP=y
CONFIG_DEBUG_LIST=y
CONFIG_DEBUG_NOTIFIERS=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
CONFIG_KPROBES_SANITY_TEST=y
CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
CONFIG_LATENCYTOP=y
CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_SAMPLES=y
CONFIG_CRYPTO_FIPS=y
CONFIG_DEBUG_PAGEALLOC=y
# CONFIG_FTRACE is not set
# CONFIG_STRICT_DEVMEM is not set
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_AUTHENC=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_DEFLATE=m
CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
CONFIG_ZCRYPT=m
CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m
CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRC_T10DIF=y
CONFIG_CRC32=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRC7=m
CONFIG_KVM=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_BALLOON=y
arch/s390/hypfs/Makefile
@@ -4,4 +4,4 @@

obj-$(CONFIG_S390_HYPFS_FS) += s390_hypfs.o

s390_hypfs-objs := inode.o hypfs_diag.o hypfs_vm.o
s390_hypfs-objs := inode.o hypfs_diag.o hypfs_vm.o hypfs_dbfs.o
arch/s390/hypfs/hypfs.h
@@ -12,6 +12,8 @@
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/workqueue.h>
#include <linux/kref.h>

#define REG_FILE_MODE 0440
#define UPDATE_FILE_MODE 0220

@@ -38,6 +40,33 @@ extern int hypfs_vm_init(void);
extern void hypfs_vm_exit(void);
extern int hypfs_vm_create_files(struct super_block *sb, struct dentry *root);

/* Directory for debugfs files */
extern struct dentry *hypfs_dbfs_dir;
/* debugfs interface */
struct hypfs_dbfs_file;

struct hypfs_dbfs_data {
    void *buf;
    void *buf_free_ptr;
    size_t size;
    struct hypfs_dbfs_file *dbfs_file;;
    struct kref kref;
};

struct hypfs_dbfs_file {
    const char *name;
    int (*data_create)(void **data, void **data_free_ptr,
                       size_t *size);
    void (*data_free)(const void *buf_free_ptr);

    /* Private data for hypfs_dbfs.c */
    struct hypfs_dbfs_data *data;
    struct delayed_work data_free_work;
    struct mutex lock;
    struct dentry *dentry;
};

extern int hypfs_dbfs_init(void);
extern void hypfs_dbfs_exit(void);
extern int hypfs_dbfs_create_file(struct hypfs_dbfs_file *df);
extern void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df);

#endif /* _HYPFS_H_ */
arch/s390/hypfs/hypfs_dbfs.c (new file, 116 lines)
@@ -0,0 +1,116 @@
/*
 * Hypervisor filesystem for Linux on s390 - debugfs interface
 *
 * Copyright (C) IBM Corp. 2010
 * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
 */

#include <linux/slab.h>
#include "hypfs.h"

static struct dentry *dbfs_dir;

static struct hypfs_dbfs_data *hypfs_dbfs_data_alloc(struct hypfs_dbfs_file *f)
{
    struct hypfs_dbfs_data *data;

    data = kmalloc(sizeof(*data), GFP_KERNEL);
    if (!data)
        return NULL;
    kref_init(&data->kref);
    data->dbfs_file = f;
    return data;
}

static void hypfs_dbfs_data_free(struct kref *kref)
{
    struct hypfs_dbfs_data *data;

    data = container_of(kref, struct hypfs_dbfs_data, kref);
    data->dbfs_file->data_free(data->buf_free_ptr);
    kfree(data);
}

static void data_free_delayed(struct work_struct *work)
{
    struct hypfs_dbfs_data *data;
    struct hypfs_dbfs_file *df;

    df = container_of(work, struct hypfs_dbfs_file, data_free_work.work);
    mutex_lock(&df->lock);
    data = df->data;
    df->data = NULL;
    mutex_unlock(&df->lock);
    kref_put(&data->kref, hypfs_dbfs_data_free);
}

static ssize_t dbfs_read(struct file *file, char __user *buf,
                         size_t size, loff_t *ppos)
{
    struct hypfs_dbfs_data *data;
    struct hypfs_dbfs_file *df;
    ssize_t rc;

    if (*ppos != 0)
        return 0;

    df = file->f_path.dentry->d_inode->i_private;
    mutex_lock(&df->lock);
    if (!df->data) {
        data = hypfs_dbfs_data_alloc(df);
        if (!data) {
            mutex_unlock(&df->lock);
            return -ENOMEM;
        }
        rc = df->data_create(&data->buf, &data->buf_free_ptr,
                             &data->size);
        if (rc) {
            mutex_unlock(&df->lock);
            kfree(data);
            return rc;
        }
        df->data = data;
        schedule_delayed_work(&df->data_free_work, HZ);
    }
    data = df->data;
    kref_get(&data->kref);
    mutex_unlock(&df->lock);

    rc = simple_read_from_buffer(buf, size, ppos, data->buf, data->size);
    kref_put(&data->kref, hypfs_dbfs_data_free);
    return rc;
}

static const struct file_operations dbfs_ops = {
    .read   = dbfs_read,
    .llseek = no_llseek,
};

int hypfs_dbfs_create_file(struct hypfs_dbfs_file *df)
{
    df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df,
                                     &dbfs_ops);
    if (IS_ERR(df->dentry))
        return PTR_ERR(df->dentry);
    mutex_init(&df->lock);
    INIT_DELAYED_WORK(&df->data_free_work, data_free_delayed);
    return 0;
}

void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df)
{
    debugfs_remove(df->dentry);
}

int hypfs_dbfs_init(void)
{
    dbfs_dir = debugfs_create_dir("s390_hypfs", NULL);
    if (IS_ERR(dbfs_dir))
        return PTR_ERR(dbfs_dir);
    return 0;
}

void hypfs_dbfs_exit(void)
{
    debugfs_remove(dbfs_dir);
}
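The diag_204 and diag_2fc files in hypfs_diag.c and hypfs_vm.c below are converted to this interface. Note that dbfs_read() caches the buffer produced by data_create() for roughly one second (the HZ-delayed work) and releases it through the kref once the delayed work and all concurrent readers have dropped their references. As a reading aid, a minimal hypothetical provider would look roughly like this — only the hypfs_dbfs_* entry points and callback signatures come from the code above; the demo names and buffer contents are illustrative:

/* Sketch only: a minimal provider for the dbfs interface above. */
static int demo_data_create(void **data, void **data_free_ptr, size_t *size)
{
    char *buf = kmalloc(16, GFP_KERNEL);

    if (!buf)
        return -ENOMEM;
    *size = snprintf(buf, 16, "hello\n");
    *data = buf;            /* what dbfs_read() copies to user space */
    *data_free_ptr = buf;   /* what data_free() releases later */
    return 0;
}

static void demo_data_free(const void *buf_free_ptr)
{
    kfree(buf_free_ptr);
}

static struct hypfs_dbfs_file demo_file = {
    .name        = "demo",
    .data_create = demo_data_create,
    .data_free   = demo_data_free,
};

/* hypfs_dbfs_create_file(&demo_file) creates <debugfs>/s390_hypfs/demo;
   hypfs_dbfs_remove_file(&demo_file) removes it again. */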
arch/s390/hypfs/hypfs_diag.c
@@ -555,81 +555,38 @@ struct dbfs_d204 {
    char buf[];         /* d204 buffer */
} __attribute__ ((packed));

struct dbfs_d204_private {
    struct dbfs_d204 *d204; /* Aligned d204 data with header */
    void *base;             /* Base pointer (needed for vfree) */
};

static int dbfs_d204_open(struct inode *inode, struct file *file)
static int dbfs_d204_create(void **data, void **data_free_ptr, size_t *size)
{
    struct dbfs_d204_private *data;
    struct dbfs_d204 *d204;
    int rc, buf_size;
    void *base;

    data = kzalloc(sizeof(*data), GFP_KERNEL);
    if (!data)
        return -ENOMEM;
    buf_size = PAGE_SIZE * (diag204_buf_pages + 1) + sizeof(d204->hdr);
    data->base = vmalloc(buf_size);
    if (!data->base) {
        rc = -ENOMEM;
        goto fail_kfree_data;
    base = vmalloc(buf_size);
    if (!base)
        return -ENOMEM;
    memset(base, 0, buf_size);
    d204 = page_align_ptr(base + sizeof(d204->hdr)) - sizeof(d204->hdr);
    rc = diag204_do_store(d204->buf, diag204_buf_pages);
    if (rc) {
        vfree(base);
        return rc;
    }
    memset(data->base, 0, buf_size);
    d204 = page_align_ptr(data->base + sizeof(d204->hdr))
           - sizeof(d204->hdr);
    rc = diag204_do_store(&d204->buf, diag204_buf_pages);
    if (rc)
        goto fail_vfree_base;
    d204->hdr.version = DBFS_D204_HDR_VERSION;
    d204->hdr.len = PAGE_SIZE * diag204_buf_pages;
    d204->hdr.sc = diag204_store_sc;
    data->d204 = d204;
    file->private_data = data;
    return nonseekable_open(inode, file);

fail_vfree_base:
    vfree(data->base);
fail_kfree_data:
    kfree(data);
    return rc;
}

static int dbfs_d204_release(struct inode *inode, struct file *file)
{
    struct dbfs_d204_private *data = file->private_data;

    vfree(data->base);
    kfree(data);
    *data = d204;
    *data_free_ptr = base;
    *size = d204->hdr.len + sizeof(struct dbfs_d204_hdr);
    return 0;
}

static ssize_t dbfs_d204_read(struct file *file, char __user *buf,
                              size_t size, loff_t *ppos)
{
    struct dbfs_d204_private *data = file->private_data;

    return simple_read_from_buffer(buf, size, ppos, data->d204,
                                   data->d204->hdr.len +
                                   sizeof(data->d204->hdr));
}

static const struct file_operations dbfs_d204_ops = {
    .open    = dbfs_d204_open,
    .read    = dbfs_d204_read,
    .release = dbfs_d204_release,
    .llseek  = no_llseek,
static struct hypfs_dbfs_file dbfs_file_d204 = {
    .name        = "diag_204",
    .data_create = dbfs_d204_create,
    .data_free   = vfree,
};

static int hypfs_dbfs_init(void)
{
    dbfs_d204_file = debugfs_create_file("diag_204", 0400, hypfs_dbfs_dir,
                                         NULL, &dbfs_d204_ops);
    if (IS_ERR(dbfs_d204_file))
        return PTR_ERR(dbfs_d204_file);
    return 0;
}

__init int hypfs_diag_init(void)
{
    int rc;

@@ -639,7 +596,7 @@ __init int hypfs_diag_init(void)
        return -ENODATA;
    }
    if (diag204_info_type == INFO_EXT) {
        rc = hypfs_dbfs_init();
        rc = hypfs_dbfs_create_file(&dbfs_file_d204);
        if (rc)
            return rc;
    }

@@ -660,6 +617,7 @@ void hypfs_diag_exit(void)
    debugfs_remove(dbfs_d204_file);
    diag224_delete_name_table();
    diag204_free_buffer();
    hypfs_dbfs_remove_file(&dbfs_file_d204);
}

/*
arch/s390/hypfs/hypfs_vm.c
@@ -20,8 +20,6 @@ static char local_guest[] = "        ";
static char all_guests[] = "*        ";
static char *guest_query;

static struct dentry *dbfs_d2fc_file;

struct diag2fc_data {
    __u32 version;
    __u32 flags;

@@ -104,7 +102,7 @@ static void *diag2fc_store(char *query, unsigned int *count, int offset)
    return data;
}

static void diag2fc_free(void *data)
static void diag2fc_free(const void *data)
{
    vfree(data);
}

@@ -239,43 +237,29 @@ struct dbfs_d2fc {
    char buf[];         /* d2fc buffer */
} __attribute__ ((packed));

static int dbfs_d2fc_open(struct inode *inode, struct file *file)
static int dbfs_diag2fc_create(void **data, void **data_free_ptr, size_t *size)
{
    struct dbfs_d2fc *data;
    struct dbfs_d2fc *d2fc;
    unsigned int count;

    data = diag2fc_store(guest_query, &count, sizeof(data->hdr));
    if (IS_ERR(data))
        return PTR_ERR(data);
    get_clock_ext(data->hdr.tod_ext);
    data->hdr.len = count * sizeof(struct diag2fc_data);
    data->hdr.version = DBFS_D2FC_HDR_VERSION;
    data->hdr.count = count;
    memset(&data->hdr.reserved, 0, sizeof(data->hdr.reserved));
    file->private_data = data;
    return nonseekable_open(inode, file);
}

static int dbfs_d2fc_release(struct inode *inode, struct file *file)
{
    diag2fc_free(file->private_data);
    d2fc = diag2fc_store(guest_query, &count, sizeof(d2fc->hdr));
    if (IS_ERR(d2fc))
        return PTR_ERR(d2fc);
    get_clock_ext(d2fc->hdr.tod_ext);
    d2fc->hdr.len = count * sizeof(struct diag2fc_data);
    d2fc->hdr.version = DBFS_D2FC_HDR_VERSION;
    d2fc->hdr.count = count;
    memset(&d2fc->hdr.reserved, 0, sizeof(d2fc->hdr.reserved));
    *data = d2fc;
    *data_free_ptr = d2fc;
    *size = d2fc->hdr.len + sizeof(struct dbfs_d2fc_hdr);
    return 0;
}

static ssize_t dbfs_d2fc_read(struct file *file, char __user *buf,
                              size_t size, loff_t *ppos)
{
    struct dbfs_d2fc *data = file->private_data;

    return simple_read_from_buffer(buf, size, ppos, data, data->hdr.len +
                                   sizeof(struct dbfs_d2fc_hdr));
}

static const struct file_operations dbfs_d2fc_ops = {
    .open    = dbfs_d2fc_open,
    .read    = dbfs_d2fc_read,
    .release = dbfs_d2fc_release,
    .llseek  = no_llseek,
static struct hypfs_dbfs_file dbfs_file_2fc = {
    .name        = "diag_2fc",
    .data_create = dbfs_diag2fc_create,
    .data_free   = diag2fc_free,
};

int hypfs_vm_init(void)

@@ -288,18 +272,12 @@ int hypfs_vm_init(void)
        guest_query = local_guest;
    else
        return -EACCES;

    dbfs_d2fc_file = debugfs_create_file("diag_2fc", 0400, hypfs_dbfs_dir,
                                         NULL, &dbfs_d2fc_ops);
    if (IS_ERR(dbfs_d2fc_file))
        return PTR_ERR(dbfs_d2fc_file);

    return 0;
    return hypfs_dbfs_create_file(&dbfs_file_2fc);
}

void hypfs_vm_exit(void)
{
    if (!MACHINE_IS_VM)
        return;
    debugfs_remove(dbfs_d2fc_file);
    hypfs_dbfs_remove_file(&dbfs_file_2fc);
}
arch/s390/hypfs/inode.c
@@ -46,8 +46,6 @@ static const struct super_operations hypfs_s_ops;
/* start of list of all dentries, which have to be deleted on update */
static struct dentry *hypfs_last_dentry;

struct dentry *hypfs_dbfs_dir;

static void hypfs_update_update(struct super_block *sb)
{
    struct hypfs_sb_info *sb_info = sb->s_fs_info;

@@ -471,13 +469,12 @@ static int __init hypfs_init(void)
{
    int rc;

    hypfs_dbfs_dir = debugfs_create_dir("s390_hypfs", NULL);
    if (IS_ERR(hypfs_dbfs_dir))
        return PTR_ERR(hypfs_dbfs_dir);

    rc = hypfs_dbfs_init();
    if (rc)
        return rc;
    if (hypfs_diag_init()) {
        rc = -ENODATA;
        goto fail_debugfs_remove;
        goto fail_dbfs_exit;
    }
    if (hypfs_vm_init()) {
        rc = -ENODATA;

@@ -499,9 +496,8 @@ static int __init hypfs_init(void)
    hypfs_vm_exit();
fail_hypfs_diag_exit:
    hypfs_diag_exit();
fail_debugfs_remove:
    debugfs_remove(hypfs_dbfs_dir);

fail_dbfs_exit:
    hypfs_dbfs_exit();
    pr_err("Initialization of hypfs failed with rc=%i\n", rc);
    return rc;
}

@@ -510,7 +506,7 @@ static void __exit hypfs_exit(void)
{
    hypfs_diag_exit();
    hypfs_vm_exit();
    debugfs_remove(hypfs_dbfs_dir);
    hypfs_dbfs_exit();
    unregister_filesystem(&hypfs_type);
    kobject_put(s390_kobj);
}
arch/s390/include/asm/ccwdev.h
@@ -204,6 +204,8 @@ int ccw_device_tm_start_timeout(struct ccw_device *, struct tcw *,
                                unsigned long, u8, int);
int ccw_device_tm_intrg(struct ccw_device *cdev);

int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask);

extern int ccw_device_set_online(struct ccw_device *cdev);
extern int ccw_device_set_offline(struct ccw_device *cdev);
arch/s390/include/asm/cputime.h
@@ -202,7 +202,7 @@ static inline void s390_idle_check(struct pt_regs *regs, __u64 int_clock,

static inline int s390_nohz_delay(int cpu)
{
    return per_cpu(s390_idle, cpu).nohz_delay != 0;
    return __get_cpu_var(s390_idle).nohz_delay != 0;
}

#define arch_needs_cpu(cpu) s390_nohz_delay(cpu)
arch/s390/include/asm/dasd.h
@@ -73,6 +73,7 @@ typedef struct dasd_information2_t {
 * 0x02: use diag discipline (diag)
 * 0x04: set the device initially online (internal use only)
 * 0x08: enable ERP related logging
 * 0x20: give access to raw eckd data
 */
#define DASD_FEATURE_DEFAULT         0x00
#define DASD_FEATURE_READONLY        0x01

@@ -80,6 +81,8 @@ typedef struct dasd_information2_t {
#define DASD_FEATURE_INITIAL_ONLINE  0x04
#define DASD_FEATURE_ERPLOG          0x08
#define DASD_FEATURE_FAILFAST        0x10
#define DASD_FEATURE_FAILONSLCK      0x20
#define DASD_FEATURE_USERAW          0x40

#define DASD_PARTN_BITS 2
arch/s390/include/asm/ftrace.h
@@ -4,20 +4,17 @@
#ifndef __ASSEMBLY__

extern void _mcount(void);
extern unsigned long ftrace_dyn_func;

struct dyn_arch_ftrace { };

#define MCOUNT_ADDR ((long)_mcount)

#ifdef CONFIG_64BIT
#define MCOUNT_OFFSET_RET 18
#define MCOUNT_INSN_SIZE  24
#define MCOUNT_OFFSET     14
#else
#define MCOUNT_OFFSET_RET 26
#define MCOUNT_INSN_SIZE  30
#define MCOUNT_INSN_SIZE  12
#define MCOUNT_OFFSET      8
#else
#define MCOUNT_INSN_SIZE  20
#define MCOUNT_OFFSET      4
#endif

static inline unsigned long ftrace_call_adjust(unsigned long addr)
arch/s390/include/asm/hardirq.h
@@ -21,20 +21,4 @@

#define HARDIRQ_BITS 8

void clock_comparator_work(void);

static inline unsigned long long local_tick_disable(void)
{
    unsigned long long old;

    old = S390_lowcore.clock_comparator;
    S390_lowcore.clock_comparator = -1ULL;
    return old;
}

static inline void local_tick_enable(unsigned long long comp)
{
    S390_lowcore.clock_comparator = comp;
}

#endif /* __ASM_HARDIRQ_H */
arch/s390/include/asm/irq.h
@@ -1,23 +1,33 @@
#ifndef _ASM_IRQ_H
#define _ASM_IRQ_H

#ifdef __KERNEL__
#include <linux/hardirq.h>

/*
 * the definition of irqs has changed in 2.5.46:
 * NR_IRQS is no longer the number of i/o
 * interrupts (65536), but rather the number
 * of interrupt classes (2).
 * Only external and i/o interrupts make much sense here (CH).
 */

enum interruption_class {
    EXTERNAL_INTERRUPT,
    IO_INTERRUPT,

    EXTINT_CLK,
    EXTINT_IPI,
    EXTINT_TMR,
    EXTINT_TLA,
    EXTINT_PFL,
    EXTINT_DSD,
    EXTINT_VRT,
    EXTINT_SCP,
    EXTINT_IUC,
    IOINT_QAI,
    IOINT_QDI,
    IOINT_DAS,
    IOINT_C15,
    IOINT_C70,
    IOINT_TAP,
    IOINT_VMR,
    IOINT_LCS,
    IOINT_CLW,
    IOINT_CTC,
    IOINT_APB,
    NMI_NMI,
    NR_IRQS,
};

#endif /* __KERNEL__ */
#endif
#endif /* _ASM_IRQ_H */
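A note on the irq.h rewrite: NR_IRQS now counts interrupt classes rather than individual I/O interrupts, so the enum doubles as a dense index space. A minimal sketch of how such an enum can be used, assuming a hypothetical per-class counter array (the demo_* names are not from this patch):

/* Sketch only: per-class counters indexed by enum interruption_class. */
static unsigned int demo_irq_count[NR_IRQS];

static void demo_count_irq(enum interruption_class irq)
{
    demo_irq_count[irq]++;  /* EXTINT_ and IOINT_ values share one flat array */
}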
arch/s390/include/asm/kprobes.h
@@ -31,7 +31,6 @@
#include <linux/ptrace.h>
#include <linux/percpu.h>

#define __ARCH_WANT_KPROBES_INSN_SLOT
struct pt_regs;
struct kprobe;

@@ -58,23 +57,12 @@ typedef u16 kprobe_opcode_t;
/* Architecture specific copy of original instruction */
struct arch_specific_insn {
    /* copy of original instruction */
    kprobe_opcode_t *insn;
    int fixup;
    int ilen;
    int reg;
    kprobe_opcode_t insn[MAX_INSN_SIZE];
};

struct ins_replace_args {
    kprobe_opcode_t *ptr;
    kprobe_opcode_t old;
    kprobe_opcode_t new;
};
struct prev_kprobe {
    struct kprobe *kp;
    unsigned long status;
    unsigned long saved_psw;
    unsigned long kprobe_saved_imask;
    unsigned long kprobe_saved_ctl[3];
};

/* per-cpu kprobe control block */

@@ -82,17 +70,13 @@ struct kprobe_ctlblk {
    unsigned long kprobe_status;
    unsigned long kprobe_saved_imask;
    unsigned long kprobe_saved_ctl[3];
    struct pt_regs jprobe_saved_regs;
    unsigned long jprobe_saved_r14;
    unsigned long jprobe_saved_r15;
    struct prev_kprobe prev_kprobe;
    struct pt_regs jprobe_saved_regs;
    kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
};

void arch_remove_kprobe(struct kprobe *p);
void kretprobe_trampoline(void);
int is_prohibited_opcode(kprobe_opcode_t *instruction);
void get_instruction_type(struct arch_specific_insn *ainsn);

int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
int kprobe_exceptions_notify(struct notifier_block *self,
arch/s390/include/asm/processor.h
@@ -32,7 +32,6 @@ static inline void get_cpu_id(struct cpuid *ptr)
}

extern void s390_adjust_jiffies(void);
extern void print_cpu_info(void);
extern int get_cpu_capability(unsigned int *);

/*

@@ -81,7 +80,8 @@ struct thread_struct {
    mm_segment_t mm_segment;
    unsigned long prot_addr;        /* address of protection-excep. */
    unsigned int trap_no;
    per_struct per_info;
    struct per_regs per_user;       /* User specified PER registers */
    struct per_event per_event;     /* Cause of the last PER trap */
    /* pfault_wait is used to block the process on a pfault event */
    unsigned long pfault_wait;
};
arch/s390/include/asm/ptrace.h
@@ -331,10 +331,60 @@ struct pt_regs
    unsigned short ilc;
    unsigned short svcnr;
};

/*
 * Program event recording (PER) register set.
 */
struct per_regs {
    unsigned long control;          /* PER control bits */
    unsigned long start;            /* PER starting address */
    unsigned long end;              /* PER ending address */
};

/*
 * PER event contains information about the cause of the last PER exception.
 */
struct per_event {
    unsigned short cause;           /* PER code, ATMID and AI */
    unsigned long address;          /* PER address */
    unsigned char paid;             /* PER access identification */
};

/*
 * Simplified per_info structure used to decode the ptrace user space ABI.
 */
struct per_struct_kernel {
    unsigned long cr9;              /* PER control bits */
    unsigned long cr10;             /* PER starting address */
    unsigned long cr11;             /* PER ending address */
    unsigned long bits;             /* Obsolete software bits */
    unsigned long starting_addr;    /* User specified start address */
    unsigned long ending_addr;      /* User specified end address */
    unsigned short perc_atmid;      /* PER trap ATMID */
    unsigned long address;          /* PER trap instruction address */
    unsigned char access_id;        /* PER trap access identification */
};

#define PER_EVENT_MASK              0xE9000000UL

#define PER_EVENT_BRANCH            0x80000000UL
#define PER_EVENT_IFETCH            0x40000000UL
#define PER_EVENT_STORE             0x20000000UL
#define PER_EVENT_STORE_REAL        0x08000000UL
#define PER_EVENT_NULLIFICATION     0x01000000UL

#define PER_CONTROL_MASK            0x00a00000UL

#define PER_CONTROL_BRANCH_ADDRESS  0x00800000UL
#define PER_CONTROL_ALTERATION      0x00200000UL

#endif

/*
 * Now for the program event recording (trace) definitions.
 * Now for the user space program event recording (trace) definitions.
 * The following structures are used only for the ptrace interface, don't
 * touch or even look at it if you don't want to modify the user-space
 * ptrace interface. In particular stay away from it for in-kernel PER.
 */
typedef struct
{
arch/s390/include/asm/qdio.h
@@ -361,6 +361,7 @@ struct qdio_initialize {
    qdio_handler_t *input_handler;
    qdio_handler_t *output_handler;
    void (*queue_start_poll) (struct ccw_device *, int, unsigned long);
    int scan_threshold;
    unsigned long int_parm;
    void **input_sbal_addr_array;
    void **output_sbal_addr_array;
arch/s390/include/asm/s390_ext.h
@@ -1,32 +1,17 @@
/*
 * Copyright IBM Corp. 1999,2010
 * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>,
 *            Martin Schwidefsky <schwidefsky@de.ibm.com>,
 */

#ifndef _S390_EXTINT_H
#define _S390_EXTINT_H

/*
 * include/asm-s390/s390_ext.h
 *
 * S390 version
 * Copyright IBM Corp. 1999,2007
 * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com),
 *            Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>

typedef void (*ext_int_handler_t)(unsigned int, unsigned int, unsigned long);

typedef struct ext_int_info_t {
    struct ext_int_info_t *next;
    ext_int_handler_t handler;
    __u16 code;
} ext_int_info_t;

extern ext_int_info_t *ext_int_hash[];

int register_external_interrupt(__u16 code, ext_int_handler_t handler);
int register_early_external_interrupt(__u16 code, ext_int_handler_t handler,
                                      ext_int_info_t *info);
int unregister_external_interrupt(__u16 code, ext_int_handler_t handler);
int unregister_early_external_interrupt(__u16 code, ext_int_handler_t handler,
                                        ext_int_info_t *info);

#endif
#endif /* _S390_EXTINT_H */
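The header keeps the existing registration entry points. A minimal sketch of how a driver hooks an external interrupt through this interface, assuming a made-up interrupt code 0x1234 and demo_* names (only register_external_interrupt(), unregister_external_interrupt() and the ext_int_handler_t signature come from the header above):

/* Sketch only: registering a handler for external interrupt code 0x1234. */
static void demo_ext_handler(unsigned int ext_int_code,
                             unsigned int param32, unsigned long param64)
{
    /* called from the external interrupt path with the given code */
}

static int __init demo_init(void)
{
    return register_external_interrupt(0x1234, demo_ext_handler);
}

static void __exit demo_exit(void)
{
    unregister_external_interrupt(0x1234, demo_ext_handler);
}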
arch/s390/include/asm/smp.h
@@ -20,7 +20,6 @@ extern void machine_power_off_smp(void);

extern int __cpu_disable (void);
extern void __cpu_die (unsigned int cpu);
extern void cpu_die (void) __attribute__ ((noreturn));
extern int __cpu_up (unsigned int cpu);

extern struct mutex smp_cpu_state_mutex;

@@ -71,8 +70,10 @@ static inline void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)

#ifdef CONFIG_HOTPLUG_CPU
extern int smp_rescan_cpus(void);
extern void __noreturn cpu_die(void);
#else
static inline int smp_rescan_cpus(void) { return 0; }
static inline void cpu_die(void) { }
#endif

#endif /* __ASM_SMP_H */
arch/s390/include/asm/system.h
@@ -20,6 +20,7 @@
struct task_struct;

extern struct task_struct *__switch_to(void *, void *);
extern void update_per_regs(struct task_struct *task);

static inline void save_fp_regs(s390_fp_regs *fpregs)
{

@@ -93,6 +94,7 @@ static inline void restore_access_regs(unsigned int *acrs)
    if (next->mm) {                                         \
        restore_fp_regs(&next->thread.fp_regs);             \
        restore_access_regs(&next->thread.acrs[0]);         \
        update_per_regs(next);                              \
    }                                                       \
    prev = __switch_to(prev,next);                          \
} while (0)

@@ -101,11 +103,9 @@ extern void account_vtime(struct task_struct *, struct task_struct *);
extern void account_tick_vtime(struct task_struct *);

#ifdef CONFIG_PFAULT
extern void pfault_irq_init(void);
extern int pfault_init(void);
extern void pfault_fini(void);
#else /* CONFIG_PFAULT */
#define pfault_irq_init() do { } while (0)
#define pfault_init()     ({-1;})
#define pfault_fini()     do { } while (0)
#endif /* CONFIG_PFAULT */
arch/s390/include/asm/thread_info.h
@@ -74,7 +74,7 @@ struct thread_info {
/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
    return (struct thread_info *)(S390_lowcore.kernel_stack - THREAD_SIZE);
    return (struct thread_info *) S390_lowcore.thread_info;
}

#define THREAD_SIZE_ORDER THREAD_ORDER

@@ -88,7 +88,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_SIGPENDING          2       /* signal pending */
#define TIF_NEED_RESCHED        3       /* rescheduling necessary */
#define TIF_RESTART_SVC         4       /* restart svc with new svc number */
#define TIF_SINGLE_STEP         6       /* deliver sigtrap on return to user */
#define TIF_PER_TRAP            6       /* deliver sigtrap on return to user */
#define TIF_MCCK_PENDING        7       /* machine check handling is pending */
#define TIF_SYSCALL_TRACE       8       /* syscall trace active */
#define TIF_SYSCALL_AUDIT       9       /* syscall auditing active */

@@ -99,14 +99,15 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_31BIT               17      /* 32bit process */
#define TIF_MEMDIE              18      /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK     19      /* restore signal mask in do_signal() */
#define TIF_FREEZE              20      /* thread is freezing for suspend */
#define TIF_SINGLE_STEP         20      /* This task is single stepped */
#define TIF_FREEZE              21      /* thread is freezing for suspend */

#define _TIF_NOTIFY_RESUME      (1<<TIF_NOTIFY_RESUME)
#define _TIF_RESTORE_SIGMASK    (1<<TIF_RESTORE_SIGMASK)
#define _TIF_SIGPENDING         (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED       (1<<TIF_NEED_RESCHED)
#define _TIF_RESTART_SVC        (1<<TIF_RESTART_SVC)
#define _TIF_SINGLE_STEP        (1<<TIF_SINGLE_STEP)
#define _TIF_PER_TRAP           (1<<TIF_PER_TRAP)
#define _TIF_MCCK_PENDING       (1<<TIF_MCCK_PENDING)
#define _TIF_SYSCALL_TRACE      (1<<TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT      (1<<TIF_SYSCALL_AUDIT)

@@ -114,6 +115,7 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_POLLING_NRFLAG     (1<<TIF_POLLING_NRFLAG)
#define _TIF_31BIT              (1<<TIF_31BIT)
#define _TIF_SINGLE_STEP        (1<<TIF_FREEZE)
#define _TIF_FREEZE             (1<<TIF_FREEZE)

#endif /* __KERNEL__ */
arch/s390/include/asm/timex.h
@@ -11,6 +11,8 @@
#ifndef _ASM_S390_TIMEX_H
#define _ASM_S390_TIMEX_H

#include <asm/lowcore.h>

/* The value of the TOD clock for 1.1.1970. */
#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL

@@ -49,6 +51,24 @@ static inline void store_clock_comparator(__u64 *time)
    asm volatile("stckc %0" : "=Q" (*time));
}

void clock_comparator_work(void);

static inline unsigned long long local_tick_disable(void)
{
    unsigned long long old;

    old = S390_lowcore.clock_comparator;
    S390_lowcore.clock_comparator = -1ULL;
    set_clock_comparator(S390_lowcore.clock_comparator);
    return old;
}

static inline void local_tick_enable(unsigned long long comp)
{
    S390_lowcore.clock_comparator = comp;
    set_clock_comparator(S390_lowcore.clock_comparator);
}

#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */

typedef unsigned long long cycles_t;
arch/s390/kernel/asm-offsets.c
@@ -23,14 +23,16 @@ int main(void)
{
    DEFINE(__THREAD_info, offsetof(struct task_struct, stack));
    DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp));
    DEFINE(__THREAD_per, offsetof(struct task_struct, thread.per_info));
    DEFINE(__THREAD_mm_segment, offsetof(struct task_struct, thread.mm_segment));
    BLANK();
    DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
    BLANK();
    DEFINE(__PER_atmid, offsetof(per_struct, lowcore.words.perc_atmid));
    DEFINE(__PER_address, offsetof(per_struct, lowcore.words.address));
    DEFINE(__PER_access_id, offsetof(per_struct, lowcore.words.access_id));
    DEFINE(__THREAD_per_cause,
           offsetof(struct task_struct, thread.per_event.cause));
    DEFINE(__THREAD_per_address,
           offsetof(struct task_struct, thread.per_event.address));
    DEFINE(__THREAD_per_paid,
           offsetof(struct task_struct, thread.per_event.paid));
    BLANK();
    DEFINE(__TI_task, offsetof(struct thread_info, task));
    DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain));

@@ -85,9 +87,9 @@ int main(void)
    DEFINE(__LC_PGM_ILC, offsetof(struct _lowcore, pgm_ilc));
    DEFINE(__LC_PGM_INT_CODE, offsetof(struct _lowcore, pgm_code));
    DEFINE(__LC_TRANS_EXC_CODE, offsetof(struct _lowcore, trans_exc_code));
    DEFINE(__LC_PER_ATMID, offsetof(struct _lowcore, per_perc_atmid));
    DEFINE(__LC_PER_CAUSE, offsetof(struct _lowcore, per_perc_atmid));
    DEFINE(__LC_PER_ADDRESS, offsetof(struct _lowcore, per_address));
    DEFINE(__LC_PER_ACCESS_ID, offsetof(struct _lowcore, per_access_id));
    DEFINE(__LC_PER_PAID, offsetof(struct _lowcore, per_access_id));
    DEFINE(__LC_AR_MODE_ID, offsetof(struct _lowcore, ar_access_id));
    DEFINE(__LC_SUBCHANNEL_ID, offsetof(struct _lowcore, subchannel_id));
    DEFINE(__LC_SUBCHANNEL_NR, offsetof(struct _lowcore, subchannel_nr));
arch/s390/kernel/compat_ptrace.h
@@ -4,40 +4,19 @@
#include <asm/ptrace.h>    /* needed for NUM_CR_WORDS */
#include "compat_linux.h"  /* needed for psw_compat_t */

typedef struct {
    __u32 cr[NUM_CR_WORDS];
} per_cr_words32;
struct compat_per_struct_kernel {
    __u32 cr9;              /* PER control bits */
    __u32 cr10;             /* PER starting address */
    __u32 cr11;             /* PER ending address */
    __u32 bits;             /* Obsolete software bits */
    __u32 starting_addr;    /* User specified start address */
    __u32 ending_addr;      /* User specified end address */
    __u16 perc_atmid;       /* PER trap ATMID */
    __u32 address;          /* PER trap instruction address */
    __u8 access_id;         /* PER trap access identification */
};

typedef struct {
    __u16 perc_atmid;       /* 0x096 */
    __u32 address;          /* 0x098 */
    __u8 access_id;         /* 0x0a1 */
} per_lowcore_words32;

typedef struct {
    union {
        per_cr_words32 words;
    } control_regs;
    /*
     * Use these flags instead of setting em_instruction_fetch
     * directly they are used so that single stepping can be
     * switched on & off while not affecting other tracing
     */
    unsigned single_step       : 1;
    unsigned instruction_fetch : 1;
    unsigned                   : 30;
    /*
     * These addresses are copied into cr10 & cr11 if single
     * stepping is switched off
     */
    __u32 starting_addr;
    __u32 ending_addr;
    union {
        per_lowcore_words32 words;
    } lowcore;
} per_struct32;

struct user_regs_struct32
struct compat_user_regs_struct
{
    psw_compat_t psw;
    u32 gprs[NUM_GPRS];

@@ -50,14 +29,14 @@ struct user_regs_struct32
 * itself as there is no "official" ptrace interface for hardware
 * watchpoints. This is the way intel does it.
 */
    per_struct32 per_info;
    struct compat_per_struct_kernel per_info;
    u32 ieee_instruction_pointer;   /* obsolete, always 0 */
};

struct user32 {
struct compat_user {
    /* We start with the registers, to mimic the way that "memory"
       is returned from the ptrace(3,...) function. */
    struct user_regs_struct32 regs; /* Where the registers are actually stored */
    struct compat_user_regs_struct regs;
    /* The rest of this junk is to help gdb figure out what goes where */
    u32 u_tsize;    /* Text segment size (pages). */
    u32 u_dsize;    /* Data segment size (pages). */

@@ -79,6 +58,6 @@ typedef struct
    __u32 len;
    __u32 kernel_addr;
    __u32 process_addr;
} ptrace_area_emu31;
} compat_ptrace_area;

#endif /* _PTRACE32_H */
@@ -9,7 +9,6 @@
* Heiko Carstens <heiko.carstens@de.ibm.com>
*/

#include <linux/sys.h>
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/cache.h>
@@ -49,7 +48,7 @@ SP_SVCNR = STACK_FRAME_OVERHEAD + __PT_SVCNR
SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE

_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP )
_TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_PER_TRAP )
_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_MCCK_PENDING)
_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
@@ -110,31 +109,36 @@ STACK_SIZE = 1 << STACK_SHIFT
1: stm %r10,%r11,\lc_sum
.endm

.macro SAVE_ALL_SVC psworg,savearea
stm %r12,%r15,\savearea
l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13
l %r15,__LC_KERNEL_STACK # problem state -> load ksp
s %r15,BASED(.Lc_spsize) # make room for registers & psw
.endm

.macro SAVE_ALL_BASE savearea
stm %r12,%r15,\savearea
l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13
.endm

.macro SAVE_ALL_SVC psworg,savearea
la %r12,\psworg
l %r15,__LC_KERNEL_STACK # problem state -> load ksp
.endm

.macro SAVE_ALL_SYNC psworg,savearea
la %r12,\psworg
.macro SAVE_ALL_PGM psworg,savearea
tm \psworg+1,0x01 # test problem state bit
bz BASED(2f) # skip stack setup save
l %r15,__LC_KERNEL_STACK # problem state -> load ksp
#ifdef CONFIG_CHECK_STACK
b BASED(3f)
2: tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
bz BASED(stack_overflow)
3:
bnz BASED(1f)
tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
bnz BASED(2f)
la %r12,\psworg
b BASED(stack_overflow)
#else
bz BASED(2f)
#endif
2:
1: l %r15,__LC_KERNEL_STACK # problem state -> load ksp
2: s %r15,BASED(.Lc_spsize) # make room for registers & psw
.endm

.macro SAVE_ALL_ASYNC psworg,savearea
stm %r12,%r15,\savearea
l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13
la %r12,\psworg
tm \psworg+1,0x01 # test problem state bit
bnz BASED(1f) # from user -> load async stack
@@ -149,27 +153,23 @@ STACK_SIZE = 1 << STACK_SHIFT
0: l %r14,__LC_ASYNC_STACK # are we already on the async stack ?
slr %r14,%r15
sra %r14,STACK_SHIFT
be BASED(2f)
1: l %r15,__LC_ASYNC_STACK
#ifdef CONFIG_CHECK_STACK
b BASED(3f)
2: tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
bz BASED(stack_overflow)
3:
bnz BASED(1f)
tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
bnz BASED(2f)
b BASED(stack_overflow)
#else
bz BASED(2f)
#endif
2:
1: l %r15,__LC_ASYNC_STACK
2: s %r15,BASED(.Lc_spsize) # make room for registers & psw
.endm

.macro CREATE_STACK_FRAME psworg,savearea
s %r15,BASED(.Lc_spsize) # make room for registers & psw
mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack
.macro CREATE_STACK_FRAME savearea
xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
st %r2,SP_ORIG_R2(%r15) # store original content of gpr 2
icm %r12,12,__LC_SVC_ILC
stm %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack
st %r12,SP_ILC(%r15)
mvc SP_R12(16,%r15),\savearea # move %r12-%r15 to stack
la %r12,0
st %r12,__SF_BACKCHAIN(%r15) # clear back chain
stm %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack
.endm

.macro RESTORE_ALL psworg,sync
@@ -188,6 +188,8 @@ STACK_SIZE = 1 << STACK_SHIFT
ssm __SF_EMPTY(%r15)
.endm

.section .kprobes.text, "ax"

/*
* Scheduler resume function, called by switch_to
* gpr2 = (task_struct *) prev
@@ -198,31 +200,21 @@ STACK_SIZE = 1 << STACK_SHIFT
.globl __switch_to
__switch_to:
basr %r1,0
__switch_to_base:
tm __THREAD_per(%r3),0xe8 # new process is using per ?
bz __switch_to_noper-__switch_to_base(%r1) # if not we're fine
stctl %c9,%c11,__SF_EMPTY(%r15) # We are using per stuff
clc __THREAD_per(12,%r3),__SF_EMPTY(%r15)
be __switch_to_noper-__switch_to_base(%r1) # we got away w/o bashing TLB's
lctl %c9,%c11,__THREAD_per(%r3) # Nope we didn't
__switch_to_noper:
l %r4,__THREAD_info(%r2) # get thread_info of prev
0: l %r4,__THREAD_info(%r2) # get thread_info of prev
l %r5,__THREAD_info(%r3) # get thread_info of next
tm __TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending?
bz __switch_to_no_mcck-__switch_to_base(%r1)
ni __TI_flags+3(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
l %r4,__THREAD_info(%r3) # get thread_info of next
oi __TI_flags+3(%r4),_TIF_MCCK_PENDING # set it in next
__switch_to_no_mcck:
stm %r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task
st %r15,__THREAD_ksp(%r2) # store kernel stack to prev->tss.ksp
l %r15,__THREAD_ksp(%r3) # load kernel stack from next->tss.ksp
lm %r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task
st %r3,__LC_CURRENT # __LC_CURRENT = current task struct
lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
l %r3,__THREAD_info(%r3) # load thread_info from task struct
st %r3,__LC_THREAD_INFO
ahi %r3,STACK_SIZE
st %r3,__LC_KERNEL_STACK # __LC_KERNEL_STACK = new kernel stack
bz 1f-0b(%r1)
ni __TI_flags+3(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
oi __TI_flags+3(%r5),_TIF_MCCK_PENDING # set it in next
1: stm %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
st %r15,__THREAD_ksp(%r2) # store kernel stack of prev
l %r15,__THREAD_ksp(%r3) # load kernel stack of next
lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
lm %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
st %r3,__LC_CURRENT # store task struct of next
st %r5,__LC_THREAD_INFO # store thread info of next
ahi %r5,STACK_SIZE # end of kernel stack of next
st %r5,__LC_KERNEL_STACK # store end of kernel stack
br %r14

__critical_start:
@@ -235,10 +227,11 @@ __critical_start:
system_call:
stpt __LC_SYNC_ENTER_TIMER
sysc_saveall:
SAVE_ALL_BASE __LC_SAVE_AREA
SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
lh %r7,0x8a # get svc number from lowcore
CREATE_STACK_FRAME __LC_SAVE_AREA
mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW
mvc SP_ILC(4,%r15),__LC_SVC_ILC
l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
sysc_vtime:
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
sysc_stime:
@@ -246,20 +239,20 @@ sysc_stime:
sysc_update:
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
sysc_do_svc:
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
ltr %r7,%r7 # test for svc 0
xr %r7,%r7
icm %r7,3,SP_SVCNR(%r15) # load svc number and test for svc 0
bnz BASED(sysc_nr_ok) # svc number > 0
# svc 0: system call number in %r1
cl %r1,BASED(.Lnr_syscalls)
bnl BASED(sysc_nr_ok)
sth %r1,SP_SVCNR(%r15)
lr %r7,%r1 # copy svc number to %r7
sysc_nr_ok:
sth %r7,SP_SVCNR(%r15)
sll %r7,2 # svc number *4
l %r8,BASED(.Lsysc_table)
tm __TI_flags+2(%r9),_TIF_SYSCALL
l %r10,BASED(.Lsysc_table)
tm __TI_flags+2(%r12),_TIF_SYSCALL
mvc SP_ARGS(4,%r15),SP_R7(%r15)
l %r8,0(%r7,%r8) # get system call addr.
l %r8,0(%r7,%r10) # get system call addr.
bnz BASED(sysc_tracesys)
basr %r14,%r8 # call sys_xxxx
st %r2,SP_R2(%r15) # store return value (change R2 on stack)
@@ -267,7 +260,7 @@ sysc_nr_ok:
sysc_return:
LOCKDEP_SYS_EXIT
sysc_tif:
tm __TI_flags+3(%r9),_TIF_WORK_SVC
tm __TI_flags+3(%r12),_TIF_WORK_SVC
bnz BASED(sysc_work) # there is work to do (signals etc.)
sysc_restore:
RESTORE_ALL __LC_RETURN_PSW,1
@@ -284,17 +277,17 @@ sysc_work:
# One of the work bits is on. Find out which one.
#
sysc_work_tif:
tm __TI_flags+3(%r9),_TIF_MCCK_PENDING
tm __TI_flags+3(%r12),_TIF_MCCK_PENDING
bo BASED(sysc_mcck_pending)
tm __TI_flags+3(%r9),_TIF_NEED_RESCHED
tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
bo BASED(sysc_reschedule)
tm __TI_flags+3(%r9),_TIF_SIGPENDING
tm __TI_flags+3(%r12),_TIF_SIGPENDING
bo BASED(sysc_sigpending)
tm __TI_flags+3(%r9),_TIF_NOTIFY_RESUME
tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
bo BASED(sysc_notify_resume)
tm __TI_flags+3(%r9),_TIF_RESTART_SVC
tm __TI_flags+3(%r12),_TIF_RESTART_SVC
bo BASED(sysc_restart)
tm __TI_flags+3(%r9),_TIF_SINGLE_STEP
tm __TI_flags+3(%r12),_TIF_PER_TRAP
bo BASED(sysc_singlestep)
b BASED(sysc_return) # beware of critical section cleanup

@@ -318,13 +311,13 @@ sysc_mcck_pending:
# _TIF_SIGPENDING is set, call do_signal
#
sysc_sigpending:
ni __TI_flags+3(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
ni __TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
la %r2,SP_PTREGS(%r15) # load pt_regs
l %r1,BASED(.Ldo_signal)
basr %r14,%r1 # call do_signal
tm __TI_flags+3(%r9),_TIF_RESTART_SVC
tm __TI_flags+3(%r12),_TIF_RESTART_SVC
bo BASED(sysc_restart)
tm __TI_flags+3(%r9),_TIF_SINGLE_STEP
tm __TI_flags+3(%r12),_TIF_PER_TRAP
bo BASED(sysc_singlestep)
b BASED(sysc_return)

@@ -342,23 +335,23 @@ sysc_notify_resume:
# _TIF_RESTART_SVC is set, set up registers and restart svc
#
sysc_restart:
ni __TI_flags+3(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
ni __TI_flags+3(%r12),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
l %r7,SP_R2(%r15) # load new svc number
mvc SP_R2(4,%r15),SP_ORIG_R2(%r15) # restore first argument
lm %r2,%r6,SP_R2(%r15) # load svc arguments
sth %r7,SP_SVCNR(%r15)
b BASED(sysc_nr_ok) # restart svc

#
# _TIF_SINGLE_STEP is set, call do_single_step
# _TIF_PER_TRAP is set, call do_per_trap
#
sysc_singlestep:
ni __TI_flags+3(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
mvi SP_SVCNR(%r15),0xff # set trap indication to pgm check
mvi SP_SVCNR+1(%r15),0xff
ni __TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number
la %r2,SP_PTREGS(%r15) # address of register-save area
l %r1,BASED(.Lhandle_per) # load adr. of per handler
la %r14,BASED(sysc_return) # load adr. of system return
br %r1 # branch to do_single_step
br %r1 # branch to do_per_trap

#
# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
@@ -368,15 +361,15 @@ sysc_tracesys:
l %r1,BASED(.Ltrace_entry)
la %r2,SP_PTREGS(%r15) # load pt_regs
la %r3,0
srl %r7,2
st %r7,SP_R2(%r15)
xr %r0,%r0
icm %r0,3,SP_SVCNR(%r15)
st %r0,SP_R2(%r15)
basr %r14,%r1
cl %r2,BASED(.Lnr_syscalls)
bnl BASED(sysc_tracenogo)
l %r8,BASED(.Lsysc_table)
lr %r7,%r2
sll %r7,2 # svc number *4
l %r8,0(%r7,%r8)
l %r8,0(%r7,%r10)
sysc_tracego:
lm %r3,%r6,SP_R3(%r15)
mvc SP_ARGS(4,%r15),SP_R7(%r15)
@@ -384,7 +377,7 @@ sysc_tracego:
basr %r14,%r8 # call sys_xxx
st %r2,SP_R2(%r15) # store return value
sysc_tracenogo:
tm __TI_flags+2(%r9),_TIF_SYSCALL
tm __TI_flags+2(%r12),_TIF_SYSCALL
bz BASED(sysc_return)
l %r1,BASED(.Ltrace_exit)
la %r2,SP_PTREGS(%r15) # load pt_regs
@@ -397,7 +390,7 @@ sysc_tracenogo:
.globl ret_from_fork
ret_from_fork:
l %r13,__LC_SVC_NEW_PSW+4
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
tm SP_PSW+1(%r15),0x01 # forking a kernel thread ?
bo BASED(0f)
st %r15,SP_R15(%r15) # store stack pointer for new kthread
@@ -432,8 +425,8 @@ kernel_execve:
0: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts
l %r15,__LC_KERNEL_STACK # load ksp
s %r15,BASED(.Lc_spsize) # make room for registers & psw
l %r9,__LC_THREAD_INFO
mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs
l %r12,__LC_THREAD_INFO
xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
l %r1,BASED(.Lexecve_tail)
@@ -463,26 +456,27 @@ pgm_check_handler:
SAVE_ALL_BASE __LC_SAVE_AREA
tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
bnz BASED(pgm_per) # got per exception -> special case
SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_SAVE_AREA
xc SP_ILC(4,%r15),SP_ILC(%r15)
mvc SP_PSW(8,%r15),__LC_PGM_OLD_PSW
l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
bz BASED(pgm_no_vtime)
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime:
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
l %r3,__LC_PGM_ILC # load program interruption code
l %r4,__LC_TRANS_EXC_CODE
REENABLE_IRQS
la %r8,0x7f
nr %r8,%r3
pgm_do_call:
l %r7,BASED(.Ljump_table)
sll %r8,2
l %r7,0(%r8,%r7) # load address of handler routine
l %r1,BASED(.Ljump_table)
l %r1,0(%r8,%r1) # load address of handler routine
la %r2,SP_PTREGS(%r15) # address of register-save area
basr %r14,%r7 # branch to interrupt-handler
basr %r14,%r1 # branch to interrupt-handler
pgm_exit:
b BASED(sysc_return)

@@ -503,33 +497,34 @@ pgm_per:
# Normal per exception
#
pgm_per_std:
SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_SAVE_AREA
mvc SP_PSW(8,%r15),__LC_PGM_OLD_PSW
l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
bz BASED(pgm_no_vtime2)
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime2:
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
l %r1,__TI_task(%r9)
l %r1,__TI_task(%r12)
tm SP_PSW+1(%r15),0x01 # kernel per event ?
bz BASED(kernel_per)
mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
mvc __THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE
mvc __THREAD_per_address(4,%r1),__LC_PER_ADDRESS
mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID
oi __TI_flags+3(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
l %r3,__LC_PGM_ILC # load program interruption code
l %r4,__LC_TRANS_EXC_CODE
REENABLE_IRQS
la %r8,0x7f
nr %r8,%r3 # clear per-event-bit and ilc
be BASED(pgm_exit2) # only per or per+check ?
l %r7,BASED(.Ljump_table)
sll %r8,2
l %r7,0(%r8,%r7) # load address of handler routine
l %r1,BASED(.Ljump_table)
l %r1,0(%r8,%r1) # load address of handler routine
la %r2,SP_PTREGS(%r15) # address of register-save area
basr %r14,%r7 # branch to interrupt-handler
basr %r14,%r1 # branch to interrupt-handler
pgm_exit2:
b BASED(sysc_return)

@@ -537,18 +532,19 @@ pgm_exit2:
# it was a single stepped SVC that is causing all the trouble
#
pgm_svcper:
SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_SAVE_AREA
mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW
mvc SP_ILC(4,%r15),__LC_SVC_ILC
l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
lh %r7,0x8a # get svc number from lowcore
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
l %r8,__TI_task(%r9)
mvc __THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID
mvc __THREAD_per+__PER_address(4,%r8),__LC_PER_ADDRESS
mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
l %r8,__TI_task(%r12)
mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE
mvc __THREAD_per_address(4,%r8),__LC_PER_ADDRESS
mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID
oi __TI_flags+3(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
lm %r2,%r6,SP_R2(%r15) # load svc arguments
b BASED(sysc_do_svc)
@@ -558,8 +554,7 @@ pgm_svcper:
#
kernel_per:
REENABLE_IRQS
mvi SP_SVCNR(%r15),0xff # set trap indication to pgm check
mvi SP_SVCNR+1(%r15),0xff
xc SP_SVCNR(2,%r15),SP_SVCNR(%r15)
la %r2,SP_PTREGS(%r15) # address of register-save area
l %r1,BASED(.Lhandle_per) # load adr. of per handler
basr %r14,%r1 # branch to do_single_step
@@ -573,9 +568,10 @@ kernel_per:
io_int_handler:
stck __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
SAVE_ALL_BASE __LC_SAVE_AREA+16
SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
CREATE_STACK_FRAME __LC_SAVE_AREA+16
mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack
l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
bz BASED(io_no_vtime)
UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
@@ -583,7 +579,6 @@ io_int_handler:
mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
io_no_vtime:
TRACE_IRQS_OFF
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ
la %r2,SP_PTREGS(%r15) # address of register-save area
basr %r14,%r1 # branch to standard irq handler
@@ -591,7 +586,7 @@ io_return:
LOCKDEP_SYS_EXIT
TRACE_IRQS_ON
io_tif:
tm __TI_flags+3(%r9),_TIF_WORK_INT
tm __TI_flags+3(%r12),_TIF_WORK_INT
bnz BASED(io_work) # there is work to do (signals etc.)
io_restore:
RESTORE_ALL __LC_RETURN_PSW,0
@@ -609,9 +604,9 @@ io_work:
bo BASED(io_work_user) # yes -> do resched & signal
#ifdef CONFIG_PREEMPT
# check for preemptive scheduling
icm %r0,15,__TI_precount(%r9)
icm %r0,15,__TI_precount(%r12)
bnz BASED(io_restore) # preemption disabled
tm __TI_flags+3(%r9),_TIF_NEED_RESCHED
tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
bno BASED(io_restore)
# switch to kernel stack
l %r1,SP_R15(%r15)
@@ -645,13 +640,13 @@ io_work_user:
# and _TIF_MCCK_PENDING
#
io_work_tif:
tm __TI_flags+3(%r9),_TIF_MCCK_PENDING
tm __TI_flags+3(%r12),_TIF_MCCK_PENDING
bo BASED(io_mcck_pending)
tm __TI_flags+3(%r9),_TIF_NEED_RESCHED
tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
bo BASED(io_reschedule)
tm __TI_flags+3(%r9),_TIF_SIGPENDING
tm __TI_flags+3(%r12),_TIF_SIGPENDING
bo BASED(io_sigpending)
tm __TI_flags+3(%r9),_TIF_NOTIFY_RESUME
tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
bo BASED(io_notify_resume)
b BASED(io_return) # beware of critical section cleanup

@@ -711,16 +706,16 @@ io_notify_resume:
ext_int_handler:
stck __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
SAVE_ALL_BASE __LC_SAVE_AREA+16
SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
CREATE_STACK_FRAME __LC_SAVE_AREA+16
mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack
l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
bz BASED(ext_no_vtime)
UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
ext_no_vtime:
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
TRACE_IRQS_OFF
la %r2,SP_PTREGS(%r15) # address of register-save area
l %r3,__LC_CPU_ADDRESS # get cpu address + interruption code
@@ -775,7 +770,10 @@ mcck_int_main:
sra %r14,PAGE_SHIFT
be BASED(0f)
l %r15,__LC_PANIC_STACK # load panic stack
0: CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32
0: s %r15,BASED(.Lc_spsize) # make room for registers & psw
CREATE_STACK_FRAME __LC_SAVE_AREA+32
mvc SP_PSW(8,%r15),0(%r12)
l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid?
bno BASED(mcck_no_vtime) # no -> skip cleanup critical
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
@@ -784,7 +782,6 @@ mcck_int_main:
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_MCCK_ENTER_TIMER
mcck_no_vtime:
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
la %r2,SP_PTREGS(%r15) # load pt_regs
l %r1,BASED(.Ls390_mcck)
basr %r14,%r1 # call machine check handler
@@ -796,7 +793,7 @@ mcck_no_vtime:
xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
lr %r15,%r1
stosm __SF_EMPTY(%r15),0x04 # turn dat on
tm __TI_flags+3(%r9),_TIF_MCCK_PENDING
tm __TI_flags+3(%r12),_TIF_MCCK_PENDING
bno BASED(mcck_return)
TRACE_IRQS_OFF
l %r1,BASED(.Ls390_handle_mcck)
@@ -861,6 +858,8 @@ restart_crash:
restart_go:
#endif

.section .kprobes.text, "ax"

#ifdef CONFIG_CHECK_STACK
/*
* The synchronous or the asynchronous stack overflowed. We are dead.
@@ -943,12 +942,13 @@ cleanup_system_call:
bh BASED(0f)
mvc __LC_SAVE_AREA(16),0(%r12)
0: st %r13,4(%r12)
st %r12,__LC_SAVE_AREA+48 # argh
SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
l %r12,__LC_SAVE_AREA+48 # argh
l %r15,__LC_KERNEL_STACK # problem state -> load ksp
s %r15,BASED(.Lc_spsize) # make room for registers & psw
st %r15,12(%r12)
lh %r7,0x8a
CREATE_STACK_FRAME __LC_SAVE_AREA
mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW
mvc SP_ILC(4,%r15),__LC_SVC_ILC
mvc 0(4,%r12),__LC_THREAD_INFO
cleanup_vtime:
clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12)
bhe BASED(cleanup_stime)
@@ -1046,7 +1046,7 @@ cleanup_io_restore_insn:
.Ldo_signal: .long do_signal
.Ldo_notify_resume:
.long do_notify_resume
.Lhandle_per: .long do_single_step
.Lhandle_per: .long do_per_trap
.Ldo_execve: .long do_execve
.Lexecve_tail: .long execve_tail
.Ljump_table: .long pgm_check_table

@@ -12,7 +12,7 @@ pgm_check_handler_t do_dat_exception;

extern int sysctl_userprocess_debug;

void do_single_step(struct pt_regs *regs);
void do_per_trap(struct pt_regs *regs);
void syscall_trace(struct pt_regs *regs, int entryexit);
void kernel_stack_overflow(struct pt_regs * regs);
void do_signal(struct pt_regs *regs);

@@ -51,7 +51,7 @@ STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE = 1 << STACK_SHIFT

_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP )
_TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_PER_TRAP )
_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_MCCK_PENDING)
_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
@@ -197,6 +197,8 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
ssm __SF_EMPTY(%r15)
.endm

.section .kprobes.text, "ax"

/*
* Scheduler resume function, called by switch_to
* gpr2 = (task_struct *) prev
@@ -206,30 +208,21 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
*/
.globl __switch_to
__switch_to:
tm __THREAD_per+4(%r3),0xe8 # is the new process using per ?
jz __switch_to_noper # if not we're fine
stctg %c9,%c11,__SF_EMPTY(%r15)# We are using per stuff
clc __THREAD_per(24,%r3),__SF_EMPTY(%r15)
je __switch_to_noper # we got away without bashing TLB's
lctlg %c9,%c11,__THREAD_per(%r3) # Nope we didn't
__switch_to_noper:
lg %r4,__THREAD_info(%r2) # get thread_info of prev
lg %r4,__THREAD_info(%r2) # get thread_info of prev
lg %r5,__THREAD_info(%r3) # get thread_info of next
tm __TI_flags+7(%r4),_TIF_MCCK_PENDING # machine check pending?
jz __switch_to_no_mcck
ni __TI_flags+7(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
lg %r4,__THREAD_info(%r3) # get thread_info of next
oi __TI_flags+7(%r4),_TIF_MCCK_PENDING # set it in next
__switch_to_no_mcck:
stmg %r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task
stg %r15,__THREAD_ksp(%r2) # store kernel stack to prev->tss.ksp
lg %r15,__THREAD_ksp(%r3) # load kernel stack from next->tss.ksp
lmg %r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task
stg %r3,__LC_CURRENT # __LC_CURRENT = current task struct
lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
lg %r3,__THREAD_info(%r3) # load thread_info from task struct
stg %r3,__LC_THREAD_INFO
aghi %r3,STACK_SIZE
stg %r3,__LC_KERNEL_STACK # __LC_KERNEL_STACK = new kernel stack
jz 0f
ni __TI_flags+7(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
oi __TI_flags+7(%r5),_TIF_MCCK_PENDING # set it in next
0: stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
stg %r15,__THREAD_ksp(%r2) # store kernel stack of prev
lg %r15,__THREAD_ksp(%r3) # load kernel stack of next
lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
stg %r3,__LC_CURRENT # store task struct of next
stg %r5,__LC_THREAD_INFO # store thread info of next
aghi %r5,STACK_SIZE # end of kernel stack of next
stg %r5,__LC_KERNEL_STACK # store end of kernel stack
br %r14

__critical_start:
@@ -309,7 +302,7 @@ sysc_work_tif:
jo sysc_notify_resume
tm __TI_flags+7(%r12),_TIF_RESTART_SVC
jo sysc_restart
tm __TI_flags+7(%r12),_TIF_SINGLE_STEP
tm __TI_flags+7(%r12),_TIF_PER_TRAP
jo sysc_singlestep
j sysc_return # beware of critical section cleanup

@@ -331,12 +324,12 @@ sysc_mcck_pending:
# _TIF_SIGPENDING is set, call do_signal
#
sysc_sigpending:
ni __TI_flags+7(%r12),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
la %r2,SP_PTREGS(%r15) # load pt_regs
brasl %r14,do_signal # call do_signal
tm __TI_flags+7(%r12),_TIF_RESTART_SVC
jo sysc_restart
tm __TI_flags+7(%r12),_TIF_SINGLE_STEP
tm __TI_flags+7(%r12),_TIF_PER_TRAP
jo sysc_singlestep
j sysc_return

@@ -361,14 +354,14 @@ sysc_restart:
j sysc_nr_ok # restart svc

#
# _TIF_SINGLE_STEP is set, call do_single_step
# _TIF_PER_TRAP is set, call do_per_trap
#
sysc_singlestep:
ni __TI_flags+7(%r12),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number
la %r2,SP_PTREGS(%r15) # address of register-save area
larl %r14,sysc_return # load adr. of system return
jg do_single_step # branch to do_sigtrap
jg do_per_trap

#
# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
@@ -524,10 +517,10 @@ pgm_no_vtime2:
lg %r1,__TI_task(%r12)
tm SP_PSW+1(%r15),0x01 # kernel per event ?
jz kernel_per
mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
mvc __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
oi __TI_flags+7(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE
mvc __THREAD_per_address(8,%r1),__LC_PER_ADDRESS
mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID
oi __TI_flags+7(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
lgf %r3,__LC_PGM_ILC # load program interruption code
lg %r4,__LC_TRANS_EXC_CODE
REENABLE_IRQS
@@ -556,10 +549,10 @@ pgm_svcper:
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
LAST_BREAK
lg %r8,__TI_task(%r12)
mvc __THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID
mvc __THREAD_per+__PER_address(8,%r8),__LC_PER_ADDRESS
mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
oi __TI_flags+7(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE
mvc __THREAD_per_address(8,%r8),__LC_PER_ADDRESS
mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID
oi __TI_flags+7(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
lmg %r2,%r6,SP_R2(%r15) # load svc arguments
j sysc_do_svc
@@ -571,7 +564,7 @@ kernel_per:
REENABLE_IRQS
xc SP_SVCNR(2,%r15),SP_SVCNR(%r15)
la %r2,SP_PTREGS(%r15) # address of register-save area
brasl %r14,do_single_step
brasl %r14,do_per_trap
j pgm_exit

/*
@@ -868,6 +861,8 @@ restart_crash:
restart_go:
#endif

.section .kprobes.text, "ax"

#ifdef CONFIG_CHECK_STACK
/*
* The synchronous or the asynchronous stack overflowed. We are dead.

@@ -4,7 +4,7 @@
* Copyright IBM Corp. 2009
*
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
*
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/

#include <linux/hardirq.h>
@@ -12,176 +12,144 @@
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kprobes.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>

#ifdef CONFIG_64BIT
#define MCOUNT_OFFSET_RET 12
#else
#define MCOUNT_OFFSET_RET 22
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

void ftrace_disable_code(void);
void ftrace_disable_return(void);
void ftrace_call_code(void);
void ftrace_nop_code(void);

#define FTRACE_INSN_SIZE 4
void ftrace_enable_insn(void);

#ifdef CONFIG_64BIT

/*
* The 64-bit mcount code looks like this:
* stg %r14,8(%r15) # offset 0
* > larl %r1,<&counter> # offset 6
* > brasl %r14,_mcount # offset 12
* lg %r14,8(%r15) # offset 18
* Total length is 24 bytes. The middle two instructions of the mcount
* block get overwritten by ftrace_make_nop / ftrace_make_call.
* The 64-bit enabled ftrace code block looks like this:
* stg %r14,8(%r15) # offset 0
* > lg %r1,__LC_FTRACE_FUNC # offset 6
* > lgr %r0,%r0 # offset 12
* > basr %r14,%r1 # offset 16
* lg %r14,8(%15) # offset 18
* The return points of the mcount/ftrace function have the same offset 18.
* The 64-bit disable ftrace code block looks like this:
* stg %r14,8(%r15) # offset 0
* > jg .+18 # offset 6
* > lgr %r0,%r0 # offset 12
* > basr %r14,%r1 # offset 16
* lg %r14,8(%15) # offset 18
* The jg instruction branches to offset 24 to skip as many instructions
* as possible.
*/
asm(
" .align 4\n"
"ftrace_disable_code:\n"
" j 0f\n"
" .word 0x0024\n"
" lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n"
" basr %r14,%r1\n"
"ftrace_disable_return:\n"
" lg %r14,8(15)\n"
" jg 0f\n"
" lgr %r0,%r0\n"
"0:\n");

asm(
" basr %r14,%r1\n"
"0:\n"
" .align 4\n"
"ftrace_nop_code:\n"
" j .+"__stringify(MCOUNT_INSN_SIZE)"\n");
"ftrace_enable_insn:\n"
" lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n");

asm(
" .align 4\n"
"ftrace_call_code:\n"
" stg %r14,8(%r15)\n");
#define FTRACE_INSN_SIZE 6

#else /* CONFIG_64BIT */

/*
* The 31-bit mcount code looks like this:
* st %r14,4(%r15) # offset 0
* > bras %r1,0f # offset 4
* > .long _mcount # offset 8
* > .long <&counter> # offset 12
* > 0: l %r14,0(%r1) # offset 16
* > l %r1,4(%r1) # offset 20
* basr %r14,%r14 # offset 24
* l %r14,4(%r15) # offset 26
* Total length is 30 bytes. The twenty bytes starting from offset 4
* to offset 24 get overwritten by ftrace_make_nop / ftrace_make_call.
* The 31-bit enabled ftrace code block looks like this:
* st %r14,4(%r15) # offset 0
* > l %r14,__LC_FTRACE_FUNC # offset 4
* > j 0f # offset 8
* > .fill 12,1,0x07 # offset 12
* 0: basr %r14,%r14 # offset 24
* l %r14,4(%r14) # offset 26
* The return points of the mcount/ftrace function have the same offset 26.
* The 31-bit disabled ftrace code block looks like this:
* st %r14,4(%r15) # offset 0
* > j .+26 # offset 4
* > j 0f # offset 8
* > .fill 12,1,0x07 # offset 12
* 0: basr %r14,%r14 # offset 24
* l %r14,4(%r14) # offset 26
* The j instruction branches to offset 30 to skip as many instructions
* as possible.
*/
asm(
" .align 4\n"
"ftrace_disable_code:\n"
" j 1f\n"
" j 0f\n"
" l %r1,"__stringify(__LC_FTRACE_FUNC)"\n"
" basr %r14,%r1\n"
"ftrace_disable_return:\n"
" l %r14,4(%r15)\n"
" j 0f\n"
" bcr 0,%r7\n"
" bcr 0,%r7\n"
" bcr 0,%r7\n"
" bcr 0,%r7\n"
" bcr 0,%r7\n"
" bcr 0,%r7\n"
"0:\n");

asm(
" .fill 12,1,0x07\n"
"0: basr %r14,%r14\n"
"1:\n"
" .align 4\n"
"ftrace_nop_code:\n"
" j .+"__stringify(MCOUNT_INSN_SIZE)"\n");
"ftrace_enable_insn:\n"
" l %r14,"__stringify(__LC_FTRACE_FUNC)"\n");

asm(
" .align 4\n"
"ftrace_call_code:\n"
" st %r14,4(%r15)\n");
#define FTRACE_INSN_SIZE 4

#endif /* CONFIG_64BIT */

static int ftrace_modify_code(unsigned long ip,
void *old_code, int old_size,
void *new_code, int new_size)
{
unsigned char replaced[MCOUNT_INSN_SIZE];

/*
* Note: Due to modules code can disappear and change.
* We need to protect against faulting as well as code
* changing. We do this by using the probe_kernel_*
* functions.
* This however is just a simple sanity check.
*/
if (probe_kernel_read(replaced, (void *)ip, old_size))
return -EFAULT;
if (memcmp(replaced, old_code, old_size) != 0)
return -EINVAL;
if (probe_kernel_write((void *)ip, new_code, new_size))
return -EPERM;
return 0;
}
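
For illustration only, not part of the patch: ftrace_modify_code above follows a read, compare, write protocol so that text that changed or disappeared underneath (e.g. an unloaded module) is detected instead of corrupted. A minimal userspace sketch of the same pattern, with plain memcpy/memcmp standing in for probe_kernel_read/probe_kernel_write and a hypothetical patch_site() name:

#include <errno.h>
#include <string.h>

/* Sketch of the read -> verify -> write sequence on an ordinary buffer. */
static int patch_site(unsigned char *site,
		      const unsigned char *old_code, int old_size,
		      const unsigned char *new_code, int new_size)
{
	unsigned char replaced[32];

	if (old_size > (int) sizeof(replaced))
		return -EINVAL;
	memcpy(replaced, site, old_size);		/* probe_kernel_read */
	if (memcmp(replaced, old_code, old_size) != 0)
		return -EINVAL;				/* code changed under us */
	memcpy(site, new_code, new_size);		/* probe_kernel_write */
	return 0;
}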

static int ftrace_make_initial_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long addr)
{
return ftrace_modify_code(rec->ip,
ftrace_call_code, FTRACE_INSN_SIZE,
ftrace_disable_code, MCOUNT_INSN_SIZE);
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long addr)
{
if (addr == MCOUNT_ADDR)
return ftrace_make_initial_nop(mod, rec, addr);
return ftrace_modify_code(rec->ip,
ftrace_call_code, FTRACE_INSN_SIZE,
ftrace_nop_code, FTRACE_INSN_SIZE);
if (probe_kernel_write((void *) rec->ip, ftrace_disable_code,
MCOUNT_INSN_SIZE))
return -EPERM;
return 0;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
return ftrace_modify_code(rec->ip,
ftrace_nop_code, FTRACE_INSN_SIZE,
ftrace_call_code, FTRACE_INSN_SIZE);
if (probe_kernel_write((void *) rec->ip, ftrace_enable_insn,
FTRACE_INSN_SIZE))
return -EPERM;
return 0;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
ftrace_dyn_func = (unsigned long)func;
return 0;
}

int __init ftrace_dyn_arch_init(void *data)
{
*(unsigned long *)data = 0;
*(unsigned long *) data = 0;
return 0;
}

#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
/*
* Patch the kernel code at ftrace_graph_caller location:
* The instruction there is branch relative on condition. The condition mask
* is either all ones (always branch aka disable ftrace_graph_caller) or all
* zeroes (nop aka enable ftrace_graph_caller).
* Instruction format for brc is a7m4xxxx where m is the condition mask.
*/
int ftrace_enable_ftrace_graph_caller(void)
{
unsigned short opcode = 0xa704;

return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
}

int ftrace_disable_ftrace_graph_caller(void)
{
unsigned short opcode = 0xa7f4;

return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
}
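
For illustration only, not part of the patch: the two halfwords above differ only in the brc condition mask described in the comment (format a7m4xxxx), 0xa704 being "brc 0" (never taken, a nop, so the graph caller runs) and 0xa7f4 being "brc 15" (always taken, so it is skipped). A standalone check of that encoding:

#include <assert.h>

int main(void)
{
	unsigned short enable_op = 0xa704;	/* brc 0,...  -> nop */
	unsigned short disable_op = 0xa7f4;	/* brc 15,... -> always branch */

	/* in a7m4xxxx the condition mask m is the second nibble */
	assert(((enable_op >> 4) & 0xf) == 0x0);
	assert(((disable_op >> 4) & 0xf) == 0xf);
	return 0;
}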

static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
{
return addr - (ftrace_disable_return - ftrace_disable_code);
}

#else /* CONFIG_DYNAMIC_FTRACE */

static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
{
return addr - MCOUNT_OFFSET_RET;
}

#endif /* CONFIG_DYNAMIC_FTRACE */

/*
* Hook the return address and push it in the stack of return addresses
* in current thread info.
*/
unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent)
unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
unsigned long ip)
{
struct ftrace_graph_ent trace;

@@ -189,14 +157,42 @@ unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent)
goto out;
if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
goto out;
trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN;
trace.func = (ip & PSW_ADDR_INSN) - MCOUNT_OFFSET_RET;
/* Only trace if the calling function expects to. */
if (!ftrace_graph_entry(&trace)) {
current->curr_ret_stack--;
goto out;
}
parent = (unsigned long)return_to_handler;
parent = (unsigned long) return_to_handler;
out:
return parent;
}

#ifdef CONFIG_DYNAMIC_FTRACE
/*
* Patch the kernel code at ftrace_graph_caller location. The instruction
* there is branch relative and save to prepare_ftrace_return. To disable
* the call to prepare_ftrace_return we patch the bras offset to point
* directly after the instructions. To enable the call we calculate
* the original offset to prepare_ftrace_return and put it back.
*/
int ftrace_enable_ftrace_graph_caller(void)
{
unsigned short offset;

offset = ((void *) prepare_ftrace_return -
(void *) ftrace_graph_caller) / 2;
return probe_kernel_write(ftrace_graph_caller + 2,
&offset, sizeof(offset));
}

int ftrace_disable_ftrace_graph_caller(void)
{
static unsigned short offset = 0x0002;

return probe_kernel_write(ftrace_graph_caller + 2,
&offset, sizeof(offset));
}

#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

@@ -1,7 +1,5 @@
/*
* arch/s390/kernel/irq.c
*
* Copyright IBM Corp. 2004,2007
* Copyright IBM Corp. 2004,2010
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
* Thomas Spatzier (tspat@de.ibm.com)
*
@@ -17,12 +15,42 @@
#include <linux/proc_fs.h>
#include <linux/profile.h>

struct irq_class {
char *name;
char *desc;
};

static const struct irq_class intrclass_names[] = {
{.name = "EXT" },
{.name = "I/O" },
{.name = "CLK", .desc = "[EXT] Clock Comparator" },
{.name = "IPI", .desc = "[EXT] Signal Processor" },
{.name = "TMR", .desc = "[EXT] CPU Timer" },
{.name = "TAL", .desc = "[EXT] Timing Alert" },
{.name = "PFL", .desc = "[EXT] Pseudo Page Fault" },
{.name = "DSD", .desc = "[EXT] DASD Diag" },
{.name = "VRT", .desc = "[EXT] Virtio" },
{.name = "SCP", .desc = "[EXT] Service Call" },
{.name = "IUC", .desc = "[EXT] IUCV" },
{.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt" },
{.name = "QDI", .desc = "[I/O] QDIO Interrupt" },
{.name = "DAS", .desc = "[I/O] DASD" },
{.name = "C15", .desc = "[I/O] 3215" },
{.name = "C70", .desc = "[I/O] 3270" },
{.name = "TAP", .desc = "[I/O] Tape" },
{.name = "VMR", .desc = "[I/O] Unit Record Devices" },
{.name = "LCS", .desc = "[I/O] LCS" },
{.name = "CLW", .desc = "[I/O] CLAW" },
{.name = "CTC", .desc = "[I/O] CTC" },
{.name = "APB", .desc = "[I/O] AP Bus" },
{.name = "NMI", .desc = "[NMI] Machine Check" },
};
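
For illustration only, not part of the patch: each intrclass_names entry becomes one /proc/interrupts row, the short name, one count column per CPU, and the optional long description. A standalone sketch of the formatting that show_interrupts below performs, with made-up counters:

#include <stdio.h>

struct irq_class { const char *name; const char *desc; };

int main(void)
{
	struct irq_class cls = { "CLK", "[EXT] Clock Comparator" };
	unsigned int count[2] = { 4321, 1234 };	/* fake per-cpu counters */

	printf("%s: ", cls.name);
	for (int cpu = 0; cpu < 2; cpu++)
		printf("%10u ", count[cpu]);
	if (cls.desc)
		printf(" %s", cls.desc);
	putchar('\n');
	return 0;
}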

/*
* show_interrupts is needed by /proc/interrupts.
*/
int show_interrupts(struct seq_file *p, void *v)
{
static const char *intrclass_names[] = { "EXT", "I/O", };
int i = *(loff_t *) v, j;

get_online_cpus();
@@ -34,15 +62,16 @@ int show_interrupts(struct seq_file *p, void *v)
}

if (i < NR_IRQS) {
seq_printf(p, "%s: ", intrclass_names[i]);
seq_printf(p, "%s: ", intrclass_names[i].name);
#ifndef CONFIG_SMP
seq_printf(p, "%10u ", kstat_irqs(i));
#else
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
if (intrclass_names[i].desc)
seq_printf(p, " %s", intrclass_names[i].desc);
seq_putc(p, '\n');

}
put_online_cpus();
return 0;

@@ -32,34 +32,14 @@
#include <linux/slab.h>
#include <linux/hardirq.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
struct kretprobe_blackpoint kretprobe_blacklist[] = { };

int __kprobes arch_prepare_kprobe(struct kprobe *p)
static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn)
{
/* Make sure the probe isn't going on a difficult instruction */
if (is_prohibited_opcode((kprobe_opcode_t *) p->addr))
return -EINVAL;

if ((unsigned long)p->addr & 0x01)
return -EINVAL;

/* Use the get_insn_slot() facility for correctness */
if (!(p->ainsn.insn = get_insn_slot()))
return -ENOMEM;

memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));

get_instruction_type(&p->ainsn);
p->opcode = *p->addr;
return 0;
}

int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction)
{
switch (*(__u8 *) instruction) {
switch (insn[0] >> 8) {
case 0x0c: /* bassm */
case 0x0b: /* bsm */
case 0x83: /* diag */
@@ -68,7 +48,7 @@ int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction)
case 0xad: /* stosm */
return -EINVAL;
}
switch (*(__u16 *) instruction) {
switch (insn[0]) {
case 0x0101: /* pr */
case 0xb25a: /* bsa */
case 0xb240: /* bakr */
@@ -81,93 +61,92 @@ int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction)
return 0;
}

void __kprobes get_instruction_type(struct arch_specific_insn *ainsn)
static int __kprobes get_fixup_type(kprobe_opcode_t *insn)
{
/* default fixup method */
ainsn->fixup = FIXUP_PSW_NORMAL;
int fixup = FIXUP_PSW_NORMAL;

/* save r1 operand */
ainsn->reg = (*ainsn->insn & 0xf0) >> 4;

/* save the instruction length (pop 5-5) in bytes */
switch (*(__u8 *) (ainsn->insn) >> 6) {
case 0:
ainsn->ilen = 2;
break;
case 1:
case 2:
ainsn->ilen = 4;
break;
case 3:
ainsn->ilen = 6;
break;
}

switch (*(__u8 *) ainsn->insn) {
switch (insn[0] >> 8) {
case 0x05: /* balr */
case 0x0d: /* basr */
ainsn->fixup = FIXUP_RETURN_REGISTER;
fixup = FIXUP_RETURN_REGISTER;
/* if r2 = 0, no branch will be taken */
if ((*ainsn->insn & 0x0f) == 0)
ainsn->fixup |= FIXUP_BRANCH_NOT_TAKEN;
if ((insn[0] & 0x0f) == 0)
fixup |= FIXUP_BRANCH_NOT_TAKEN;
break;
case 0x06: /* bctr */
case 0x07: /* bcr */
ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
fixup = FIXUP_BRANCH_NOT_TAKEN;
break;
case 0x45: /* bal */
case 0x4d: /* bas */
ainsn->fixup = FIXUP_RETURN_REGISTER;
fixup = FIXUP_RETURN_REGISTER;
break;
case 0x47: /* bc */
case 0x46: /* bct */
case 0x86: /* bxh */
case 0x87: /* bxle */
ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
fixup = FIXUP_BRANCH_NOT_TAKEN;
break;
case 0x82: /* lpsw */
ainsn->fixup = FIXUP_NOT_REQUIRED;
fixup = FIXUP_NOT_REQUIRED;
break;
case 0xb2: /* lpswe */
if (*(((__u8 *) ainsn->insn) + 1) == 0xb2) {
ainsn->fixup = FIXUP_NOT_REQUIRED;
}
if ((insn[0] & 0xff) == 0xb2)
fixup = FIXUP_NOT_REQUIRED;
break;
case 0xa7: /* bras */
if ((*ainsn->insn & 0x0f) == 0x05) {
ainsn->fixup |= FIXUP_RETURN_REGISTER;
}
if ((insn[0] & 0x0f) == 0x05)
fixup |= FIXUP_RETURN_REGISTER;
break;
case 0xc0:
if ((*ainsn->insn & 0x0f) == 0x00 /* larl */
|| (*ainsn->insn & 0x0f) == 0x05) /* brasl */
ainsn->fixup |= FIXUP_RETURN_REGISTER;
if ((insn[0] & 0x0f) == 0x00 || /* larl */
(insn[0] & 0x0f) == 0x05) /* brasl */
fixup |= FIXUP_RETURN_REGISTER;
break;
case 0xeb:
if (*(((__u8 *) ainsn->insn) + 5 ) == 0x44 || /* bxhg */
*(((__u8 *) ainsn->insn) + 5) == 0x45) {/* bxleg */
ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
}
if ((insn[2] & 0xff) == 0x44 || /* bxhg */
(insn[2] & 0xff) == 0x45) /* bxleg */
fixup = FIXUP_BRANCH_NOT_TAKEN;
break;
case 0xe3: /* bctg */
if (*(((__u8 *) ainsn->insn) + 5) == 0x46) {
ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
}
if ((insn[2] & 0xff) == 0x46)
fixup = FIXUP_BRANCH_NOT_TAKEN;
break;
}
return fixup;
}

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
if ((unsigned long) p->addr & 0x01)
return -EINVAL;

/* Make sure the probe isn't going on a difficult instruction */
if (is_prohibited_opcode(p->addr))
return -EINVAL;

p->opcode = *p->addr;
memcpy(p->ainsn.insn, p->addr, ((p->opcode >> 14) + 3) & -2);

return 0;
}
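
For illustration only, not part of the patch: the memcpy length ((p->opcode >> 14) + 3) & -2 above decodes the s390 instruction length from the two most significant bits of the opcode (00 -> 2 bytes, 01 and 10 -> 4 bytes, 11 -> 6 bytes), the same table the removed get_instruction_type switch spelled out by hand. A standalone check:

#include <stdio.h>

int main(void)
{
	unsigned short opcodes[] = { 0x0700, 0x4700, 0xa700, 0xc000 };

	for (int i = 0; i < 4; i++) {
		int ilen = ((opcodes[i] >> 14) + 3) & -2;

		printf("opcode 0x%04x -> %d bytes\n", opcodes[i], ilen);
	}
	return 0;	/* prints 2, 4, 4 and 6 */
}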

struct ins_replace_args {
kprobe_opcode_t *ptr;
kprobe_opcode_t opcode;
};

static int __kprobes swap_instruction(void *aref)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
unsigned long status = kcb->kprobe_status;
struct ins_replace_args *args = aref;
int rc;

kcb->kprobe_status = KPROBE_SWAP_INST;
rc = probe_kernel_write(args->ptr, &args->new, sizeof(args->new));
probe_kernel_write(args->ptr, &args->opcode, sizeof(args->opcode));
kcb->kprobe_status = status;
return rc;
return 0;
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
@@ -175,8 +154,7 @@ void __kprobes arch_arm_kprobe(struct kprobe *p)
struct ins_replace_args args;

args.ptr = p->addr;
args.old = p->opcode;
args.new = BREAKPOINT_INSTRUCTION;
args.opcode = BREAKPOINT_INSTRUCTION;
stop_machine(swap_instruction, &args, NULL);
}

@@ -185,64 +163,69 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
struct ins_replace_args args;

args.ptr = p->addr;
args.old = BREAKPOINT_INSTRUCTION;
args.new = p->opcode;
args.opcode = p->opcode;
stop_machine(swap_instruction, &args, NULL);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
if (p->ainsn.insn) {
free_insn_slot(p->ainsn.insn, 0);
p->ainsn.insn = NULL;
}
}

static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
static void __kprobes enable_singlestep(struct kprobe_ctlblk *kcb,
struct pt_regs *regs,
unsigned long ip)
{
per_cr_bits kprobe_per_regs[1];
struct per_regs per_kprobe;

memset(kprobe_per_regs, 0, sizeof(per_cr_bits));
regs->psw.addr = (unsigned long)p->ainsn.insn | PSW_ADDR_AMODE;
/* Set up the PER control registers %cr9-%cr11 */
per_kprobe.control = PER_EVENT_IFETCH;
per_kprobe.start = ip;
per_kprobe.end = ip;

/* Set up the per control reg info, will pass to lctl */
kprobe_per_regs[0].em_instruction_fetch = 1;
kprobe_per_regs[0].starting_addr = (unsigned long)p->ainsn.insn;
kprobe_per_regs[0].ending_addr = (unsigned long)p->ainsn.insn + 1;
/* Save control regs and psw mask */
__ctl_store(kcb->kprobe_saved_ctl, 9, 11);
kcb->kprobe_saved_imask = regs->psw.mask &
(PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);

/* Set the PER control regs, turns on single step for this address */
__ctl_load(kprobe_per_regs, 9, 11);
/* Set PER control regs, turns on single step for the given address */
__ctl_load(per_kprobe, 9, 11);
regs->psw.mask |= PSW_MASK_PER;
regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
regs->psw.addr = ip | PSW_ADDR_AMODE;
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
static void __kprobes disable_singlestep(struct kprobe_ctlblk *kcb,
struct pt_regs *regs,
unsigned long ip)
{
kcb->prev_kprobe.kp = kprobe_running();
kcb->prev_kprobe.status = kcb->kprobe_status;
kcb->prev_kprobe.kprobe_saved_imask = kcb->kprobe_saved_imask;
memcpy(kcb->prev_kprobe.kprobe_saved_ctl, kcb->kprobe_saved_ctl,
sizeof(kcb->kprobe_saved_ctl));
/* Restore control regs and psw mask, set new psw address */
__ctl_load(kcb->kprobe_saved_ctl, 9, 11);
regs->psw.mask &= ~PSW_MASK_PER;
regs->psw.mask |= kcb->kprobe_saved_imask;
regs->psw.addr = ip | PSW_ADDR_AMODE;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
/*
* Activate a kprobe by storing its pointer to current_kprobe. The
* previous kprobe is stored in kcb->prev_kprobe. A stack of up to
* two kprobes can be active, see KPROBE_REENTER.
*/
static void __kprobes push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
kcb->prev_kprobe.kp = __get_cpu_var(current_kprobe);
kcb->prev_kprobe.status = kcb->kprobe_status;
__get_cpu_var(current_kprobe) = p;
}

/*
* Deactivate a kprobe by backing up to the previous state. If the
* current state is KPROBE_REENTER prev_kprobe.kp will be non-NULL,
* for any other state prev_kprobe.kp will be NULL.
*/
static void __kprobes pop_kprobe(struct kprobe_ctlblk *kcb)
{
__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
kcb->kprobe_status = kcb->prev_kprobe.status;
kcb->kprobe_saved_imask = kcb->prev_kprobe.kprobe_saved_imask;
memcpy(kcb->kprobe_saved_ctl, kcb->prev_kprobe.kprobe_saved_ctl,
sizeof(kcb->kprobe_saved_ctl));
}
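
For illustration only, not part of the patch: push_kprobe and pop_kprobe implement the depth-two stack described in the comments above, the active probe plus at most one preempted probe. A standalone model with plain variables standing in for the per-cpu current_kprobe and kcb->prev_kprobe:

#include <assert.h>
#include <stddef.h>

struct kp { int id; };

static struct kp *current_kp;	/* models current_kprobe */
static struct kp *prev_kp;	/* models kcb->prev_kprobe.kp */

static void push_kp(struct kp *p)
{
	prev_kp = current_kp;
	current_kp = p;
}

static void pop_kp(void)
{
	current_kp = prev_kp;
}

int main(void)
{
	struct kp a = { 1 }, b = { 2 };

	push_kp(&a);		/* first breakpoint hits */
	push_kp(&b);		/* reentered while handling a */
	assert(current_kp == &b && prev_kp == &a);
	pop_kp();		/* back to the preempted probe */
	assert(current_kp == &a);
	return 0;
}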

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
__get_cpu_var(current_kprobe) = p;
/* Save the interrupt and per flags */
kcb->kprobe_saved_imask = regs->psw.mask &
(PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);
/* Save the control regs that govern PER */
__ctl_store(kcb->kprobe_saved_ctl, 9, 11);
}

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
@@ -251,79 +234,104 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];

/* Replace the return addr with trampoline addr */
regs->gprs[14] = (unsigned long)&kretprobe_trampoline;
regs->gprs[14] = (unsigned long) &kretprobe_trampoline;
}

static void __kprobes kprobe_reenter_check(struct kprobe_ctlblk *kcb,
struct kprobe *p)
{
switch (kcb->kprobe_status) {
case KPROBE_HIT_SSDONE:
case KPROBE_HIT_ACTIVE:
kprobes_inc_nmissed_count(p);
break;
case KPROBE_HIT_SS:
case KPROBE_REENTER:
default:
/*
* A kprobe on the code path to single step an instruction
* is a BUG. The code path resides in the .kprobes.text
* section and is executed with interrupts disabled.
*/
printk(KERN_EMERG "Invalid kprobe detected at %p.\n", p->addr);
dump_kprobe(p);
BUG();
}
}

static int __kprobes kprobe_handler(struct pt_regs *regs)
{
struct kprobe *p;
int ret = 0;
unsigned long *addr = (unsigned long *)
((regs->psw.addr & PSW_ADDR_INSN) - 2);
struct kprobe_ctlblk *kcb;
struct kprobe *p;

/*
* We don't want to be preempted for the entire
* duration of kprobe processing
* We want to disable preemption for the entire duration of kprobe
* processing. That includes the calls to the pre/post handlers
* and single stepping the kprobe instruction.
*/
preempt_disable();
kcb = get_kprobe_ctlblk();
p = get_kprobe((void *)((regs->psw.addr & PSW_ADDR_INSN) - 2));

/* Check we're not actually recursing */
if (kprobe_running()) {
p = get_kprobe(addr);
if (p) {
if (kcb->kprobe_status == KPROBE_HIT_SS &&
*p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
regs->psw.mask &= ~PSW_MASK_PER;
regs->psw.mask |= kcb->kprobe_saved_imask;
goto no_kprobe;
}
/* We have reentered the kprobe_handler(), since
* another probe was hit while within the handler.
* We here save the original kprobes variables and
* just single step on the instruction of the new probe
* without calling any user handlers.
if (p) {
if (kprobe_running()) {
/*
* We have hit a kprobe while another is still
* active. This can happen in the pre and post
* handler. Single step the instruction of the
* new probe but do not call any handler function
* of this secondary kprobe.
* push_kprobe and pop_kprobe saves and restores
* the currently active kprobe.
*/
save_previous_kprobe(kcb);
set_current_kprobe(p, regs, kcb);
kprobes_inc_nmissed_count(p);
prepare_singlestep(p, regs);
kprobe_reenter_check(kcb, p);
push_kprobe(kcb, p);
kcb->kprobe_status = KPROBE_REENTER;
return 1;
} else {
p = __get_cpu_var(current_kprobe);
if (p->break_handler && p->break_handler(p, regs)) {
goto ss_probe;
}
/*
* If we have no pre-handler or it returned 0, we
* continue with single stepping. If we have a
* pre-handler and it returned non-zero, it prepped
* for calling the break_handler below on re-entry
* for jprobe processing, so get out doing nothing
* more here.
*/
push_kprobe(kcb, p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
if (p->pre_handler && p->pre_handler(p, regs))
return 1;
kcb->kprobe_status = KPROBE_HIT_SS;
}
goto no_kprobe;
}

p = get_kprobe(addr);
if (!p)
/*
* No kprobe at this address. The fault has not been
* caused by a kprobe breakpoint. The race of breakpoint
* vs. kprobe remove does not exist because on s390 we
* use stop_machine to arm/disarm the breakpoints.
*/
goto no_kprobe;

kcb->kprobe_status = KPROBE_HIT_ACTIVE;
set_current_kprobe(p, regs, kcb);
if (p->pre_handler && p->pre_handler(p, regs))
/* handler has already set things up, so skip ss setup */
enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn);
return 1;

ss_probe:
prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_HIT_SS;
return 1;

no_kprobe:
} else if (kprobe_running()) {
p = __get_cpu_var(current_kprobe);
if (p->break_handler && p->break_handler(p, regs)) {
/*
* Continuation after the jprobe completed and
* caused the jprobe_return trap. The jprobe
* break_handler "returns" to the original
* function that still has the kprobe breakpoint
* installed. We continue with single stepping.
*/
kcb->kprobe_status = KPROBE_HIT_SS;
enable_singlestep(kcb, regs,
(unsigned long) p->ainsn.insn);
return 1;
} /* else:
* No kprobe at this address and the current kprobe
* has no break handler (no jprobe!). The kernel just
* exploded, let the standard trap handler pick up the
* pieces.
*/
} /* else:
* No kprobe at this address and no active kprobe. The trap has
* not been caused by a kprobe breakpoint. The race of breakpoint
* vs. kprobe remove does not exist because on s390 as we use
* stop_machine to arm/disarm the breakpoints.
*/
preempt_enable_no_resched();
return ret;
return 0;
}

/*
@@ -344,12 +352,12 @@ static void __used kretprobe_trampoline_holder(void)
static int __kprobes trampoline_probe_handler(struct kprobe *p,
struct pt_regs *regs)
{
struct kretprobe_instance *ri = NULL;
struct kretprobe_instance *ri;
struct hlist_head *head, empty_rp;
struct hlist_node *node, *tmp;
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
kprobe_opcode_t *correct_ret_addr = NULL;
unsigned long flags, orig_ret_address;
unsigned long trampoline_address;
kprobe_opcode_t *correct_ret_addr;

INIT_HLIST_HEAD(&empty_rp);
kretprobe_hash_lock(current, &head, &flags);
@@ -367,12 +375,16 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
* real return address, and all the rest will point to
* kretprobe_trampoline
*/
ri = NULL;
orig_ret_address = 0;
correct_ret_addr = NULL;
trampoline_address = (unsigned long) &kretprobe_trampoline;
hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;

orig_ret_address = (unsigned long)ri->ret_addr;
|
||||
orig_ret_address = (unsigned long) ri->ret_addr;
|
||||
|
||||
if (orig_ret_address != trampoline_address)
|
||||
/*
|
||||
|
@ -391,7 +403,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
|
|||
/* another task is sharing our hash bucket */
|
||||
continue;
|
||||
|
||||
orig_ret_address = (unsigned long)ri->ret_addr;
|
||||
orig_ret_address = (unsigned long) ri->ret_addr;
|
||||
|
||||
if (ri->rp && ri->rp->handler) {
|
||||
ri->ret_addr = correct_ret_addr;
|
||||
|
@ -400,19 +412,18 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
|
|||
|
||||
recycle_rp_inst(ri, &empty_rp);
|
||||
|
||||
if (orig_ret_address != trampoline_address) {
|
||||
if (orig_ret_address != trampoline_address)
|
||||
/*
|
||||
* This is the real return address. Any other
|
||||
* instances associated with this task are for
|
||||
* other calls deeper on the call stack
|
||||
*/
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE;
|
||||
|
||||
reset_current_kprobe();
|
||||
pop_kprobe(get_kprobe_ctlblk());
|
||||
kretprobe_hash_unlock(current, &flags);
|
||||
preempt_enable_no_resched();
|
||||
|
||||
|
@ -439,55 +450,42 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
|
|||
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
|
||||
{
|
||||
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
|
||||
unsigned long ip = regs->psw.addr & PSW_ADDR_INSN;
|
||||
int fixup = get_fixup_type(p->ainsn.insn);
|
||||
|
||||
regs->psw.addr &= PSW_ADDR_INSN;
|
||||
if (fixup & FIXUP_PSW_NORMAL)
|
||||
ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;
|
||||
|
||||
if (p->ainsn.fixup & FIXUP_PSW_NORMAL)
|
||||
regs->psw.addr = (unsigned long)p->addr +
|
||||
((unsigned long)regs->psw.addr -
|
||||
(unsigned long)p->ainsn.insn);
|
||||
if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
|
||||
int ilen = ((p->ainsn.insn[0] >> 14) + 3) & -2;
|
||||
if (ip - (unsigned long) p->ainsn.insn == ilen)
|
||||
ip = (unsigned long) p->addr + ilen;
|
||||
}
|
||||
|
||||
if (p->ainsn.fixup & FIXUP_BRANCH_NOT_TAKEN)
|
||||
if ((unsigned long)regs->psw.addr -
|
||||
(unsigned long)p->ainsn.insn == p->ainsn.ilen)
|
||||
regs->psw.addr = (unsigned long)p->addr + p->ainsn.ilen;
|
||||
if (fixup & FIXUP_RETURN_REGISTER) {
|
||||
int reg = (p->ainsn.insn[0] & 0xf0) >> 4;
|
||||
regs->gprs[reg] += (unsigned long) p->addr -
|
||||
(unsigned long) p->ainsn.insn;
|
||||
}
|
||||
|
||||
if (p->ainsn.fixup & FIXUP_RETURN_REGISTER)
|
||||
regs->gprs[p->ainsn.reg] = ((unsigned long)p->addr +
|
||||
(regs->gprs[p->ainsn.reg] -
|
||||
(unsigned long)p->ainsn.insn))
|
||||
| PSW_ADDR_AMODE;
|
||||
|
||||
regs->psw.addr |= PSW_ADDR_AMODE;
|
||||
/* turn off PER mode */
|
||||
regs->psw.mask &= ~PSW_MASK_PER;
|
||||
/* Restore the original per control regs */
|
||||
__ctl_load(kcb->kprobe_saved_ctl, 9, 11);
|
||||
regs->psw.mask |= kcb->kprobe_saved_imask;
|
||||
disable_singlestep(kcb, regs, ip);
|
||||
}
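
The resume_execution() rewrite above reduces the old PSW rewriting to plain offset arithmetic: the address reached after single stepping the out-of-line copy is shifted by the distance between the probed instruction and the instruction buffer. A minimal userspace sketch of the FIXUP_PSW_NORMAL case, with made-up addresses (nothing below is kernel API):

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
    	/* Hypothetical addresses: the probed instruction and the
    	 * out-of-line buffer holding a copy of it. */
    	unsigned long probe_addr = 0x1000;
    	unsigned long insn_buf = 0x9000;
    	unsigned long ilen = 4;	/* instruction length */

    	/* After the single step, the saved "PSW" points just past
    	 * the copy in the buffer. */
    	unsigned long ip = insn_buf + ilen;

    	/* Same fixup as above: shift back into the original code. */
    	ip += probe_addr - insn_buf;

    	assert(ip == probe_addr + ilen);
    	printf("resume at %#lx\n", ip);
    	return 0;
    }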

 static int __kprobes post_kprobe_handler(struct pt_regs *regs)
 {
-	struct kprobe *cur = kprobe_running();
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	struct kprobe *p = kprobe_running();

-	if (!cur)
+	if (!p)
 		return 0;

-	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+	if (kcb->kprobe_status != KPROBE_REENTER && p->post_handler) {
 		kcb->kprobe_status = KPROBE_HIT_SSDONE;
-		cur->post_handler(cur, regs, 0);
+		p->post_handler(p, regs, 0);
 	}

-	resume_execution(cur, regs);
-
-	/*Restore back the original saved kprobes variables and continue. */
-	if (kcb->kprobe_status == KPROBE_REENTER) {
-		restore_previous_kprobe(kcb);
-		goto out;
-	}
-	reset_current_kprobe();
-out:
+	resume_execution(p, regs);
+	pop_kprobe(kcb);
 	preempt_enable_no_resched();

 	/*
@@ -495,17 +493,16 @@ static int __kprobes post_kprobe_handler(struct pt_regs *regs)
 	 * will have PER set, in which case, continue the remaining processing
 	 * of do_single_step, as if this is not a probe hit.
 	 */
-	if (regs->psw.mask & PSW_MASK_PER) {
+	if (regs->psw.mask & PSW_MASK_PER)
 		return 0;
-	}

 	return 1;
 }

 static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
 {
-	struct kprobe *cur = kprobe_running();
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	struct kprobe *p = kprobe_running();
 	const struct exception_table_entry *entry;

 	switch(kcb->kprobe_status) {

@@ -521,14 +518,8 @@ static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
 		 * and allow the page fault handler to continue as a
 		 * normal page fault.
 		 */
-		regs->psw.addr = (unsigned long)cur->addr | PSW_ADDR_AMODE;
-		regs->psw.mask &= ~PSW_MASK_PER;
-		regs->psw.mask |= kcb->kprobe_saved_imask;
-		if (kcb->kprobe_status == KPROBE_REENTER)
-			restore_previous_kprobe(kcb);
-		else {
-			reset_current_kprobe();
-		}
+		disable_singlestep(kcb, regs, (unsigned long) p->addr);
+		pop_kprobe(kcb);
 		preempt_enable_no_resched();
 		break;
 	case KPROBE_HIT_ACTIVE:

@@ -538,7 +529,7 @@ static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
 		 * we can also use npre/npostfault count for accouting
 		 * these specific fault cases.
 		 */
-		kprobes_inc_nmissed_count(cur);
+		kprobes_inc_nmissed_count(p);

 		/*
 		 * We come here because instructions in the pre/post

@@ -547,7 +538,7 @@ static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
 		 * copy_from_user(), get_user() etc. Let the
 		 * user-specified handler try to fix it first.
 		 */
-		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
+		if (p->fault_handler && p->fault_handler(p, regs, trapnr))
 			return 1;

 		/*

@@ -589,7 +580,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 				       unsigned long val, void *data)
 {
-	struct die_args *args = (struct die_args *)data;
+	struct die_args *args = (struct die_args *) data;
+	struct pt_regs *regs = args->regs;
 	int ret = NOTIFY_DONE;

@@ -598,16 +589,16 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 	switch (val) {
 	case DIE_BPT:
-		if (kprobe_handler(args->regs))
+		if (kprobe_handler(regs))
 			ret = NOTIFY_STOP;
 		break;
 	case DIE_SSTEP:
-		if (post_kprobe_handler(args->regs))
+		if (post_kprobe_handler(regs))
 			ret = NOTIFY_STOP;
 		break;
 	case DIE_TRAP:
 		if (!preemptible() && kprobe_running() &&
-		    kprobe_trap_handler(args->regs, args->trapnr))
+		    kprobe_trap_handler(regs, args->trapnr))
 			ret = NOTIFY_STOP;
 		break;
 	default:

@@ -623,23 +614,19 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct jprobe *jp = container_of(p, struct jprobe, kp);
-	unsigned long addr;
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	unsigned long stack;

 	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));

 	/* setup return addr to the jprobe handler routine */
-	regs->psw.addr = (unsigned long)(jp->entry) | PSW_ADDR_AMODE;
+	regs->psw.addr = (unsigned long) jp->entry | PSW_ADDR_AMODE;
 	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);

-	/* r14 is the function return address */
-	kcb->jprobe_saved_r14 = (unsigned long)regs->gprs[14];
 	/* r15 is the stack pointer */
-	kcb->jprobe_saved_r15 = (unsigned long)regs->gprs[15];
-	addr = (unsigned long)kcb->jprobe_saved_r15;
+	stack = (unsigned long) regs->gprs[15];

-	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr,
-	       MIN_STACK_SIZE(addr));
+	memcpy(kcb->jprobes_stack, (void *) stack, MIN_STACK_SIZE(stack));
 	return 1;
 }

@@ -656,30 +643,29 @@ void __kprobes jprobe_return_end(void)
 int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-	unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_r15);
+	unsigned long stack;
+
+	stack = (unsigned long) kcb->jprobe_saved_regs.gprs[15];

 	/* Put the regs back */
 	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
 	/* put the stack back */
-	memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
-	       MIN_STACK_SIZE(stack_addr));
+	memcpy((void *) stack, kcb->jprobes_stack, MIN_STACK_SIZE(stack));
 	preempt_enable_no_resched();
 	return 1;
 }

-static struct kprobe trampoline_p = {
-	.addr = (kprobe_opcode_t *) & kretprobe_trampoline,
+static struct kprobe trampoline = {
+	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
 	.pre_handler = trampoline_probe_handler
 };

 int __init arch_init_kprobes(void)
 {
-	return register_kprobe(&trampoline_p);
+	return register_kprobe(&trampoline);
 }

 int __kprobes arch_trampoline_kprobe(struct kprobe *p)
 {
-	if (p->addr == (kprobe_opcode_t *) & kretprobe_trampoline)
-		return 1;
-	return 0;
+	return p->addr == (kprobe_opcode_t *) &kretprobe_trampoline;
 }

@@ -7,6 +7,8 @@

 #include <asm/asm-offsets.h>

+	.section .kprobes.text, "ax"
+
 	.globl ftrace_stub
 ftrace_stub:
 	br	%r14

@@ -16,22 +18,12 @@ _mcount:
 #ifdef CONFIG_DYNAMIC_FTRACE
 	br	%r14

-	.data
-	.globl	ftrace_dyn_func
-ftrace_dyn_func:
-	.long	ftrace_stub
-	.previous
-
 	.globl ftrace_caller
 ftrace_caller:
 #endif
 	stm	%r2,%r5,16(%r15)
 	bras	%r1,2f
-#ifdef CONFIG_DYNAMIC_FTRACE
-0:	.long	ftrace_dyn_func
-#else
 0:	.long	ftrace_trace_function
-#endif
 1:	.long	function_trace_stop
 2:	l	%r2,1b-0b(%r1)
 	icm	%r2,0xf,0(%r2)

@@ -47,21 +39,15 @@ ftrace_caller:
 	l	%r14,0(%r14)
 	basr	%r14,%r14
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-#ifdef CONFIG_DYNAMIC_FTRACE
+	l	%r2,100(%r15)
+	l	%r3,152(%r15)
 	.globl	ftrace_graph_caller
 ftrace_graph_caller:
-	# This unconditional branch gets runtime patched. Change only if
-	# you know what you are doing. See ftrace_enable_graph_caller().
-	j	1f
-#endif
-	bras	%r1,0f
-	.long	prepare_ftrace_return
-0:	l	%r2,152(%r15)
-	l	%r4,0(%r1)
-	l	%r3,100(%r15)
-	basr	%r14,%r4
-	st	%r2,100(%r15)
-1:
+# The bras instruction gets runtime patched to call prepare_ftrace_return.
+# See ftrace_enable_ftrace_graph_caller. The patched instruction is:
+#	bras	%r14,prepare_ftrace_return
+	bras	%r14,0f
+0:	st	%r2,100(%r15)
 #endif
 	ahi	%r15,96
 	l	%r14,56(%r15)

@@ -7,6 +7,8 @@

 #include <asm/asm-offsets.h>

+	.section .kprobes.text, "ax"
+
 	.globl ftrace_stub
 ftrace_stub:
 	br	%r14

@@ -16,12 +18,6 @@ _mcount:
 #ifdef CONFIG_DYNAMIC_FTRACE
 	br	%r14

-	.data
-	.globl	ftrace_dyn_func
-ftrace_dyn_func:
-	.quad	ftrace_stub
-	.previous
-
 	.globl ftrace_caller
 ftrace_caller:
 #endif

@@ -35,26 +31,19 @@ ftrace_caller:
 	stg	%r1,__SF_BACKCHAIN(%r15)
 	lgr	%r2,%r14
 	lg	%r3,168(%r15)
-#ifdef CONFIG_DYNAMIC_FTRACE
-	larl	%r14,ftrace_dyn_func
-#else
 	larl	%r14,ftrace_trace_function
-#endif
 	lg	%r14,0(%r14)
 	basr	%r14,%r14
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-#ifdef CONFIG_DYNAMIC_FTRACE
+	lg	%r2,168(%r15)
+	lg	%r3,272(%r15)
 	.globl	ftrace_graph_caller
 ftrace_graph_caller:
-	# This unconditional branch gets runtime patched. Change only if
-	# you know what you are doing. See ftrace_enable_graph_caller().
-	j	0f
-#endif
-	lg	%r2,272(%r15)
-	lg	%r3,168(%r15)
-	brasl	%r14,prepare_ftrace_return
-	stg	%r2,168(%r15)
-0:
+# The bras instruction gets runtime patched to call prepare_ftrace_return.
+# See ftrace_enable_ftrace_graph_caller. The patched instruction is:
+#	bras	%r14,prepare_ftrace_return
+	bras	%r14,0f
+0:	stg	%r2,168(%r15)
 #endif
 	aghi	%r15,160
 	lmg	%r2,%r5,32(%r15)

@@ -8,6 +8,7 @@
 *		 Heiko Carstens <heiko.carstens@de.ibm.com>,
 */

+#include <linux/kernel_stat.h>
 #include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/hardirq.h>

@@ -255,7 +256,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
 	nmi_enter();
 	s390_idle_check(regs, S390_lowcore.mcck_clock,
 			S390_lowcore.mcck_enter_timer);
-
+	kstat_cpu(smp_processor_id()).irqs[NMI_NMI]++;
 	mci = (struct mci *) &S390_lowcore.mcck_interruption_code;
 	mcck = &__get_cpu_var(cpu_mcck);
 	umode = user_mode(regs);

@@ -32,6 +32,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/syscalls.h>
 #include <linux/compat.h>
+#include <linux/kprobes.h>
 #include <asm/compat.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>

@@ -41,6 +42,7 @@
 #include <asm/irq.h>
 #include <asm/timer.h>
 #include <asm/nmi.h>
+#include <asm/smp.h>
 #include "entry.h"

 asmlinkage void ret_from_fork(void) asm ("ret_from_fork");

@@ -75,13 +77,8 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
 */
 static void default_idle(void)
 {
-	/* CPU is going idle. */
 #ifdef CONFIG_HOTPLUG_CPU
-	if (cpu_is_offline(smp_processor_id())) {
-		preempt_enable_no_resched();
+	if (cpu_is_offline(smp_processor_id()))
 		cpu_die();
-	}
 #endif
 	local_irq_disable();
 	if (need_resched()) {
 		local_irq_enable();

@@ -116,15 +113,17 @@ void cpu_idle(void)
 	}
 }

-extern void kernel_thread_starter(void);
+extern void __kprobes kernel_thread_starter(void);

 asm(
-	".align 4\n"
+	".section .kprobes.text, \"ax\"\n"
 	".global kernel_thread_starter\n"
 	"kernel_thread_starter:\n"
 	"	la	2,0(10)\n"
 	"	basr	14,9\n"
 	"	la	2,0\n"
-	"	br	11\n");
+	"	br	11\n"
+	".previous\n");

 int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 {

@@ -214,8 +213,10 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
 	/* start new process with ar4 pointing to the correct address space */
 	p->thread.mm_segment = get_fs();
 	/* Don't copy debug registers */
-	memset(&p->thread.per_info, 0, sizeof(p->thread.per_info));
+	memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
+	memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
 	clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
+	clear_tsk_thread_flag(p, TIF_PER_TRAP);
 	/* Initialize per thread user and system timer values */
 	ti = task_thread_info(p);
 	ti->user_timer = 0;

@@ -13,7 +13,7 @@
 #include <linux/smp.h>
 #include <linux/seq_file.h>
 #include <linux/delay.h>
-
+#include <linux/cpu.h>
 #include <asm/elf.h>
 #include <asm/lowcore.h>
 #include <asm/param.h>

@@ -34,17 +34,6 @@ void __cpuinit cpu_init(void)
 	enter_lazy_tlb(&init_mm, current);
 }

-/*
- * print_cpu_info - print basic information about a cpu
- */
-void __cpuinit print_cpu_info(void)
-{
-	struct cpuid *id = &per_cpu(cpu_id, smp_processor_id());
-
-	pr_info("Processor %d started, address %d, identification %06X\n",
-		S390_lowcore.cpu_nr, stap(), id->ident);
-}
-
 /*
 * show_cpuinfo - Get information on one CPU for use by procfs.
 */

@@ -57,9 +46,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	unsigned long n = (unsigned long) v - 1;
 	int i;

-	s390_adjust_jiffies();
-	preempt_disable();
 	if (!n) {
+		s390_adjust_jiffies();
 		seq_printf(m, "vendor_id       : IBM/S390\n"
 			   "# processors    : %i\n"
 			   "bogomips per cpu: %lu.%02lu\n",

@@ -71,7 +59,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 			seq_printf(m, "%s ", hwcap_str[i]);
 		seq_puts(m, "\n");
 	}
-
+	get_online_cpus();
 	if (cpu_online(n)) {
 		struct cpuid *id = &per_cpu(cpu_id, n);
 		seq_printf(m, "processor %li: "

@@ -80,7 +68,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 			   "machine = %04X\n",
 			   n, id->version, id->ident, id->machine);
 	}
-	preempt_enable();
+	put_online_cpus();
 	return 0;
 }

@@ -1,25 +1,9 @@
 /*
- *  arch/s390/kernel/ptrace.c
+ *  Ptrace user space interface.
 *
- *  S390 version
- *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
- *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
+ *    Copyright IBM Corp. 1999,2010
+ *    Author(s): Denis Joseph Barrow
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- *  Based on PowerPC version
- *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- *
- *  Derived from "arch/m68k/kernel/ptrace.c"
- *  Copyright (C) 1994 by Hamish Macdonald
- *  Taken from linux/kernel/ptrace.c and modified for M680x0.
- *  linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
- *
- * Modified by Cort Dougan (cort@cs.nmt.edu)
- *
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file README.legal in the main directory of
- * this archive for more details.
 */

 #include <linux/kernel.h>

@@ -61,76 +45,58 @@ enum s390_regset {
 	REGSET_GENERAL_EXTENDED,
 };

-static void
-FixPerRegisters(struct task_struct *task)
+void update_per_regs(struct task_struct *task)
 {
-	struct pt_regs *regs;
-	per_struct *per_info;
-	per_cr_words cr_words;
+	static const struct per_regs per_single_step = {
+		.control = PER_EVENT_IFETCH,
+		.start = 0,
+		.end = PSW_ADDR_INSN,
+	};
+	struct pt_regs *regs = task_pt_regs(task);
+	struct thread_struct *thread = &task->thread;
+	const struct per_regs *new;
+	struct per_regs old;

-	regs = task_pt_regs(task);
-	per_info = (per_struct *) &task->thread.per_info;
-	per_info->control_regs.bits.em_instruction_fetch =
-		per_info->single_step | per_info->instruction_fetch;
-
-	if (per_info->single_step) {
-		per_info->control_regs.bits.starting_addr = 0;
-#ifdef CONFIG_COMPAT
-		if (is_compat_task())
-			per_info->control_regs.bits.ending_addr = 0x7fffffffUL;
-		else
-#endif
-			per_info->control_regs.bits.ending_addr = PSW_ADDR_INSN;
-	} else {
-		per_info->control_regs.bits.starting_addr =
-			per_info->starting_addr;
-		per_info->control_regs.bits.ending_addr =
-			per_info->ending_addr;
-	}
-	/*
-	 * if any of the control reg tracing bits are on
-	 * we switch on per in the psw
-	 */
-	if (per_info->control_regs.words.cr[0] & PER_EM_MASK)
-		regs->psw.mask |= PSW_MASK_PER;
-	else
+	/* TIF_SINGLE_STEP overrides the user specified PER registers. */
+	new = test_tsk_thread_flag(task, TIF_SINGLE_STEP) ?
+		&per_single_step : &thread->per_user;
+
+	/* Take care of the PER enablement bit in the PSW. */
+	if (!(new->control & PER_EVENT_MASK)) {
 		regs->psw.mask &= ~PSW_MASK_PER;
-
-	if (per_info->control_regs.bits.em_storage_alteration)
-		per_info->control_regs.bits.storage_alt_space_ctl = 1;
-	else
-		per_info->control_regs.bits.storage_alt_space_ctl = 0;
-
-	if (task == current) {
-		__ctl_store(cr_words, 9, 11);
-		if (memcmp(&cr_words, &per_info->control_regs.words,
-			   sizeof(cr_words)) != 0)
-			__ctl_load(per_info->control_regs.words, 9, 11);
+		return;
 	}
+	regs->psw.mask |= PSW_MASK_PER;
+	__ctl_store(old, 9, 11);
+	if (memcmp(new, &old, sizeof(struct per_regs)) != 0)
+		__ctl_load(*new, 9, 11);
 }

 void user_enable_single_step(struct task_struct *task)
 {
-	task->thread.per_info.single_step = 1;
-	FixPerRegisters(task);
+	set_tsk_thread_flag(task, TIF_SINGLE_STEP);
+	if (task == current)
+		update_per_regs(task);
 }

 void user_disable_single_step(struct task_struct *task)
 {
-	task->thread.per_info.single_step = 0;
-	FixPerRegisters(task);
+	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
+	if (task == current)
+		update_per_regs(task);
 }

 /*
 * Called by kernel/ptrace.c when detaching..
 *
- * Make sure single step bits etc are not set.
+ * Clear all debugging related fields.
 */
-void
-ptrace_disable(struct task_struct *child)
+void ptrace_disable(struct task_struct *task)
 {
-	/* make sure the single step bit is not set. */
-	user_disable_single_step(child);
+	memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
+	memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
+	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
+	clear_tsk_thread_flag(task, TIF_PER_TRAP);
 }
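
The new update_per_regs() above boils the old FixPerRegisters() down to picking one of two PER register sets and loading it only when it differs from what the control registers already hold. A compilable userspace sketch of that selection (the struct layout and the mask value are assumptions for illustration, not the kernel's definitions):

    #include <stdio.h>

    struct per_regs { unsigned long control, start, end; };

    #define PER_EVENT_IFETCH 0x40000000UL	/* assumed bit value */
    #define PER_EVENT_MASK   0xf8000000UL	/* assumed mask of event bits */

    /* Kernel-requested single stepping wins over the debugger's set. */
    static const struct per_regs per_single_step = {
    	.control = PER_EVENT_IFETCH, .start = 0, .end = ~0UL,
    };

    const struct per_regs *select_per_set(int single_stepping,
    				      const struct per_regs *user_set)
    {
    	return single_stepping ? &per_single_step : user_set;
    }

    int main(void)
    {
    	struct per_regs user = { .control = 0, .start = 0x1000, .end = 0x2000 };
    	const struct per_regs *new = select_per_set(1, &user);

    	/* With no enabled events, PER would stay off in the PSW. */
    	printf("PER %s\n", (new->control & PER_EVENT_MASK) ? "on" : "off");
    	return 0;
    }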

 #ifndef CONFIG_64BIT

@@ -139,6 +105,47 @@ ptrace_disable(struct task_struct *child)
 # define __ADDR_MASK 7
 #endif

+static inline unsigned long __peek_user_per(struct task_struct *child,
+					    addr_t addr)
+{
+	struct per_struct_kernel *dummy = NULL;
+
+	if (addr == (addr_t) &dummy->cr9)
+		/* Control bits of the active per set. */
+		return test_thread_flag(TIF_SINGLE_STEP) ?
+			PER_EVENT_IFETCH : child->thread.per_user.control;
+	else if (addr == (addr_t) &dummy->cr10)
+		/* Start address of the active per set. */
+		return test_thread_flag(TIF_SINGLE_STEP) ?
+			0 : child->thread.per_user.start;
+	else if (addr == (addr_t) &dummy->cr11)
+		/* End address of the active per set. */
+		return test_thread_flag(TIF_SINGLE_STEP) ?
+			PSW_ADDR_INSN : child->thread.per_user.end;
+	else if (addr == (addr_t) &dummy->bits)
+		/* Single-step bit. */
+		return test_thread_flag(TIF_SINGLE_STEP) ?
+			(1UL << (BITS_PER_LONG - 1)) : 0;
+	else if (addr == (addr_t) &dummy->starting_addr)
+		/* Start address of the user specified per set. */
+		return child->thread.per_user.start;
+	else if (addr == (addr_t) &dummy->ending_addr)
+		/* End address of the user specified per set. */
+		return child->thread.per_user.end;
+	else if (addr == (addr_t) &dummy->perc_atmid)
+		/* PER code, ATMID and AI of the last PER trap */
+		return (unsigned long)
+			child->thread.per_event.cause << (BITS_PER_LONG - 16);
+	else if (addr == (addr_t) &dummy->address)
+		/* Address of the last PER trap */
+		return child->thread.per_event.address;
+	else if (addr == (addr_t) &dummy->access_id)
+		/* Access id of the last PER trap */
+		return (unsigned long)
+			child->thread.per_event.paid << (BITS_PER_LONG - 8);
+	return 0;
+}
+
 /*
 * Read the word at offset addr from the user area of a process. The
 * trouble here is that the information is littered over different

@@ -204,10 +211,10 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)

 	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
 		/*
-		 * per_info is found in the thread structure
+		 * Handle access to the per_info structure.
 		 */
-		offset = addr - (addr_t) &dummy->regs.per_info;
-		tmp = *(addr_t *)((addr_t) &child->thread.per_info + offset);
+		addr -= (addr_t) &dummy->regs.per_info;
+		tmp = __peek_user_per(child, addr);

 	} else
 		tmp = 0;

@@ -237,6 +244,35 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
 	return put_user(tmp, (addr_t __user *) data);
 }

+static inline void __poke_user_per(struct task_struct *child,
+				   addr_t addr, addr_t data)
+{
+	struct per_struct_kernel *dummy = NULL;
+
+	/*
+	 * There are only three fields in the per_info struct that the
+	 * debugger user can write to.
+	 * 1) cr9: the debugger wants to set a new PER event mask
+	 * 2) starting_addr: the debugger wants to set a new starting
+	 *    address to use with the PER event mask.
+	 * 3) ending_addr: the debugger wants to set a new ending
+	 *    address to use with the PER event mask.
+	 * The user specified PER event mask and the start and end
+	 * addresses are used only if single stepping is not in effect.
+	 * Writes to any other field in per_info are ignored.
+	 */
+	if (addr == (addr_t) &dummy->cr9)
+		/* PER event mask of the user specified per set. */
+		child->thread.per_user.control =
+			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
+	else if (addr == (addr_t) &dummy->starting_addr)
+		/* Starting address of the user specified per set. */
+		child->thread.per_user.start = data;
+	else if (addr == (addr_t) &dummy->ending_addr)
+		/* Ending address of the user specified per set. */
+		child->thread.per_user.end = data;
+}
+
 /*
 * Write a word to the user area of a process at location addr. This
 * operation does have an additional problem compared to peek_user.

@@ -311,19 +347,17 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)

 	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
 		/*
-		 * per_info is found in the thread structure
+		 * Handle access to the per_info structure.
 		 */
-		offset = addr - (addr_t) &dummy->regs.per_info;
-		*(addr_t *)((addr_t) &child->thread.per_info + offset) = data;
+		addr -= (addr_t) &dummy->regs.per_info;
+		__poke_user_per(child, addr, data);

 	}

-	FixPerRegisters(child);
 	return 0;
 }

-static int
-poke_user(struct task_struct *child, addr_t addr, addr_t data)
+static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
 {
 	addr_t mask;

@@ -409,13 +443,54 @@ long arch_ptrace(struct task_struct *child, long request,
 * a 64 bit program is a no-no.
 */

+/*
+ * Same as peek_user_per but for a 31 bit program.
+ */
+static inline __u32 __peek_user_per_compat(struct task_struct *child,
+					   addr_t addr)
+{
+	struct compat_per_struct_kernel *dummy32 = NULL;
+
+	if (addr == (addr_t) &dummy32->cr9)
+		/* Control bits of the active per set. */
+		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
+			PER_EVENT_IFETCH : child->thread.per_user.control;
+	else if (addr == (addr_t) &dummy32->cr10)
+		/* Start address of the active per set. */
+		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
+			0 : child->thread.per_user.start;
+	else if (addr == (addr_t) &dummy32->cr11)
+		/* End address of the active per set. */
+		return test_thread_flag(TIF_SINGLE_STEP) ?
+			PSW32_ADDR_INSN : child->thread.per_user.end;
+	else if (addr == (addr_t) &dummy32->bits)
+		/* Single-step bit. */
+		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
+			0x80000000 : 0;
+	else if (addr == (addr_t) &dummy32->starting_addr)
+		/* Start address of the user specified per set. */
+		return (__u32) child->thread.per_user.start;
+	else if (addr == (addr_t) &dummy32->ending_addr)
+		/* End address of the user specified per set. */
+		return (__u32) child->thread.per_user.end;
+	else if (addr == (addr_t) &dummy32->perc_atmid)
+		/* PER code, ATMID and AI of the last PER trap */
+		return (__u32) child->thread.per_event.cause << 16;
+	else if (addr == (addr_t) &dummy32->address)
+		/* Address of the last PER trap */
+		return (__u32) child->thread.per_event.address;
+	else if (addr == (addr_t) &dummy32->access_id)
+		/* Access id of the last PER trap */
+		return (__u32) child->thread.per_event.paid << 24;
+	return 0;
+}
+
 /*
 * Same as peek_user but for a 31 bit program.
 */
 static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
 {
-	struct user32 *dummy32 = NULL;
-	per_struct32 *dummy_per32 = NULL;
+	struct compat_user *dummy32 = NULL;
 	addr_t offset;
 	__u32 tmp;

@@ -465,19 +540,10 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr)

 	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
 		/*
-		 * per_info is found in the thread structure
+		 * Handle access to the per_info structure.
 		 */
-		offset = addr - (addr_t) &dummy32->regs.per_info;
-		/* This is magic. See per_struct and per_struct32. */
-		if ((offset >= (addr_t) &dummy_per32->control_regs &&
-		     offset < (addr_t) (&dummy_per32->control_regs + 1)) ||
-		    (offset >= (addr_t) &dummy_per32->starting_addr &&
-		     offset <= (addr_t) &dummy_per32->ending_addr) ||
-		    offset == (addr_t) &dummy_per32->lowcore.words.address)
-			offset = offset*2 + 4;
-		else
-			offset = offset*2;
-		tmp = *(__u32 *)((addr_t) &child->thread.per_info + offset);
+		addr -= (addr_t) &dummy32->regs.per_info;
+		tmp = __peek_user_per_compat(child, addr);

 	} else
 		tmp = 0;

@@ -497,14 +563,33 @@ static int peek_user_compat(struct task_struct *child,
 	return put_user(tmp, (__u32 __user *) data);
 }

+/*
+ * Same as poke_user_per but for a 31 bit program.
+ */
+static inline void __poke_user_per_compat(struct task_struct *child,
+					  addr_t addr, __u32 data)
+{
+	struct compat_per_struct_kernel *dummy32 = NULL;
+
+	if (addr == (addr_t) &dummy32->cr9)
+		/* PER event mask of the user specified per set. */
+		child->thread.per_user.control =
+			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
+	else if (addr == (addr_t) &dummy32->starting_addr)
+		/* Starting address of the user specified per set. */
+		child->thread.per_user.start = data;
+	else if (addr == (addr_t) &dummy32->ending_addr)
+		/* Ending address of the user specified per set. */
+		child->thread.per_user.end = data;
+}
+
 /*
 * Same as poke_user but for a 31 bit program.
 */
 static int __poke_user_compat(struct task_struct *child,
 			      addr_t addr, addr_t data)
 {
-	struct user32 *dummy32 = NULL;
-	per_struct32 *dummy_per32 = NULL;
+	struct compat_user *dummy32 = NULL;
 	__u32 tmp = (__u32) data;
 	addr_t offset;

@@ -561,37 +646,20 @@ static int __poke_user_compat(struct task_struct *child,

 	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
 		/*
-		 * per_info is found in the thread structure.
+		 * Handle access to the per_info structure.
 		 */
-		offset = addr - (addr_t) &dummy32->regs.per_info;
-		/*
-		 * This is magic. See per_struct and per_struct32.
-		 * By incident the offsets in per_struct are exactly
-		 * twice the offsets in per_struct32 for all fields.
-		 * The 8 byte fields need special handling though,
-		 * because the second half (bytes 4-7) is needed and
-		 * not the first half.
-		 */
-		if ((offset >= (addr_t) &dummy_per32->control_regs &&
-		     offset < (addr_t) (&dummy_per32->control_regs + 1)) ||
-		    (offset >= (addr_t) &dummy_per32->starting_addr &&
-		     offset <= (addr_t) &dummy_per32->ending_addr) ||
-		    offset == (addr_t) &dummy_per32->lowcore.words.address)
-			offset = offset*2 + 4;
-		else
-			offset = offset*2;
-		*(__u32 *)((addr_t) &child->thread.per_info + offset) = tmp;
-
+		addr -= (addr_t) &dummy32->regs.per_info;
+		__poke_user_per_compat(child, addr, data);
 	}

-	FixPerRegisters(child);
 	return 0;
 }

 static int poke_user_compat(struct task_struct *child,
 			    addr_t addr, addr_t data)
 {
-	if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user32) - 3)
+	if (!is_compat_task() || (addr & 3) ||
+	    addr > sizeof(struct compat_user) - 3)
 		return -EIO;

 	return __poke_user_compat(child, addr, data);

@@ -602,7 +670,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 {
 	unsigned long addr = caddr;
 	unsigned long data = cdata;
-	ptrace_area_emu31 parea;
+	compat_ptrace_area parea;
 	int copied, ret;

 	switch (request) {

@@ -1,33 +1,36 @@
 /*
- *  arch/s390/kernel/s390_ext.c
- *
- *  S390 version
- *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
- *    Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com),
- *               Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *    Copyright IBM Corp. 1999,2010
+ *    Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>,
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>,
 */

-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/ftrace.h>
-#include <linux/errno.h>
 #include <linux/kernel_stat.h>
 #include <linux/interrupt.h>
-#include <asm/cputime.h>
-#include <asm/lowcore.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/ftrace.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <asm/s390_ext.h>
+#include <asm/irq_regs.h>
+#include <asm/cputime.h>
+#include <asm/lowcore.h>
+#include <asm/irq.h>
+#include "entry.h"

+struct ext_int_info {
+	struct ext_int_info *next;
+	ext_int_handler_t handler;
+	__u16 code;
+};
+
 /*
 * ext_int_hash[index] is the start of the list for all external interrupts
 * that hash to this index. With the current set of external interrupts
 * (0x1202 external call, 0x1004 cpu timer, 0x2401 hwc console, 0x4000
 * iucv and 0x2603 pfault) this is always the first element.
 */
-ext_int_info_t *ext_int_hash[256] = { NULL, };
+static struct ext_int_info *ext_int_hash[256];

 static inline int ext_hash(__u16 code)
 {

@@ -36,90 +39,53 @@ static inline int ext_hash(__u16 code)

 int register_external_interrupt(__u16 code, ext_int_handler_t handler)
 {
-	ext_int_info_t *p;
-	int index;
+	struct ext_int_info *p;
+	int index;

-	p = kmalloc(sizeof(ext_int_info_t), GFP_ATOMIC);
-	if (p == NULL)
-		return -ENOMEM;
-	p->code = code;
-	p->handler = handler;
+	p = kmalloc(sizeof(*p), GFP_ATOMIC);
+	if (!p)
+		return -ENOMEM;
+	p->code = code;
+	p->handler = handler;
 	index = ext_hash(code);
-	p->next = ext_int_hash[index];
-	ext_int_hash[index] = p;
-	return 0;
-}
-
-int register_early_external_interrupt(__u16 code, ext_int_handler_t handler,
-				      ext_int_info_t *p)
-{
-	int index;
-
-	if (p == NULL)
-		return -EINVAL;
-	p->code = code;
-	p->handler = handler;
-	index = ext_hash(code);
 	p->next = ext_int_hash[index];
 	ext_int_hash[index] = p;
 	return 0;
 }
+EXPORT_SYMBOL(register_external_interrupt);

 int unregister_external_interrupt(__u16 code, ext_int_handler_t handler)
 {
-	ext_int_info_t *p, *q;
-	int index;
-
-	index = ext_hash(code);
-	q = NULL;
-	p = ext_int_hash[index];
-	while (p != NULL) {
-		if (p->code == code && p->handler == handler)
-			break;
-		q = p;
-		p = p->next;
-	}
-	if (p == NULL)
-		return -ENOENT;
-	if (q != NULL)
-		q->next = p->next;
-	else
-		ext_int_hash[index] = p->next;
-	kfree(p);
-	return 0;
-}
-
-int unregister_early_external_interrupt(__u16 code, ext_int_handler_t handler,
-					ext_int_info_t *p)
-{
-	ext_int_info_t *q;
+	struct ext_int_info *p, *q;
 	int index;

-	if (p == NULL || p->code != code || p->handler != handler)
-		return -EINVAL;
 	index = ext_hash(code);
-	q = ext_int_hash[index];
-	if (p != q) {
-		while (q != NULL) {
-			if (q->next == p)
-				break;
-			q = q->next;
-		}
-		if (q == NULL)
-			return -ENOENT;
+	q = NULL;
+	p = ext_int_hash[index];
+	while (p) {
+		if (p->code == code && p->handler == handler)
+			break;
+		q = p;
+		p = p->next;
+	}
+	if (!p)
+		return -ENOENT;
+	if (q)
 		q->next = p->next;
-	} else
+	else
 		ext_int_hash[index] = p->next;
 	kfree(p);
 	return 0;
 }
+EXPORT_SYMBOL(unregister_external_interrupt);

 void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code,
 			   unsigned int param32, unsigned long param64)
 {
 	struct pt_regs *old_regs;
 	unsigned short code;
-	ext_int_info_t *p;
-	int index;
+	struct ext_int_info *p;
+	int index;

 	code = (unsigned short) ext_int_code;
 	old_regs = set_irq_regs(regs);

@@ -132,7 +98,7 @@ void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code,
 	kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
 	if (code != 0x1004)
 		__get_cpu_var(s390_idle).nohz_delay = 1;
-	index = ext_hash(code);
+	index = ext_hash(code);
 	for (p = ext_int_hash[index]; p; p = p->next) {
 		if (likely(p->code == code))
 			p->handler(ext_int_code, param32, param64);

@@ -140,6 +106,3 @@ void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code,
 	irq_exit();
 	set_irq_regs(old_regs);
 }
-
-EXPORT_SYMBOL(register_external_interrupt);
-EXPORT_SYMBOL(unregister_external_interrupt);
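
The registration rewrite above keeps the same data structure: a 256-entry array of singly linked lists keyed by a hash of the interrupt code. A self-contained userspace model of register and dispatch (the hash function here is a stand-in, not necessarily the kernel's ext_hash()):

    #include <stdio.h>
    #include <stdlib.h>

    typedef void (*handler_t)(unsigned short code);

    struct ext_int_info {
    	struct ext_int_info *next;
    	handler_t handler;
    	unsigned short code;
    };

    static struct ext_int_info *ext_int_hash[256];

    static int ext_hash(unsigned short code)
    {
    	return (code + (code >> 9)) & 0xff;	/* illustrative hash only */
    }

    int register_ext_int(unsigned short code, handler_t handler)
    {
    	struct ext_int_info *p = malloc(sizeof(*p));

    	if (!p)
    		return -1;
    	p->code = code;
    	p->handler = handler;
    	/* Push onto the head of the bucket's list. */
    	p->next = ext_int_hash[ext_hash(code)];
    	ext_int_hash[ext_hash(code)] = p;
    	return 0;
    }

    void do_ext_int(unsigned short code)
    {
    	struct ext_int_info *p;

    	/* Walk the bucket and call every matching handler. */
    	for (p = ext_int_hash[ext_hash(code)]; p; p = p->next)
    		if (p->code == code)
    			p->handler(code);
    }

    static void say_hello(unsigned short code)
    {
    	printf("handler for %#x called\n", code);
    }

    int main(void)
    {
    	register_ext_int(0x1202, say_hello);
    	do_ext_int(0x1202);
    	return 0;
    }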
@@ -505,7 +505,7 @@ void do_signal(struct pt_regs *regs)
 			 * Let tracing know that we've done the handler setup.
 			 */
 			tracehook_signal_handler(signr, &info, &ka, regs,
-					 current->thread.per_info.single_step);
+					 test_thread_flag(TIF_SINGLE_STEP));
 		}
 		return;
 	}

@@ -23,6 +23,7 @@
 #define KMSG_COMPONENT "cpu"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

+#include <linux/workqueue.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/mm.h>

@@ -161,6 +162,7 @@ static void do_ext_call_interrupt(unsigned int ext_int_code,
 {
 	unsigned long bits;

+	kstat_cpu(smp_processor_id()).irqs[EXTINT_IPI]++;
 	/*
 	 * handle bit signal external calls
 	 *

@@ -469,25 +471,25 @@ int __cpuinit start_secondary(void *cpuvoid)
 	ipi_call_unlock();
 	/* Switch on interrupts */
 	local_irq_enable();
-	/* Print info about this processor */
-	print_cpu_info();
 	/* cpu_idle will call schedule for us */
 	cpu_idle();
 	return 0;
 }

-static void __init smp_create_idle(unsigned int cpu)
-{
-	struct task_struct *p;
-
-	/*
-	 *  don't care about the psw and regs settings since we'll never
-	 *  reschedule the forked task.
-	 */
-	p = fork_idle(cpu);
-	if (IS_ERR(p))
-		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
-	current_set[cpu] = p;
+struct create_idle {
+	struct work_struct work;
+	struct task_struct *idle;
+	struct completion done;
+	int cpu;
+};
+
+static void __cpuinit smp_fork_idle(struct work_struct *work)
+{
+	struct create_idle *c_idle;
+
+	c_idle = container_of(work, struct create_idle, work);
+	c_idle->idle = fork_idle(c_idle->cpu);
+	complete(&c_idle->done);
 }

 static int __cpuinit smp_alloc_lowcore(int cpu)

@@ -551,6 +553,7 @@ static void smp_free_lowcore(int cpu)
 int __cpuinit __cpu_up(unsigned int cpu)
 {
 	struct _lowcore *cpu_lowcore;
+	struct create_idle c_idle;
 	struct task_struct *idle;
 	struct stack_frame *sf;
 	u32 lowcore;

@@ -558,6 +561,19 @@ int __cpuinit __cpu_up(unsigned int cpu)

 	if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
 		return -EIO;
+	idle = current_set[cpu];
+	if (!idle) {
+		c_idle.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done);
+		INIT_WORK_ONSTACK(&c_idle.work, smp_fork_idle);
+		c_idle.cpu = cpu;
+		schedule_work(&c_idle.work);
+		wait_for_completion(&c_idle.done);
+		if (IS_ERR(c_idle.idle))
+			return PTR_ERR(c_idle.idle);
+		idle = c_idle.idle;
+		current_set[cpu] = c_idle.idle;
+	}
+	init_idle(idle, cpu);
 	if (smp_alloc_lowcore(cpu))
 		return -ENOMEM;
 	do {
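
The on-stack work_struct plus completion used in __cpu_up() above is a common kernel pattern: fill in a request, hand it to another context, then sleep until that context signals completion. A userspace analogue with POSIX threads (compile with -pthread; fork_idle() is stood in by a dummy computation):

    #include <pthread.h>
    #include <stdio.h>

    struct create_idle {
    	pthread_mutex_t lock;
    	pthread_cond_t done;
    	int completed;
    	int cpu;
    	long result;
    };

    static void *worker(void *arg)
    {
    	struct create_idle *c = arg;

    	pthread_mutex_lock(&c->lock);
    	c->result = 1000 + c->cpu;	/* stand-in for fork_idle(cpu) */
    	c->completed = 1;
    	pthread_cond_signal(&c->done);	/* complete(&c->done) */
    	pthread_mutex_unlock(&c->lock);
    	return NULL;
    }

    int main(void)
    {
    	struct create_idle c = { PTHREAD_MUTEX_INITIALIZER,
    				 PTHREAD_COND_INITIALIZER, 0, 3, 0 };
    	pthread_t tid;

    	pthread_create(&tid, NULL, worker, &c);
    	pthread_mutex_lock(&c.lock);
    	while (!c.completed)		/* wait_for_completion() */
    		pthread_cond_wait(&c.done, &c.lock);
    	pthread_mutex_unlock(&c.lock);
    	pthread_join(tid, NULL);
    	printf("idle task for cpu %d: %ld\n", c.cpu, c.result);
    	return 0;
    }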

@@ -572,7 +588,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	while (sigp_p(lowcore, cpu, sigp_set_prefix) == sigp_busy)
 		udelay(10);

-	idle = current_set[cpu];
 	cpu_lowcore = lowcore_ptr[cpu];
 	cpu_lowcore->kernel_stack = (unsigned long)
 		task_stack_page(idle) + THREAD_SIZE;

@@ -664,7 +679,6 @@ void __cpu_die(unsigned int cpu)
 		udelay(10);
 	smp_free_lowcore(cpu);
 	atomic_dec(&init_mm.context.attach_count);
-	pr_info("Processor %d stopped\n", cpu);
 }

 void cpu_die(void)

@@ -684,14 +698,12 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 #endif
 	unsigned long async_stack, panic_stack;
 	struct _lowcore *lowcore;
-	unsigned int cpu;

 	smp_detect_cpus();

 	/* request the 0x1201 emergency signal external interrupt */
 	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
 		panic("Couldn't request external interrupt 0x1201");
+	print_cpu_info();

 	/* Reallocate current lowcore, but keep its contents. */
 	lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);

@@ -719,9 +731,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore))
 		BUG();
 #endif
-	for_each_possible_cpu(cpu)
-		if (cpu != smp_processor_id())
-			smp_create_idle(cpu);
 }

 void __init smp_prepare_boot_cpu(void)

@@ -15,6 +15,7 @@
 #define KMSG_COMPONENT "time"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

+#include <linux/kernel_stat.h>
 #include <linux/errno.h>
 #include <linux/module.h>
 #include <linux/sched.h>

@@ -37,6 +38,7 @@
 #include <linux/clocksource.h>
 #include <linux/clockchips.h>
 #include <linux/gfp.h>
+#include <linux/kprobes.h>
 #include <asm/uaccess.h>
 #include <asm/delay.h>
 #include <asm/s390_ext.h>

@@ -60,7 +62,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators);
 /*
 * Scheduler clock - returns current time in nanosec units.
 */
-unsigned long long notrace sched_clock(void)
+unsigned long long notrace __kprobes sched_clock(void)
 {
 	return (get_clock_monotonic() * 125) >> 9;
 }
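
The conversion in sched_clock() relies on the s390 TOD clock ticking in units of 1/4096 microsecond, so nanoseconds = ticks * 1000 / 4096, which reduces to ticks * 125 / 512, i.e. the "* 125) >> 9" above. A quick standalone check of that arithmetic:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
    	/* One second's worth of 1/4096-microsecond ticks. */
    	unsigned long long tod = 4096ULL * 1000000;
    	unsigned long long ns = (tod * 125) >> 9;

    	assert(ns == 1000000000ULL);	/* exactly 1e9 ns */
    	printf("%llu ns\n", ns);
    	return 0;
    }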

@@ -159,6 +161,7 @@ static void clock_comparator_interrupt(unsigned int ext_int_code,
 					unsigned int param32,
 					unsigned long param64)
 {
+	kstat_cpu(smp_processor_id()).irqs[EXTINT_CLK]++;
 	if (S390_lowcore.clock_comparator == -1ULL)
 		set_clock_comparator(S390_lowcore.clock_comparator);
 }

@@ -169,6 +172,7 @@ static void stp_timing_alert(struct stp_irq_parm *);
 static void timing_alert_interrupt(unsigned int ext_int_code,
 				   unsigned int param32, unsigned long param64)
 {
+	kstat_cpu(smp_processor_id()).irqs[EXTINT_TLA]++;
 	if (param32 & 0x00c40000)
 		etr_timing_alert((struct etr_irq_parm *) &param32);
 	if (param32 & 0x00038000)

@@ -365,12 +365,10 @@ static inline void __user *get_psw_address(struct pt_regs *regs,
 		((regs->psw.addr - (pgm_int_code >> 16)) & PSW_ADDR_INSN);
 }

-void __kprobes do_single_step(struct pt_regs *regs)
+void __kprobes do_per_trap(struct pt_regs *regs)
 {
-	if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0,
-					SIGTRAP) == NOTIFY_STOP){
+	if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP)
 		return;
-	}
 	if (tracehook_consider_fatal_signal(current, SIGTRAP))
 		force_sig(SIGTRAP, current);
 }

@@ -451,8 +449,8 @@ static inline void do_fp_trap(struct pt_regs *regs, void __user *location,
 			"floating point exception", regs, &si);
 }

-static void illegal_op(struct pt_regs *regs, long pgm_int_code,
-		       unsigned long trans_exc_code)
+static void __kprobes illegal_op(struct pt_regs *regs, long pgm_int_code,
+				 unsigned long trans_exc_code)
 {
 	siginfo_t info;
 	__u8 opcode[6];

@@ -688,7 +686,7 @@ static void space_switch_exception(struct pt_regs *regs, long pgm_int_code,
 	do_trap(pgm_int_code, SIGILL, "space switch event", regs, &info);
 }

-asmlinkage void kernel_stack_overflow(struct pt_regs * regs)
+asmlinkage void __kprobes kernel_stack_overflow(struct pt_regs * regs)
 {
 	bust_spinlocks(1);
 	printk("Kernel stack overflow.\n");

@@ -733,5 +731,6 @@ void __init trap_init(void)
 	pgm_check_table[0x15] = &operand_exception;
 	pgm_check_table[0x1C] = &space_switch_exception;
 	pgm_check_table[0x1D] = &hfp_sqrt_exception;
-	pfault_irq_init();
+	/* Enable machine checks early. */
+	local_mcck_enable();
 }

@@ -20,6 +20,7 @@
 #include <linux/rcupdate.h>
 #include <linux/posix-timers.h>
 #include <linux/cpu.h>
+#include <linux/kprobes.h>

 #include <asm/s390_ext.h>
 #include <asm/timer.h>

@@ -122,7 +123,7 @@ void account_system_vtime(struct task_struct *tsk)
 }
 EXPORT_SYMBOL_GPL(account_system_vtime);

-void vtime_start_cpu(__u64 int_clock, __u64 enter_timer)
+void __kprobes vtime_start_cpu(__u64 int_clock, __u64 enter_timer)
 {
 	struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
 	struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);

@@ -162,7 +163,7 @@ void vtime_start_cpu(__u64 int_clock, __u64 enter_timer)
 	idle->sequence++;
 }

-void vtime_stop_cpu(void)
+void __kprobes vtime_stop_cpu(void)
 {
 	struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
 	struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);

@@ -323,6 +324,7 @@ static void do_cpu_timer_interrupt(unsigned int ext_int_code,
 	struct list_head cb_list;	/* the callback queue */
 	__u64 elapsed, next;

+	kstat_cpu(smp_processor_id()).irqs[EXTINT_TMR]++;
 	INIT_LIST_HEAD(&cb_list);
 	vq = &__get_cpu_var(virt_cpu_timer);

@@ -4,8 +4,8 @@
 source "virt/kvm/Kconfig"

 menuconfig VIRTUALIZATION
-	bool "Virtualization"
-	default y
+	def_bool y
+	prompt "Virtualization"
 	---help---
 	  Say Y here to get to see options for using your Linux host to run other
 	  operating systems inside virtual machines (guests).

@@ -16,7 +16,8 @@ menuconfig VIRTUALIZATION
 if VIRTUALIZATION

 config KVM
-	tristate "Kernel-based Virtual Machine (KVM) support"
+	def_tristate y
+	prompt "Kernel-based Virtual Machine (KVM) support"
 	depends on HAVE_KVM && EXPERIMENTAL
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES

@@ -47,7 +47,6 @@ static void __udelay_disabled(unsigned long long usecs)
 	lockdep_on();
 	__ctl_load(cr0_saved, 0, 0);
 	local_tick_enable(clock_saved);
-	set_clock_comparator(S390_lowcore.clock_comparator);
 }

 static void __udelay_enabled(unsigned long long usecs)

@@ -70,7 +69,6 @@ static void __udelay_enabled(unsigned long long usecs)
 		if (clock_saved)
 			local_tick_enable(clock_saved);
 	} while (get_clock() < end);
-	set_clock_comparator(S390_lowcore.clock_comparator);
 }

 /*

@@ -10,6 +10,7 @@
 *    Copyright (C) 1995  Linus Torvalds
 */

+#include <linux/kernel_stat.h>
 #include <linux/perf_event.h>
 #include <linux/signal.h>
 #include <linux/sched.h>

@@ -234,13 +235,13 @@ static noinline int signal_return(struct pt_regs *regs, long int_code,
 	rc = __get_user(instruction, (u16 __user *) regs->psw.addr);

 	if (!rc && instruction == 0x0a77) {
-		clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
+		clear_tsk_thread_flag(current, TIF_PER_TRAP);
 		if (is_compat_task())
 			sys32_sigreturn();
 		else
 			sys_sigreturn();
 	} else if (!rc && instruction == 0x0aad) {
-		clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
+		clear_tsk_thread_flag(current, TIF_PER_TRAP);
 		if (is_compat_task())
 			sys32_rt_sigreturn();
 		else

@@ -378,7 +379,7 @@ static inline int do_exception(struct pt_regs *regs, int access,
 	 * The instruction that caused the program check will
 	 * be repeated. Don't signal single step via SIGTRAP.
 	 */
-	clear_tsk_thread_flag(tsk, TIF_SINGLE_STEP);
+	clear_tsk_thread_flag(tsk, TIF_PER_TRAP);
 	fault = 0;
 out_up:
 	up_read(&mm->mmap_sem);

@@ -480,8 +481,7 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
 /*
 * 'pfault' pseudo page faults routines.
 */
-static ext_int_info_t ext_int_pfault;
-static int pfault_disable = 0;
+static int pfault_disable;

 static int __init nopfault(char *str)
 {

@@ -543,6 +543,7 @@ static void pfault_interrupt(unsigned int ext_int_code,
 	struct task_struct *tsk;
 	__u16 subcode;

+	kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++;
 	/*
 	 * Get the external interruption subcode & pfault
 	 * initial/completion signal bit. VM stores this

@@ -592,24 +593,28 @@ static void pfault_interrupt(unsigned int ext_int_code,
 	}
 }

-void __init pfault_irq_init(void)
+static int __init pfault_irq_init(void)
 {
-	if (!MACHINE_IS_VM)
-		return;
+	int rc;
+
+	if (!MACHINE_IS_VM)
+		return 0;
 	/*
 	 * Try to get pfault pseudo page faults going.
 	 */
-	if (register_early_external_interrupt(0x2603, pfault_interrupt,
-					      &ext_int_pfault) != 0)
-		panic("Couldn't request external interrupt 0x2603");
-
+	rc = register_external_interrupt(0x2603, pfault_interrupt);
+	if (rc) {
+		pfault_disable = 1;
+		return rc;
+	}
 	if (pfault_init() == 0)
-		return;
+		return 0;

 	/* Tough luck, no pfault. */
 	pfault_disable = 1;
-	unregister_early_external_interrupt(0x2603, pfault_interrupt,
-					    &ext_int_pfault);
+	unregister_external_interrupt(0x2603, pfault_interrupt);
+	return 0;
 }
+early_initcall(pfault_irq_init);

 #endif

@@ -2,7 +2,8 @@ comment "S/390 block device drivers"
 	depends on S390 && BLOCK

 config BLK_DEV_XPRAM
-	tristate "XPRAM disk support"
+	def_tristate m
+	prompt "XPRAM disk support"
 	depends on S390 && BLOCK
 	help
 	  Select this option if you want to use your expanded storage on S/390

@@ -12,13 +13,15 @@ config BLK_DEV_XPRAM
 	  xpram. If unsure, say "N".

 config DCSSBLK
-	tristate "DCSSBLK support"
+	def_tristate m
+	prompt "DCSSBLK support"
 	depends on S390 && BLOCK
 	help
 	  Support for dcss block device

 config DASD
-	tristate "Support for DASD devices"
+	def_tristate y
+	prompt "Support for DASD devices"
 	depends on CCW && BLOCK
 	select IOSCHED_DEADLINE
 	help

@@ -27,28 +30,32 @@ config DASD
 	  natively on a single image or an LPAR.

 config DASD_PROFILE
-	bool "Profiling support for dasd devices"
+	def_bool y
+	prompt "Profiling support for dasd devices"
 	depends on DASD
 	help
 	  Enable this option if you want to see profiling information
 	  in /proc/dasd/statistics.

 config DASD_ECKD
-	tristate "Support for ECKD Disks"
+	def_tristate y
+	prompt "Support for ECKD Disks"
 	depends on DASD
 	help
 	  ECKD devices are the most commonly used devices. You should enable
 	  this option unless you are very sure to have no ECKD device.

 config DASD_FBA
-	tristate "Support for FBA Disks"
+	def_tristate y
+	prompt "Support for FBA Disks"
 	depends on DASD
 	help
 	  Select this option to be able to access FBA devices. It is safe to
 	  say "Y".

 config DASD_DIAG
-	tristate "Support for DIAG access to Disks"
+	def_tristate y
+	prompt "Support for DIAG access to Disks"
 	depends on DASD
 	help
 	  Select this option if you want to use Diagnose250 command to access

@@ -56,7 +63,8 @@ config DASD_DIAG
 	  say "N".

 config DASD_EER
-	bool "Extended error reporting (EER)"
+	def_bool y
+	prompt "Extended error reporting (EER)"
 	depends on DASD
 	help
 	  This driver provides a character device interface to the

@@ -11,6 +11,7 @@
#define KMSG_COMPONENT "dasd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel_stat.h>
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -368,6 +369,11 @@ dasd_state_ready_to_online(struct dasd_device * device)
device->state = DASD_STATE_ONLINE;
if (device->block) {
dasd_schedule_block_bh(device->block);
if ((device->features & DASD_FEATURE_USERAW)) {
disk = device->block->gdp;
kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
return 0;
}
disk = device->block->bdev->bd_disk;
disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
while ((part = disk_part_iter_next(&piter)))
@@ -393,7 +399,7 @@ static int dasd_state_online_to_ready(struct dasd_device *device)
return rc;
}
device->state = DASD_STATE_READY;
if (device->block) {
if (device->block && !(device->features & DASD_FEATURE_USERAW)) {
disk = device->block->bdev->bd_disk;
disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
while ((part = disk_part_iter_next(&piter)))
@@ -744,10 +750,6 @@ struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
char *data;
int size;
/* Sanity checks */
BUG_ON(datasize > PAGE_SIZE ||
(cplength*sizeof(struct ccw1)) > PAGE_SIZE);
size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
if (cplength > 0)
size += cplength * sizeof(struct ccw1);
@@ -853,7 +855,6 @@ int dasd_term_IO(struct dasd_ccw_req *cqr)
rc = ccw_device_clear(device->cdev, (long) cqr);
switch (rc) {
case 0: /* termination successful */
cqr->retries--;
cqr->status = DASD_CQR_CLEAR_PENDING;
cqr->stopclk = get_clock();
cqr->starttime = 0;
@@ -905,6 +906,16 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
return rc;
}
device = (struct dasd_device *) cqr->startdev;
if (((cqr->block &&
test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) &&
!test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p "
"because of stolen lock", cqr);
cqr->status = DASD_CQR_ERROR;
cqr->intrc = -EPERM;
return -EPERM;
}
if (cqr->retries < 0) {
/* internal error 14 - start_IO run out of retries */
sprintf(errorstring, "14 %p", cqr);
@@ -916,6 +927,11 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
cqr->startclk = get_clock();
cqr->starttime = jiffies;
cqr->retries--;
if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
cqr->lpm &= device->path_data.opm;
if (!cqr->lpm)
cqr->lpm = device->path_data.opm;
}
if (cqr->cpmode == 1) {
rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
(long) cqr, cqr->lpm);
@@ -928,35 +944,53 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
cqr->status = DASD_CQR_IN_IO;
break;
case -EBUSY:
DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"start_IO: device busy, retry later");
break;
case -ETIMEDOUT:
DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"start_IO: request timeout, retry later");
break;
case -EACCES:
/* -EACCES indicates that the request used only a
* subset of the available pathes and all these
* pathes are gone.
* Do a retry with all available pathes.
/* -EACCES indicates that the request used only a subset of the
* available paths and all these paths are gone. If the lpm of
* this request was only a subset of the opm (e.g. the ppm) then
* we just do a retry with all available paths.
* If we already use the full opm, something is amiss, and we
* need a full path verification.
*/
cqr->lpm = LPM_ANYPATH;
DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
"start_IO: selected pathes gone,"
" retry on all pathes");
if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
DBF_DEV_EVENT(DBF_WARNING, device,
"start_IO: selected paths gone (%x)",
cqr->lpm);
} else if (cqr->lpm != device->path_data.opm) {
cqr->lpm = device->path_data.opm;
DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
"start_IO: selected paths gone,"
" retry on all paths");
} else {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"start_IO: all paths in opm gone,"
" do path verification");
dasd_generic_last_path_gone(device);
device->path_data.opm = 0;
device->path_data.ppm = 0;
device->path_data.npm = 0;
device->path_data.tbvpm =
ccw_device_get_path_mask(device->cdev);
}
break;
case -ENODEV:
DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"start_IO: -ENODEV device gone, retry");
break;
case -EIO:
DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"start_IO: -EIO device gone, retry");
break;
case -EINVAL:
/* most likely caused in power management context */
DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"start_IO: -EINVAL device currently "
"not accessible");
break;
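The new -EACCES comment above distinguishes three cases; a condensed restatement as an illustrative helper (sketch only, consolidating the logic of the hunk above, not code from the patch — lpm and opm are the 8-bit per-CHPID path masks used throughout this diff):

/* Hedged sketch: how the hunk above reacts when start_IO loses paths. */
static void handle_eacces_sketch(struct dasd_device *device,
                                 struct dasd_ccw_req *cqr)
{
        if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
                /* a path verification request itself lost its paths:
                 * nothing to widen, just log it */
        } else if (cqr->lpm != device->path_data.opm) {
                /* request used a subset (e.g. the ppm): retry on all
                 * operational paths */
                cqr->lpm = device->path_data.opm;
        } else {
                /* the full opm was already in use: every path is
                 * suspect, so schedule a full path verification */
                dasd_generic_last_path_gone(device);
                device->path_data.opm = 0;
                device->path_data.tbvpm =
                        ccw_device_get_path_mask(device->cdev);
        }
}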
@@ -1076,6 +1110,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
unsigned long long now;
int expires;
kstat_cpu(smp_processor_id()).irqs[IOINT_DAS]++;
if (IS_ERR(irb)) {
switch (PTR_ERR(irb)) {
case -EIO:
@@ -1094,16 +1129,11 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
}
now = get_clock();
/* check for unsolicited interrupts */
cqr = (struct dasd_ccw_req *) intparm;
if (!cqr || ((scsw_cc(&irb->scsw) == 1) &&
(scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) &&
((scsw_stctl(&irb->scsw) == SCSW_STCTL_STATUS_PEND) ||
(scsw_stctl(&irb->scsw) == (SCSW_STCTL_STATUS_PEND |
SCSW_STCTL_ALERT_STATUS))))) {
if (cqr && cqr->status == DASD_CQR_IN_IO)
cqr->status = DASD_CQR_QUEUED;
/* check for conditions that should be handled immediately */
if (!cqr ||
!(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
scsw_cstat(&irb->scsw) == 0)) {
if (cqr)
memcpy(&cqr->irb, irb, sizeof(*irb));
device = dasd_device_from_cdev_locked(cdev);
@@ -1114,17 +1144,14 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
dasd_put_device(device);
return;
}
device->discipline->dump_sense_dbf(device, irb,
"unsolicited");
if ((device->features & DASD_FEATURE_ERPLOG))
device->discipline->dump_sense(device, cqr,
irb);
dasd_device_clear_timer(device);
device->discipline->handle_unsolicited_interrupt(device,
irb);
device->discipline->dump_sense_dbf(device, irb, "int");
if (device->features & DASD_FEATURE_ERPLOG)
device->discipline->dump_sense(device, cqr, irb);
device->discipline->check_for_device_change(device, cqr, irb);
dasd_put_device(device);
return;
}
if (!cqr)
return;
device = (struct dasd_device *) cqr->startdev;
if (!device ||
@@ -1164,25 +1191,19 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
struct dasd_ccw_req, devlist);
}
} else { /* error */
memcpy(&cqr->irb, irb, sizeof(struct irb));
/* log sense for every failed I/O to s390 debugfeature */
dasd_log_sense_dbf(cqr, irb);
if (device->features & DASD_FEATURE_ERPLOG) {
dasd_log_sense(cqr, irb);
}
/*
* If we don't want complex ERP for this request, then just
* reset this and retry it in the fastpath
*/
if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
cqr->retries > 0) {
if (cqr->lpm == LPM_ANYPATH)
if (cqr->lpm == device->path_data.opm)
DBF_DEV_EVENT(DBF_DEBUG, device,
"default ERP in fastpath "
"(%i retries left)",
cqr->retries);
cqr->lpm = LPM_ANYPATH;
if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
cqr->lpm = device->path_data.opm;
cqr->status = DASD_CQR_QUEUED;
next = cqr;
} else
@@ -1210,13 +1231,13 @@ enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
goto out;
if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
device->state != device->target ||
!device->discipline->handle_unsolicited_interrupt){
!device->discipline->check_for_device_change){
dasd_put_device(device);
goto out;
}
dasd_device_clear_timer(device);
device->discipline->handle_unsolicited_interrupt(device, irb);
if (device->discipline->dump_sense_dbf)
device->discipline->dump_sense_dbf(device, irb, "uc");
device->discipline->check_for_device_change(device, NULL, irb);
dasd_put_device(device);
out:
return UC_TODO_RETRY;
@@ -1366,8 +1387,14 @@ static void __dasd_device_start_head(struct dasd_device *device)
cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
if (cqr->status != DASD_CQR_QUEUED)
return;
/* when device is stopped, return request to previous layer */
if (device->stopped) {
/* when device is stopped, return request to previous layer
* exception: only the disconnect or unresumed bits are set and the
* cqr is a path verification request
*/
if (device->stopped &&
!(!(device->stopped & ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM))
&& test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))) {
cqr->intrc = -EAGAIN;
cqr->status = DASD_CQR_CLEARED;
dasd_schedule_device_bh(device);
return;
@@ -1383,6 +1410,23 @@ static void __dasd_device_start_head(struct dasd_device *device)
dasd_device_set_timer(device, 50);
}
static void __dasd_device_check_path_events(struct dasd_device *device)
{
int rc;
if (device->path_data.tbvpm) {
if (device->stopped & ~(DASD_STOPPED_DC_WAIT |
DASD_UNRESUMED_PM))
return;
rc = device->discipline->verify_path(
device, device->path_data.tbvpm);
if (rc)
dasd_device_set_timer(device, 50);
else
device->path_data.tbvpm = 0;
}
};
/*
* Go through all request on the dasd_device request queue,
* terminate them on the cdev if necessary, and return them to the
@@ -1457,6 +1501,7 @@ static void dasd_device_tasklet(struct dasd_device *device)
__dasd_device_check_expire(device);
/* find final requests on ccw queue */
__dasd_device_process_ccw_queue(device, &final_queue);
__dasd_device_check_path_events(device);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
/* Now call the callback function of requests with final status */
__dasd_device_process_final_queue(device, &final_queue);
@@ -1613,7 +1658,12 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
continue;
if (cqr->status != DASD_CQR_FILLED) /* could be failed */
continue;
if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
!test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
cqr->status = DASD_CQR_FAILED;
cqr->intrc = -EPERM;
continue;
}
/* Non-temporary stop condition will trigger fail fast */
if (device->stopped & ~DASD_STOPPED_PENDING &&
test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
@@ -1621,7 +1671,6 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
cqr->status = DASD_CQR_FAILED;
continue;
}
/* Don't try to start requests if device is stopped */
if (interruptible) {
rc = wait_event_interruptible(
@@ -1706,13 +1755,18 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
int rc;
device = cqr->startdev;
if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
!test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
cqr->status = DASD_CQR_FAILED;
cqr->intrc = -EPERM;
return -EIO;
}
spin_lock_irq(get_ccwdev_lock(device->cdev));
rc = _dasd_term_running_cqr(device);
if (rc) {
spin_unlock_irq(get_ccwdev_lock(device->cdev));
return rc;
}
cqr->callback = dasd_wakeup_cb;
cqr->callback_data = DASD_SLEEPON_START_TAG;
cqr->status = DASD_CQR_QUEUED;
@@ -2016,6 +2070,13 @@ static void __dasd_block_start_head(struct dasd_block *block)
list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
if (cqr->status != DASD_CQR_FILLED)
continue;
if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) &&
!test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
cqr->status = DASD_CQR_FAILED;
cqr->intrc = -EPERM;
dasd_schedule_block_bh(block);
continue;
}
/* Non-temporary stop condition will trigger fail fast */
if (block->base->stopped & ~DASD_STOPPED_PENDING &&
test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
@@ -2201,8 +2262,20 @@ static void dasd_setup_queue(struct dasd_block *block)
{
int max;
blk_queue_logical_block_size(block->request_queue, block->bp_block);
max = block->base->discipline->max_blocks << block->s2b_shift;
if (block->base->features & DASD_FEATURE_USERAW) {
/*
* the max_blocks value for raw_track access is 256
* it is higher than the native ECKD value because we
* only need one ccw per track
* so the max_hw_sectors are
* 2048 x 512B = 1024kB = 16 tracks
*/
max = 2048;
} else {
max = block->base->discipline->max_blocks << block->s2b_shift;
}
blk_queue_logical_block_size(block->request_queue,
block->bp_block);
blk_queue_max_hw_sectors(block->request_queue, max);
blk_queue_max_segments(block->request_queue, -1L);
/* with page sized segments we can translate each segement into
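The comment in the dasd_setup_queue() hunk above compresses the arithmetic; spelled out: 2048 sectors of 512 bytes each give 2048 x 512 B = 1 MiB per request, and "16 tracks" then implies 64 KiB per raw ECKD track image. A compile-time restatement of just that arithmetic (plain C, no kernel constants involved):

/* Worked numbers behind the USERAW max_hw_sectors choice above. */
enum {
        raw_sector_bytes = 512,                  /* block layer sector size    */
        raw_max_sectors  = 2048,                 /* max_hw_sectors for USERAW  */
        raw_track_bytes  = 64 * 1024,            /* implied size of one track  */
        raw_req_bytes    = raw_max_sectors * raw_sector_bytes, /* 1048576 B    */
        raw_req_tracks   = raw_req_bytes / raw_track_bytes     /* = 16 tracks  */
};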
@@ -2588,10 +2661,53 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
return 0;
}
int dasd_generic_last_path_gone(struct dasd_device *device)
{
struct dasd_ccw_req *cqr;
dev_warn(&device->cdev->dev, "No operational channel path is left "
"for the device\n");
DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone");
/* First of all call extended error reporting. */
dasd_eer_write(device, NULL, DASD_EER_NOPATH);
if (device->state < DASD_STATE_BASIC)
return 0;
/* Device is active. We want to keep it. */
list_for_each_entry(cqr, &device->ccw_queue, devlist)
if ((cqr->status == DASD_CQR_IN_IO) ||
(cqr->status == DASD_CQR_CLEAR_PENDING)) {
cqr->status = DASD_CQR_QUEUED;
cqr->retries++;
}
dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
dasd_device_clear_timer(device);
dasd_schedule_device_bh(device);
return 1;
}
EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone);
int dasd_generic_path_operational(struct dasd_device *device)
{
dev_info(&device->cdev->dev, "A channel path to the device has become "
"operational\n");
DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational");
dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
if (device->stopped & DASD_UNRESUMED_PM) {
dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
dasd_restore_device(device);
return 1;
}
dasd_schedule_device_bh(device);
if (device->block)
dasd_schedule_block_bh(device->block);
return 1;
}
EXPORT_SYMBOL_GPL(dasd_generic_path_operational);
int dasd_generic_notify(struct ccw_device *cdev, int event)
{
struct dasd_device *device;
struct dasd_ccw_req *cqr;
int ret;
device = dasd_device_from_cdev_locked(cdev);
@@ -2602,41 +2718,64 @@ int dasd_generic_notify(struct ccw_device *cdev, int event)
case CIO_GONE:
case CIO_BOXED:
case CIO_NO_PATH:
/* First of all call extended error reporting. */
dasd_eer_write(device, NULL, DASD_EER_NOPATH);
if (device->state < DASD_STATE_BASIC)
break;
/* Device is active. We want to keep it. */
list_for_each_entry(cqr, &device->ccw_queue, devlist)
if (cqr->status == DASD_CQR_IN_IO) {
cqr->status = DASD_CQR_QUEUED;
cqr->retries++;
}
dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
dasd_device_clear_timer(device);
dasd_schedule_device_bh(device);
ret = 1;
device->path_data.opm = 0;
device->path_data.ppm = 0;
device->path_data.npm = 0;
ret = dasd_generic_last_path_gone(device);
break;
case CIO_OPER:
/* FIXME: add a sanity check. */
dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
if (device->stopped & DASD_UNRESUMED_PM) {
dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
dasd_restore_device(device);
ret = 1;
break;
}
dasd_schedule_device_bh(device);
if (device->block)
dasd_schedule_block_bh(device->block);
ret = 1;
if (device->path_data.opm)
ret = dasd_generic_path_operational(device);
break;
}
dasd_put_device(device);
return ret;
}
void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
{
int chp;
__u8 oldopm, eventlpm;
struct dasd_device *device;
device = dasd_device_from_cdev_locked(cdev);
if (IS_ERR(device))
return;
for (chp = 0; chp < 8; chp++) {
eventlpm = 0x80 >> chp;
if (path_event[chp] & PE_PATH_GONE) {
oldopm = device->path_data.opm;
device->path_data.opm &= ~eventlpm;
device->path_data.ppm &= ~eventlpm;
device->path_data.npm &= ~eventlpm;
if (oldopm && !device->path_data.opm)
dasd_generic_last_path_gone(device);
}
if (path_event[chp] & PE_PATH_AVAILABLE) {
device->path_data.opm &= ~eventlpm;
device->path_data.ppm &= ~eventlpm;
device->path_data.npm &= ~eventlpm;
device->path_data.tbvpm |= eventlpm;
dasd_schedule_device_bh(device);
}
}
dasd_put_device(device);
}
EXPORT_SYMBOL_GPL(dasd_generic_path_event);
int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
{
if (!device->path_data.opm && lpm) {
device->path_data.opm = lpm;
dasd_generic_path_operational(device);
} else
device->path_data.opm |= lpm;
return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_verify_path);

int dasd_generic_pm_freeze(struct ccw_device *cdev)
{
struct dasd_ccw_req *cqr, *n;
@@ -2646,6 +2785,10 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
if (IS_ERR(device))
return PTR_ERR(device);
if (device->discipline->freeze)
rc = device->discipline->freeze(device);
/* disallow new I/O */
dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
/* clear active requests */
@@ -2682,9 +2825,6 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
list_splice_tail(&freeze_queue, &device->ccw_queue);
spin_unlock_irq(get_ccwdev_lock(cdev));
if (device->discipline->freeze)
rc = device->discipline->freeze(device);
dasd_put_device(device);
return rc;
}
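dasd_generic_path_event() above encodes channel paths as an 8-bit mask, with path index chp (0 to 7) mapped to bit 0x80 >> chp. A standalone illustration of that convention (userspace sketch for demonstration, not kernel code):

/* Illustration of the path-mask convention used above: bit 0x80 is
 * the first channel path, 0x40 the second, and so on. */
#include <stdio.h>

int main(void)
{
        unsigned char opm = 0xC0;             /* paths 0 and 1 operational */
        int chp = 1;                          /* path 1 reports PE_PATH_GONE */
        unsigned char eventlpm = 0x80 >> chp; /* 0x40 */

        opm &= ~eventlpm;                     /* drop path 1: opm is now 0x80 */
        printf("opm after path %d gone: %#x\n", chp, opm);
        return 0;
}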
@@ -152,9 +152,9 @@ dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp)
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
opm = ccw_device_get_path_mask(device->cdev);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
//FIXME: start with get_opm ?
if (erp->lpm == 0)
erp->lpm = LPM_ANYPATH & ~(erp->irb.esw.esw0.sublog.lpum);
erp->lpm = device->path_data.opm &
~(erp->irb.esw.esw0.sublog.lpum);
else
erp->lpm &= ~(erp->irb.esw.esw0.sublog.lpum);
@@ -270,10 +270,11 @@ static struct dasd_ccw_req *dasd_3990_erp_action_1(struct dasd_ccw_req *erp)
{
erp->function = dasd_3990_erp_action_1;
dasd_3990_erp_alternate_path(erp);
if (erp->status == DASD_CQR_FAILED) {
if (erp->status == DASD_CQR_FAILED &&
!test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) {
erp->status = DASD_CQR_FILLED;
erp->retries = 10;
erp->lpm = LPM_ANYPATH;
erp->lpm = erp->startdev->path_data.opm;
erp->function = dasd_3990_erp_action_1_sec;
}
return erp;
@@ -1907,15 +1908,14 @@ dasd_3990_erp_compound_retry(struct dasd_ccw_req * erp, char *sense)
static void
dasd_3990_erp_compound_path(struct dasd_ccw_req * erp, char *sense)
{
if (sense[25] & DASD_SENSE_BIT_3) {
dasd_3990_erp_alternate_path(erp);
if (erp->status == DASD_CQR_FAILED) {
if (erp->status == DASD_CQR_FAILED &&
!test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) {
/* reset the lpm and the status to be able to
* try further actions. */
erp->lpm = 0;
erp->lpm = erp->startdev->path_data.opm;
erp->status = DASD_CQR_NEED_ERP;
}
}
@@ -208,6 +208,8 @@ dasd_feature_list(char *str, char **endp)
features |= DASD_FEATURE_READONLY;
else if (len == 4 && !strncmp(str, "diag", 4))
features |= DASD_FEATURE_USEDIAG;
else if (len == 3 && !strncmp(str, "raw", 3))
features |= DASD_FEATURE_USERAW;
else if (len == 6 && !strncmp(str, "erplog", 6))
features |= DASD_FEATURE_ERPLOG;
else if (len == 8 && !strncmp(str, "failfast", 8))
@@ -639,6 +641,7 @@ dasd_put_device_wake(struct dasd_device *device)
{
wake_up(&dasd_delete_wq);
}
EXPORT_SYMBOL_GPL(dasd_put_device_wake);
/*
* Return dasd_device structure associated with cdev.
@@ -856,7 +859,7 @@ dasd_use_diag_store(struct device *dev, struct device_attribute *attr,
spin_lock(&dasd_devmap_lock);
/* Changing diag discipline flag is only allowed in offline state. */
rc = count;
if (!devmap->device) {
if (!devmap->device && !(devmap->features & DASD_FEATURE_USERAW)) {
if (val)
devmap->features |= DASD_FEATURE_USEDIAG;
else
@@ -869,6 +872,56 @@ dasd_use_diag_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(use_diag, 0644, dasd_use_diag_show, dasd_use_diag_store);
/*
* use_raw controls whether the driver should give access to raw eckd data or
* operate in standard mode
*/
static ssize_t
dasd_use_raw_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct dasd_devmap *devmap;
int use_raw;
devmap = dasd_find_busid(dev_name(dev));
if (!IS_ERR(devmap))
use_raw = (devmap->features & DASD_FEATURE_USERAW) != 0;
else
use_raw = (DASD_FEATURE_DEFAULT & DASD_FEATURE_USERAW) != 0;
return sprintf(buf, use_raw ? "1\n" : "0\n");
}
static ssize_t
dasd_use_raw_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct dasd_devmap *devmap;
ssize_t rc;
unsigned long val;
devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
if (IS_ERR(devmap))
return PTR_ERR(devmap);
if ((strict_strtoul(buf, 10, &val) != 0) || val > 1)
return -EINVAL;
spin_lock(&dasd_devmap_lock);
/* Changing diag discipline flag is only allowed in offline state. */
rc = count;
if (!devmap->device && !(devmap->features & DASD_FEATURE_USEDIAG)) {
if (val)
devmap->features |= DASD_FEATURE_USERAW;
else
devmap->features &= ~DASD_FEATURE_USERAW;
} else
rc = -EPERM;
spin_unlock(&dasd_devmap_lock);
return rc;
}
static DEVICE_ATTR(raw_track_access, 0644, dasd_use_raw_show,
dasd_use_raw_store);
static ssize_t
dasd_discipline_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -1126,6 +1179,103 @@ dasd_expires_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(expires, 0644, dasd_expires_show, dasd_expires_store);
static ssize_t dasd_reservation_policy_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct dasd_devmap *devmap;
int rc = 0;
devmap = dasd_find_busid(dev_name(dev));
if (IS_ERR(devmap)) {
rc = snprintf(buf, PAGE_SIZE, "ignore\n");
} else {
spin_lock(&dasd_devmap_lock);
if (devmap->features & DASD_FEATURE_FAILONSLCK)
rc = snprintf(buf, PAGE_SIZE, "fail\n");
else
rc = snprintf(buf, PAGE_SIZE, "ignore\n");
spin_unlock(&dasd_devmap_lock);
}
return rc;
}
static ssize_t dasd_reservation_policy_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct dasd_devmap *devmap;
int rc;
devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
if (IS_ERR(devmap))
return PTR_ERR(devmap);
rc = 0;
spin_lock(&dasd_devmap_lock);
if (sysfs_streq("ignore", buf))
devmap->features &= ~DASD_FEATURE_FAILONSLCK;
else if (sysfs_streq("fail", buf))
devmap->features |= DASD_FEATURE_FAILONSLCK;
else
rc = -EINVAL;
if (devmap->device)
devmap->device->features = devmap->features;
spin_unlock(&dasd_devmap_lock);
if (rc)
return rc;
else
return count;
}
static DEVICE_ATTR(reservation_policy, 0644,
dasd_reservation_policy_show, dasd_reservation_policy_store);
static ssize_t dasd_reservation_state_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct dasd_device *device;
int rc = 0;
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return snprintf(buf, PAGE_SIZE, "none\n");
if (test_bit(DASD_FLAG_IS_RESERVED, &device->flags))
rc = snprintf(buf, PAGE_SIZE, "reserved\n");
else if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags))
rc = snprintf(buf, PAGE_SIZE, "lost\n");
else
rc = snprintf(buf, PAGE_SIZE, "none\n");
dasd_put_device(device);
return rc;
}
static ssize_t dasd_reservation_state_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct dasd_device *device;
int rc = 0;
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return -ENODEV;
if (sysfs_streq("reset", buf))
clear_bit(DASD_FLAG_LOCK_STOLEN, &device->flags);
else
rc = -EINVAL;
dasd_put_device(device);
if (rc)
return rc;
else
return count;
}
static DEVICE_ATTR(last_known_reservation_state, 0644,
dasd_reservation_state_show, dasd_reservation_state_store);
static struct attribute * dasd_attrs[] = {
&dev_attr_readonly.attr,
&dev_attr_discipline.attr,
@@ -1134,10 +1284,13 @@ static struct attribute * dasd_attrs[] = {
&dev_attr_vendor.attr,
&dev_attr_uid.attr,
&dev_attr_use_diag.attr,
&dev_attr_raw_track_access.attr,
&dev_attr_eer_enabled.attr,
&dev_attr_erplog.attr,
&dev_attr_failfast.attr,
&dev_attr_expires.attr,
&dev_attr_reservation_policy.attr,
&dev_attr_last_known_reservation_state.attr,
NULL,
};
@@ -10,6 +10,7 @@
#define KMSG_COMPONENT "dasd"
#include <linux/kernel_stat.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -238,6 +239,7 @@ static void dasd_ext_handler(unsigned int ext_int_code,
addr_t ip;
int rc;
kstat_cpu(smp_processor_id()).irqs[EXTINT_DSD]++;
switch (ext_int_code >> 24) {
case DASD_DIAG_CODE_31BIT:
ip = (addr_t) param32;
@@ -617,6 +619,7 @@ static struct dasd_discipline dasd_diag_discipline = {
.ebcname = "DIAG",
.max_blocks = DIAG_MAX_BLOCKS,
.check_device = dasd_diag_check_device,
.verify_path = dasd_generic_verify_path,
.fill_geometry = dasd_diag_fill_geometry,
.start_IO = dasd_start_diag,
.term_IO = dasd_diag_term_IO,
File diff suppressed because it is too large
@@ -37,14 +37,17 @@
#define DASD_ECKD_CCW_WRITE_KD_MT 0x8d
#define DASD_ECKD_CCW_READ_KD_MT 0x8e
#define DASD_ECKD_CCW_RELEASE 0x94
#define DASD_ECKD_CCW_WRITE_FULL_TRACK 0x95
#define DASD_ECKD_CCW_READ_CKD_MT 0x9e
#define DASD_ECKD_CCW_WRITE_CKD_MT 0x9d
#define DASD_ECKD_CCW_WRITE_TRACK_DATA 0xA5
#define DASD_ECKD_CCW_READ_TRACK_DATA 0xA6
#define DASD_ECKD_CCW_RESERVE 0xB4
#define DASD_ECKD_CCW_READ_TRACK 0xDE
#define DASD_ECKD_CCW_PFX 0xE7
#define DASD_ECKD_CCW_PFX_READ 0xEA
#define DASD_ECKD_CCW_RSCK 0xF9
#define DASD_ECKD_CCW_RCD 0xFA
/*
* Perform Subsystem Function / Sub-Orders
@@ -57,6 +60,11 @@
*/
#define LV_COMPAT_CYL 0xFFFE
#define FCX_MAX_DATA_FACTOR 65536
#define DASD_ECKD_RCD_DATA_SIZE 256
/*****************************************************************************
* SECTION: Type Definitions
****************************************************************************/
@@ -331,12 +339,6 @@ struct dasd_gneq {
__u8 reserved2[22];
} __attribute__ ((packed));
struct dasd_eckd_path {
__u8 opm;
__u8 ppm;
__u8 npm;
};
struct dasd_rssd_features {
char feature[256];
} __attribute__((packed));
@@ -442,7 +444,6 @@ struct dasd_eckd_private {
struct vd_sneq *vdsneq;
struct dasd_gneq *gneq;
struct dasd_eckd_path path_data;
struct eckd_count count_area[5];
int init_cqr_status;
int uses_cdl;
@@ -455,6 +456,8 @@ struct dasd_eckd_private {
struct alias_pav_group *pavgroup;
struct alias_lcu *lcu;
int count;
u32 fcx_max_data;
};
@@ -473,6 +473,7 @@ int dasd_eer_enable(struct dasd_device *device)
cqr->retries = 255;
cqr->expires = 10 * HZ;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_SNSS;
@@ -96,7 +96,8 @@ dasd_default_erp_action(struct dasd_ccw_req *cqr)
DBF_DEV_EVENT(DBF_DEBUG, device,
"default ERP called (%i retries left)",
cqr->retries);
cqr->lpm = LPM_ANYPATH;
if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
cqr->lpm = device->path_data.opm;
cqr->status = DASD_CQR_FILLED;
} else {
pr_err("%s: default ERP has run out of retries and failed\n",
@@ -73,6 +73,7 @@ static struct ccw_driver dasd_fba_driver = {
.set_offline = dasd_generic_set_offline,
.set_online = dasd_fba_set_online,
.notify = dasd_generic_notify,
.path_event = dasd_generic_path_event,
.freeze = dasd_generic_pm_freeze,
.thaw = dasd_generic_restore_device,
.restore = dasd_generic_restore_device,
@@ -164,6 +165,7 @@ dasd_fba_check_characteristics(struct dasd_device *device)
}
device->default_expires = DASD_EXPIRES;
device->path_data.opm = LPM_ANYPATH;
readonly = dasd_device_is_ro(device);
if (readonly)
@@ -231,24 +233,16 @@ dasd_fba_erp_postaction(struct dasd_ccw_req * cqr)
return NULL;
}
static void dasd_fba_handle_unsolicited_interrupt(struct dasd_device *device,
struct irb *irb)
static void dasd_fba_check_for_device_change(struct dasd_device *device,
struct dasd_ccw_req *cqr,
struct irb *irb)
{
char mask;
/* first of all check for state change pending interrupt */
mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
if ((irb->scsw.cmd.dstat & mask) == mask) {
if ((irb->scsw.cmd.dstat & mask) == mask)
dasd_generic_handle_state_change(device);
return;
}
/* check for unsolicited interrupts */
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"unsolicited interrupt received");
device->discipline->dump_sense_dbf(device, irb, "unsolicited");
dasd_schedule_device_bh(device);
return;
};
static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
@@ -596,13 +590,14 @@ static struct dasd_discipline dasd_fba_discipline = {
.max_blocks = 96,
.check_device = dasd_fba_check_characteristics,
.do_analysis = dasd_fba_do_analysis,
.verify_path = dasd_generic_verify_path,
.fill_geometry = dasd_fba_fill_geometry,
.start_IO = dasd_start_IO,
.term_IO = dasd_term_IO,
.handle_terminated_request = dasd_fba_handle_terminated_request,
.erp_action = dasd_fba_erp_action,
.erp_postaction = dasd_fba_erp_postaction,
.handle_unsolicited_interrupt = dasd_fba_handle_unsolicited_interrupt,
.check_for_device_change = dasd_fba_check_for_device_change,
.build_cp = dasd_fba_build_cp,
.free_cp = dasd_fba_free_cp,
.dump_sense = dasd_fba_dump_sense,
@@ -231,6 +231,11 @@ struct dasd_ccw_req {
/* per dasd_ccw_req flags */
#define DASD_CQR_FLAGS_USE_ERP 0 /* use ERP for this request */
#define DASD_CQR_FLAGS_FAILFAST 1 /* FAILFAST */
#define DASD_CQR_VERIFY_PATH 2 /* path verification request */
#define DASD_CQR_ALLOW_SLOCK 3 /* Try this request even when lock was
* stolen. Should not be combined with
* DASD_CQR_FLAGS_USE_ERP
*/
/* Signature for error recovery functions. */
typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *);
@@ -286,6 +291,14 @@ struct dasd_discipline {
*/
int (*do_analysis) (struct dasd_block *);
/*
* This function is called, when new paths become available.
* Disciplins may use this callback to do necessary setup work,
* e.g. verify that new path is compatible with the current
* configuration.
*/
int (*verify_path)(struct dasd_device *, __u8);
/*
* Last things to do when a device is set online, and first things
* when it is set offline.
@@ -325,9 +338,9 @@ struct dasd_discipline {
void (*dump_sense) (struct dasd_device *, struct dasd_ccw_req *,
struct irb *);
void (*dump_sense_dbf) (struct dasd_device *, struct irb *, char *);
void (*handle_unsolicited_interrupt) (struct dasd_device *,
struct irb *);
void (*check_for_device_change) (struct dasd_device *,
struct dasd_ccw_req *,
struct irb *);
/* i/o control functions. */
int (*fill_geometry) (struct dasd_block *, struct hd_geometry *);
@@ -362,6 +375,13 @@ extern struct dasd_discipline *dasd_diag_discipline_pointer;
#define DASD_EER_STATECHANGE 3
#define DASD_EER_PPRCSUSPEND 4
struct dasd_path {
__u8 opm;
__u8 tbvpm;
__u8 ppm;
__u8 npm;
};
struct dasd_device {
/* Block device stuff. */
struct dasd_block *block;
@@ -377,6 +397,7 @@ struct dasd_device {
struct dasd_discipline *discipline;
struct dasd_discipline *base_discipline;
char *private;
struct dasd_path path_data;
/* Device state and target state. */
int state, target;
@@ -456,6 +477,9 @@ struct dasd_block {
* confuse this with the user specified
* read-only feature.
*/
#define DASD_FLAG_IS_RESERVED 7 /* The device is reserved */
#define DASD_FLAG_LOCK_STOLEN 8 /* The device lock was stolen */
void dasd_put_device_wake(struct dasd_device *);
@@ -620,10 +644,15 @@ void dasd_generic_remove (struct ccw_device *cdev);
int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *);
int dasd_generic_set_offline (struct ccw_device *cdev);
int dasd_generic_notify(struct ccw_device *, int);
int dasd_generic_last_path_gone(struct dasd_device *);
int dasd_generic_path_operational(struct dasd_device *);
void dasd_generic_handle_state_change(struct dasd_device *);
int dasd_generic_pm_freeze(struct ccw_device *);
int dasd_generic_restore_device(struct ccw_device *);
enum uc_todo dasd_generic_uc_handler(struct ccw_device *, struct irb *);
void dasd_generic_path_event(struct ccw_device *, int *);
int dasd_generic_verify_path(struct dasd_device *, __u8);
int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int);
char *dasd_get_sense(struct irb *);
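The new struct dasd_path above is declared without comments; a plausible reading of the field names, inferred from how the hunks in this merge use them (the expansions are assumptions, not stated in the source):

/* Annotated copy of struct dasd_path from the hunk above. The comments
 * are inferred from usage elsewhere in this diff, not from the source. */
struct dasd_path {
        __u8 opm;       /* operational path mask: paths usable for I/O      */
        __u8 tbvpm;     /* to-be-verified path mask: consumed by the
                         * discipline's verify_path() callback              */
        __u8 ppm;       /* presumably the preferred path mask (subset of
                         * opm, see the -EACCES handling in dasd_start_IO)  */
        __u8 npm;       /* presumably the non-preferred path mask           */
};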
@@ -2,76 +2,85 @@ comment "S/390 character device drivers"
depends on S390
config TN3270
tristate "Support for locally attached 3270 terminals"
def_tristate y
prompt "Support for locally attached 3270 terminals"
depends on CCW
help
Include support for IBM 3270 terminals.
config TN3270_TTY
tristate "Support for tty input/output on 3270 terminals"
def_tristate y
prompt "Support for tty input/output on 3270 terminals"
depends on TN3270
help
Include support for using an IBM 3270 terminal as a Linux tty.
config TN3270_FS
tristate "Support for fullscreen applications on 3270 terminals"
def_tristate m
prompt "Support for fullscreen applications on 3270 terminals"
depends on TN3270
help
Include support for fullscreen applications on an IBM 3270 terminal.
config TN3270_CONSOLE
bool "Support for console on 3270 terminal"
def_bool y
prompt "Support for console on 3270 terminal"
depends on TN3270=y && TN3270_TTY=y
help
Include support for using an IBM 3270 terminal as a Linux system
console. Available only if 3270 support is compiled in statically.
config TN3215
bool "Support for 3215 line mode terminal"
def_bool y
prompt "Support for 3215 line mode terminal"
depends on CCW
help
Include support for IBM 3215 line-mode terminals.
config TN3215_CONSOLE
bool "Support for console on 3215 line mode terminal"
def_bool y
prompt "Support for console on 3215 line mode terminal"
depends on TN3215
help
Include support for using an IBM 3215 line-mode terminal as a
Linux system console.
config CCW_CONSOLE
bool
depends on TN3215_CONSOLE || TN3270_CONSOLE
default y
def_bool y if TN3215_CONSOLE || TN3270_CONSOLE
config SCLP_TTY
bool "Support for SCLP line mode terminal"
def_bool y
prompt "Support for SCLP line mode terminal"
depends on S390
help
Include support for IBM SCLP line-mode terminals.
config SCLP_CONSOLE
bool "Support for console on SCLP line mode terminal"
def_bool y
prompt "Support for console on SCLP line mode terminal"
depends on SCLP_TTY
help
Include support for using an IBM HWC line-mode terminal as the Linux
system console.
config SCLP_VT220_TTY
bool "Support for SCLP VT220-compatible terminal"
def_bool y
prompt "Support for SCLP VT220-compatible terminal"
depends on S390
help
Include support for an IBM SCLP VT220-compatible terminal.
config SCLP_VT220_CONSOLE
bool "Support for console on SCLP VT220-compatible terminal"
def_bool y
prompt "Support for console on SCLP VT220-compatible terminal"
depends on SCLP_VT220_TTY
help
Include support for using an IBM SCLP VT220-compatible terminal as a
Linux system console.
config SCLP_CPI
tristate "Control-Program Identification"
def_tristate m
prompt "Control-Program Identification"
depends on S390
help
This option enables the hardware console interface for system
@@ -83,7 +92,8 @@ config SCLP_CPI
need this feature and intend to run your kernel in LPAR.
config SCLP_ASYNC
tristate "Support for Call Home via Asynchronous SCLP Records"
def_tristate m
prompt "Support for Call Home via Asynchronous SCLP Records"
depends on S390
help
This option enables the call home function, which is able to inform
@@ -93,7 +103,8 @@ config SCLP_ASYNC
need this feature and intend to run your kernel in LPAR.
config S390_TAPE
tristate "S/390 tape device support"
def_tristate m
prompt "S/390 tape device support"
depends on CCW
help
Select this option if you want to access channel-attached tape
@@ -109,7 +120,8 @@ comment "S/390 tape interface support"
depends on S390_TAPE
config S390_TAPE_BLOCK
bool "Support for tape block devices"
def_bool y
prompt "Support for tape block devices"
depends on S390_TAPE && BLOCK
help
Select this option if you want to access your channel-attached tape
@@ -123,7 +135,8 @@ comment "S/390 tape hardware support"
depends on S390_TAPE
config S390_TAPE_34XX
tristate "Support for 3480/3490 tape hardware"
def_tristate m
prompt "Support for 3480/3490 tape hardware"
depends on S390_TAPE
help
Select this option if you want to access IBM 3480/3490 magnetic
@@ -131,7 +144,8 @@ config S390_TAPE_34XX
It is safe to say "Y" here.
config S390_TAPE_3590
tristate "Support for 3590 tape hardware"
def_tristate m
prompt "Support for 3590 tape hardware"
depends on S390_TAPE
help
Select this option if you want to access IBM 3590 magnetic
@@ -139,7 +153,8 @@ config S390_TAPE_3590
It is safe to say "Y" here.
config VMLOGRDR
tristate "Support for the z/VM recording system services (VM only)"
def_tristate m
prompt "Support for the z/VM recording system services (VM only)"
depends on IUCV
help
Select this option if you want to be able to receive records collected
@@ -148,29 +163,31 @@ config VMLOGRDR
This driver depends on the IUCV support driver.
config VMCP
bool "Support for the z/VM CP interface"
def_bool y
prompt "Support for the z/VM CP interface"
depends on S390
help
Select this option if you want to be able to interact with the control
program on z/VM
config MONREADER
tristate "API for reading z/VM monitor service records"
def_tristate m
prompt "API for reading z/VM monitor service records"
depends on IUCV
help
Character device driver for reading z/VM monitor service records
config MONWRITER
tristate "API for writing z/VM monitor service records"
def_tristate m
prompt "API for writing z/VM monitor service records"
depends on S390
default "m"
help
Character device driver for writing z/VM monitor service records
config S390_VMUR
tristate "z/VM unit record device driver"
def_tristate m
prompt "z/VM unit record device driver"
depends on S390
default "m"
help
Character device driver for z/VM reader, puncher and printer.
@@ -9,6 +9,7 @@
* Dan Morrison, IBM Corporation <dmorriso@cse.buffalo.edu>
*/
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kdev_t.h>
@@ -361,6 +362,7 @@ static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm,
int cstat, dstat;
int count;
kstat_cpu(smp_processor_id()).irqs[IOINT_C15]++;
raw = dev_get_drvdata(&cdev->dev);
req = (struct raw3215_req *) intparm;
cstat = irb->scsw.cmd.cstat;
@@ -7,6 +7,7 @@
* Copyright IBM Corp. 2003, 2009
*/
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -329,6 +330,7 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
struct raw3270_request *rq;
int rc;
kstat_cpu(smp_processor_id()).irqs[IOINT_C70]++;
rp = dev_get_drvdata(&cdev->dev);
if (!rp)
return;
@@ -7,6 +7,7 @@
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/spinlock.h>
@@ -18,16 +19,14 @@
#include <linux/suspend.h>
#include <linux/completion.h>
#include <linux/platform_device.h>
#include <asm/types.h>
#include <asm/s390_ext.h>
#include <asm/types.h>
#include <asm/irq.h>
#include "sclp.h"
#define SCLP_HEADER "sclp: "
/* Structure for register_early_external_interrupt. */
static ext_int_info_t ext_int_info_hwc;
/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);
@@ -402,6 +401,7 @@ static void sclp_interrupt_handler(unsigned int ext_int_code,
u32 finished_sccb;
u32 evbuf_pending;
kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++;
spin_lock(&sclp_lock);
finished_sccb = param32 & 0xfffffff8;
evbuf_pending = param32 & 0x3;
@@ -824,6 +824,7 @@ static void sclp_check_handler(unsigned int ext_int_code,
{
u32 finished_sccb;
kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++;
finished_sccb = param32 & 0xfffffff8;
/* Is this the interrupt we are waiting for? */
if (finished_sccb == 0)
@@ -866,8 +867,7 @@ sclp_check_interface(void)
spin_lock_irqsave(&sclp_lock, flags);
/* Prepare init mask command */
rc = register_early_external_interrupt(0x2401, sclp_check_handler,
&ext_int_info_hwc);
rc = register_external_interrupt(0x2401, sclp_check_handler);
if (rc) {
spin_unlock_irqrestore(&sclp_lock, flags);
return rc;
@@ -900,8 +900,7 @@ sclp_check_interface(void)
} else
rc = -EBUSY;
}
unregister_early_external_interrupt(0x2401, sclp_check_handler,
&ext_int_info_hwc);
unregister_external_interrupt(0x2401, sclp_check_handler);
spin_unlock_irqrestore(&sclp_lock, flags);
return rc;
}
@@ -1064,8 +1063,7 @@ sclp_init(void)
if (rc)
goto fail_init_state_uninitialized;
/* Register interrupt handler */
rc = register_early_external_interrupt(0x2401, sclp_interrupt_handler,
&ext_int_info_hwc);
rc = register_external_interrupt(0x2401, sclp_interrupt_handler);
if (rc)
goto fail_unregister_reboot_notifier;
sclp_init_state = sclp_init_state_initialized;
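A pattern repeats across this merge: dasd, sclp, 3215, 3270, tape, and vmur handlers all gain one line of per-CPU interrupt accounting, indexed by interrupt source. The recurring shape, as a sketch (EXTINT_XYZ stands in for the per-source index such as EXTINT_SCP or IOINT_DAS; the handler itself is hypothetical):

/* Sketch of the accounting line added throughout this merge. */
static void some_interrupt_handler(unsigned int ext_int_code,
                                   unsigned int param32, unsigned long param64)
{
        /* count this interrupt in the per-CPU statistics first */
        kstat_cpu(smp_processor_id()).irqs[EXTINT_XYZ]++;
        /* ...actual handling follows... */
}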
@@ -33,6 +33,7 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
int cpu;
struct sys_device *sysdev;
s390_adjust_jiffies();
pr_warning("cpu capability changed.\n");
get_online_cpus();
for_each_online_cpu(cpu) {
@@ -14,6 +14,7 @@
#define KMSG_COMPONENT "tape"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/init.h> // for kernel parameters
#include <linux/kmod.h> // for requesting modules
@@ -1114,6 +1115,7 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
struct tape_request *request;
int rc;
kstat_cpu(smp_processor_id()).irqs[IOINT_TAP]++;
device = dev_get_drvdata(&cdev->dev);
if (device == NULL) {
return;
@@ -11,6 +11,7 @@
#define KMSG_COMPONENT "vmur"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel_stat.h>
#include <linux/cdev.h>
#include <linux/slab.h>
@@ -302,6 +303,7 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
{
struct urdev *urd;
kstat_cpu(smp_processor_id()).irqs[IOINT_VMR]++;
TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
irb->scsw.cmd.count);
@@ -66,6 +66,27 @@ __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
}
/*
* Remove references from ccw devices to ccw group device and from
* ccw group device to ccw devices.
*/
static void __ccwgroup_remove_cdev_refs(struct ccwgroup_device *gdev)
{
struct ccw_device *cdev;
int i;
for (i = 0; i < gdev->count; i++) {
cdev = gdev->cdev[i];
if (!cdev)
continue;
spin_lock_irq(cdev->ccwlock);
dev_set_drvdata(&cdev->dev, NULL);
spin_unlock_irq(cdev->ccwlock);
gdev->cdev[i] = NULL;
put_device(&cdev->dev);
}
}
/*
* Provide an 'ungroup' attribute so the user can remove group devices no
* longer needed or accidentially created. Saves memory :)
@@ -78,6 +99,7 @@ static void ccwgroup_ungroup_callback(struct device *dev)
if (device_is_registered(&gdev->dev)) {
__ccwgroup_remove_symlinks(gdev);
device_unregister(dev);
__ccwgroup_remove_cdev_refs(gdev);
}
mutex_unlock(&gdev->reg_mutex);
}
@@ -116,21 +138,7 @@ static DEVICE_ATTR(ungroup, 0200, NULL, ccwgroup_ungroup_store);
static void
ccwgroup_release (struct device *dev)
{
struct ccwgroup_device *gdev;
int i;
gdev = to_ccwgroupdev(dev);
for (i = 0; i < gdev->count; i++) {
if (gdev->cdev[i]) {
spin_lock_irq(gdev->cdev[i]->ccwlock);
if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
spin_unlock_irq(gdev->cdev[i]->ccwlock);
put_device(&gdev->cdev[i]->dev);
}
}
kfree(gdev);
kfree(to_ccwgroupdev(dev));
}
static int
@@ -639,6 +647,7 @@ void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
mutex_lock(&gdev->reg_mutex);
__ccwgroup_remove_symlinks(gdev);
device_unregister(dev);
__ccwgroup_remove_cdev_refs(gdev);
mutex_unlock(&gdev->reg_mutex);
put_device(dev);
}
@@ -660,25 +669,6 @@ int ccwgroup_probe_ccwdev(struct ccw_device *cdev)
return 0;
}
static struct ccwgroup_device *
__ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev)
{
struct ccwgroup_device *gdev;
gdev = dev_get_drvdata(&cdev->dev);
if (gdev) {
if (get_device(&gdev->dev)) {
mutex_lock(&gdev->reg_mutex);
if (device_is_registered(&gdev->dev))
return gdev;
mutex_unlock(&gdev->reg_mutex);
put_device(&gdev->dev);
}
return NULL;
}
return NULL;
}
/**
* ccwgroup_remove_ccwdev() - remove function for slave devices
* @cdev: ccw device to be removed
@@ -694,13 +684,25 @@ void ccwgroup_remove_ccwdev(struct ccw_device *cdev)
/* Ignore offlining errors, device is gone anyway. */
ccw_device_set_offline(cdev);
/* If one of its devices is gone, the whole group is done for. */
gdev = __ccwgroup_get_gdev_by_cdev(cdev);
if (gdev) {
spin_lock_irq(cdev->ccwlock);
gdev = dev_get_drvdata(&cdev->dev);
if (!gdev) {
spin_unlock_irq(cdev->ccwlock);
return;
}
/* Get ccwgroup device reference for local processing. */
get_device(&gdev->dev);
spin_unlock_irq(cdev->ccwlock);
/* Unregister group device. */
mutex_lock(&gdev->reg_mutex);
if (device_is_registered(&gdev->dev)) {
__ccwgroup_remove_symlinks(gdev);
device_unregister(&gdev->dev);
mutex_unlock(&gdev->reg_mutex);
put_device(&gdev->dev);
__ccwgroup_remove_cdev_refs(gdev);
}
mutex_unlock(&gdev->reg_mutex);
/* Release ccwgroup device reference for local processing. */
put_device(&gdev->dev);
}
MODULE_LICENSE("GPL");
@@ -695,6 +695,25 @@ int chsc_determine_base_channel_path_desc(struct chp_id chpid,
         return ret;
 }
 
+int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
+                                          struct channel_path_desc_fmt1 *desc)
+{
+        struct chsc_response_struct *chsc_resp;
+        struct chsc_scpd *scpd_area;
+        int ret;
+
+        spin_lock_irq(&chsc_page_lock);
+        scpd_area = chsc_page;
+        ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area);
+        if (ret)
+                goto out;
+        chsc_resp = (void *)&scpd_area->response;
+        memcpy(desc, &chsc_resp->data, sizeof(*desc));
+out:
+        spin_unlock_irq(&chsc_page_lock);
+        return ret;
+}
+
 static void
 chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
                           struct cmg_chars *chars)
@@ -35,6 +35,22 @@ struct channel_path_desc {
         u8 chpp;
 } __attribute__ ((packed));
 
+struct channel_path_desc_fmt1 {
+        u8 flags;
+        u8 lsn;
+        u8 desc;
+        u8 chpid;
+        u32:24;
+        u8 chpp;
+        u32 unused[3];
+        u16 mdc;
+        u16:13;
+        u8 r:1;
+        u8 s:1;
+        u8 f:1;
+        u32 zeros[2];
+} __attribute__ ((packed));
+
 struct channel_path;
 
 struct css_chsc_char {

@@ -92,6 +108,8 @@ int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
                                      int c, int m, void *page);
 int chsc_determine_base_channel_path_desc(struct chp_id chpid,
                                           struct channel_path_desc *desc);
+int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
+                                          struct channel_path_desc_fmt1 *desc);
 void chsc_chp_online(struct chp_id chpid);
 void chsc_chp_offline(struct chp_id chpid);
 int chsc_get_channel_measurement_chars(struct channel_path *chp);
@@ -618,6 +618,7 @@ EXPORT_SYMBOL_GPL(css_schedule_reprobe);
 static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
 {
         struct subchannel_id mchk_schid;
+        struct subchannel *sch;
 
         if (overflow) {
                 css_schedule_eval_all();

@@ -637,6 +638,13 @@ static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
         if (crw1)
                 mchk_schid.ssid = (crw1->rsid >> 4) & 3;
 
+        if (crw0->erc == CRW_ERC_PMOD) {
+                sch = get_subchannel_by_schid(mchk_schid);
+                if (sch) {
+                        css_update_ssd_info(sch);
+                        put_device(&sch->dev);
+                }
+        }
         /*
          * Since we are always presented with IPI in the CRW, we have to
          * use stsch() to find out if the subchannel in question has come
@@ -686,6 +686,46 @@ int ccw_device_tm_start_timeout(struct ccw_device *cdev, struct tcw *tcw,
 }
 EXPORT_SYMBOL(ccw_device_tm_start_timeout);
 
+/**
+ * ccw_device_get_mdc - accumulate max data count
+ * @cdev: ccw device for which the max data count is accumulated
+ * @mask: mask of paths to use
+ *
+ * Return the number of 64K-bytes blocks all paths at least support
+ * for a transport command. Return values <= 0 indicate failures.
+ */
+int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask)
+{
+        struct subchannel *sch = to_subchannel(cdev->dev.parent);
+        struct channel_path_desc_fmt1 desc;
+        struct chp_id chpid;
+        int mdc = 0, ret, i;
+
+        /* Adjust requested path mask to exclude varied off paths. */
+        if (mask)
+                mask &= sch->lpm;
+        else
+                mask = sch->lpm;
+
+        chp_id_init(&chpid);
+        for (i = 0; i < 8; i++) {
+                if (!(mask & (0x80 >> i)))
+                        continue;
+                chpid.id = sch->schib.pmcw.chpid[i];
+                ret = chsc_determine_fmt1_channel_path_desc(chpid, &desc);
+                if (ret)
+                        return ret;
+                if (!desc.f)
+                        return 0;
+                if (!desc.r)
+                        mdc = 1;
+                mdc = mdc ? min(mdc, (int)desc.mdc) : desc.mdc;
+        }
+
+        return mdc;
+}
+EXPORT_SYMBOL(ccw_device_get_mdc);
+
 /**
  * ccw_device_tm_intrg - perform interrogate function
  * @cdev: ccw device on which to perform the interrogate function
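A minimal usage sketch for the new interface (not from the commit; the all-paths mask 0xff and the helper are assumptions, and it only compiles in kernel context): since the returned MDC counts 64K-byte blocks supported on all usable paths, a driver could derive a per-request byte limit from it.

/* Assumed helper, for illustration: derive a transport-mode byte limit. */
static unsigned int example_tm_byte_limit(struct ccw_device *cdev)
{
        int mdc = ccw_device_get_mdc(cdev, 0xff);       /* all paths */

        if (mdc <= 0)
                return 0;               /* failure or no fmt1 data */
        return (unsigned int)mdc << 16; /* mdc counts 64K-byte blocks */
}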
@@ -93,6 +93,7 @@ EXPORT_SYMBOL(itcw_get_tcw);
 size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws)
 {
         size_t len;
+        int cross_count;
 
         /* Main data. */
         len = sizeof(struct itcw);

@@ -105,12 +106,27 @@ size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws)
                        /* TSB */ sizeof(struct tsb) +
                        /* TIDAL */ intrg_max_tidaws * sizeof(struct tidaw);
         }
 
         /* Maximum required alignment padding. */
         len += /* Initial TCW */ 63 + /* Interrogate TCCB */ 7;
-        /* Maximum padding for structures that may not cross 4k boundary. */
-        if ((max_tidaws > 0) || (intrg_max_tidaws > 0))
-                len += max(max_tidaws, intrg_max_tidaws) *
-                       sizeof(struct tidaw) - 1;
+
+        /* TIDAW lists may not cross a 4k boundary. To cross a
+         * boundary we need to add a TTIC TIDAW. We need to reserve
+         * one additional TIDAW for a TTIC that we may need to add due
+         * to the placement of the data chunk in memory, and a further
+         * TIDAW for each page boundary that the TIDAW list may cross
+         * due to its own size.
+         */
+        if (max_tidaws) {
+                cross_count = 1 + ((max_tidaws * sizeof(struct tidaw) - 1)
+                                   >> PAGE_SHIFT);
+                len += cross_count * sizeof(struct tidaw);
+        }
+        if (intrg_max_tidaws) {
+                cross_count = 1 + ((intrg_max_tidaws * sizeof(struct tidaw) - 1)
+                                   >> PAGE_SHIFT);
+                len += cross_count * sizeof(struct tidaw);
+        }
         return len;
 }
 EXPORT_SYMBOL(itcw_calc_size);

@@ -165,6 +181,7 @@ struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
         void *chunk;
         addr_t start;
         addr_t end;
+        int cross_count;
 
         /* Check for 2G limit. */
         start = (addr_t) buffer;

@@ -177,8 +194,17 @@ struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
         if (IS_ERR(chunk))
                 return chunk;
         itcw = chunk;
-        itcw->max_tidaws = max_tidaws;
-        itcw->intrg_max_tidaws = intrg_max_tidaws;
+        /* allow for TTIC tidaws that may be needed to cross a page boundary */
+        cross_count = 0;
+        if (max_tidaws)
+                cross_count = 1 + ((max_tidaws * sizeof(struct tidaw) - 1)
+                                   >> PAGE_SHIFT);
+        itcw->max_tidaws = max_tidaws + cross_count;
+        cross_count = 0;
+        if (intrg_max_tidaws)
+                cross_count = 1 + ((intrg_max_tidaws * sizeof(struct tidaw) - 1)
+                                   >> PAGE_SHIFT);
+        itcw->intrg_max_tidaws = intrg_max_tidaws + cross_count;
         /* Main TCW. */
         chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0);
         if (IS_ERR(chunk))

@@ -198,7 +224,7 @@ struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
         /* Data TIDAL. */
         if (max_tidaws > 0) {
                 chunk = fit_chunk(&start, end, sizeof(struct tidaw) *
-                                  max_tidaws, 16, 1);
+                                  itcw->max_tidaws, 16, 0);
                 if (IS_ERR(chunk))
                         return chunk;
                 tcw_set_data(itcw->tcw, chunk, 1);

@@ -206,7 +232,7 @@ struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
         /* Interrogate data TIDAL. */
         if (intrg && (intrg_max_tidaws > 0)) {
                 chunk = fit_chunk(&start, end, sizeof(struct tidaw) *
-                                  intrg_max_tidaws, 16, 1);
+                                  itcw->intrg_max_tidaws, 16, 0);
                 if (IS_ERR(chunk))
                         return chunk;
                 tcw_set_data(itcw->intrg_tcw, chunk, 1);

@@ -283,13 +309,29 @@ EXPORT_SYMBOL(itcw_add_dcw);
 * the new tidaw on success or -%ENOSPC if the new tidaw would exceed the
 * available space.
 *
- * Note: the tidaw-list is assumed to be contiguous with no ttics. The
- * last-tidaw flag for the last tidaw in the list will be set by itcw_finalize.
+ * Note: TTIC tidaws are automatically added when needed, so explicitly calling
+ * this interface with the TTIC flag is not supported. The last-tidaw flag
+ * for the last tidaw in the list will be set by itcw_finalize.
 */
 struct tidaw *itcw_add_tidaw(struct itcw *itcw, u8 flags, void *addr, u32 count)
 {
+        struct tidaw *following;
+
         if (itcw->num_tidaws >= itcw->max_tidaws)
                 return ERR_PTR(-ENOSPC);
+        /*
+         * Is the tidaw, which follows the one we are about to fill, on the next
+         * page? Then we have to insert a TTIC tidaw first, that points to the
+         * tidaw on the new page.
+         */
+        following = ((struct tidaw *) tcw_get_data(itcw->tcw))
+                    + itcw->num_tidaws + 1;
+        if (itcw->num_tidaws && !((unsigned long) following & ~PAGE_MASK)) {
+                tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++,
+                              TIDAW_FLAGS_TTIC, following, 0);
+                if (itcw->num_tidaws >= itcw->max_tidaws)
+                        return ERR_PTR(-ENOSPC);
+        }
         return tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++, flags, addr, count);
 }
 EXPORT_SYMBOL(itcw_add_tidaw);
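The TTIC reservation above is plain ceiling arithmetic: a list of N 16-byte TIDAWs can cross at most 1 + ((N * 16 - 1) >> PAGE_SHIFT) page boundaries, and each crossing needs one extra TTIC TIDAW. A standalone worked example (a minimal sketch, not from the commit; 4K pages and the request size of 300 are assumptions):

/* Illustrative only: mirror the patch's TTIC reservation for 4K pages. */
#include <stdio.h>

#define TIDAW_SIZE 16   /* sizeof(struct tidaw) */
#define PAGE_SHIFT 12   /* 4K pages */

int main(void)
{
        int max_tidaws = 300;   /* assumed request size */
        int cross_count = 1 + ((max_tidaws * TIDAW_SIZE - 1) >> PAGE_SHIFT);

        /* 300 tidaws = 4800 bytes -> at most 2 page boundaries crossed */
        printf("reserve %d TTIC tidaws, list size %d\n",
               cross_count, max_tidaws + cross_count);
        return 0;
}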
@@ -91,6 +91,12 @@ enum qdio_irq_states {
 #define AC1_SC_QEBSM_AVAILABLE          0x02    /* available for subchannel */
 #define AC1_SC_QEBSM_ENABLED            0x01    /* enabled for subchannel */
 
+/* SIGA flags */
+#define QDIO_SIGA_WRITE         0x00
+#define QDIO_SIGA_READ          0x01
+#define QDIO_SIGA_SYNC          0x02
+#define QDIO_SIGA_QEBSM_FLAG    0x80
+
 #ifdef CONFIG_64BIT
 static inline int do_sqbs(u64 token, unsigned char state, int queue,
                           int *start, int *count)

@@ -142,10 +148,9 @@ struct siga_flag {
         u8 input:1;
         u8 output:1;
         u8 sync:1;
-        u8 no_sync_ti:1;
-        u8 no_sync_out_ti:1;
-        u8 no_sync_out_pci:1;
-        u8:2;
+        u8 sync_after_ai:1;
+        u8 sync_out_after_pci:1;
+        u8:3;
 } __attribute__ ((packed));
 
 struct chsc_ssqd_area {

@@ -202,6 +207,7 @@ struct qdio_dev_perf_stat {
         unsigned int inbound_queue_full;
         unsigned int outbound_call;
         unsigned int outbound_handler;
+        unsigned int outbound_queue_full;
         unsigned int fast_requeue;
         unsigned int target_full;
         unsigned int eqbs;

@@ -245,10 +251,10 @@ struct qdio_input_q {
 struct qdio_output_q {
         /* PCIs are enabled for the queue */
         int pci_out_enabled;
-        /* IQDIO: output multiple buffers (enhanced SIGA) */
-        int use_enh_siga;
         /* timer to check for more outbound work */
         struct timer_list timer;
+        /* used SBALs before tasklet schedule */
+        int scan_threshold;
 };
 
 /*

@@ -383,12 +389,13 @@ static inline int multicast_outbound(struct qdio_q *q)
         (q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
 #define is_qebsm(q)                     (q->irq_ptr->sch_token != 0)
 
-#define need_siga_sync_thinint(q)       (!q->irq_ptr->siga_flag.no_sync_ti)
-#define need_siga_sync_out_thinint(q)   (!q->irq_ptr->siga_flag.no_sync_out_ti)
 #define need_siga_in(q)                 (q->irq_ptr->siga_flag.input)
 #define need_siga_out(q)                (q->irq_ptr->siga_flag.output)
-#define need_siga_sync(q)               (q->irq_ptr->siga_flag.sync)
-#define siga_syncs_out_pci(q)           (q->irq_ptr->siga_flag.no_sync_out_pci)
+#define need_siga_sync(q)               (unlikely(q->irq_ptr->siga_flag.sync))
+#define need_siga_sync_after_ai(q)      \
+        (unlikely(q->irq_ptr->siga_flag.sync_after_ai))
+#define need_siga_sync_out_after_pci(q) \
+        (unlikely(q->irq_ptr->siga_flag.sync_out_after_pci))
 
 #define for_each_input_queue(irq_ptr, q, i)     \
         for (i = 0, q = irq_ptr->input_qs[0];   \

@@ -423,9 +430,9 @@ struct indicator_t {
 
 extern struct indicator_t *q_indicators;
 
-static inline int shared_ind(struct qdio_irq *irq_ptr)
+static inline int shared_ind(u32 *dsci)
 {
-        return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
+        return dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
 }
 
 /* prototypes for thin interrupt */
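The new QDIO_SIGA_* constants make the SIGA function code explicit: write/read/sync select the base code, and 0x80 is OR-ed in when the subchannel runs QEBSM, in which case the subchannel token replaces the schid word. A condensed sketch of that pattern (not from the commit; it paraphrases the qdio_main.c hunks below, and the helper name is an assumption):

/* Assumed helper: how fc and the effective schid are chosen under QEBSM. */
#define EX_QDIO_SIGA_SYNC       0x02    /* mirrors QDIO_SIGA_SYNC above */
#define EX_QDIO_SIGA_QEBSM_FLAG 0x80    /* mirrors QDIO_SIGA_QEBSM_FLAG */

static unsigned int example_siga_fc(int is_qebsm, unsigned long sch_token,
                                    unsigned long *schid)
{
        unsigned int fc = EX_QDIO_SIGA_SYNC;

        if (is_qebsm) {
                *schid = sch_token;     /* token replaces the schid word */
                fc |= EX_QDIO_SIGA_QEBSM_FLAG;
        }
        return fc;
}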
@@ -151,6 +151,7 @@ static char *qperf_names[] = {
         "Inbound queue full",
         "Outbound calls",
         "Outbound handler",
+        "Outbound queue full",
         "Outbound fast_requeue",
         "Outbound target_full",
         "QEBSM eqbs",
@@ -14,6 +14,7 @@
 #include <linux/timer.h>
 #include <linux/delay.h>
 #include <linux/gfp.h>
+#include <linux/kernel_stat.h>
 #include <asm/atomic.h>
 #include <asm/debug.h>
 #include <asm/qdio.h>

@@ -29,11 +30,12 @@ MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
 MODULE_DESCRIPTION("QDIO base support");
 MODULE_LICENSE("GPL");
 
-static inline int do_siga_sync(struct subchannel_id schid,
-                               unsigned int out_mask, unsigned int in_mask)
+static inline int do_siga_sync(unsigned long schid,
+                               unsigned int out_mask, unsigned int in_mask,
+                               unsigned int fc)
 {
-        register unsigned long __fc asm ("0") = 2;
-        register struct subchannel_id __schid asm ("1") = schid;
+        register unsigned long __fc asm ("0") = fc;
+        register unsigned long __schid asm ("1") = schid;
         register unsigned long out asm ("2") = out_mask;
         register unsigned long in asm ("3") = in_mask;
         int cc;

@@ -47,10 +49,11 @@ static inline int do_siga_sync(struct subchannel_id schid,
         return cc;
 }
 
-static inline int do_siga_input(struct subchannel_id schid, unsigned int mask)
+static inline int do_siga_input(unsigned long schid, unsigned int mask,
+                                unsigned int fc)
 {
-        register unsigned long __fc asm ("0") = 1;
-        register struct subchannel_id __schid asm ("1") = schid;
+        register unsigned long __fc asm ("0") = fc;
+        register unsigned long __schid asm ("1") = schid;
         register unsigned long __mask asm ("2") = mask;
         int cc;
 

@@ -279,16 +282,20 @@ void qdio_init_buf_states(struct qdio_irq *irq_ptr)
 static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
                                  unsigned int input)
 {
+        unsigned long schid = *((u32 *) &q->irq_ptr->schid);
+        unsigned int fc = QDIO_SIGA_SYNC;
         int cc;
 
         if (!need_siga_sync(q))
                 return 0;
 
         DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
         qperf_inc(q, siga_sync);
 
-        cc = do_siga_sync(q->irq_ptr->schid, output, input);
-        if (cc)
+        if (is_qebsm(q)) {
+                schid = q->irq_ptr->sch_token;
+                fc |= QDIO_SIGA_QEBSM_FLAG;
+        }
+
+        cc = do_siga_sync(schid, output, input, fc);
+        if (unlikely(cc))
                 DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
         return cc;
 }

@@ -301,38 +308,22 @@ static inline int qdio_siga_sync_q(struct qdio_q *q)
                 return qdio_siga_sync(q, q->mask, 0);
 }
 
-static inline int qdio_siga_sync_out(struct qdio_q *q)
-{
-        return qdio_siga_sync(q, ~0U, 0);
-}
-
-static inline int qdio_siga_sync_all(struct qdio_q *q)
-{
-        return qdio_siga_sync(q, ~0U, ~0U);
-}
-
 static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
 {
-        unsigned long schid;
-        unsigned int fc = 0;
+        unsigned long schid = *((u32 *) &q->irq_ptr->schid);
+        unsigned int fc = QDIO_SIGA_WRITE;
         u64 start_time = 0;
         int cc;
 
-        if (q->u.out.use_enh_siga)
-                fc = 3;
-
         if (is_qebsm(q)) {
                 schid = q->irq_ptr->sch_token;
-                fc |= 0x80;
+                fc |= QDIO_SIGA_QEBSM_FLAG;
         }
-        else
-                schid = *((u32 *)&q->irq_ptr->schid);
-
 again:
         cc = do_siga_output(schid, q->mask, busy_bit, fc);
 
         /* hipersocket busy condition */
-        if (*busy_bit) {
+        if (unlikely(*busy_bit)) {
                 WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);
 
                 if (!start_time) {

@@ -347,32 +338,41 @@ static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
 
 static inline int qdio_siga_input(struct qdio_q *q)
 {
+        unsigned long schid = *((u32 *) &q->irq_ptr->schid);
+        unsigned int fc = QDIO_SIGA_READ;
         int cc;
 
         DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
         qperf_inc(q, siga_read);
 
-        cc = do_siga_input(q->irq_ptr->schid, q->mask);
-        if (cc)
+        if (is_qebsm(q)) {
+                schid = q->irq_ptr->sch_token;
+                fc |= QDIO_SIGA_QEBSM_FLAG;
+        }
+
+        cc = do_siga_input(schid, q->mask, fc);
+        if (unlikely(cc))
                 DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
         return cc;
 }
 
-static inline void qdio_sync_after_thinint(struct qdio_q *q)
+#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
+#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)
+
+static inline void qdio_sync_queues(struct qdio_q *q)
 {
-        if (pci_out_supported(q)) {
-                if (need_siga_sync_thinint(q))
-                        qdio_siga_sync_all(q);
-                else if (need_siga_sync_out_thinint(q))
-                        qdio_siga_sync_out(q);
-        } else
+        /* PCI capable outbound queues will also be scanned so sync them too */
+        if (pci_out_supported(q))
+                qdio_siga_sync_all(q);
+        else
                 qdio_siga_sync_q(q);
 }
 
 int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
                 unsigned char *state)
 {
-        qdio_siga_sync_q(q);
+        if (need_siga_sync(q))
+                qdio_siga_sync_q(q);
         return get_buf_states(q, bufnr, state, 1, 0);
 }

@@ -549,7 +549,8 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
         if (!atomic_read(&q->nr_buf_used))
                 return 1;
 
-        qdio_siga_sync_q(q);
+        if (need_siga_sync(q))
+                qdio_siga_sync_q(q);
         get_buf_state(q, q->first_to_check, &state, 0);
 
         if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)

@@ -644,9 +645,12 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
         int count, stop;
         unsigned char state;
 
-        if (((queue_type(q) != QDIO_IQDIO_QFMT) && !pci_out_supported(q)) ||
-            (queue_type(q) == QDIO_IQDIO_QFMT && multicast_outbound(q)))
-                qdio_siga_sync_q(q);
+        if (need_siga_sync(q))
+                if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
+                    !pci_out_supported(q)) ||
+                    (queue_type(q) == QDIO_IQDIO_QFMT &&
+                    multicast_outbound(q)))
+                        qdio_siga_sync_q(q);
 
         /*
          * Don't check 128 buffers, as otherwise qdio_inbound_q_moved

@@ -818,7 +822,8 @@ static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
 static void __tiqdio_inbound_processing(struct qdio_q *q)
 {
         qperf_inc(q, tasklet_inbound);
-        qdio_sync_after_thinint(q);
+        if (need_siga_sync(q) && need_siga_sync_after_ai(q))
+                qdio_sync_queues(q);
 
         /*
          * The interrupt could be caused by a PCI request. Check the

@@ -898,16 +903,14 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
                         tasklet_schedule(&q->tasklet);
         }
 
-        if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
+        if (!pci_out_supported(q))
                 return;
 
         for_each_output_queue(irq_ptr, q, i) {
                 if (qdio_outbound_q_done(q))
                         continue;
-
-                if (!siga_syncs_out_pci(q))
+                if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
                         qdio_siga_sync_q(q);
-
                 tasklet_schedule(&q->tasklet);
         }
 }

@@ -970,6 +973,7 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
                 return;
         }
 
+        kstat_cpu(smp_processor_id()).irqs[IOINT_QDI]++;
         if (irq_ptr->perf_stat_enabled)
                 irq_ptr->perf_stat.qdio_int++;
 

@@ -1273,7 +1277,6 @@ int qdio_establish(struct qdio_initialize *init_data)
         }
 
         qdio_setup_ssqd_info(irq_ptr);
-        DBF_EVENT("qDmmwc:%2x", irq_ptr->ssqd_desc.mmwc);
         DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);
 
         /* qebsm is now setup if available, initialize buffer states */

@@ -1445,52 +1448,38 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
         used = atomic_add_return(count, &q->nr_buf_used);
         BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);
 
+        if (used == QDIO_MAX_BUFFERS_PER_Q)
+                qperf_inc(q, outbound_queue_full);
+
         if (callflags & QDIO_FLAG_PCI_OUT) {
                 q->u.out.pci_out_enabled = 1;
                 qperf_inc(q, pci_request_int);
-        }
-        else
+        } else
                 q->u.out.pci_out_enabled = 0;
 
         if (queue_type(q) == QDIO_IQDIO_QFMT) {
-                if (multicast_outbound(q))
+                /* One SIGA-W per buffer required for unicast HiperSockets. */
+                WARN_ON_ONCE(count > 1 && !multicast_outbound(q));
+
+                rc = qdio_kick_outbound_q(q);
+        } else if (need_siga_sync(q)) {
+                rc = qdio_siga_sync_q(q);
+        } else {
+                /* try to fast requeue buffers */
+                get_buf_state(q, prev_buf(bufnr), &state, 0);
+                if (state != SLSB_CU_OUTPUT_PRIMED)
                         rc = qdio_kick_outbound_q(q);
                 else
-                        if ((q->irq_ptr->ssqd_desc.mmwc > 1) &&
-                            (count > 1) &&
-                            (count <= q->irq_ptr->ssqd_desc.mmwc)) {
-                                /* exploit enhanced SIGA */
-                                q->u.out.use_enh_siga = 1;
-                                rc = qdio_kick_outbound_q(q);
-                        } else {
-                                /*
-                                 * One siga-w per buffer required for unicast
-                                 * HiperSockets.
-                                 */
-                                q->u.out.use_enh_siga = 0;
-                                while (count--) {
-                                        rc = qdio_kick_outbound_q(q);
-                                        if (rc)
-                                                goto out;
-                                }
-                        }
-                goto out;
+                        qperf_inc(q, fast_requeue);
         }
 
-        if (need_siga_sync(q)) {
-                qdio_siga_sync_q(q);
-                goto out;
-        }
-
-        /* try to fast requeue buffers */
-        get_buf_state(q, prev_buf(bufnr), &state, 0);
-        if (state != SLSB_CU_OUTPUT_PRIMED)
-                rc = qdio_kick_outbound_q(q);
-        else
-                qperf_inc(q, fast_requeue);
-
-out:
-        tasklet_schedule(&q->tasklet);
+        /* in case of SIGA errors we must process the error immediately */
+        if (used >= q->u.out.scan_threshold || rc)
+                tasklet_schedule(&q->tasklet);
+        else
+                /* free the SBALs in case of no further traffic */
+                if (!timer_pending(&q->u.out.timer))
+                        mod_timer(&q->u.out.timer, jiffies + HZ);
+
         return rc;
 }

@@ -1550,7 +1539,7 @@ int qdio_start_irq(struct ccw_device *cdev, int nr)
 
         WARN_ON(queue_irqs_enabled(q));
 
-        if (!shared_ind(q->irq_ptr))
+        if (!shared_ind(q->irq_ptr->dsci))
                 xchg(q->irq_ptr->dsci, 0);
 
         qdio_stop_polling(q);

@@ -1560,7 +1549,7 @@ int qdio_start_irq(struct ccw_device *cdev, int nr)
          * We need to check again to not lose initiative after
          * resetting the ACK state.
          */
-        if (!shared_ind(q->irq_ptr) && *q->irq_ptr->dsci)
+        if (!shared_ind(q->irq_ptr->dsci) && *q->irq_ptr->dsci)
                 goto rescan;
         if (!qdio_inbound_q_done(q))
                 goto rescan;

@@ -1600,12 +1589,14 @@ int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
         q = irq_ptr->input_qs[nr];
         WARN_ON(queue_irqs_enabled(q));
 
-        qdio_sync_after_thinint(q);
-
         /*
-         * The interrupt could be caused by a PCI request. Check the
-         * PCI capable outbound queues.
+         * Cannot rely on automatic sync after interrupt since queues may
+         * also be examined without interrupt.
          */
+        if (need_siga_sync(q))
+                qdio_sync_queues(q);
+
+        /* check the PCI capable outbound queues. */
         qdio_check_outbound_after_thinint(q);
 
         if (!qdio_inbound_q_moved(q))
@@ -178,6 +178,7 @@ static void setup_queues(struct qdio_irq *irq_ptr,
                 setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);
 
                 q->is_input_q = 0;
+                q->u.out.scan_threshold = qdio_init->scan_threshold;
                 setup_storage_lists(q, irq_ptr, output_sbal_array, i);
                 output_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
 

@@ -196,14 +197,10 @@ static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac)
                 irq_ptr->siga_flag.output = 1;
         if (qdioac & AC1_SIGA_SYNC_NEEDED)
                 irq_ptr->siga_flag.sync = 1;
-        if (qdioac & AC1_AUTOMATIC_SYNC_ON_THININT)
-                irq_ptr->siga_flag.no_sync_ti = 1;
-        if (qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI)
-                irq_ptr->siga_flag.no_sync_out_pci = 1;
-
-        if (irq_ptr->siga_flag.no_sync_out_pci &&
-            irq_ptr->siga_flag.no_sync_ti)
-                irq_ptr->siga_flag.no_sync_out_ti = 1;
+        if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_THININT))
+                irq_ptr->siga_flag.sync_after_ai = 1;
+        if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI))
+                irq_ptr->siga_flag.sync_out_after_pci = 1;
 }
 
 static void check_and_setup_qebsm(struct qdio_irq *irq_ptr,

@@ -451,7 +448,7 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
         char s[80];
 
         snprintf(s, 80, "qdio: %s %s on SC %x using "
-                 "AI:%d QEBSM:%d PCI:%d TDD:%d SIGA:%s%s%s%s%s%s\n",
+                 "AI:%d QEBSM:%d PCI:%d TDD:%d SIGA:%s%s%s%s%s\n",
                  dev_name(&cdev->dev),
                  (irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" :
                  ((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"),

@@ -463,9 +460,8 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
                  (irq_ptr->siga_flag.input) ? "R" : " ",
                  (irq_ptr->siga_flag.output) ? "W" : " ",
                  (irq_ptr->siga_flag.sync) ? "S" : " ",
-                 (!irq_ptr->siga_flag.no_sync_ti) ? "A" : " ",
-                 (!irq_ptr->siga_flag.no_sync_out_ti) ? "O" : " ",
-                 (!irq_ptr->siga_flag.no_sync_out_pci) ? "P" : " ");
+                 (irq_ptr->siga_flag.sync_after_ai) ? "A" : " ",
+                 (irq_ptr->siga_flag.sync_out_after_pci) ? "P" : " ");
         printk(KERN_INFO "%s", s);
 }
 
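Note the inversion in process_ac_flags: the old no_sync_* bits recorded when a sync could be omitted, while the new sync_after_ai/sync_out_after_pci bits record when a sync is required, i.e. when the machine does NOT provide automatic sync. A self-contained sketch of that mapping (not from the commit; the bit values are assumptions for illustration):

/* Assumed encodings, illustration only. */
#define EX_AC1_AUTOMATIC_SYNC_ON_THININT  0x08
#define EX_AC1_AUTOMATIC_SYNC_ON_OUT_PCI  0x04

struct example_siga_flag {
        unsigned char sync_after_ai:1;
        unsigned char sync_out_after_pci:1;
};

static void example_process_ac_flags(unsigned char qdioac,
                                     struct example_siga_flag *f)
{
        /* sync needed exactly when automatic sync is absent */
        f->sync_after_ai = !(qdioac & EX_AC1_AUTOMATIC_SYNC_ON_THININT);
        f->sync_out_after_pci = !(qdioac & EX_AC1_AUTOMATIC_SYNC_ON_OUT_PCI);
}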
@@ -8,6 +8,7 @@
  */
 #include <linux/io.h>
 #include <linux/slab.h>
+#include <linux/kernel_stat.h>
 #include <asm/atomic.h>
 #include <asm/debug.h>
 #include <asm/qdio.h>

@@ -35,22 +36,8 @@ static u8 *tiqdio_alsi;
 
 struct indicator_t *q_indicators;
 
-static int css_qdio_omit_svs;
-
 static u64 last_ai_time;
 
-static inline unsigned long do_clear_global_summary(void)
-{
-        register unsigned long __fn asm("1") = 3;
-        register unsigned long __tmp asm("2");
-        register unsigned long __time asm("3");
-
-        asm volatile(
-                "       .insn   rre,0xb2650000,2,0"
-                : "+d" (__fn), "=d" (__tmp), "=d" (__time));
-        return __time;
-}
-
 /* returns addr for the device state change indicator */
 static u32 *get_indicator(void)
 {

@@ -83,10 +70,6 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
         struct qdio_q *q;
         int i;
 
-        /* No TDD facility? If we must use SIGA-s we can also omit SVS. */
-        if (!css_qdio_omit_svs && irq_ptr->siga_flag.sync)
-                css_qdio_omit_svs = 1;
-
         mutex_lock(&tiq_list_lock);
         for_each_input_queue(irq_ptr, q, i)
                 list_add_rcu(&q->entry, &tiq_list);

@@ -112,9 +95,9 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
         }
 }
 
-static inline int shared_ind_used(void)
+static inline u32 shared_ind_set(void)
 {
-        return atomic_read(&q_indicators[TIQDIO_SHARED_IND].count);
+        return q_indicators[TIQDIO_SHARED_IND].ind;
 }
 
 /**

@@ -124,20 +107,11 @@ static inline int shared_ind_used(void)
  */
 static void tiqdio_thinint_handler(void *alsi, void *data)
 {
+        u32 si_used = shared_ind_set();
         struct qdio_q *q;
 
         last_ai_time = S390_lowcore.int_clock;
-
-        /*
-         * SVS only when needed: issue SVS to benefit from iqdio interrupt
-         * avoidance (SVS clears adapter interrupt suppression overwrite).
-         */
-        if (!css_qdio_omit_svs)
-                do_clear_global_summary();
-
-        /* reset local summary indicator */
-        if (shared_ind_used())
-                xchg(tiqdio_alsi, 0);
+        kstat_cpu(smp_processor_id()).irqs[IOINT_QAI]++;
 
         /* protect tiq_list entries, only changed in activate or shutdown */
         rcu_read_lock();

@@ -146,7 +120,10 @@ static void tiqdio_thinint_handler(void *alsi, void *data)
         list_for_each_entry_rcu(q, &tiq_list, entry) {
 
                 /* only process queues from changed sets */
-                if (!*q->irq_ptr->dsci)
+                if (unlikely(shared_ind(q->irq_ptr->dsci))) {
+                        if (!si_used)
+                                continue;
+                } else if (!*q->irq_ptr->dsci)
                         continue;
 
                 if (q->u.in.queue_start_poll) {

@@ -162,7 +139,7 @@ static void tiqdio_thinint_handler(void *alsi, void *data)
                                          q->irq_ptr->int_parm);
                 } else {
                         /* only clear it if the indicator is non-shared */
-                        if (!shared_ind(q->irq_ptr))
+                        if (!shared_ind(q->irq_ptr->dsci))
                                 xchg(q->irq_ptr->dsci, 0);
                         /*
                          * Call inbound processing but not directly

@@ -178,13 +155,8 @@ static void tiqdio_thinint_handler(void *alsi, void *data)
          * If the shared indicator was used clear it now after all queues
          * were processed.
          */
-        if (shared_ind_used()) {
+        if (si_used && shared_ind_set())
                 xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
-
-                /* prevent racing */
-                if (*tiqdio_alsi)
-                        xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1 << 7);
-        }
 }
 
 static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)

@@ -269,12 +241,6 @@ int qdio_establish_thinint(struct qdio_irq *irq_ptr)
 {
         if (!is_thinint_irq(irq_ptr))
                 return 0;
-
-        /* Check for aif time delay disablement. If installed,
-         * omit SVS even under LPAR
-         */
-        if (css_general_characteristics.aif_tdd)
-                css_qdio_omit_svs = 1;
         return set_subchannel_ind(irq_ptr, 0);
 }
 
@@ -27,6 +27,7 @@
 #define KMSG_COMPONENT "ap"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
+#include <linux/kernel_stat.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/delay.h>

@@ -154,7 +155,7 @@ static inline int ap_instructions_available(void)
  */
 static int ap_interrupts_available(void)
 {
-        return test_facility(1) && test_facility(2);
+        return test_facility(2) && test_facility(65);
 }
 
 /**

@@ -221,6 +222,69 @@ ap_queue_interruption_control(ap_qid_t qid, void *ind)
 }
 #endif
 
+static inline struct ap_queue_status __ap_4096_commands_available(ap_qid_t qid,
+                                                                  int *support)
+{
+        register unsigned long reg0 asm ("0") = 0UL | qid | (1UL << 23);
+        register struct ap_queue_status reg1 asm ("1");
+        register unsigned long reg2 asm ("2") = 0UL;
+
+        asm volatile(
+                ".long 0xb2af0000\n"
+                "0: la %1,0\n"
+                "1:\n"
+                EX_TABLE(0b, 1b)
+                : "+d" (reg0), "=d" (reg1), "=d" (reg2)
+                :
+                : "cc");
+
+        if (reg2 & 0x6000000000000000ULL)
+                *support = 1;
+        else
+                *support = 0;
+
+        return reg1;
+}
+
+/**
+ * ap_4096_commands_available(): Check for availability of 4096 bit RSA
+ * support.
+ * @qid: The AP queue number
+ *
+ * Returns 1 if 4096 bit RSA keys are supported for the AP, returns 0 if not.
+ */
+int ap_4096_commands_available(ap_qid_t qid)
+{
+        struct ap_queue_status status;
+        int i, support = 0;
+        status = __ap_4096_commands_available(qid, &support);
+
+        for (i = 0; i < AP_MAX_RESET; i++) {
+                switch (status.response_code) {
+                case AP_RESPONSE_NORMAL:
+                        return support;
+                case AP_RESPONSE_RESET_IN_PROGRESS:
+                case AP_RESPONSE_BUSY:
+                        break;
+                case AP_RESPONSE_Q_NOT_AVAIL:
+                case AP_RESPONSE_DECONFIGURED:
+                case AP_RESPONSE_CHECKSTOPPED:
+                case AP_RESPONSE_INVALID_ADDRESS:
+                        return 0;
+                case AP_RESPONSE_OTHERWISE_CHANGED:
+                        break;
+                default:
+                        break;
+                }
+                if (i < AP_MAX_RESET - 1) {
+                        udelay(5);
+                        status = __ap_4096_commands_available(qid, &support);
+                }
+        }
+        return support;
+}
+EXPORT_SYMBOL(ap_4096_commands_available);
+
 /**
  * ap_queue_enable_interruption(): Enable interruption on an AP.
  * @qid: The AP queue number

@@ -1042,6 +1106,7 @@ static int ap_probe_device_type(struct ap_device *ap_dev)
 
 static void ap_interrupt_handler(void *unused1, void *unused2)
 {
+        kstat_cpu(smp_processor_id()).irqs[IOINT_APB]++;
         tasklet_schedule(&ap_tasklet);
 }
 
@@ -196,4 +196,6 @@ void ap_flush_queue(struct ap_device *ap_dev);
 int ap_module_init(void);
 void ap_module_exit(void);
 
+int ap_4096_commands_available(ap_qid_t qid);
+
 #endif /* _AP_BUS_H_ */
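An illustrative use of the new export (a minimal sketch, not from the commit, though it condenses the CEX3A probe hunk further down; kernel context and the helper name are assumed): drivers gate their modulus limits on whether the AP queue reports 4096-bit RSA support.

/* Sketch: raise the modulus limit only when the AP reports 4096-bit RSA. */
static void example_set_mod_limit(struct zcrypt_device *zdev, ap_qid_t qid)
{
        zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;        /* 256 B, 2048 bits */
        zdev->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
        if (ap_4096_commands_available(qid)) {
                zdev->max_mod_size = CEX3A_MAX_MOD_SIZE; /* 512 B, 4096 bits */
                zdev->max_exp_bit_length = CEX3A_MAX_MOD_SIZE;
        }
}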
@@ -396,8 +396,15 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
                 if (copied == 0) {
                         unsigned int len;
                         spin_unlock_bh(&zcrypt_device_lock);
-                        /* len is max 256 / 2 - 120 = 8 */
-                        len = crt->inputdatalength / 2 - 120;
+                        /* len is max 256 / 2 - 120 = 8
+                         * For bigger device just assume len of leading
+                         * 0s is 8 as stated in the requirements for
+                         * ica_rsa_modexpo_crt struct in zcrypt.h.
+                         */
+                        if (crt->inputdatalength <= 256)
+                                len = crt->inputdatalength / 2 - 120;
+                        else
+                                len = 8;
+                        if (len > sizeof(z1))
+                                return -EFAULT;
                         z1 = z2 = z3 = 0;

@@ -405,6 +412,7 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
                             copy_from_user(&z2, crt->bp_key, len) ||
                             copy_from_user(&z3, crt->u_mult_inv, len))
                                 return -EFAULT;
+                        z1 = z2 = z3 = 0;
                         copied = 1;
                         /*
                          * We have to restart device lookup -
@@ -109,6 +109,7 @@ struct zcrypt_device {
         int request_count;              /* # current requests. */
 
         struct ap_message reply;        /* Per-device reply structure. */
+        int max_exp_bit_length;
 };
 
 struct zcrypt_device *zcrypt_device_alloc(size_t);
@@ -41,7 +41,7 @@
 #define CEX2A_MIN_MOD_SIZE        1     /*    8 bits    */
 #define CEX2A_MAX_MOD_SIZE      256     /* 2048 bits    */
 #define CEX3A_MIN_MOD_SIZE      CEX2A_MIN_MOD_SIZE
-#define CEX3A_MAX_MOD_SIZE      CEX2A_MAX_MOD_SIZE
+#define CEX3A_MAX_MOD_SIZE      512     /* 4096 bits    */
 
 #define CEX2A_SPEED_RATING      970
 #define CEX3A_SPEED_RATING      900  /* Fixme: Needs finetuning */

@@ -49,8 +49,10 @@
 #define CEX2A_MAX_MESSAGE_SIZE  0x390   /* sizeof(struct type50_crb2_msg)    */
 #define CEX2A_MAX_RESPONSE_SIZE 0x110   /* max outputdatalength + type80_hdr */
 
-#define CEX3A_MAX_MESSAGE_SIZE  CEX2A_MAX_MESSAGE_SIZE
-#define CEX3A_MAX_RESPONSE_SIZE CEX2A_MAX_RESPONSE_SIZE
+#define CEX3A_MAX_RESPONSE_SIZE 0x210   /* 512 bit modulus
+                                         * (max outputdatalength) +
+                                         * type80_hdr*/
+#define CEX3A_MAX_MESSAGE_SIZE  sizeof(struct type50_crb3_msg)
 
 #define CEX2A_CLEANUP_TIME      (15*HZ)
 #define CEX3A_CLEANUP_TIME      CEX2A_CLEANUP_TIME

@@ -110,7 +112,7 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_device *zdev,
                 mod = meb1->modulus + sizeof(meb1->modulus) - mod_len;
                 exp = meb1->exponent + sizeof(meb1->exponent) - mod_len;
                 inp = meb1->message + sizeof(meb1->message) - mod_len;
-        } else {
+        } else if (mod_len <= 256) {
                 struct type50_meb2_msg *meb2 = ap_msg->message;
                 memset(meb2, 0, sizeof(*meb2));
                 ap_msg->length = sizeof(*meb2);

@@ -120,6 +122,17 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_device *zdev,
                 mod = meb2->modulus + sizeof(meb2->modulus) - mod_len;
                 exp = meb2->exponent + sizeof(meb2->exponent) - mod_len;
                 inp = meb2->message + sizeof(meb2->message) - mod_len;
+        } else {
+                /* mod_len > 256 = 4096 bit RSA Key */
+                struct type50_meb3_msg *meb3 = ap_msg->message;
+                memset(meb3, 0, sizeof(*meb3));
+                ap_msg->length = sizeof(*meb3);
+                meb3->header.msg_type_code = TYPE50_TYPE_CODE;
+                meb3->header.msg_len = sizeof(*meb3);
+                meb3->keyblock_type = TYPE50_MEB3_FMT;
+                mod = meb3->modulus + sizeof(meb3->modulus) - mod_len;
+                exp = meb3->exponent + sizeof(meb3->exponent) - mod_len;
+                inp = meb3->message + sizeof(meb3->message) - mod_len;
         }
 
         if (copy_from_user(mod, mex->n_modulus, mod_len) ||

@@ -142,7 +155,7 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
                                        struct ap_message *ap_msg,
                                        struct ica_rsa_modexpo_crt *crt)
 {
-        int mod_len, short_len, long_len, long_offset;
+        int mod_len, short_len, long_len, long_offset, limit;
         unsigned char *p, *q, *dp, *dq, *u, *inp;
 
         mod_len = crt->inputdatalength;

@@ -152,14 +165,20 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
         /*
          * CEX2A cannot handle p, dp, or U > 128 bytes.
          * If we have one of these, we need to do extra checking.
+         * For CEX3A the limit is 256 bytes.
          */
-        if (long_len > 128) {
+        if (zdev->max_mod_size == CEX3A_MAX_MOD_SIZE)
+                limit = 256;
+        else
+                limit = 128;
+
+        if (long_len > limit) {
                 /*
                  * zcrypt_rsa_crt already checked for the leading
                  * zeroes of np_prime, bp_key and u_mult_inc.
                  */
-                long_offset = long_len - 128;
-                long_len = 128;
+                long_offset = long_len - limit;
+                long_len = limit;
         } else
                 long_offset = 0;
 

@@ -180,7 +199,7 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
                 dq = crb1->dq + sizeof(crb1->dq) - short_len;
                 u = crb1->u + sizeof(crb1->u) - long_len;
                 inp = crb1->message + sizeof(crb1->message) - mod_len;
-        } else {
+        } else if (long_len <= 128) {
                 struct type50_crb2_msg *crb2 = ap_msg->message;
                 memset(crb2, 0, sizeof(*crb2));
                 ap_msg->length = sizeof(*crb2);

@@ -193,6 +212,20 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
                 dq = crb2->dq + sizeof(crb2->dq) - short_len;
                 u = crb2->u + sizeof(crb2->u) - long_len;
                 inp = crb2->message + sizeof(crb2->message) - mod_len;
+        } else {
+                /* long_len >= 256 */
+                struct type50_crb3_msg *crb3 = ap_msg->message;
+                memset(crb3, 0, sizeof(*crb3));
+                ap_msg->length = sizeof(*crb3);
+                crb3->header.msg_type_code = TYPE50_TYPE_CODE;
+                crb3->header.msg_len = sizeof(*crb3);
+                crb3->keyblock_type = TYPE50_CRB3_FMT;
+                p = crb3->p + sizeof(crb3->p) - long_len;
+                q = crb3->q + sizeof(crb3->q) - short_len;
+                dp = crb3->dp + sizeof(crb3->dp) - long_len;
+                dq = crb3->dq + sizeof(crb3->dq) - short_len;
+                u = crb3->u + sizeof(crb3->u) - long_len;
+                inp = crb3->message + sizeof(crb3->message) - mod_len;
         }
 
         if (copy_from_user(p, crt->np_prime + long_offset, long_len) ||

@@ -203,7 +236,6 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
             copy_from_user(inp, crt->inputdata, mod_len))
                 return -EFAULT;
 
-
         return 0;
 }
 

@@ -230,7 +262,10 @@ static int convert_type80(struct zcrypt_device *zdev,
                 zdev->online = 0;
                 return -EAGAIN; /* repeat the request on a different device. */
         }
-        BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE);
+        if (zdev->user_space_type == ZCRYPT_CEX2A)
+                BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE);
+        else
+                BUG_ON(t80h->len > CEX3A_MAX_RESPONSE_SIZE);
         data = reply->message + t80h->len - outputdatalength;
         if (copy_to_user(outputdata, data, outputdatalength))
                 return -EFAULT;

@@ -282,7 +317,10 @@ static void zcrypt_cex2a_receive(struct ap_device *ap_dev,
         }
         t80h = reply->message;
         if (t80h->type == TYPE80_RSP_CODE) {
-                length = min(CEX2A_MAX_RESPONSE_SIZE, (int) t80h->len);
+                if (ap_dev->device_type == AP_DEVICE_TYPE_CEX2A)
+                        length = min(CEX2A_MAX_RESPONSE_SIZE, (int) t80h->len);
+                else
+                        length = min(CEX3A_MAX_RESPONSE_SIZE, (int) t80h->len);
                 memcpy(msg->message, reply->message, length);
         } else
                 memcpy(msg->message, reply->message, sizeof error_reply);

@@ -307,7 +345,10 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
         int rc;
 
         ap_init_message(&ap_msg);
-        ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
+        if (zdev->user_space_type == ZCRYPT_CEX2A)
+                ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
+        else
+                ap_msg.message = kmalloc(CEX3A_MAX_MESSAGE_SIZE, GFP_KERNEL);
         if (!ap_msg.message)
                 return -ENOMEM;
         ap_msg.psmid = (((unsigned long long) current->pid) << 32) +

@@ -345,7 +386,10 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev,
         int rc;
 
         ap_init_message(&ap_msg);
-        ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
+        if (zdev->user_space_type == ZCRYPT_CEX2A)
+                ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
+        else
+                ap_msg.message = kmalloc(CEX3A_MAX_MESSAGE_SIZE, GFP_KERNEL);
         if (!ap_msg.message)
                 return -ENOMEM;
         ap_msg.psmid = (((unsigned long long) current->pid) << 32) +

@@ -397,6 +441,7 @@ static int zcrypt_cex2a_probe(struct ap_device *ap_dev)
                 zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
                 zdev->short_crt = 1;
                 zdev->speed_rating = CEX2A_SPEED_RATING;
+                zdev->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
                 break;
         case AP_DEVICE_TYPE_CEX3A:
                 zdev = zcrypt_device_alloc(CEX3A_MAX_RESPONSE_SIZE);

@@ -404,8 +449,13 @@ static int zcrypt_cex2a_probe(struct ap_device *ap_dev)
                         return -ENOMEM;
                 zdev->user_space_type = ZCRYPT_CEX3A;
                 zdev->type_string = "CEX3A";
-                zdev->min_mod_size = CEX3A_MIN_MOD_SIZE;
-                zdev->max_mod_size = CEX3A_MAX_MOD_SIZE;
+                zdev->min_mod_size = CEX2A_MIN_MOD_SIZE;
+                zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
+                zdev->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
+                if (ap_4096_commands_available(ap_dev->qid)) {
+                        zdev->max_mod_size = CEX3A_MAX_MOD_SIZE;
+                        zdev->max_exp_bit_length = CEX3A_MAX_MOD_SIZE;
+                }
                 zdev->short_crt = 1;
                 zdev->speed_rating = CEX3A_SPEED_RATING;
                 break;

@@ -51,8 +51,10 @@ struct type50_hdr {
 
 #define TYPE50_MEB1_FMT         0x0001
 #define TYPE50_MEB2_FMT         0x0002
+#define TYPE50_MEB3_FMT         0x0003
 #define TYPE50_CRB1_FMT         0x0011
 #define TYPE50_CRB2_FMT         0x0012
+#define TYPE50_CRB3_FMT         0x0013
 
 /* Mod-Exp, with a small modulus */
 struct type50_meb1_msg {

@@ -74,6 +76,16 @@ struct type50_meb2_msg {
         unsigned char   message[256];
 } __attribute__((packed));
 
+/* Mod-Exp, with a larger modulus */
+struct type50_meb3_msg {
+        struct type50_hdr header;
+        unsigned short  keyblock_type;  /* 0x0003 */
+        unsigned char   reserved[6];
+        unsigned char   exponent[512];
+        unsigned char   modulus[512];
+        unsigned char   message[512];
+} __attribute__((packed));
+
 /* CRT, with a small modulus */
 struct type50_crb1_msg {
         struct type50_hdr header;

@@ -100,6 +112,19 @@ struct type50_crb2_msg {
         unsigned char   message[256];
 } __attribute__((packed));
 
+/* CRT, with a larger modulus */
+struct type50_crb3_msg {
+        struct type50_hdr header;
+        unsigned short  keyblock_type;  /* 0x0013 */
+        unsigned char   reserved[6];
+        unsigned char   p[256];
+        unsigned char   q[256];
+        unsigned char   dp[256];
+        unsigned char   dq[256];
+        unsigned char   u[256];
+        unsigned char   message[512];
+} __attribute__((packed));
+
 /**
  * The type 80 response family is associated with a CEX2A card.
  *
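The three TYPE50 key-block formats are picked purely by operand length: meb1/crb1 up to 128 bytes (1024-bit), meb2/crb2 up to 256 bytes (2048-bit), and the new meb3/crb3 beyond that for 4096-bit keys. A condensed dispatch sketch of the mod-exp case (not from the commit; the helper is an assumption, the constants mirror the header hunk above):

/* Key-block type values from the header hunk above. */
#define EX_TYPE50_MEB1_FMT 0x0001
#define EX_TYPE50_MEB2_FMT 0x0002
#define EX_TYPE50_MEB3_FMT 0x0003

static unsigned short example_meb_fmt(int mod_len)
{
        if (mod_len <= 128)             /* up to 1024-bit modulus */
                return EX_TYPE50_MEB1_FMT;
        else if (mod_len <= 256)        /* up to 2048-bit modulus */
                return EX_TYPE50_MEB2_FMT;
        return EX_TYPE50_MEB3_FMT;      /* up to 4096-bit modulus */
}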
@@ -373,6 +373,7 @@ static int zcrypt_pcica_probe(struct ap_device *ap_dev)
         zdev->min_mod_size = PCICA_MIN_MOD_SIZE;
         zdev->max_mod_size = PCICA_MAX_MOD_SIZE;
         zdev->speed_rating = PCICA_SPEED_RATING;
+        zdev->max_exp_bit_length = PCICA_MAX_MOD_SIZE;
         ap_dev->reply = &zdev->reply;
         ap_dev->private = zdev;
         rc = zcrypt_device_register(zdev);

@@ -579,6 +579,7 @@ static int zcrypt_pcicc_probe(struct ap_device *ap_dev)
         zdev->min_mod_size = PCICC_MIN_MOD_SIZE;
         zdev->max_mod_size = PCICC_MAX_MOD_SIZE;
         zdev->speed_rating = PCICC_SPEED_RATING;
+        zdev->max_exp_bit_length = PCICC_MAX_MOD_SIZE;
         ap_dev->reply = &zdev->reply;
         ap_dev->private = zdev;
         rc = zcrypt_device_register(zdev);
@@ -45,12 +45,12 @@
 #define PCIXCC_MIN_MOD_SIZE_OLD  64     /*  512 bits    */
 #define PCIXCC_MAX_MOD_SIZE     256     /* 2048 bits    */
 #define CEX3C_MIN_MOD_SIZE      PCIXCC_MIN_MOD_SIZE
-#define CEX3C_MAX_MOD_SIZE      PCIXCC_MAX_MOD_SIZE
+#define CEX3C_MAX_MOD_SIZE      512     /* 4096 bits    */
 
 #define PCIXCC_MCL2_SPEED_RATING        7870
 #define PCIXCC_MCL3_SPEED_RATING        7870
 #define CEX2C_SPEED_RATING              7000
-#define CEX3C_SPEED_RATING              6500    /* FIXME: needs finetuning */
+#define CEX3C_SPEED_RATING              6500
 
 #define PCIXCC_MAX_ICA_MESSAGE_SIZE     0x77c   /* max size type6 v2 crt message */
 #define PCIXCC_MAX_ICA_RESPONSE_SIZE    0x77c   /* max size type86 v2 reply      */

@@ -567,6 +567,15 @@ static int convert_response_ica(struct zcrypt_device *zdev,
         case TYPE88_RSP_CODE:
                 return convert_error(zdev, reply);
         case TYPE86_RSP_CODE:
+                if (msg->cprbx.ccp_rtcode &&
+                   (msg->cprbx.ccp_rscode == 0x14f) &&
+                   (outputdatalength > 256)) {
+                        if (zdev->max_exp_bit_length <= 17) {
+                                zdev->max_exp_bit_length = 17;
+                                return -EAGAIN;
+                        } else
+                                return -EINVAL;
+                }
                 if (msg->hdr.reply_code)
                         return convert_error(zdev, reply);
                 if (msg->cprbx.cprb_ver_id == 0x02)

@@ -1052,11 +1061,13 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
                         zdev->speed_rating = PCIXCC_MCL2_SPEED_RATING;
                         zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
                         zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
+                        zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
                 } else {
                         zdev->type_string = "PCIXCC_MCL3";
                         zdev->speed_rating = PCIXCC_MCL3_SPEED_RATING;
                         zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
                         zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
+                        zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
                 }
                 break;
         case AP_DEVICE_TYPE_CEX2C:

@@ -1065,6 +1076,7 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
                 zdev->speed_rating = CEX2C_SPEED_RATING;
                 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
                 zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
+                zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
                 break;
         case AP_DEVICE_TYPE_CEX3C:
                 zdev->user_space_type = ZCRYPT_CEX3C;

@@ -1072,6 +1084,7 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
                 zdev->speed_rating = CEX3C_SPEED_RATING;
                 zdev->min_mod_size = CEX3C_MIN_MOD_SIZE;
                 zdev->max_mod_size = CEX3C_MAX_MOD_SIZE;
+                zdev->max_exp_bit_length = CEX3C_MAX_MOD_SIZE;
                 break;
         default:
                 goto out_free;
@@ -10,6 +10,7 @@
  * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
  */
 
+#include <linux/kernel_stat.h>
 #include <linux/init.h>
 #include <linux/bootmem.h>
 #include <linux/err.h>

@@ -25,6 +26,7 @@
 #include <asm/kvm_virtio.h>
 #include <asm/setup.h>
 #include <asm/s390_ext.h>
+#include <asm/irq.h>
 
 #define VIRTIO_SUBCODE_64 0x0D00
 

@@ -379,6 +381,7 @@ static void kvm_extint_handler(unsigned int ext_int_code,
         u16 subcode;
         u32 param;
 
+        kstat_cpu(smp_processor_id()).irqs[EXTINT_VRT]++;
         subcode = ext_int_code >> 16;
         if ((subcode & 0xff00) != VIRTIO_SUBCODE_64)
                 return;
@@ -2,7 +2,8 @@ menu "S/390 network device drivers"
         depends on NETDEVICES && S390
 
 config LCS
-        tristate "Lan Channel Station Interface"
+        def_tristate m
+        prompt "Lan Channel Station Interface"
         depends on CCW && NETDEVICES && (NET_ETHERNET || TR || FDDI)
         help
           Select this option if you want to use LCS networking on IBM System z.

@@ -12,7 +13,8 @@ config LCS
           If you do not know what it is, it's safe to choose Y.
 
 config CTCM
-        tristate "CTC and MPC SNA device support"
+        def_tristate m
+        prompt "CTC and MPC SNA device support"
         depends on CCW && NETDEVICES
         help
           Select this option if you want to use channel-to-channel

@@ -26,7 +28,8 @@ config CTCM
           If you do not need any channel-to-channel connection, choose N.
 
 config NETIUCV
-        tristate "IUCV network device support (VM only)"
+        def_tristate m
+        prompt "IUCV network device support (VM only)"
         depends on IUCV && NETDEVICES
         help
           Select this option if you want to use inter-user communication

@@ -37,14 +40,16 @@ config NETIUCV
           The module name is netiucv. If unsure, choose Y.
 
 config SMSGIUCV
-        tristate "IUCV special message support (VM only)"
+        def_tristate m
+        prompt "IUCV special message support (VM only)"
         depends on IUCV
         help
           Select this option if you want to be able to receive SMSG messages
           from other VM guest systems.
 
 config SMSGIUCV_EVENT
-        tristate "Deliver IUCV special messages as uevents (VM only)"
+        def_tristate m
+        prompt "Deliver IUCV special messages as uevents (VM only)"
         depends on SMSGIUCV
         help
           Select this option to deliver CP special messages (SMSGs) as

@@ -54,7 +59,8 @@ config SMSGIUCV_EVENT
           To compile as a module, choose M. The module name is "smsgiucv_app".
 
 config CLAW
-        tristate "CLAW device support"
+        def_tristate m
+        prompt "CLAW device support"
         depends on CCW && NETDEVICES
         help
           This driver supports channel attached CLAW devices.

@@ -64,7 +70,8 @@ config CLAW
           To compile into the kernel, choose Y.
 
 config QETH
-        tristate "Gigabit Ethernet device support"
+        def_tristate y
+        prompt "Gigabit Ethernet device support"
         depends on CCW && NETDEVICES && IP_MULTICAST && QDIO
         help
           This driver supports the IBM System z OSA Express adapters

@@ -78,25 +85,25 @@ config QETH
           The module name is qeth.
 
 config QETH_L2
-        tristate "qeth layer 2 device support"
-        depends on QETH
-        help
-          Select this option to be able to run qeth devices in layer 2 mode.
-          To compile as a module, choose M. The module name is qeth_l2.
-          If unsure, choose y.
+        def_tristate y
+        prompt "qeth layer 2 device support"
+        depends on QETH
+        help
+          Select this option to be able to run qeth devices in layer 2 mode.
+          To compile as a module, choose M. The module name is qeth_l2.
+          If unsure, choose y.
 
 config QETH_L3
-        tristate "qeth layer 3 device support"
-        depends on QETH
-        help
-          Select this option to be able to run qeth devices in layer 3 mode.
-          To compile as a module choose M. The module name is qeth_l3.
-          If unsure, choose Y.
+        def_tristate y
+        prompt "qeth layer 3 device support"
+        depends on QETH
+        help
+          Select this option to be able to run qeth devices in layer 3 mode.
+          To compile as a module choose M. The module name is qeth_l3.
+          If unsure, choose Y.
 
 config QETH_IPV6
-        bool
-        depends on (QETH_L3 = IPV6) || (QETH_L3 && IPV6 = 'y')
-        default y
+        def_bool y if (QETH_L3 = IPV6) || (QETH_L3 && IPV6 = 'y')
 
 config CCWGROUP
         tristate
@@ -63,6 +63,7 @@
 
 #define KMSG_COMPONENT "claw"
 
+#include <linux/kernel_stat.h>
 #include <asm/ccwdev.h>
 #include <asm/ccwgroup.h>
 #include <asm/debug.h>

@@ -640,6 +641,7 @@ claw_irq_handler(struct ccw_device *cdev,
         struct claw_env  *p_env;
         struct chbk *p_ch_r=NULL;
 
+        kstat_cpu(smp_processor_id()).irqs[IOINT_CLW]++;
         CLAW_DBF_TEXT(4, trace, "clawirq");
         /* Bypass all 'unsolicited interrupts' */
         privptr = dev_get_drvdata(&cdev->dev);

@@ -24,6 +24,7 @@
 #define KMSG_COMPONENT "ctcm"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
+#include <linux/kernel_stat.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/kernel.h>

@@ -1204,6 +1205,7 @@ static void ctcm_irq_handler(struct ccw_device *cdev,
         int cstat;
         int dstat;
 
+        kstat_cpu(smp_processor_id()).irqs[IOINT_CTC]++;
         CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
                 "Enter %s(%s)", CTCM_FUNTAIL, dev_name(&cdev->dev));
 

@@ -26,6 +26,7 @@
 #define KMSG_COMPONENT "lcs"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
+#include <linux/kernel_stat.h>
 #include <linux/module.h>
 #include <linux/if.h>
 #include <linux/netdevice.h>

@@ -1398,6 +1399,7 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
         int rc, index;
         int cstat, dstat;
 
+        kstat_cpu(smp_processor_id()).irqs[IOINT_LCS]++;
         if (lcs_check_irb_error(cdev, irb))
                 return;
 
@@ -3831,6 +3831,8 @@ static int qeth_qdio_establish(struct qeth_card *card)
         init_data.int_parm               = (unsigned long) card;
         init_data.input_sbal_addr_array  = (void **) in_sbal_ptrs;
         init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
+        init_data.scan_threshold =
+                (card->info.type == QETH_CARD_TYPE_IQD) ? 8 : 32;
 
         if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
                 QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {

@@ -290,6 +290,8 @@ static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
         id->int_parm = (unsigned long) qdio;
         id->input_sbal_addr_array = (void **) (qdio->res_q);
         id->output_sbal_addr_array = (void **) (qdio->req_q);
+        id->scan_threshold =
+                QDIO_MAX_BUFFERS_PER_Q - ZFCP_QDIO_MAX_SBALS_PER_REQ * 2;
 }
 
 /**

@@ -36,6 +36,7 @@
 #define KMSG_COMPONENT "iucv"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
+#include <linux/kernel_stat.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/spinlock.h>

@@ -1804,6 +1805,7 @@ static void iucv_external_interrupt(unsigned int ext_int_code,
         struct iucv_irq_data *p;
         struct iucv_irq_list *work;
 
+        kstat_cpu(smp_processor_id()).irqs[EXTINT_IUC]++;
         p = iucv_irq_data[smp_processor_id()];
         if (p->ippathid >= iucv_max_pathid) {
                 WARN_ON(p->ippathid >= iucv_max_pathid);
tools/perf/arch/s390/Makefile (new file, 4 lines)
@@ -0,0 +1,4 @@
+ifndef NO_DWARF
+PERF_HAVE_DWARF_REGS := 1
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o
+endif

tools/perf/arch/s390/util/dwarf-regs.c (new file, 22 lines)
@@ -0,0 +1,22 @@
+/*
+ * Mapping of DWARF debug register numbers into register names.
+ *
+ * Copyright IBM Corp. 2010
+ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
+ *
+ */
+
+#include <libio.h>
+#include <dwarf-regs.h>
+
+#define NUM_GPRS 16
+
+static const char *gpr_names[NUM_GPRS] = {
+        "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
+        "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
+};
+
+const char *get_arch_regstr(unsigned int n)
+{
+        return (n >= NUM_GPRS) ? NULL : gpr_names[n];
+}
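A quick usage illustration for the new mapping (a standalone sketch, not from the commit; the caller is hypothetical): the table maps DWARF register numbers 0-15 to s390 GPR names and returns NULL when out of range.

#include <stdio.h>

extern const char *get_arch_regstr(unsigned int n);

int main(void)
{
        printf("%s\n", get_arch_regstr(11));            /* prints "%r11" */
        printf("%d\n", get_arch_regstr(42) == NULL);    /* 1: out of range */
        return 0;
}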