mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git

Merge branch 'master'

commit 2acab771b7
@@ -33,7 +33,9 @@ pci_alloc_consistent(struct pci_dev *dev, size_t size,

Consistent memory is memory for which a write by either the device or
the processor can immediately be read by the processor or device
without having to worry about caching effects.
without having to worry about caching effects. (You may however need
to make sure to flush the processor's write buffers before telling
devices to read that memory.)

This routine allocates a region of <size> bytes of consistent memory.
It also returns a <dma_handle> which may be cast to an unsigned
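A rough usage sketch of the allocation routine in this hunk (the device
pointer, buffer size and error handling are illustrative only, not part
of the interface definition):

	dma_addr_t dma_handle;
	void *cpu_addr;

	/* 4096 is an arbitrary example size */
	cpu_addr = pci_alloc_consistent(pdev, 4096, &dma_handle);
	if (!cpu_addr)
		return -ENOMEM;

	/* cpu_addr is what the CPU uses; dma_handle is what the device sees */
	/* ... program the device with dma_handle ... */

	pci_free_consistent(pdev, 4096, cpu_addr, dma_handle);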
@@ -304,12 +306,12 @@ dma address with dma_mapping_error(). A non zero return value means the mapping
could not be created and the driver should take appropriate action (eg
reduce current DMA mapping usage or delay and try again later).

int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction direction)
int
pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
int nents, int direction)
int
dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction)
int
pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
int nents, int direction)

Maps a scatter gather list from the block layer.

@@ -327,12 +329,33 @@ critical that the driver do something, in the case of a block driver
aborting the request or even oopsing is better than doing nothing and
corrupting the filesystem.

void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
enum dma_data_direction direction)
void
pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
int nents, int direction)
With scatterlists, you use the resulting mapping like this:

	int i, count = dma_map_sg(dev, sglist, nents, direction);
	struct scatterlist *sg;

	for (i = 0, sg = sglist; i < count; i++, sg++) {
		hw_address[i] = sg_dma_address(sg);
		hw_len[i] = sg_dma_len(sg);
	}

where nents is the number of entries in the sglist.

The implementation is free to merge several consecutive sglist entries
into one (e.g. with an IOMMU, or if several pages just happen to be
physically contiguous) and returns the actual number of sg entries it
mapped them to. On failure 0 is returned.

Then you should loop count times (note: this can be less than nents times)
and use sg_dma_address() and sg_dma_len() macros where you previously
accessed sg->address and sg->length as shown above.

void
dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int nhwentries, enum dma_data_direction direction)
void
pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
int nents, int direction)

Unmap the previously mapped scatter/gather list. All the parameters
must be the same as those passed in to the scatter/gather mapping
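The pairing rule in a sketch (hypothetical driver context; note that the
unmap takes the original nents, not the count that dma_map_sg() returned):

	int count = dma_map_sg(dev, sglist, nents, direction);
	if (count == 0)
		return -EIO;	/* mapping failed, take corrective action */

	/* ... do the DMA, using sg_dma_address()/sg_dma_len() ... */

	dma_unmap_sg(dev, sglist, nents, direction);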
@@ -58,11 +58,15 @@ translating each of those pages back to a kernel address using
something like __va(). [ EDIT: Update this when we integrate
Gerd Knorr's generic code which does this. ]

This rule also means that you may not use kernel image addresses
(ie. items in the kernel's data/text/bss segment, or your driver's)
nor may you use kernel stack addresses for DMA. Both of these items
might be mapped somewhere entirely different than the rest of physical
memory.
This rule also means that you may use neither kernel image addresses
(items in data/text/bss segments), nor module image addresses, nor
stack addresses for DMA. These could all be mapped somewhere entirely
different than the rest of physical memory. Even if those classes of
memory could physically work with DMA, you'd need to ensure the I/O
buffers were cacheline-aligned. Without that, you'd see cacheline
sharing problems (data corruption) on CPUs with DMA-incoherent caches.
(The CPU could write to one word, DMA would write to a different one
in the same cache line, and one of them could be overwritten.)

Also, this means that you cannot take the return of a kmap()
call and DMA to/from that. This is similar to vmalloc().
@@ -284,6 +288,11 @@ There are two types of DMA mappings:

in order to get correct behavior on all platforms.

Also, on some platforms your driver may need to flush CPU write
buffers in much the same way as it needs to flush write buffers
found in PCI bridges (such as by reading a register's value
after writing it).

- Streaming DMA mappings which are usually mapped for one DMA transfer,
unmapped right after it (unless you use pci_dma_sync_* below) and for which
hardware can optimize for sequential accesses.
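A sketch of the posted-write flush technique mentioned above (the register
offset and mmio base are placeholders, not a real device's layout):

	writel(cmd, mmio + MY_DOORBELL);
	(void) readl(mmio + MY_DOORBELL);  /* read back to flush CPU/bridge write buffers */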
@@ -303,6 +312,9 @@ There are two types of DMA mappings:

Neither type of DMA mapping has alignment restrictions that come
from PCI, although some devices may have such restrictions.
Also, systems with caches that aren't DMA-coherent will work better
when the underlying buffers don't share cache lines with other data.


Using Consistent DMA mappings.

@@ -12,18 +12,22 @@ meant as a replacement for the older, individual drivers:
teletext adapters)

It currently supports the following devices:
* Philips adapter
* home brew teletext adapter
* Velleman K8000 adapter
* ELV adapter
* Analog Devices evaluation boards (ADM1025, ADM1030, ADM1031, ADM1032)
* Barco LPT->DVI (K5800236) adapter
* (type=0) Philips adapter
* (type=1) home brew teletext adapter
* (type=2) Velleman K8000 adapter
* (type=3) ELV adapter
* (type=4) Analog Devices ADM1032 evaluation board
* (type=5) Analog Devices evaluation boards: ADM1025, ADM1030, ADM1031
* (type=6) Barco LPT->DVI (K5800236) adapter

These devices use different pinout configurations, so you have to tell
the driver what you have, using the type module parameter. There is no
way to autodetect the devices. Support for different pinout configurations
can be easily added when needed.

Earlier kernels defaulted to type=0 (Philips). But now, if the type
parameter is missing, the driver will simply fail to initialize.
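For example, with a hypothetical Velleman K8000 hookup, the driver would be
loaded as "modprobe i2c-parport type=2".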

Building your own adapter
-------------------------

Documentation/networking/xfrm_sync.txt (new file, 166 lines)

@@ -0,0 +1,166 @@
The sync patches are based on initial patches from
Krisztian <hidden@balabit.hu> and others, and additional patches
from Jamal <hadi@cyberus.ca>.

The end goal for syncing is to be able to insert attributes + generate
events so that an SA can be safely moved from one machine to another
for HA purposes.
The idea is to synchronize the SA so that the takeover machine can do
the processing of the SA as accurately as possible if it has access to it.

We already have the ability to generate SA add/del/upd events.
These patches add the ability to sync and have accurate lifetime byte (to
ensure proper decay of SAs) and replay counters to avoid replay attacks
with minimal loss at failover time.
This way a backup stays as closely up to date as an active member.

Because the above items change for every packet the SA receives,
it is possible for a lot of events to be generated.
For this reason, we also add a Nagle-like algorithm to restrict
the events, i.e. we are going to set thresholds to say "let me
know if the replay sequence threshold is reached or 10 secs have passed".
These thresholds are set system-wide via sysctls or can be updated
per SA.

The identified items that need to be synchronized are:
- the lifetime byte counter
note that: the lifetime time limit is not important if you assume the failover
machine is known ahead of time, since the decay of the time countdown
is not driven by packet arrival.
- the replay sequence for both inbound and outbound

1) Message Structure
--------------------

nlmsghdr:aevent_id:optional-TLVs.

The netlink message types are:

XFRM_MSG_NEWAE and XFRM_MSG_GETAE.

An XFRM_MSG_GETAE does not have TLVs.
An XFRM_MSG_NEWAE will have at least two TLVs (as is
discussed further below).

aevent_id structure looks like:

   struct xfrm_aevent_id {
	     struct xfrm_usersa_id    sa_id;
	     __u32                    flags;
   };

xfrm_usersa_id in this message layout identifies the SA.

flags are used to indicate different things. The possible
flags are:
	XFRM_AE_RTHR=1, /* replay threshold */
	XFRM_AE_RVAL=2, /* replay value */
	XFRM_AE_LVAL=4, /* lifetime value */
	XFRM_AE_ETHR=8, /* expiry timer threshold */
	XFRM_AE_CR=16, /* Event cause is replay update */
	XFRM_AE_CE=32, /* Event cause is timer expiry */
	XFRM_AE_CU=64, /* Event cause is policy update */

How these flags are used depends on the direction of the
message (kernel<->user) as well as the cause (config, query or event).
This is described below in the different messages.

The pid will be set appropriately in netlink to recognize direction
(0 to the kernel, and pid = process id that created the event
when going from kernel to user space).

A program needs to subscribe to the multicast group XFRMNLGRP_AEVENTS
to get notified of these events.
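An illustrative sketch of a user-space query built from the structure above
(the SA identity values and the NETLINK_XFRM socket plumbing are assumed
and not shown):

	struct {
		struct nlmsghdr       n;
		struct xfrm_aevent_id id;
	} req;

	memset(&req, 0, sizeof(req));
	req.n.nlmsg_len   = NLMSG_LENGTH(sizeof(req.id));
	req.n.nlmsg_type  = XFRM_MSG_GETAE;
	req.n.nlmsg_flags = NLM_F_REQUEST;
	req.id.sa_id.spi    = sa_spi;       /* placeholder SA identity */
	req.id.sa_id.proto  = IPPROTO_ESP;
	req.id.sa_id.family = AF_INET;
	req.id.flags = XFRM_AE_RTHR | XFRM_AE_ETHR; /* also fetch both thresholds */
	/* send via a NETLINK_XFRM socket; the kernel replies with XFRM_MSG_NEWAE */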
2) TLVs reflect the different parameters:
-----------------------------------------

a) byte value (XFRMA_LTIME_VAL)
This TLV carries the running/current counter for byte lifetime since
last event.

b) replay value (XFRMA_REPLAY_VAL)
This TLV carries the running/current counter for replay sequence since
last event.

c) replay threshold (XFRMA_REPLAY_THRESH)
This TLV carries the threshold being used by the kernel to trigger events
when the replay sequence is exceeded.

d) expiry timer (XFRMA_ETIMER_THRESH)
This is a timer value in milliseconds which is used as the Nagle
value to rate limit the events.

3) Default configurations for the parameters:
----------------------------------------------

By default these events should be turned off unless there is
at least one listener registered to listen to the multicast
group XFRMNLGRP_AEVENTS.

Programs installing SAs will need to specify the two thresholds; however,
in order not to change existing applications such as racoon,
we also provide default threshold values for these different parameters
in case they are not specified.

The two sysctls/proc entries are:
a) /proc/sys/net/core/sysctl_xfrm_aevent_etime
used to provide default values for XFRMA_ETIMER_THRESH in incremental
units of time of 100ms. The default is 10 (1 second).

b) /proc/sys/net/core/sysctl_xfrm_aevent_rseqth
used to provide default values for the XFRMA_REPLAY_THRESH parameter
in incremental packet count. The default is two packets.

4) Message types
----------------

a) XFRM_MSG_GETAE issued by user-->kernel.
XFRM_MSG_GETAE does not carry any TLVs.
The response is an XFRM_MSG_NEWAE which is formatted based on what
XFRM_MSG_GETAE queried for.
The response will always have XFRMA_LTIME_VAL and XFRMA_REPLAY_VAL TLVs.
* if the XFRM_AE_RTHR flag is set, then XFRMA_REPLAY_THRESH is also retrieved
* if the XFRM_AE_ETHR flag is set, then XFRMA_ETIMER_THRESH is also retrieved

b) XFRM_MSG_NEWAE is issued by either user space to configure
or kernel to announce events or respond to an XFRM_MSG_GETAE.

i) user --> kernel to configure a specific SA.
Any of the values or threshold parameters can be updated by passing the
appropriate TLV.
A response is issued back to the sender in user space to indicate success
or failure.
In the case of success, additionally an event with
XFRM_MSG_NEWAE is also issued to any listeners as described in iii).

ii) kernel->user direction as a response to XFRM_MSG_GETAE.
The response will always have XFRMA_LTIME_VAL and XFRMA_REPLAY_VAL TLVs.
The threshold TLVs will be included if explicitly requested in
the XFRM_MSG_GETAE message.

iii) kernel->user to report an event if someone sets any values or
thresholds for an SA using XFRM_MSG_NEWAE (as described in #i above).
In such a case the XFRM_AE_CU flag is set to inform the user that
the change happened as a result of an update.
The message will always have XFRMA_LTIME_VAL and XFRMA_REPLAY_VAL TLVs.

iv) kernel->user to report an event when the replay threshold or a timeout
is exceeded.
In such a case either XFRM_AE_CR (replay exceeded) or XFRM_AE_CE (timeout
happened) is set to inform the user what happened.
Note the two flags are mutually exclusive.
The message will always have XFRMA_LTIME_VAL and XFRMA_REPLAY_VAL TLVs.

Exceptions to threshold settings
--------------------------------

If you have an SA that is getting hit by traffic in bursts such that
there is a period where the timer threshold expires with no packets
seen, then an odd behavior is observed as follows:
The first packet arrival after a timer expiry will trigger a timeout
aevent; i.e. we don't wait for a timeout period or a packet threshold
to be reached. This is done for simplicity and efficiency reasons.

-JHS
@@ -3058,13 +3058,6 @@ M: khali@linux-fr.org
L: lm-sensors@lm-sensors.org
S: Odd Fixes

WAN ROUTER & SANGOMA WANPIPE DRIVERS & API (X.25, FRAME RELAY, PPP, CISCO HDLC)
P: Nenad Corbic
M: ncorbic@sangoma.com
M: dm@sangoma.com
W: http://www.sangoma.com
S: Supported

WATCHDOG DEVICE DRIVERS
P: Wim Van Sebroeck
M: wim@iguana.be
@@ -6,7 +6,7 @@ extra-y := head.o init_task.o vmlinux.lds

obj-y := process.o semaphore.o signal.o entry.o traps.o irq.o \
	ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_i386.o \
	pci-dma.o i386_ksyms.o i387.o dmi_scan.o bootflag.o \
	pci-dma.o i386_ksyms.o i387.o bootflag.o \
	quirks.o i8237.o topology.o alternative.o

obj-y += cpu/
@@ -314,3 +314,4 @@ ENTRY(sys_call_table)
	.long sys_get_robust_list
	.long sys_splice
	.long sys_sync_file_range
	.long sys_tee		/* 315 */
@@ -588,7 +588,10 @@ static __init int via_router_probe(struct irq_router *r,
	case PCI_DEVICE_ID_VIA_82C596:
	case PCI_DEVICE_ID_VIA_82C686:
	case PCI_DEVICE_ID_VIA_8231:
	case PCI_DEVICE_ID_VIA_8233A:
	case PCI_DEVICE_ID_VIA_8235:
	case PCI_DEVICE_ID_VIA_8237:
	case PCI_DEVICE_ID_VIA_8237_SATA:
		/* FIXME: add new ones for 8233/5 */
		r->name = "VIA";
		r->get = pirq_via_get;
@@ -7,7 +7,7 @@ extra-y := head.o init_task.o vmlinux.lds

obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \
	irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \
	salinfo.o semaphore.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
	unwind.o mca.o mca_asm.o topology.o dmi_scan.o
	unwind.o mca.o mca_asm.o topology.o

obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o
obj-$(CONFIG_IA64_GENERIC) += acpi-ext.o
@@ -30,7 +30,6 @@ obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o
obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o
obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o
mca_recovery-y += mca_drv.o mca_drv_asm.o
dmi_scan-y += ../../i386/kernel/dmi_scan.o

# The gate DSO image is built using a special linker script.
targets += gate.so gate-syms.o
@@ -1609,5 +1609,6 @@ sys_call_table:
	data8 sys_set_robust_list
	data8 sys_get_robust_list
	data8 sys_sync_file_range	// 1300
	data8 sys_tee

	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
@@ -963,7 +963,7 @@ no_mod:
 */

static void
ia64_wait_for_slaves(int monarch)
ia64_wait_for_slaves(int monarch, const char *type)
{
	int c, wait = 0, missing = 0;
	for_each_online_cpu(c) {
@@ -989,7 +989,7 @@ ia64_wait_for_slaves(int monarch)
	}
	if (!missing)
		goto all_in;
	printk(KERN_INFO "OS MCA slave did not rendezvous on cpu");
	printk(KERN_INFO "OS %s slave did not rendezvous on cpu", type);
	for_each_online_cpu(c) {
		if (c == monarch)
			continue;
@@ -1000,7 +1000,7 @@ ia64_wait_for_slaves(int monarch)
	return;

all_in:
	printk(KERN_INFO "All OS MCA slaves have reached rendezvous\n");
	printk(KERN_INFO "All OS %s slaves have reached rendezvous\n", type);
	return;
}

@@ -1038,7 +1038,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
	if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, (long)&nd, 0, 0)
			== NOTIFY_STOP)
		ia64_mca_spin(__FUNCTION__);
	ia64_wait_for_slaves(cpu);
	ia64_wait_for_slaves(cpu, "MCA");

	/* Wakeup all the processors which are spinning in the rendezvous loop.
	 * They will leave SAL, then spin in the OS with interrupts disabled
@@ -1429,7 +1429,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
	 */
	printk("Delaying for 5 seconds...\n");
	udelay(5*1000000);
	ia64_wait_for_slaves(cpu);
	ia64_wait_for_slaves(cpu, "INIT");
	/* If nobody intercepts DIE_INIT_MONARCH_PROCESS then we drop through
	 * to default_monarch_init_process() above and just print all the
	 * tasks.
@@ -519,6 +519,68 @@ void __cpuinit *per_cpu_init(void)
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_VIRTUAL_MEM_MAP
static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i)
{
	unsigned long end_address, hole_next_pfn;
	unsigned long stop_address;

	end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
	end_address = PAGE_ALIGN(end_address);

	stop_address = (unsigned long) &vmem_map[
		pgdat->node_start_pfn + pgdat->node_spanned_pages];

	do {
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pgd = pgd_offset_k(end_address);
		if (pgd_none(*pgd)) {
			end_address += PGDIR_SIZE;
			continue;
		}

		pud = pud_offset(pgd, end_address);
		if (pud_none(*pud)) {
			end_address += PUD_SIZE;
			continue;
		}

		pmd = pmd_offset(pud, end_address);
		if (pmd_none(*pmd)) {
			end_address += PMD_SIZE;
			continue;
		}

		pte = pte_offset_kernel(pmd, end_address);
retry_pte:
		if (pte_none(*pte)) {
			end_address += PAGE_SIZE;
			pte++;
			if ((end_address < stop_address) &&
			    (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
				goto retry_pte;
			continue;
		}
		/* Found next valid vmem_map page */
		break;
	} while (end_address < stop_address);

	end_address = min(end_address, stop_address);
	end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
	hole_next_pfn = end_address / sizeof(struct page);
	return hole_next_pfn - pgdat->node_start_pfn;
}
#else
static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i)
{
	return i + 1;
}
#endif

/**
 * show_mem - give short summary of memory stats
 *
@@ -547,8 +609,10 @@ void show_mem(void)
		struct page *page;
		if (pfn_valid(pgdat->node_start_pfn + i))
			page = pfn_to_page(pgdat->node_start_pfn + i);
		else
		else {
			i = find_next_valid_pfn_for_pgdat(pgdat, i) - 1;
			continue;
		}
		if (PageReserved(page))
			reserved++;
		else if (PageSwapCache(page))
@@ -323,3 +323,4 @@ COMPAT_SYS(pselect6)
COMPAT_SYS(ppoll)
SYSCALL(unshare)
SYSCALL(splice)
SYSCALL(tee)
@@ -75,7 +75,7 @@ sys_call_table:
/*265*/	.long sys_timer_delete, sys_timer_create, sys_nis_syscall, sys_io_setup, sys_io_destroy
/*270*/	.long sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink
/*275*/	.long sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid
/*280*/	.long sys_ni_syscall, sys_add_key, sys_request_key, sys_keyctl, sys_openat
/*280*/	.long sys_tee, sys_add_key, sys_request_key, sys_keyctl, sys_openat
/*285*/	.long sys_mkdirat, sys_mknodat, sys_fchownat, sys_futimesat, sys_fstatat64
/*290*/	.long sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat
/*295*/	.long sys_fchmodat, sys_faccessat, sys_pselect6, sys_ppoll, sys_unshare
@@ -138,6 +138,7 @@ SIGN2(sys32_ioprio_get, sys_ioprio_get, %o0, %o1)
SIGN3(sys32_ioprio_set, sys_ioprio_set, %o0, %o1, %o2)
SIGN2(sys32_splice, sys_splice, %o0, %o1)
SIGN2(sys32_sync_file_range, compat_sync_file_range, %o0, %o5)
SIGN2(sys32_tee, sys_tee, %o0, %o1)

	.globl sys32_mmap2
sys32_mmap2:
@@ -76,7 +76,7 @@ sys_call_table32:
	.word sys_timer_delete, compat_sys_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy
/*270*/	.word sys32_io_submit, sys_io_cancel, compat_sys_io_getevents, sys32_mq_open, sys_mq_unlink
	.word compat_sys_mq_timedsend, compat_sys_mq_timedreceive, compat_sys_mq_notify, compat_sys_mq_getsetattr, compat_sys_waitid
/*280*/	.word sys_ni_syscall, sys_add_key, sys_request_key, sys_keyctl, compat_sys_openat
/*280*/	.word sys32_tee, sys_add_key, sys_request_key, sys_keyctl, compat_sys_openat
	.word sys_mkdirat, sys_mknodat, sys_fchownat, compat_sys_futimesat, compat_sys_fstatat64
/*285*/	.word sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat
	.word sys_fchmodat, sys_faccessat, compat_sys_pselect6, compat_sys_ppoll, sys_unshare
@@ -145,7 +145,7 @@ sys_call_table:
	.word sys_timer_delete, sys_timer_create, sys_ni_syscall, sys_io_setup, sys_io_destroy
/*270*/	.word sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink
	.word sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid
/*280*/	.word sys_nis_syscall, sys_add_key, sys_request_key, sys_keyctl, sys_openat
/*280*/	.word sys_tee, sys_add_key, sys_request_key, sys_keyctl, sys_openat
	.word sys_mkdirat, sys_mknodat, sys_fchownat, sys_futimesat, sys_fstatat64
/*285*/	.word sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat
	.word sys_fchmodat, sys_faccessat, sys_pselect6, sys_ppoll, sys_unshare
@@ -8,7 +8,7 @@ obj-y := process.o signal.o entry.o traps.o irq.o \
	ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_x86_64.o \
	x8664_ksyms.o i387.o syscall.o vsyscall.o \
	setup64.o bootflag.o e820.o reboot.o quirks.o i8237.o \
	dmi_scan.o pci-dma.o pci-nommu.o
	pci-dma.o pci-nommu.o

obj-$(CONFIG_X86_MCE) += mce.o
obj-$(CONFIG_X86_MCE_INTEL) += mce_intel.o
@@ -49,5 +49,3 @@ intel_cacheinfo-y += ../../i386/kernel/cpu/intel_cacheinfo.o
quirks-y += ../../i386/kernel/quirks.o
i8237-y += ../../i386/kernel/i8237.o
msr-$(subst m,y,$(CONFIG_X86_MSR)) += ../../i386/kernel/msr.o
dmi_scan-y += ../../i386/kernel/dmi_scan.o
@@ -350,16 +350,51 @@ out:
	return ret;
}

/**
 * sg_scsi_ioctl -- handle deprecated SCSI_IOCTL_SEND_COMMAND ioctl
 * @file: file this ioctl operates on (optional)
 * @q: request queue to send scsi commands down
 * @disk: gendisk to operate on (optional)
 * @sic: userspace structure describing the command to perform
 *
 * Send down the scsi command described by @sic to the device below
 * the request queue @q. If @file is non-NULL it's used to perform
 * fine-grained permission checks that allow users to send down
 * non-destructive SCSI commands. If the caller has a struct gendisk
 * available it should be passed in as @disk to allow the low level
 * driver to use the information contained in it. A non-NULL @disk
 * is only allowed if the caller knows that the low level driver doesn't
 * need it (e.g. in the scsi subsystem).
 *
 * Notes:
 *   - This interface is deprecated - users should use the SG_IO
 *     interface instead, as this is a more flexible approach to
 *     performing SCSI commands on a device.
 *   - The SCSI command length is determined by examining the 1st byte
 *     of the given command. There is no way to override this.
 *   - Data transfers are limited to PAGE_SIZE.
 *   - The length (x + y) must be at least OMAX_SB_LEN bytes long to
 *     accommodate the sense buffer when an error occurs.
 *     The sense buffer is truncated to OMAX_SB_LEN (16) bytes so that
 *     old code will not be surprised.
 *   - If a Unix error occurs (e.g. ENOMEM) then the user will receive
 *     a negative return and the Unix error code in 'errno'.
 *     If the SCSI command succeeds then 0 is returned.
 *     Positive numbers returned are the compacted SCSI error codes (4
 *     bytes in one int) where the lowest byte is the SCSI status.
 */
#define OMAX_SB_LEN 16	/* For backward compatibility */

static int sg_scsi_ioctl(struct file *file, request_queue_t *q,
		struct gendisk *bd_disk, Scsi_Ioctl_Command __user *sic)
int sg_scsi_ioctl(struct file *file, struct request_queue *q,
		struct gendisk *disk, struct scsi_ioctl_command __user *sic)
{
	struct request *rq;
	int err;
	unsigned int in_len, out_len, bytes, opcode, cmdlen;
	char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];

	if (!sic)
		return -EINVAL;

	/*
	 * get in and out lengths, verify they don't exceed a page worth of data
	 */
@@ -393,45 +428,53 @@ static int sg_scsi_ioctl(struct file *file, request_queue_t *q,
	if (copy_from_user(rq->cmd, sic->data, cmdlen))
		goto error;

	if (copy_from_user(buffer, sic->data + cmdlen, in_len))
	if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
		goto error;

	err = verify_command(file, rq->cmd);
	if (err)
		goto error;

	/* default; possibly overridden later */
	rq->retries = 5;

	switch (opcode) {
	case SEND_DIAGNOSTIC:
	case FORMAT_UNIT:
		rq->timeout = FORMAT_UNIT_TIMEOUT;
		break;
	case START_STOP:
		rq->timeout = START_STOP_TIMEOUT;
		break;
	case MOVE_MEDIUM:
		rq->timeout = MOVE_MEDIUM_TIMEOUT;
		break;
	case READ_ELEMENT_STATUS:
		rq->timeout = READ_ELEMENT_STATUS_TIMEOUT;
		break;
	case READ_DEFECT_DATA:
		rq->timeout = READ_DEFECT_DATA_TIMEOUT;
		break;
	default:
		rq->timeout = BLK_DEFAULT_TIMEOUT;
		break;
	case SEND_DIAGNOSTIC:
	case FORMAT_UNIT:
		rq->timeout = FORMAT_UNIT_TIMEOUT;
		rq->retries = 1;
		break;
	case START_STOP:
		rq->timeout = START_STOP_TIMEOUT;
		break;
	case MOVE_MEDIUM:
		rq->timeout = MOVE_MEDIUM_TIMEOUT;
		break;
	case READ_ELEMENT_STATUS:
		rq->timeout = READ_ELEMENT_STATUS_TIMEOUT;
		break;
	case READ_DEFECT_DATA:
		rq->timeout = READ_DEFECT_DATA_TIMEOUT;
		rq->retries = 1;
		break;
	default:
		rq->timeout = BLK_DEFAULT_TIMEOUT;
		break;
	}

	if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) {
		err = DRIVER_ERROR << 24;
		goto out;
	}

	memset(sense, 0, sizeof(sense));
	rq->sense = sense;
	rq->sense_len = 0;

	rq->data = buffer;
	rq->data_len = bytes;
	rq->flags |= REQ_BLOCK_PC;
	rq->retries = 0;

	blk_execute_rq(q, bd_disk, rq, 0);
	blk_execute_rq(q, disk, rq, 0);

out:
	err = rq->errors & 0xff;	/* only 8 bit SCSI status */
	if (err) {
		if (rq->sense_len && rq->sense) {
@@ -450,7 +493,7 @@ error:
	blk_put_request(rq);
	return err;
}

EXPORT_SYMBOL_GPL(sg_scsi_ioctl);

/* Send basic block requests */
static int __blk_send_generic(request_queue_t *q, struct gendisk *bd_disk, int cmd, int data)
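For orientation, user space reaches this path via ioctl(fd,
SCSI_IOCTL_SEND_COMMAND, argp), where argp points at a buffer laid out as
sketched below (a reading of the kernel-doc above; the flexible array
carries the command bytes followed by the in-data):

	struct scsi_ioctl_command {
		unsigned int  inlen;	/* x: bytes of data written to the device */
		unsigned int  outlen;	/* y: bytes of data expected back */
		unsigned char data[0];	/* command, then in-data; out-data (and,
					   on error, sense data) comes back here */
	};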
@@ -188,6 +188,11 @@ static ssize_t driver_bind(struct device_driver *drv,
		up(&dev->sem);
		if (dev->parent)
			up(&dev->parent->sem);

		if (err > 0)		/* success */
			err = count;
		else if (err == 0)	/* driver didn't accept device */
			err = -ENODEV;
	}
	put_device(dev);
	put_bus(bus);
@@ -562,14 +562,13 @@ int class_device_add(struct class_device *class_dev)
	kobject_uevent(&class_dev->kobj, KOBJ_ADD);

	/* notify any interfaces this device is now here */
	if (parent_class) {
		down(&parent_class->sem);
		list_add_tail(&class_dev->node, &parent_class->children);
		list_for_each_entry(class_intf, &parent_class->interfaces, node)
			if (class_intf->add)
				class_intf->add(class_dev, class_intf);
		up(&parent_class->sem);
	down(&parent_class->sem);
	list_add_tail(&class_dev->node, &parent_class->children);
	list_for_each_entry(class_intf, &parent_class->interfaces, node) {
		if (class_intf->add)
			class_intf->add(class_dev, class_intf);
	}
	up(&parent_class->sem);

register_done:
	if (error) {
@@ -209,7 +209,7 @@ static void __device_release_driver(struct device * dev)
	sysfs_remove_link(&dev->kobj, "driver");
	klist_remove(&dev->knode_driver);

	if (dev->bus->remove)
	if (dev->bus && dev->bus->remove)
		dev->bus->remove(dev);
	else if (drv->remove)
		drv->remove(dev);
@@ -10,6 +10,8 @@

#include <linux/vt_kern.h>
#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/pm.h>
#include "../base.h"
#include "power.h"

@@ -58,6 +60,7 @@ int suspend_device(struct device * dev, pm_message_t state)
	if (dev->bus && dev->bus->suspend && !dev->power.power_state.event) {
		dev_dbg(dev, "suspending\n");
		error = dev->bus->suspend(dev, state);
		suspend_report_result(dev->bus->suspend, error);
	}
	up(&dev->sem);
	return error;
@@ -169,3 +172,12 @@ int device_power_down(pm_message_t state)

EXPORT_SYMBOL_GPL(device_power_down);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret) {
		printk(KERN_ERR "%s(): ", function);
		print_fn_descriptor_symbol("%s() returns ", (unsigned long)fn);
		printk("%d\n", ret);
	}
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
@@ -64,6 +64,12 @@ static struct gatt_mask efficeon_generic_masks[] =
	{.mask = 0x00000001, .type = 0}
};

/* This function does the same thing as mask_memory() for this chipset... */
static inline unsigned long efficeon_mask_memory(unsigned long addr)
{
	return addr | 0x00000001;
}

static struct aper_size_info_lvl2 efficeon_generic_sizes[4] =
{
	{256, 65536, 0},
@@ -251,7 +257,7 @@ static int efficeon_insert_memory(struct agp_memory * mem, off_t pg_start, int t
	last_page = NULL;
	for (i = 0; i < count; i++) {
		int index = pg_start + i;
		unsigned long insert = mem->memory[i];
		unsigned long insert = efficeon_mask_memory(mem->memory[i]);

		page = (unsigned int *) efficeon_private.l1_table[index >> 10];

@@ -2734,7 +2734,7 @@ static void __do_SAK(void *arg)
			printk(KERN_NOTICE "SAK: killed process %d"
				" (%s): fd#%d opened to the tty\n",
				p->pid, p->comm, i);
			send_sig(SIGKILL, p, 1);
			force_sig(SIGKILL, p);
			break;
		}
	}
@@ -1,7 +1,8 @@
#
# Makefile for the linux kernel.
#
obj-$(CONFIG_EDD)	+= edd.o
obj-$(CONFIG_DMI)	+= dmi_scan.o
obj-$(CONFIG_EDD)	+= edd.o
obj-$(CONFIG_EFI_VARS)	+= efivars.o
obj-$(CONFIG_EFI_PCDP)	+= pcdp.o
obj-$(CONFIG_DELL_RBU)	+= dell_rbu.o
@@ -27,7 +27,7 @@ static char * __init dmi_string(struct dmi_header *dm, u8 s)
		else
			printk(KERN_ERR "dmi_string: out of memory.\n");
		}
	}
}

	return str;
}
@@ -41,7 +41,7 @@ static int __init dmi_table(u32 base, int len, int num,
{
	u8 *buf, *data;
	int i = 0;

	buf = dmi_ioremap(base, len);
	if (buf == NULL)
		return -1;
@@ -49,9 +49,9 @@ static int __init dmi_table(u32 base, int len, int num,
	data = buf;

	/*
	 * Stop when we see all the items the table claimed to have
	 * OR we run off the end of the table (also happens)
	 */
	* Stop when we see all the items the table claimed to have
	* OR we run off the end of the table (also happens)
	*/
	while ((i < num) && (data - buf + sizeof(struct dmi_header)) <= len) {
		struct dmi_header *dm = (struct dmi_header *)data;
		/*
@@ -75,7 +75,7 @@ static int __init dmi_checksum(u8 *buf)
{
	u8 sum = 0;
	int a;

	for (a = 0; a < 15; a++)
		sum += buf[a];
|
||||
bank. */
|
||||
if (kind < 0) {
|
||||
if (w83792d_read_value(client, W83792D_REG_CONFIG) & 0x80) {
|
||||
dev_warn(dev, "Detection failed at step 3\n");
|
||||
dev_dbg(dev, "Detection failed at step 1\n");
|
||||
goto ERROR1;
|
||||
}
|
||||
val1 = w83792d_read_value(client, W83792D_REG_BANK);
|
||||
@ -1170,6 +1170,7 @@ w83792d_detect(struct i2c_adapter *adapter, int address, int kind)
|
||||
if (!(val1 & 0x07)) { /* is Bank0 */
|
||||
if (((!(val1 & 0x80)) && (val2 != 0xa3)) ||
|
||||
((val1 & 0x80) && (val2 != 0x5c))) {
|
||||
dev_dbg(dev, "Detection failed at step 2\n");
|
||||
goto ERROR1;
|
||||
}
|
||||
}
|
||||
@ -1177,7 +1178,7 @@ w83792d_detect(struct i2c_adapter *adapter, int address, int kind)
|
||||
should match */
|
||||
if (w83792d_read_value(client,
|
||||
W83792D_REG_I2C_ADDR) != address) {
|
||||
dev_warn(dev, "Detection failed at step 5\n");
|
||||
dev_dbg(dev, "Detection failed at step 3\n");
|
||||
goto ERROR1;
|
||||
}
|
||||
}
|
||||
|
@@ -286,7 +286,10 @@ config I2C_PARPORT
	  This driver is a replacement for (and was inspired by) an older
	  driver named i2c-philips-par. The new driver supports more devices,
	  and makes it easier to add support for new devices.

	  An adapter type parameter is now mandatory. Please read the file
	  Documentation/i2c/busses/i2c-parport for details.

	  Another driver exists, named i2c-parport-light, which doesn't depend
	  on the parport driver. This is meant for embedded systems. Don't say
	  Y here if you intend to say Y or M there.
@@ -121,9 +121,14 @@ static struct i2c_adapter parport_adapter = {

static int __init i2c_parport_init(void)
{
	if (type < 0 || type >= ARRAY_SIZE(adapter_parm)) {
	if (type < 0) {
		printk(KERN_WARNING "i2c-parport: adapter type unspecified\n");
		return -ENODEV;
	}

	if (type >= ARRAY_SIZE(adapter_parm)) {
		printk(KERN_WARNING "i2c-parport: invalid type (%d)\n", type);
		type = 0;
		return -ENODEV;
	}

	if (base == 0) {
@@ -241,9 +241,14 @@ static struct parport_driver i2c_parport_driver = {

static int __init i2c_parport_init(void)
{
	if (type < 0 || type >= ARRAY_SIZE(adapter_parm)) {
	if (type < 0) {
		printk(KERN_WARNING "i2c-parport: adapter type unspecified\n");
		return -ENODEV;
	}

	if (type >= ARRAY_SIZE(adapter_parm)) {
		printk(KERN_WARNING "i2c-parport: invalid type (%d)\n", type);
		type = 0;
		return -ENODEV;
	}

	return parport_register_driver(&i2c_parport_driver);
@@ -90,7 +90,7 @@ static struct adapter_parm adapter_parm[] = {
	},
};

static int type;
static int type = -1;
module_param(type, int, 0);
MODULE_PARM_DESC(type,
	"Type of adapter:\n"
@@ -43,13 +43,6 @@
#include <linux/init.h>
#include <asm/io.h>

/*
   HISTORY:
   2003-05-11	1.0.0	Updated from lm_sensors project for kernel 2.5
			(was i2c-sis645.c from lm_sensors 2.7.0)
*/
#define SIS96x_VERSION "1.0.0"

/* base address register in PCI config space */
#define SIS96x_BAR 0x04

@@ -337,7 +330,6 @@ static struct pci_driver sis96x_driver = {

static int __init i2c_sis96x_init(void)
{
	printk(KERN_INFO "i2c-sis96x version %s\n", SIS96x_VERSION);
	return pci_register_driver(&sis96x_driver);
}

@@ -27,6 +27,7 @@
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

#define DS1374_REG_TOD0		0x00
#define DS1374_REG_TOD1		0x01
@@ -139,7 +140,7 @@ ulong ds1374_get_rtc_time(void)
	return t1;
}

static void ds1374_set_tlet(ulong arg)
static void ds1374_set_work(void *arg)
{
	ulong t1, t2;
	int limit = 10;		/* arbitrary retry limit */
@@ -168,17 +169,18 @@ static void ds1374_set_tlet(ulong arg)

static ulong new_time;

static DECLARE_TASKLET_DISABLED(ds1374_tasklet, ds1374_set_tlet,
		(ulong) &new_time);
static struct workqueue_struct *ds1374_workqueue;

static DECLARE_WORK(ds1374_work, ds1374_set_work, &new_time);

int ds1374_set_rtc_time(ulong nowtime)
{
	new_time = nowtime;

	if (in_interrupt())
		tasklet_schedule(&ds1374_tasklet);
		queue_work(ds1374_workqueue, &ds1374_work);
	else
		ds1374_set_tlet((ulong) &new_time);
		ds1374_set_work(&new_time);

	return 0;
}
@@ -204,6 +206,8 @@ static int ds1374_probe(struct i2c_adapter *adap, int addr, int kind)
	client->adapter = adap;
	client->driver = &ds1374_driver;

	ds1374_workqueue = create_singlethread_workqueue("ds1374");

	if ((rc = i2c_attach_client(client)) != 0) {
		kfree(client);
		return rc;
@@ -227,7 +231,7 @@ static int ds1374_detach(struct i2c_client *client)

	if ((rc = i2c_detach_client(client)) == 0) {
		kfree(i2c_get_clientdata(client));
		tasklet_kill(&ds1374_tasklet);
		destroy_workqueue(ds1374_workqueue);
	}
	return rc;
}
@@ -25,6 +25,7 @@
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

#include <asm/time.h>
#include <asm/rtc.h>
@@ -111,7 +112,7 @@ m41t00_get_rtc_time(void)
}

static void
m41t00_set_tlet(ulong arg)
m41t00_set(void *arg)
{
	struct rtc_time tm;
	ulong nowtime = *(ulong *)arg;
@@ -145,9 +146,9 @@ m41t00_set_tlet(ulong arg)
	return;
}

static ulong new_time;

DECLARE_TASKLET_DISABLED(m41t00_tasklet, m41t00_set_tlet, (ulong)&new_time);
static ulong new_time;
static struct workqueue_struct *m41t00_wq;
static DECLARE_WORK(m41t00_work, m41t00_set, &new_time);

int
m41t00_set_rtc_time(ulong nowtime)
@@ -155,9 +156,9 @@ m41t00_set_rtc_time(ulong nowtime)
	new_time = nowtime;

	if (in_interrupt())
		tasklet_schedule(&m41t00_tasklet);
		queue_work(m41t00_wq, &m41t00_work);
	else
		m41t00_set_tlet((ulong)&new_time);
		m41t00_set(&new_time);

	return 0;
}
@@ -189,6 +190,7 @@ m41t00_probe(struct i2c_adapter *adap, int addr, int kind)
		return rc;
	}

	m41t00_wq = create_singlethread_workqueue("m41t00");
	save_client = client;
	return 0;
}
@@ -206,7 +208,7 @@ m41t00_detach(struct i2c_client *client)

	if ((rc = i2c_detach_client(client)) == 0) {
		kfree(client);
		tasklet_kill(&m41t00_tasklet);
		destroy_workqueue(m41t00_wq);
	}
	return rc;
}
@@ -302,7 +302,7 @@ static void ib_cache_setup_one(struct ib_device *device)
		kmalloc(sizeof *device->cache.pkey_cache *
			(end_port(device) - start_port(device) + 1), GFP_KERNEL);
	device->cache.gid_cache =
		kmalloc(sizeof *device->cache.pkey_cache *
		kmalloc(sizeof *device->cache.gid_cache *
			(end_port(device) - start_port(device) + 1), GFP_KERNEL);

	if (!device->cache.pkey_cache || !device->cache.gid_cache) {
@@ -2311,6 +2311,7 @@ static void local_completions(void *data)
		local = list_entry(mad_agent_priv->local_list.next,
				   struct ib_mad_local_private,
				   completion_list);
		list_del(&local->completion_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		if (local->mad_priv) {
			recv_mad_agent = local->recv_mad_agent;
@@ -2362,7 +2363,6 @@ local_send_completion:
						   &mad_send_wc);

		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_del(&local->completion_list);
		atomic_dec(&mad_agent_priv->refcount);
		if (!recv)
			kmem_cache_free(ib_mad_cache, local->mad_priv);
@@ -45,6 +45,40 @@
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return  1;
	case IB_RATE_5_GBPS:   return  2;
	case IB_RATE_10_GBPS:  return  4;
	case IB_RATE_20_GBPS:  return  8;
	case IB_RATE_30_GBPS:  return 12;
	case IB_RATE_40_GBPS:  return 16;
	case IB_RATE_60_GBPS:  return 24;
	case IB_RATE_80_GBPS:  return 32;
	case IB_RATE_120_GBPS: return 48;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);

enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1:  return IB_RATE_2_5_GBPS;
	case 2:  return IB_RATE_5_GBPS;
	case 4:  return IB_RATE_10_GBPS;
	case 8:  return IB_RATE_20_GBPS;
	case 12: return IB_RATE_30_GBPS;
	case 16: return IB_RATE_40_GBPS;
	case 24: return IB_RATE_60_GBPS;
	case 32: return IB_RATE_80_GBPS;
	case 48: return IB_RATE_120_GBPS;
	default: return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);

/* Protection domains */

struct ib_pd *ib_alloc_pd(struct ib_device *device)
@@ -7,10 +7,11 @@ config INFINIBAND_MTHCA
	  ("Tavor") and the MT25208 PCI Express HCA ("Arbel").

config INFINIBAND_MTHCA_DEBUG
	bool "Verbose debugging output"
	bool "Verbose debugging output" if EMBEDDED
	depends on INFINIBAND_MTHCA
	default n
	default y
	---help---
	  This option causes the mthca driver produce a bunch of debug
	  messages. Select this is you are developing the driver or
	  trying to diagnose a problem.
	  This option causes debugging code to be compiled into the
	  mthca driver. The output can be turned on via the
	  debug_level module parameter (which can also be set after
	  the driver is loaded through sysfs).
@@ -1,7 +1,3 @@
ifdef CONFIG_INFINIBAND_MTHCA_DEBUG
EXTRA_CFLAGS += -DDEBUG
endif

obj-$(CONFIG_INFINIBAND_MTHCA) += ib_mthca.o

ib_mthca-y := mthca_main.o mthca_cmd.o mthca_profile.o mthca_reset.o \
@@ -42,6 +42,20 @@

#include "mthca_dev.h"

enum {
	MTHCA_RATE_TAVOR_FULL   = 0,
	MTHCA_RATE_TAVOR_1X     = 1,
	MTHCA_RATE_TAVOR_4X     = 2,
	MTHCA_RATE_TAVOR_1X_DDR = 3
};

enum {
	MTHCA_RATE_MEMFREE_FULL    = 0,
	MTHCA_RATE_MEMFREE_QUARTER = 1,
	MTHCA_RATE_MEMFREE_EIGHTH  = 2,
	MTHCA_RATE_MEMFREE_HALF    = 3
};

struct mthca_av {
	__be32 port_pd;
	u8     reserved1;
@@ -55,6 +69,90 @@ struct mthca_av {
	__be32 dgid[4];
};

static enum ib_rate memfree_rate_to_ib(u8 mthca_rate, u8 port_rate)
{
	switch (mthca_rate) {
	case MTHCA_RATE_MEMFREE_EIGHTH:
		return mult_to_ib_rate(port_rate >> 3);
	case MTHCA_RATE_MEMFREE_QUARTER:
		return mult_to_ib_rate(port_rate >> 2);
	case MTHCA_RATE_MEMFREE_HALF:
		return mult_to_ib_rate(port_rate >> 1);
	case MTHCA_RATE_MEMFREE_FULL:
	default:
		return mult_to_ib_rate(port_rate);
	}
}

static enum ib_rate tavor_rate_to_ib(u8 mthca_rate, u8 port_rate)
{
	switch (mthca_rate) {
	case MTHCA_RATE_TAVOR_1X:     return IB_RATE_2_5_GBPS;
	case MTHCA_RATE_TAVOR_1X_DDR: return IB_RATE_5_GBPS;
	case MTHCA_RATE_TAVOR_4X:     return IB_RATE_10_GBPS;
	default:		      return port_rate;
	}
}

enum ib_rate mthca_rate_to_ib(struct mthca_dev *dev, u8 mthca_rate, u8 port)
{
	if (mthca_is_memfree(dev)) {
		/* Handle old Arbel FW */
		if (dev->limits.stat_rate_support == 0x3 && mthca_rate)
			return IB_RATE_2_5_GBPS;

		return memfree_rate_to_ib(mthca_rate, dev->rate[port - 1]);
	} else
		return tavor_rate_to_ib(mthca_rate, dev->rate[port - 1]);
}

static u8 ib_rate_to_memfree(u8 req_rate, u8 cur_rate)
{
	if (cur_rate <= req_rate)
		return 0;

	/*
	 * Inter-packet delay (IPD) to get from rate X down to a rate
	 * no more than Y is (X - 1) / Y.
	 */
	switch ((cur_rate - 1) / req_rate) {
	case 0:	 return MTHCA_RATE_MEMFREE_FULL;
	case 1:	 return MTHCA_RATE_MEMFREE_HALF;
	case 2:	 /* fall through */
	case 3:	 return MTHCA_RATE_MEMFREE_QUARTER;
	default: return MTHCA_RATE_MEMFREE_EIGHTH;
	}
}

static u8 ib_rate_to_tavor(u8 static_rate)
{
	switch (static_rate) {
	case IB_RATE_2_5_GBPS: return MTHCA_RATE_TAVOR_1X;
	case IB_RATE_5_GBPS:   return MTHCA_RATE_TAVOR_1X_DDR;
	case IB_RATE_10_GBPS:  return MTHCA_RATE_TAVOR_4X;
	default:	       return MTHCA_RATE_TAVOR_FULL;
	}
}

u8 mthca_get_rate(struct mthca_dev *dev, int static_rate, u8 port)
{
	u8 rate;

	if (!static_rate || ib_rate_to_mult(static_rate) >= dev->rate[port - 1])
		return 0;

	if (mthca_is_memfree(dev))
		rate = ib_rate_to_memfree(ib_rate_to_mult(static_rate),
					  dev->rate[port - 1]);
	else
		rate = ib_rate_to_tavor(static_rate);

	if (!(dev->limits.stat_rate_support & (1 << rate)))
		rate = 1;

	return rate;
}

int mthca_create_ah(struct mthca_dev *dev,
		    struct mthca_pd *pd,
		    struct ib_ah_attr *ah_attr,
@@ -107,7 +205,7 @@ on_hca_fail:
	av->g_slid = ah_attr->src_path_bits;
	av->dlid   = cpu_to_be16(ah_attr->dlid);
	av->msg_sr = (3 << 4) | /* 2K message */
		ah_attr->static_rate;
		mthca_get_rate(dev, ah_attr->static_rate, ah_attr->port_num);
	av->sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
	if (ah_attr->ah_flags & IB_AH_GRH) {
		av->g_slid |= 0x80;
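To make the inter-packet delay formula in ib_rate_to_memfree() above
concrete, a small worked example with hypothetical numbers: on a port
running at rate multiplier 16 (40 Gb/s) with a requested static rate of
multiplier 4 (10 Gb/s),

	(cur_rate - 1) / req_rate = (16 - 1) / 4 = 3  ->  MTHCA_RATE_MEMFREE_QUARTER

i.e. the HCA throttles to a quarter of the port rate (16 / 4 = 4), the
coarsest setting that still does not exceed the requested rate.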
@ -965,6 +965,7 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
|
||||
u32 *outbox;
|
||||
u8 field;
|
||||
u16 size;
|
||||
u16 stat_rate;
|
||||
int err;
|
||||
|
||||
#define QUERY_DEV_LIM_OUT_SIZE 0x100
|
||||
@ -995,6 +996,7 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
|
||||
#define QUERY_DEV_LIM_MTU_WIDTH_OFFSET 0x36
|
||||
#define QUERY_DEV_LIM_VL_PORT_OFFSET 0x37
|
||||
#define QUERY_DEV_LIM_MAX_GID_OFFSET 0x3b
|
||||
#define QUERY_DEV_LIM_RATE_SUPPORT_OFFSET 0x3c
|
||||
#define QUERY_DEV_LIM_MAX_PKEY_OFFSET 0x3f
|
||||
#define QUERY_DEV_LIM_FLAGS_OFFSET 0x44
|
||||
#define QUERY_DEV_LIM_RSVD_UAR_OFFSET 0x48
|
||||
@ -1086,6 +1088,8 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
|
||||
dev_lim->num_ports = field & 0xf;
|
||||
MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_GID_OFFSET);
|
||||
dev_lim->max_gids = 1 << (field & 0xf);
|
||||
MTHCA_GET(stat_rate, outbox, QUERY_DEV_LIM_RATE_SUPPORT_OFFSET);
|
||||
dev_lim->stat_rate_support = stat_rate;
|
||||
MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_PKEY_OFFSET);
|
||||
dev_lim->max_pkeys = 1 << (field & 0xf);
|
||||
MTHCA_GET(dev_lim->flags, outbox, QUERY_DEV_LIM_FLAGS_OFFSET);
|
||||
|
@@ -146,6 +146,7 @@ struct mthca_dev_lim {
	int max_vl;
	int num_ports;
	int max_gids;
	u16 stat_rate_support;
	int max_pkeys;
	u32 flags;
	int reserved_uars;
@@ -151,6 +151,7 @@ struct mthca_limits {
	int reserved_qps;
	int num_srqs;
	int max_srq_wqes;
	int max_srq_sge;
	int reserved_srqs;
	int num_eecs;
	int reserved_eecs;
@@ -172,6 +173,7 @@ struct mthca_limits {
	int reserved_pds;
	u32 page_size_cap;
	u32 flags;
	u16 stat_rate_support;
	u8 port_width_cap;
};

@@ -353,10 +355,24 @@ struct mthca_dev {
	struct ib_mad_agent *send_agent[MTHCA_MAX_PORTS][2];
	struct ib_ah *sm_ah[MTHCA_MAX_PORTS];
	spinlock_t sm_lock;
	u8 rate[MTHCA_MAX_PORTS];
};

#define mthca_dbg(mdev, format, arg...) \
	dev_dbg(&mdev->pdev->dev, format, ## arg)
#ifdef CONFIG_INFINIBAND_MTHCA_DEBUG
extern int mthca_debug_level;

#define mthca_dbg(mdev, format, arg...) \
	do { \
		if (mthca_debug_level) \
			dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ## arg); \
	} while (0)

#else /* CONFIG_INFINIBAND_MTHCA_DEBUG */

#define mthca_dbg(mdev, format, arg...) do { (void) mdev; } while (0)

#endif /* CONFIG_INFINIBAND_MTHCA_DEBUG */

#define mthca_err(mdev, format, arg...) \
	dev_err(&mdev->pdev->dev, format, ## arg)
#define mthca_info(mdev, format, arg...) \
@@ -492,6 +508,7 @@ void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq);
int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		     enum ib_srq_attr_mask attr_mask);
int mthca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
int mthca_max_srq_sge(struct mthca_dev *dev);
void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
		     enum ib_event_type event_type);
void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr);
@@ -542,6 +559,8 @@ int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
		  struct ib_ud_header *header);
int mthca_ah_query(struct ib_ah *ibah, struct ib_ah_attr *attr);
int mthca_ah_grh_present(struct mthca_ah *ah);
u8 mthca_get_rate(struct mthca_dev *dev, int static_rate, u8 port);
enum ib_rate mthca_rate_to_ib(struct mthca_dev *dev, u8 mthca_rate, u8 port);

int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
@@ -49,6 +49,30 @@ enum {
	MTHCA_VENDOR_CLASS2 = 0xa
};

int mthca_update_rate(struct mthca_dev *dev, u8 port_num)
{
	struct ib_port_attr *tprops = NULL;
	int ret;

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		return -ENOMEM;

	ret = ib_query_port(&dev->ib_dev, port_num, tprops);
	if (ret) {
		printk(KERN_WARNING "ib_query_port failed (%d) for %s port %d\n",
		       ret, dev->ib_dev.name, port_num);
		goto out;
	}

	dev->rate[port_num - 1] = tprops->active_speed *
				  ib_width_enum_to_int(tprops->active_width);

out:
	kfree(tprops);
	return ret;
}

static void update_sm_ah(struct mthca_dev *dev,
			 u8 port_num, u16 lid, u8 sl)
{
@@ -90,6 +114,7 @@ static void smp_snoop(struct ib_device *ibdev,
	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    mad->mad_hdr.method == IB_MGMT_METHOD_SET) {
		if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) {
			mthca_update_rate(to_mdev(ibdev), port_num);
			update_sm_ah(to_mdev(ibdev), port_num,
				     be16_to_cpup((__be16 *) (mad->data + 58)),
				     (*(u8 *) (mad->data + 76)) & 0xf);
@@ -246,6 +271,7 @@ int mthca_create_agents(struct mthca_dev *dev)
{
	struct ib_mad_agent *agent;
	int p, q;
	int ret;

	spin_lock_init(&dev->sm_lock);

@@ -255,11 +281,23 @@ int mthca_create_agents(struct mthca_dev *dev)
					      q ? IB_QPT_GSI : IB_QPT_SMI,
					      NULL, 0, send_handler,
					      NULL, NULL);
		if (IS_ERR(agent))
		if (IS_ERR(agent)) {
			ret = PTR_ERR(agent);
			goto err;
		}
		dev->send_agent[p][q] = agent;
	}

	for (p = 1; p <= dev->limits.num_ports; ++p) {
		ret = mthca_update_rate(dev, p);
		if (ret) {
			mthca_err(dev, "Failed to obtain port %d rate."
				  " aborting.\n", p);
			goto err;
		}
	}

	return 0;

err:
@@ -268,7 +306,7 @@ err:
		if (dev->send_agent[p][q])
			ib_unregister_mad_agent(dev->send_agent[p][q]);

	return PTR_ERR(agent);
	return ret;
}

void __devexit mthca_free_agents(struct mthca_dev *dev)
@@ -52,6 +52,14 @@ MODULE_DESCRIPTION("Mellanox InfiniBand HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

#ifdef CONFIG_INFINIBAND_MTHCA_DEBUG

int mthca_debug_level = 0;
module_param_named(debug_level, mthca_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_INFINIBAND_MTHCA_DEBUG */

#ifdef CONFIG_PCI_MSI

static int msi_x = 0;
@@ -69,6 +77,10 @@ MODULE_PARM_DESC(msi, "attempt to use MSI if nonzero");

#endif /* CONFIG_PCI_MSI */

static int tune_pci = 0;
module_param(tune_pci, int, 0444);
MODULE_PARM_DESC(tune_pci, "increase PCI burst from the default set by BIOS if nonzero");

static const char mthca_version[] __devinitdata =
	DRV_NAME ": Mellanox InfiniBand HCA driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";
@@ -90,6 +102,9 @@ static int __devinit mthca_tune_pci(struct mthca_dev *mdev)
	int cap;
	u16 val;

	if (!tune_pci)
		return 0;

	/* First try to max out Read Byte Count */
	cap = pci_find_capability(mdev->pdev, PCI_CAP_ID_PCIX);
	if (cap) {
@@ -176,6 +191,7 @@ static int __devinit mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim
	mdev->limits.reserved_srqs = dev_lim->reserved_srqs;
	mdev->limits.reserved_eecs = dev_lim->reserved_eecs;
	mdev->limits.max_desc_sz = dev_lim->max_desc_sz;
	mdev->limits.max_srq_sge = mthca_max_srq_sge(mdev);
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
@@ -191,6 +207,18 @@ static int __devinit mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim
	mdev->limits.port_width_cap = dev_lim->max_port_width;
	mdev->limits.page_size_cap = ~(u32) (dev_lim->min_page_sz - 1);
	mdev->limits.flags = dev_lim->flags;
	/*
	 * For old FW that doesn't return static rate support, use a
	 * value of 0x3 (only static rate values of 0 or 1 are handled),
	 * except on Sinai, where even old FW can handle static rate
	 * values of 2 and 3.
	 */
	if (dev_lim->stat_rate_support)
		mdev->limits.stat_rate_support = dev_lim->stat_rate_support;
	else if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
		mdev->limits.stat_rate_support = 0xf;
	else
		mdev->limits.stat_rate_support = 0x3;

	/* IB_DEVICE_RESIZE_MAX_WR not supported by driver.
	   May be doable since hardware supports it for SRQ.
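Read as a bitmask over static rate values, the fallbacks above mean 0x3 permits rate values 0 and 1 only, while 0xf (new FW, or old FW on Sinai) permits values 0 through 3. A small sketch of that interpretation (the helper is illustrative, not from the driver):

	/* Illustrative only, assuming the bitmask reading above. */
	static inline int rate_value_supported(u8 stat_rate_support, int rate)
	{
		return rate >= 0 && rate <= 3 && (stat_rate_support & (1 << rate));
	}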
@@ -106,7 +106,7 @@ static int mthca_query_device(struct ib_device *ibdev,
	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
	props->max_srq = mdev->limits.num_srqs - mdev->limits.reserved_srqs;
	props->max_srq_wr = mdev->limits.max_srq_wqes;
	props->max_srq_sge = mdev->limits.max_sg;
	props->max_srq_sge = mdev->limits.max_srq_sge;
	props->local_ca_ack_delay = mdev->limits.local_ca_ack_delay;
	props->atomic_cap = mdev->limits.flags & DEV_LIM_FLAG_ATOMIC ?
				IB_ATOMIC_HCA : IB_ATOMIC_NONE;
@@ -257,6 +257,8 @@ struct mthca_qp {
	atomic_t refcount;
	u32 qpn;
	int is_direct;
	u8 port; /* for SQP and memfree use only */
	u8 alt_port; /* for memfree use only */
	u8 transport;
	u8 state;
	u8 atomic_rd_en;
@@ -278,7 +280,6 @@ struct mthca_qp {

struct mthca_sqp {
	struct mthca_qp qp;
	int port;
	int pkey_index;
	u32 qkey;
	u32 send_psn;
@@ -248,6 +248,9 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
		return;
	}

	if (event_type == IB_EVENT_PATH_MIG)
		qp->port = qp->alt_port;

	event.device = &dev->ib_dev;
	event.event = event_type;
	event.element.qp = &qp->ibqp;
@@ -392,10 +395,16 @@ static void to_ib_ah_attr(struct mthca_dev *dev, struct ib_ah_attr *ib_ah_attr,
{
	memset(ib_ah_attr, 0, sizeof *path);
	ib_ah_attr->port_num = (be32_to_cpu(path->port_pkey) >> 24) & 0x3;

	if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->limits.num_ports)
		return;

	ib_ah_attr->dlid = be16_to_cpu(path->rlid);
	ib_ah_attr->sl = be32_to_cpu(path->sl_tclass_flowlabel) >> 28;
	ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f;
	ib_ah_attr->static_rate = path->static_rate & 0x7;
	ib_ah_attr->static_rate = mthca_rate_to_ib(dev,
						   path->static_rate & 0x7,
						   ib_ah_attr->port_num);
	ib_ah_attr->ah_flags = (path->g_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
	if (ib_ah_attr->ah_flags) {
		ib_ah_attr->grh.sgid_index = path->mgid_index & (dev->limits.gid_table_len - 1);
@@ -455,8 +464,10 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
	qp_attr->cap.max_recv_sge = qp->rq.max_gs;
	qp_attr->cap.max_inline_data = qp->max_inline_data;

	to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
	to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
	if (qp->transport == RC || qp->transport == UC) {
		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
	}

	qp_attr->pkey_index = be32_to_cpu(context->pri_path.port_pkey) & 0x7f;
	qp_attr->alt_pkey_index = be32_to_cpu(context->alt_path.port_pkey) & 0x7f;
@@ -484,11 +495,11 @@ out:
}

static int mthca_path_set(struct mthca_dev *dev, struct ib_ah_attr *ah,
			  struct mthca_qp_path *path)
			  struct mthca_qp_path *path, u8 port)
{
	path->g_mylmc = ah->src_path_bits & 0x7f;
	path->rlid = cpu_to_be16(ah->dlid);
	path->static_rate = !!ah->static_rate;
	path->static_rate = mthca_get_rate(dev, ah->static_rate, port);

	if (ah->ah_flags & IB_AH_GRH) {
		if (ah->grh.sgid_index >= dev->limits.gid_table_len) {
@@ -634,7 +645,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)

	if (qp->transport == MLX)
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(to_msqp(qp)->port << 24);
			cpu_to_be32(qp->port << 24);
	else {
		if (attr_mask & IB_QP_PORT) {
			qp_context->pri_path.port_pkey |=
@@ -657,7 +668,8 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
	}

	if (attr_mask & IB_QP_AV) {
		if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path))
		if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path,
				   attr_mask & IB_QP_PORT ? attr->port_num : qp->port))
			return -EINVAL;

		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
@@ -681,7 +693,8 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
			return -EINVAL;
		}

		if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path))
		if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path,
				   attr->alt_ah_attr.port_num))
			return -EINVAL;

		qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
@@ -791,6 +804,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
		qp->atomic_rd_en = attr->qp_access_flags;
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT)
		qp->port = attr->port_num;
	if (attr_mask & IB_QP_ALT_PATH)
		qp->alt_port = attr->alt_port_num;

	if (is_sqp(dev, qp))
		store_attrs(to_msqp(qp), attr, attr_mask);
@@ -802,13 +819,13 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
	if (is_qp0(dev, qp)) {
		if (cur_state != IB_QPS_RTR &&
		    new_state == IB_QPS_RTR)
			init_port(dev, to_msqp(qp)->port);
			init_port(dev, qp->port);

		if (cur_state != IB_QPS_RESET &&
		    cur_state != IB_QPS_ERR &&
		    (new_state == IB_QPS_RESET ||
		     new_state == IB_QPS_ERR))
			mthca_CLOSE_IB(dev, to_msqp(qp)->port, &status);
			mthca_CLOSE_IB(dev, qp->port, &status);
	}

	/*
@@ -1212,6 +1229,9 @@ int mthca_alloc_qp(struct mthca_dev *dev,
	if (qp->qpn == -1)
		return -ENOMEM;

	/* initialize port to zero for error-catching. */
	qp->port = 0;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, qp);
	if (err) {
@@ -1261,7 +1281,7 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
	if (err)
		goto err_out;

	sqp->port = port;
	sqp->qp.port = port;
	sqp->qp.qpn = mqpn;
	sqp->qp.transport = MLX;

@@ -1404,10 +1424,10 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
	sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
	if (!sqp->qp.ibqp.qp_num)
		ib_get_cached_pkey(&dev->ib_dev, sqp->port,
		ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
				   sqp->pkey_index, &pkey);
	else
		ib_get_cached_pkey(&dev->ib_dev, sqp->port,
		ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
				   wr->wr.ud.pkey_index, &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
@@ -192,7 +192,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,

	/* Sanity check SRQ size before proceeding */
	if (attr->max_wr > dev->limits.max_srq_wqes ||
	    attr->max_sge > dev->limits.max_sg)
	    attr->max_sge > dev->limits.max_srq_sge)
		return -EINVAL;

	srq->max = attr->max_wr;
@@ -660,6 +660,31 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
	return err;
}

int mthca_max_srq_sge(struct mthca_dev *dev)
{
	if (mthca_is_memfree(dev))
		return dev->limits.max_sg;

	/*
	 * SRQ allocations are based on powers of 2 for Tavor,
	 * (although they only need to be multiples of 16 bytes).
	 *
	 * Therefore, we need to base the max number of sg entries on
	 * the largest power of 2 descriptor size that is <= to the
	 * actual max WQE descriptor size, rather than return the
	 * max_sg value given by the firmware (which is based on WQE
	 * sizes as multiples of 16, not powers of 2).
	 *
	 * If SRQ implementation is changed for Tavor to be based on
	 * multiples of 16, the calculation below can be deleted and
	 * the FW max_sg value returned.
	 */
	return min_t(int, dev->limits.max_sg,
		     ((1 << (fls(dev->limits.max_desc_sz) - 1)) -
		      sizeof (struct mthca_next_seg)) /
		      sizeof (struct mthca_data_seg));
}

int __devinit mthca_init_srq_table(struct mthca_dev *dev)
{
	int err;
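To make the Tavor branch concrete: fls() returns the position of the highest set bit, so 1 << (fls(x) - 1) rounds x down to a power of two. A worked example with hypothetical sizes (max_desc_sz = 1000, 16-byte next and data segments):

	/* 1 << (fls(1000) - 1) = 512, the largest power of 2 <= 1000      */
	/* (512 - sizeof(struct mthca_next_seg)) / sizeof(struct mthca_data_seg) */
	/*   = (512 - 16) / 16 = 31 gather entries at most,                */
	/* further capped by dev->limits.max_sg if that is smaller.        */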
@@ -10,8 +10,9 @@ config INFINIBAND_IPOIB
	  group: <http://www.ietf.org/html.charters/ipoib-charter.html>.

config INFINIBAND_IPOIB_DEBUG
	bool "IP-over-InfiniBand debugging"
	bool "IP-over-InfiniBand debugging" if EMBEDDED
	depends on INFINIBAND_IPOIB
	default y
	---help---
	  This option causes debugging code to be compiled into the
	  IPoIB driver.  The output can be turned on via the
@@ -65,6 +65,8 @@ enum {

	IPOIB_RX_RING_SIZE = 128,
	IPOIB_TX_RING_SIZE = 64,
	IPOIB_MAX_QUEUE_SIZE = 8192,
	IPOIB_MIN_QUEUE_SIZE = 2,

	IPOIB_NUM_WC = 4,

@@ -230,6 +232,9 @@ static inline struct ipoib_neigh **to_ipoib_neigh(struct neighbour *neigh)
				     INFINIBAND_ALEN, sizeof(void *));
}

struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neigh);
void ipoib_neigh_free(struct ipoib_neigh *neigh);

extern struct workqueue_struct *ipoib_workqueue;

/* functions */
@@ -329,6 +334,8 @@ static inline void ipoib_unregister_debugfs(void) { }
#define ipoib_warn(priv, format, arg...) \
	ipoib_printk(KERN_WARNING, priv, format , ## arg)

extern int ipoib_sendq_size;
extern int ipoib_recvq_size;

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
extern int ipoib_debug_level;
@@ -213,7 +213,7 @@ static int ipoib_path_seq_show(struct seq_file *file, void *iter_ptr)
		   gid_buf, path.pathrec.dlid ? "yes" : "no");

	if (path.pathrec.dlid) {
		rate = ib_sa_rate_enum_to_int(path.pathrec.rate) * 25;
		rate = ib_rate_to_mult(path.pathrec.rate) * 25;

		seq_printf(file,
			   "  DLID:     0x%04x\n"
@@ -161,7 +161,7 @@ static int ipoib_ib_post_receives(struct net_device *dev)
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < IPOIB_RX_RING_SIZE; ++i) {
	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (ipoib_alloc_rx_skb(dev, i)) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			return -ENOMEM;
@@ -187,7 +187,7 @@ static void ipoib_ib_handle_wc(struct net_device *dev,
	if (wr_id & IPOIB_OP_RECV) {
		wr_id &= ~IPOIB_OP_RECV;

		if (wr_id < IPOIB_RX_RING_SIZE) {
		if (wr_id < ipoib_recvq_size) {
			struct sk_buff *skb = priv->rx_ring[wr_id].skb;
			dma_addr_t addr = priv->rx_ring[wr_id].mapping;

@@ -252,9 +252,9 @@ static void ipoib_ib_handle_wc(struct net_device *dev,
		struct ipoib_tx_buf *tx_req;
		unsigned long flags;

		if (wr_id >= IPOIB_TX_RING_SIZE) {
		if (wr_id >= ipoib_sendq_size) {
			ipoib_warn(priv, "completion event with wrid %d (> %d)\n",
				   wr_id, IPOIB_TX_RING_SIZE);
				   wr_id, ipoib_sendq_size);
			return;
		}

@@ -275,7 +275,7 @@ static void ipoib_ib_handle_wc(struct net_device *dev,
		spin_lock_irqsave(&priv->tx_lock, flags);
		++priv->tx_tail;
		if (netif_queue_stopped(dev) &&
		    priv->tx_head - priv->tx_tail <= IPOIB_TX_RING_SIZE / 2)
		    priv->tx_head - priv->tx_tail <= ipoib_sendq_size >> 1)
			netif_wake_queue(dev);
		spin_unlock_irqrestore(&priv->tx_lock, flags);

@@ -344,13 +344,13 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &priv->tx_ring[priv->tx_head & (IPOIB_TX_RING_SIZE - 1)];
	tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
	tx_req->skb = skb;
	addr = dma_map_single(priv->ca->dma_device, skb->data, skb->len,
			      DMA_TO_DEVICE);
	pci_unmap_addr_set(tx_req, mapping, addr);

	if (unlikely(post_send(priv, priv->tx_head & (IPOIB_TX_RING_SIZE - 1),
	if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
			       address->ah, qpn, addr, skb->len))) {
		ipoib_warn(priv, "post_send failed\n");
		++priv->stats.tx_errors;
@@ -363,7 +363,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
		address->last_send = priv->tx_head;
		++priv->tx_head;

		if (priv->tx_head - priv->tx_tail == IPOIB_TX_RING_SIZE) {
		if (priv->tx_head - priv->tx_tail == ipoib_sendq_size) {
			ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
			netif_stop_queue(dev);
		}
@@ -488,7 +488,7 @@ static int recvs_pending(struct net_device *dev)
	int pending = 0;
	int i;

	for (i = 0; i < IPOIB_RX_RING_SIZE; ++i)
	for (i = 0; i < ipoib_recvq_size; ++i)
		if (priv->rx_ring[i].skb)
			++pending;

@@ -527,7 +527,7 @@ int ipoib_ib_dev_stop(struct net_device *dev)
		 */
		while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
			tx_req = &priv->tx_ring[priv->tx_tail &
						(IPOIB_TX_RING_SIZE - 1)];
						(ipoib_sendq_size - 1)];
			dma_unmap_single(priv->ca->dma_device,
					 pci_unmap_addr(tx_req, mapping),
					 tx_req->skb->len,
@@ -536,7 +536,7 @@ int ipoib_ib_dev_stop(struct net_device *dev)
			++priv->tx_tail;
		}

		for (i = 0; i < IPOIB_RX_RING_SIZE; ++i)
		for (i = 0; i < ipoib_recvq_size; ++i)
			if (priv->rx_ring[i].skb) {
				dma_unmap_single(priv->ca->dma_device,
						 pci_unmap_addr(&priv->rx_ring[i],
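Note that the tx_head & (ipoib_sendq_size - 1) indexing above is only correct because ipoib_sendq_size is forced to a power of two at module load (see the init code in ipoib_main.c below). A minimal sketch of the invariant:

	/* With size a power of two, masking equals modulo:              */
	/*   head & (size - 1) == head % size                            */
	/* e.g. size = 64: head = 65 -> slot 1, head = 128 -> slot 0.    */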
@@ -41,6 +41,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kernel.h>

#include <linux/if_arp.h>	/* For ARPHRD_xxx */

@@ -53,6 +54,14 @@ MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");

int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;

module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level;

@@ -252,8 +261,8 @@ static void path_free(struct net_device *dev, struct ipoib_path *path)
		 */
		if (neigh->ah)
			ipoib_put_ah(neigh->ah);
		*to_ipoib_neigh(neigh->neighbour) = NULL;
		kfree(neigh);
		ipoib_neigh_free(neigh);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
@@ -327,9 +336,8 @@ void ipoib_flush_paths(struct net_device *dev)
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path, *tp;
	LIST_HEAD(remove_list);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	spin_lock_irq(&priv->lock);

	list_splice(&priv->path_list, &remove_list);
	INIT_LIST_HEAD(&priv->path_list);
@@ -337,14 +345,15 @@ void ipoib_flush_paths(struct net_device *dev)
	list_for_each_entry(path, &remove_list, list)
		rb_erase(&path->rb_node, &priv->path_tree);

	spin_unlock_irqrestore(&priv->lock, flags);

	list_for_each_entry_safe(path, tp, &remove_list, list) {
		if (path->query)
			ib_sa_cancel_query(path->query_id, path->query);
		spin_unlock_irq(&priv->lock);
		wait_for_completion(&path->done);
		path_free(dev, path);
		spin_lock_irq(&priv->lock);
	}
	spin_unlock_irq(&priv->lock);
}

static void path_rec_completion(int status,
@@ -373,16 +382,9 @@ static void path_rec_completion(int status,
		struct ib_ah_attr av = {
			.dlid	       = be16_to_cpu(pathrec->dlid),
			.sl	       = pathrec->sl,
			.port_num      = priv->port
			.port_num      = priv->port,
			.static_rate   = pathrec->rate
		};
		int path_rate = ib_sa_rate_enum_to_int(pathrec->rate);

		if (path_rate > 0 && priv->local_rate > path_rate)
			av.static_rate = (priv->local_rate - 1) / path_rate;

		ipoib_dbg(priv, "static_rate %d for local port %dX, path %dX\n",
			  av.static_rate, priv->local_rate,
			  ib_sa_rate_enum_to_int(pathrec->rate));

		ah = ipoib_create_ah(dev, priv->pd, &av);
	}
@@ -481,7 +483,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
	struct ipoib_path *path;
	struct ipoib_neigh *neigh;

	neigh = kmalloc(sizeof *neigh, GFP_ATOMIC);
	neigh = ipoib_neigh_alloc(skb->dst->neighbour);
	if (!neigh) {
		++priv->stats.tx_dropped;
		dev_kfree_skb_any(skb);
@@ -489,8 +491,6 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
	}

	skb_queue_head_init(&neigh->queue);
	neigh->neighbour = skb->dst->neighbour;
	*to_ipoib_neigh(skb->dst->neighbour) = neigh;

	/*
	 * We can only be called from ipoib_start_xmit, so we're
@@ -503,7 +503,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
		path = path_rec_create(dev,
				       (union ib_gid *) (skb->dst->neighbour->ha + 4));
		if (!path)
			goto err;
			goto err_path;

		__path_add(dev, path);
	}
@@ -521,17 +521,17 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
		__skb_queue_tail(&neigh->queue, skb);

		if (!path->query && path_rec_start(dev, path))
			goto err;
			goto err_list;
	}

	spin_unlock(&priv->lock);
	return;

err:
	*to_ipoib_neigh(skb->dst->neighbour) = NULL;
err_list:
	list_del(&neigh->list);
	kfree(neigh);

err_path:
	ipoib_neigh_free(neigh);
	++priv->stats.tx_dropped;
	dev_kfree_skb_any(skb);

@@ -763,8 +763,7 @@ static void ipoib_neigh_destructor(struct neighbour *n)
		if (neigh->ah)
			ah = neigh->ah;
		list_del(&neigh->list);
		*to_ipoib_neigh(n) = NULL;
		kfree(neigh);
		ipoib_neigh_free(neigh);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
@@ -773,6 +772,26 @@ static void ipoib_neigh_destructor(struct neighbour *n)
		ipoib_put_ah(ah);
}

struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour)
{
	struct ipoib_neigh *neigh;

	neigh = kmalloc(sizeof *neigh, GFP_ATOMIC);
	if (!neigh)
		return NULL;

	neigh->neighbour = neighbour;
	*to_ipoib_neigh(neighbour) = neigh;

	return neigh;
}

void ipoib_neigh_free(struct ipoib_neigh *neigh)
{
	*to_ipoib_neigh(neigh->neighbour) = NULL;
	kfree(neigh);
}

static int ipoib_neigh_setup_dev(struct net_device *dev, struct neigh_parms *parms)
{
	parms->neigh_destructor = ipoib_neigh_destructor;
@@ -785,20 +804,19 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* Allocate RX/TX "rings" to hold queued skbs */

	priv->rx_ring = kzalloc(IPOIB_RX_RING_SIZE * sizeof (struct ipoib_rx_buf),
	priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
				GFP_KERNEL);
	if (!priv->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
		       ca->name, IPOIB_RX_RING_SIZE);
		       ca->name, ipoib_recvq_size);
		goto out;
	}

	priv->tx_ring = kzalloc(IPOIB_TX_RING_SIZE * sizeof (struct ipoib_tx_buf),
	priv->tx_ring = kzalloc(ipoib_sendq_size * sizeof *priv->tx_ring,
				GFP_KERNEL);
	if (!priv->tx_ring) {
		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
		       ca->name, IPOIB_TX_RING_SIZE);
		       ca->name, ipoib_sendq_size);
		goto out_rx_ring_cleanup;
	}

@@ -866,7 +884,7 @@ static void ipoib_setup(struct net_device *dev)
	dev->hard_header_len	 = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
	dev->addr_len		 = INFINIBAND_ALEN;
	dev->type		 = ARPHRD_INFINIBAND;
	dev->tx_queue_len	 = IPOIB_TX_RING_SIZE * 2;
	dev->tx_queue_len	 = ipoib_sendq_size * 2;
	dev->features		 = NETIF_F_VLAN_CHALLENGED | NETIF_F_LLTX;

	/* MTU will be reset when mcast join happens */
@@ -1118,6 +1136,14 @@ static int __init ipoib_init_module(void)
{
	int ret;

	ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
	ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);

	ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
	ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_sendq_size = max(ipoib_sendq_size, IPOIB_MIN_QUEUE_SIZE);

	ret = ipoib_register_debugfs();
	if (ret)
		return ret;
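For illustration, the normalization at module load first rounds a user-supplied queue size up to a power of two and then clamps it (the input values below are hypothetical):

	/* send_queue_size=100    -> roundup_pow_of_two(100) = 128          */
	/* send_queue_size=100000 -> capped at IPOIB_MAX_QUEUE_SIZE (8192)  */
	/* send_queue_size=1      -> raised to IPOIB_MIN_QUEUE_SIZE (2)     */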
@@ -114,8 +114,7 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
		 */
		if (neigh->ah)
			ipoib_put_ah(neigh->ah);
		*to_ipoib_neigh(neigh->neighbour) = NULL;
		kfree(neigh);
		ipoib_neigh_free(neigh);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
@@ -251,6 +250,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
			.port_num      = priv->port,
			.sl	       = mcast->mcmember.sl,
			.ah_flags      = IB_AH_GRH,
			.static_rate   = mcast->mcmember.rate,
			.grh	       = {
				.flow_label    = be32_to_cpu(mcast->mcmember.flow_label),
				.hop_limit     = mcast->mcmember.hop_limit,
@@ -258,17 +258,8 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
				.traffic_class = mcast->mcmember.traffic_class
			}
		};
		int path_rate = ib_sa_rate_enum_to_int(mcast->mcmember.rate);

		av.grh.dgid = mcast->mcmember.mgid;

		if (path_rate > 0 && priv->local_rate > path_rate)
			av.static_rate = (priv->local_rate - 1) / path_rate;

		ipoib_dbg_mcast(priv, "static_rate %d for local port %dX, mcmember %dX\n",
				av.static_rate, priv->local_rate,
				ib_sa_rate_enum_to_int(mcast->mcmember.rate));

		ah = ipoib_create_ah(dev, priv->pd, &av);
		if (!ah) {
			ipoib_warn(priv, "ib_address_create failed\n");
@@ -618,6 +609,22 @@ int ipoib_mcast_start_thread(struct net_device *dev)
	return 0;
}

static void wait_for_mcast_join(struct ipoib_dev_priv *priv,
				struct ipoib_mcast *mcast)
{
	spin_lock_irq(&priv->lock);
	if (mcast && mcast->query) {
		ib_sa_cancel_query(mcast->query_id, mcast->query);
		mcast->query = NULL;
		spin_unlock_irq(&priv->lock);
		ipoib_dbg_mcast(priv, "waiting for MGID " IPOIB_GID_FMT "\n",
				IPOIB_GID_ARG(mcast->mcmember.mgid));
		wait_for_completion(&mcast->done);
	}
	else
		spin_unlock_irq(&priv->lock);
}

int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -637,28 +644,10 @@ int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
	if (flush)
		flush_workqueue(ipoib_workqueue);

	spin_lock_irq(&priv->lock);
	if (priv->broadcast && priv->broadcast->query) {
		ib_sa_cancel_query(priv->broadcast->query_id, priv->broadcast->query);
		priv->broadcast->query = NULL;
		spin_unlock_irq(&priv->lock);
		ipoib_dbg_mcast(priv, "waiting for bcast\n");
		wait_for_completion(&priv->broadcast->done);
	} else
		spin_unlock_irq(&priv->lock);
	wait_for_mcast_join(priv, priv->broadcast);

	list_for_each_entry(mcast, &priv->multicast_list, list) {
		spin_lock_irq(&priv->lock);
		if (mcast->query) {
			ib_sa_cancel_query(mcast->query_id, mcast->query);
			mcast->query = NULL;
			spin_unlock_irq(&priv->lock);
			ipoib_dbg_mcast(priv, "waiting for MGID " IPOIB_GID_FMT "\n",
					IPOIB_GID_ARG(mcast->mcmember.mgid));
			wait_for_completion(&mcast->done);
		} else
			spin_unlock_irq(&priv->lock);
	}
	list_for_each_entry(mcast, &priv->multicast_list, list)
		wait_for_mcast_join(priv, mcast);

	return 0;
}
@@ -772,13 +761,11 @@ out:
		if (skb->dst            &&
		    skb->dst->neighbour &&
		    !*to_ipoib_neigh(skb->dst->neighbour)) {
			struct ipoib_neigh *neigh = kmalloc(sizeof *neigh, GFP_ATOMIC);
			struct ipoib_neigh *neigh = ipoib_neigh_alloc(skb->dst->neighbour);

			if (neigh) {
				kref_get(&mcast->ah->ref);
				neigh->ah	= mcast->ah;
				neigh->neighbour = skb->dst->neighbour;
				*to_ipoib_neigh(skb->dst->neighbour) = neigh;
				list_add_tail(&neigh->list, &mcast->neigh_list);
			}
		}
@@ -913,6 +900,7 @@ void ipoib_mcast_restart_task(void *dev_ptr)

	/* We have to cancel outside of the spinlock */
	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
		wait_for_mcast_join(priv, mcast);
		ipoib_mcast_leave(mcast->dev, mcast);
		ipoib_mcast_free(mcast);
	}
@@ -159,8 +159,8 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_init_attr init_attr = {
		.cap = {
			.max_send_wr  = IPOIB_TX_RING_SIZE,
			.max_recv_wr  = IPOIB_RX_RING_SIZE,
			.max_send_wr  = ipoib_sendq_size,
			.max_recv_wr  = ipoib_recvq_size,
			.max_send_sge = 1,
			.max_recv_sge = 1
		},
@@ -175,7 +175,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
	}

	priv->cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, dev,
				IPOIB_TX_RING_SIZE + IPOIB_RX_RING_SIZE + 1);
				ipoib_sendq_size + ipoib_recvq_size + 1);
	if (IS_ERR(priv->cq)) {
		printk(KERN_WARNING "%s: failed to create CQ\n", ca->name);
		goto out_free_pd;
@@ -1434,6 +1434,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
			p = match_strdup(args);
			if (strlen(p) != 32) {
				printk(KERN_WARNING PFX "bad dest GID parameter '%s'\n", p);
				kfree(p);
				goto out;
			}

@@ -109,7 +109,7 @@ isdn_ppp_free(isdn_net_local * lp)
{
	struct ippp_struct *is;

	if (lp->ppp_slot < 0 || lp->ppp_slot > ISDN_MAX_CHANNELS) {
	if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
		printk(KERN_ERR "%s: ppp_slot(%d) out of range\n",
			__FUNCTION__, lp->ppp_slot);
		return 0;
@@ -126,7 +126,7 @@ isdn_ppp_free(isdn_net_local * lp)
	lp->netdev->pb->ref_ct--;
	spin_unlock(&lp->netdev->pb->lock);
#endif /* CONFIG_ISDN_MPP */
	if (lp->ppp_slot < 0 || lp->ppp_slot > ISDN_MAX_CHANNELS) {
	if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
		printk(KERN_ERR "%s: ppp_slot(%d) now invalid\n",
			__FUNCTION__, lp->ppp_slot);
		return 0;
@@ -279,7 +279,7 @@ isdn_ppp_open(int min, struct file *file)
	int slot;
	struct ippp_struct *is;

	if (min < 0 || min > ISDN_MAX_CHANNELS)
	if (min < 0 || min >= ISDN_MAX_CHANNELS)
		return -ENODEV;

	slot = isdn_ppp_get_slot();
@@ -1042,7 +1042,7 @@ isdn_ppp_push_higher(isdn_net_dev * net_dev, isdn_net_local * lp, struct sk_buff
	if (lp->master) { // FIXME?
		mlp = (isdn_net_local *) lp->master->priv;
		slot = mlp->ppp_slot;
		if (slot < 0 || slot > ISDN_MAX_CHANNELS) {
		if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
			printk(KERN_ERR "isdn_ppp_push_higher: master->ppp_slot(%d)\n",
				lp->ppp_slot);
			goto drop_packet;
@@ -1264,7 +1264,7 @@ isdn_ppp_xmit(struct sk_buff *skb, struct net_device *netdev)
	/* we have our lp locked from now on */

	slot = lp->ppp_slot;
	if (slot < 0 || slot > ISDN_MAX_CHANNELS) {
	if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
		printk(KERN_ERR "isdn_ppp_xmit: lp->ppp_slot(%d)\n",
			lp->ppp_slot);
		kfree_skb(skb);
@@ -1603,7 +1603,7 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
	mp = net_dev->pb;
	stats = &mp->stats;
	slot = lp->ppp_slot;
	if (slot < 0 || slot > ISDN_MAX_CHANNELS) {
	if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
		printk(KERN_ERR "%s: lp->ppp_slot(%d)\n",
			__FUNCTION__, lp->ppp_slot);
		stats->frame_drops++;
@@ -1640,7 +1640,7 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
	is->last_link_seqno = minseq = newseq;
	for (lpq = net_dev->queue;;) {
		slot = lpq->ppp_slot;
		if (slot < 0 || slot > ISDN_MAX_CHANNELS) {
		if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
			printk(KERN_ERR "%s: lpq->ppp_slot(%d)\n",
				__FUNCTION__, lpq->ppp_slot);
		} else {
@@ -2648,7 +2648,7 @@ static void isdn_ppp_receive_ccp(isdn_net_dev *net_dev, isdn_net_local *lp,

	printk(KERN_DEBUG "Received CCP frame from peer slot(%d)\n",
		lp->ppp_slot);
	if (lp->ppp_slot < 0 || lp->ppp_slot > ISDN_MAX_CHANNELS) {
	if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
		printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n",
			__FUNCTION__, lp->ppp_slot);
		return;
@@ -2658,7 +2658,7 @@ static void isdn_ppp_receive_ccp(isdn_net_dev *net_dev, isdn_net_local *lp,

	if(lp->master) {
		int slot = ((isdn_net_local *) (lp->master->priv))->ppp_slot;
		if (slot < 0 || slot > ISDN_MAX_CHANNELS) {
		if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
			printk(KERN_ERR "%s: slot(%d) out of range\n",
				__FUNCTION__, slot);
			return;
@@ -2845,7 +2845,7 @@ static void isdn_ppp_send_ccp(isdn_net_dev *net_dev, isdn_net_local *lp, struct

	if (lp->master) {
		slot = ((isdn_net_local *) (lp->master->priv))->ppp_slot;
		if (slot < 0 || slot > ISDN_MAX_CHANNELS) {
		if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
			printk(KERN_ERR "%s: slot(%d) out of range\n",
				__FUNCTION__, slot);
			return;
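All of the isdn_ppp.c hunks above fix the same off-by-one: ppp_slot indexes an array with ISDN_MAX_CHANNELS entries, so the last valid index is ISDN_MAX_CHANNELS - 1 and the upper bound must be checked with >=. A minimal sketch of the pattern being fixed (array and value are illustrative):

	static struct ippp_struct *ippp_table[ISDN_MAX_CHANNELS];

	/* '>' let slot == ISDN_MAX_CHANNELS through, reading one       */
	/* element past the end of ippp_table[]; '>=' rejects it.       */
	static int slot_valid(int slot)
	{
		return slot >= 0 && slot < ISDN_MAX_CHANNELS;
	}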
@@ -163,6 +163,7 @@ void md_new_event(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
	sysfs_notify(&mddev->kobj, NULL, "sync_action");
}
EXPORT_SYMBOL_GPL(md_new_event);

@@ -366,7 +366,15 @@ mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure,
static int
mptsas_slave_configure(struct scsi_device *sdev)
{
	sas_read_port_mode_page(sdev);
	struct Scsi_Host *host = sdev->host;
	MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;

	/*
	 * RAID volumes are placed beyond the last expected port,
	 * so don't send SAS mode pages in that case.
	 */
	if (sdev->channel < hd->ioc->num_ports)
		sas_read_port_mode_page(sdev);

	return mptscsih_slave_configure(sdev);
}
@@ -1815,14 +1815,14 @@ static int irda_usb_probe(struct usb_interface *intf,
	self->needspatch = (ret < 0);
	if (ret < 0) {
		printk("patch_device failed\n");
		goto err_out_4;
		goto err_out_5;
	}

	/* replace IrDA class descriptor with what patched device is now reporting */
	irda_desc = irda_usb_find_class_desc (self->usbintf);
	if (irda_desc == NULL) {
		ret = -ENODEV;
		goto err_out_4;
		goto err_out_5;
	}
	if (self->irda_desc)
		kfree (self->irda_desc);
@@ -1832,6 +1832,8 @@ static int irda_usb_probe(struct usb_interface *intf,

	return 0;

err_out_5:
	unregister_netdev(self->netdev);
err_out_4:
	kfree(self->speed_buff);
err_out_3:
@@ -225,6 +225,8 @@ static int __init smsc_superio_lpc(unsigned short cfg_base);
#ifdef CONFIG_PCI
static int __init preconfigure_smsc_chip(struct smsc_ircc_subsystem_configuration *conf);
static int __init preconfigure_through_82801(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf);
static void __init preconfigure_ali_port(struct pci_dev *dev,
					 unsigned short port);
static int __init preconfigure_through_ali(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf);
static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
						    unsigned short ircc_fir,
@@ -2327,9 +2329,14 @@ static int __init smsc_superio_lpc(unsigned short cfg_base)
 * pre-configuration not properly done by the BIOS (especially laptops)
 * This code is based in part on smcinit.c, tosh1800-smcinit.c
 * and tosh2450-smcinit.c. The table lists the device entries
 * for ISA bridges with an LPC (Local Peripheral Configurator)
 * that are in turn used to configure the SMSC device with default
 * SIR and FIR I/O ports, DMA and IRQ.
 * for ISA bridges with an LPC (Low Pin Count) controller which
 * handles the communication with the SMSC device. After the LPC
 * controller is initialized through PCI, the SMSC device is initialized
 * through a dedicated port in the ISA port-mapped I/O area; this latter
 * area is used to configure the SMSC device with default
 * SIR and FIR I/O ports, DMA and IRQ. Different vendors have
 * used different sets of parameters and different control port
 * addresses, making a subsystem device table necessary.
 */
#ifdef CONFIG_PCI
#define PCIID_VENDOR_INTEL 0x8086
@@ -2340,9 +2347,10 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __dev
		.device = 0x24cc,
		.subvendor = 0x103c,
		.subdevice = 0x088c,
		.sir_io = 0x02f8, /* Quite certain these are the same for nc8000 as for nc6000 */
		/* Quite certain these are the same for nc8000 as for nc6000 */
		.sir_io = 0x02f8,
		.fir_io = 0x0130,
		.fir_irq = 0x09,
		.fir_irq = 0x05,
		.fir_dma = 0x03,
		.cfg_base = 0x004e,
		.preconfigure = preconfigure_through_82801,
@@ -2355,60 +2363,79 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __dev
		.subdevice = 0x0890,
		.sir_io = 0x02f8,
		.fir_io = 0x0130,
		.fir_irq = 0x09,
		.fir_irq = 0x05,
		.fir_dma = 0x03,
		.cfg_base = 0x004e,
		.preconfigure = preconfigure_through_82801,
		.name = "HP nc6000",
	},
	{
		.vendor = PCIID_VENDOR_INTEL, /* Intel 82801DB/DBL (ICH4/ICH4-L) LPC Interface Bridge */
		/* Intel 82801DB/DBL (ICH4/ICH4-L) LPC Interface Bridge */
		.vendor = PCIID_VENDOR_INTEL,
		.device = 0x24c0,
		.subvendor = 0x1179,
		.subdevice = 0xffff, /* 0xffff is "any", Not sure, 0x0001 or 0x0002 */
		.subdevice = 0xffff, /* 0xffff is "any" */
		.sir_io = 0x03f8,
		.fir_io = 0x0130,
		.fir_irq = 0x07,
		.fir_dma = 0x01,
		.cfg_base = 0x002e,
		.preconfigure = preconfigure_through_82801,
		.name = "Toshiba Satellite 2450",
		.name = "Toshiba laptop with Intel 82801DB/DBL LPC bridge",
	},
	{
		.vendor = PCIID_VENDOR_INTEL, /* Intel 82801CAM ISA bridge */
		.device = 0x248c, /* Some use 24cc? */
		.device = 0x248c,
		.subvendor = 0x1179,
		.subdevice = 0xffff, /* 0xffff is "any", Not sure, 0x0001 or 0x0002 */
		.subdevice = 0xffff, /* 0xffff is "any" */
		.sir_io = 0x03f8,
		.fir_io = 0x0130,
		.fir_irq = 0x03,
		.fir_dma = 0x03,
		.cfg_base = 0x002e,
		.preconfigure = preconfigure_through_82801,
		.name = "Toshiba Satellite 5100/5200, Tecra 9100",
		.name = "Toshiba laptop with Intel 82801CAM ISA bridge",
	},
	{
		.vendor = PCIID_VENDOR_ALI, /* ALi M1533/M1535 PCI to ISA Bridge [Aladdin IV/V/V+] */
		/* 82801DBM (ICH4-M) LPC Interface Bridge */
		.vendor = PCIID_VENDOR_INTEL,
		.device = 0x24cc,
		.subvendor = 0x1179,
		.subdevice = 0xffff, /* 0xffff is "any" */
		.sir_io = 0x03f8,
		.fir_io = 0x0130,
		.fir_irq = 0x03,
		.fir_dma = 0x03,
		.cfg_base = 0x002e,
		.preconfigure = preconfigure_through_82801,
		.name = "Toshiba laptop with Intel 82801DBM LPC bridge",
	},
	{
		/* ALi M1533/M1535 PCI to ISA Bridge [Aladdin IV/V/V+] */
		.vendor = PCIID_VENDOR_ALI,
		.device = 0x1533,
		.subvendor = 0x1179,
		.subdevice = 0xffff, /* 0xffff is "any", Not sure, 0x0001 or 0x0002 */
		.subdevice = 0xffff, /* 0xffff is "any" */
		.sir_io = 0x02e8,
		.fir_io = 0x02f8,
		.fir_irq = 0x07,
		.fir_dma = 0x03,
		.cfg_base = 0x002e,
		.preconfigure = preconfigure_through_ali,
		.name = "Toshiba Satellite 1800",
		.name = "Toshiba laptop with ALi ISA bridge",
	},
	{ } // Terminator
};

/*
 * This sets up the basic SMSC parameters (FIR port, SIR port, FIR DMA, FIR IRQ)
 * This sets up the basic SMSC parameters
 * (FIR port, SIR port, FIR DMA, FIR IRQ)
 * through the chip configuration port.
 */
static int __init preconfigure_smsc_chip(struct smsc_ircc_subsystem_configuration *conf)
static int __init preconfigure_smsc_chip(struct
					 smsc_ircc_subsystem_configuration
					 *conf)
{
	unsigned short iobase = conf->cfg_base;
	unsigned char tmpbyte;
@@ -2416,7 +2443,9 @@ static int __init preconfigure_smsc_chip(struct smsc_ircc_subsystem_configuratio
	outb(LPC47N227_CFGACCESSKEY, iobase); // enter configuration state
	outb(SMSCSIOFLAT_DEVICEID_REG, iobase); // set for device ID
	tmpbyte = inb(iobase + 1); // Read device ID
	IRDA_DEBUG(0, "Detected Chip id: 0x%02x, setting up registers...\n",tmpbyte);
	IRDA_DEBUG(0,
		   "Detected Chip id: 0x%02x, setting up registers...\n",
		   tmpbyte);

	/* Disable UART1 and set up SIR I/O port */
	outb(0x24, iobase); // select CR24 - UART1 base addr
@@ -2426,6 +2455,7 @@ static int __init preconfigure_smsc_chip(struct smsc_ircc_subsystem_configuratio
	tmpbyte = inb(iobase + 1);
	if (tmpbyte != (conf->sir_io >> 2) ) {
		IRDA_WARNING("ERROR: could not configure SIR ioport.\n");
		IRDA_WARNING("Try to supply ircc_cfg argument.\n");
		return -ENXIO;
	}

@@ -2461,7 +2491,8 @@ static int __init preconfigure_smsc_chip(struct smsc_ircc_subsystem_configuratio

	outb(SMSCSIOFLAT_UARTMODE0C_REG, iobase); // CR0C - UART mode
	tmpbyte = inb(iobase + 1);
	tmpbyte &= ~SMSCSIOFLAT_UART2MODE_MASK | SMSCSIOFLAT_UART2MODE_VAL_IRDA;
	tmpbyte &= ~SMSCSIOFLAT_UART2MODE_MASK |
		SMSCSIOFLAT_UART2MODE_VAL_IRDA;
	outb(tmpbyte, iobase + 1); // enable IrDA (HPSIR) mode, high speed

	outb(LPC47N227_APMBOOTDRIVE_REG, iobase); // CR07 - Auto Pwr Mgt/boot drive sel
@@ -2486,53 +2517,226 @@ static int __init preconfigure_smsc_chip(struct smsc_ircc_subsystem_configuratio
	return 0;
}

/* 82801CAM registers */
/* 82801CAM generic registers */
#define VID 0x00
#define DID 0x02
#define PIRQA_ROUT 0x60
#define PIRQ_A_D_ROUT 0x60
#define SIRQ_CNTL 0x64
#define PIRQ_E_H_ROUT 0x68
#define PCI_DMA_C 0x90
/* LPC-specific registers */
#define COM_DEC 0xe0
#define GEN1_DEC 0xe4
#define LPC_EN 0xe6
#define GEN2_DEC 0xec
/*
 * Sets up the I/O range using the 82801CAM ISA bridge, 82801DBM LPC bridge or
 * Intel 82801DB/DBL (ICH4/ICH4-L) LPC Interface Bridge. They all work the same way!
 * Sets up the I/O range using the 82801CAM ISA bridge, 82801DBM LPC bridge
 * or Intel 82801DB/DBL (ICH4/ICH4-L) LPC Interface Bridge.
 * They all work the same way!
 */
static int __init preconfigure_through_82801(struct pci_dev *dev,
					     struct smsc_ircc_subsystem_configuration *conf)
					     struct
					     smsc_ircc_subsystem_configuration
					     *conf)
{
	unsigned short tmpword;
	int ret;
	unsigned char tmpbyte;

	IRDA_MESSAGE("Setting up the SMSC device via the 82801 controller.\n");
	pci_write_config_byte(dev, COM_DEC, 0x10);
	IRDA_MESSAGE("Setting up Intel 82801 controller and SMSC device\n");
	/*
	 * Select the range for the COMA COM port (SIR)
	 * Register COM_DEC:
	 * Bit 7: reserved
	 * Bit 6-4, COMB decode range
	 * Bit 3: reserved
	 * Bit 2-0, COMA decode range
	 *
	 * Decode ranges:
	 * 000 = 0x3f8-0x3ff (COM1)
	 * 001 = 0x2f8-0x2ff (COM2)
	 * 010 = 0x220-0x227
	 * 011 = 0x228-0x22f
	 * 100 = 0x238-0x23f
	 * 101 = 0x2e8-0x2ef (COM4)
	 * 110 = 0x338-0x33f
	 * 111 = 0x3e8-0x3ef (COM3)
	 */
	pci_read_config_byte(dev, COM_DEC, &tmpbyte);
	tmpbyte &= 0xf8; /* mask COMA bits */
	switch(conf->sir_io) {
	case 0x3f8:
		tmpbyte |= 0x00;
		break;
	case 0x2f8:
		tmpbyte |= 0x01;
		break;
	case 0x220:
		tmpbyte |= 0x02;
		break;
	case 0x228:
		tmpbyte |= 0x03;
		break;
	case 0x238:
		tmpbyte |= 0x04;
		break;
	case 0x2e8:
		tmpbyte |= 0x05;
		break;
	case 0x338:
		tmpbyte |= 0x06;
		break;
	case 0x3e8:
		tmpbyte |= 0x07;
		break;
	default:
		tmpbyte |= 0x01; /* COM2 default */
	}
	IRDA_DEBUG(1, "COM_DEC (write): 0x%02x\n", tmpbyte);
	pci_write_config_byte(dev, COM_DEC, tmpbyte);

	/* Enable LPC */
	pci_read_config_word(dev, LPC_EN, &tmpword); /* LPC_EN register */
	tmpword &= 0xfffd; /* mask bit 1 */
	tmpword |= 0x0001; /* set bit 0 : COMA addr range enable */
	/* Enable Low Pin Count interface */
	pci_read_config_word(dev, LPC_EN, &tmpword);
	/* These seem to be set up at all times,
	 * just make sure it is properly set.
	 */
	switch(conf->cfg_base) {
	case 0x04e:
		tmpword |= 0x2000;
		break;
	case 0x02e:
		tmpword |= 0x1000;
		break;
	case 0x062:
		tmpword |= 0x0800;
		break;
	case 0x060:
		tmpword |= 0x0400;
		break;
	default:
		IRDA_WARNING("Uncommon I/O base address: 0x%04x\n",
			     conf->cfg_base);
		break;
	}
	tmpword &= 0xfffd; /* disable LPC COMB */
	tmpword |= 0x0001; /* set bit 0 : enable LPC COMA addr range (GEN2) */
	IRDA_DEBUG(1, "LPC_EN (write): 0x%04x\n", tmpword);
	pci_write_config_word(dev, LPC_EN, tmpword);

	/* Setup DMA */
	pci_write_config_word(dev, PCI_DMA_C, 0xc0c0); /* LPC I/F DMA on, channel 3 -- rtm (?? PCI DMA ?) */
	pci_write_config_word(dev, GEN2_DEC, 0x131); /* LPC I/F 2nd decode range */
	/*
	 * Configure LPC DMA channel
	 * PCI_DMA_C bits:
	 * Bit 15-14: DMA channel 7 select
	 * Bit 13-12: DMA channel 6 select
	 * Bit 11-10: DMA channel 5 select
	 * Bit 9-8:   Reserved
	 * Bit 7-6:   DMA channel 3 select
	 * Bit 5-4:   DMA channel 2 select
	 * Bit 3-2:   DMA channel 1 select
	 * Bit 1-0:   DMA channel 0 select
	 *  00 = Reserved value
	 *  01 = PC/PCI DMA
	 *  10 = Reserved value
	 *  11 = LPC I/F DMA
	 */
	pci_read_config_word(dev, PCI_DMA_C, &tmpword);
	switch(conf->fir_dma) {
	case 0x07:
		tmpword |= 0xc000;
		break;
	case 0x06:
		tmpword |= 0x3000;
		break;
	case 0x05:
		tmpword |= 0x0c00;
		break;
	case 0x03:
		tmpword |= 0x00c0;
		break;
	case 0x02:
		tmpword |= 0x0030;
		break;
	case 0x01:
		tmpword |= 0x000c;
		break;
	case 0x00:
		tmpword |= 0x0003;
		break;
	default:
		break; /* do not change settings */
	}
	IRDA_DEBUG(1, "PCI_DMA_C (write): 0x%04x\n", tmpword);
	pci_write_config_word(dev, PCI_DMA_C, tmpword);

	/*
	 * GEN2_DEC bits:
	 * Bit 15-4: Generic I/O range
	 * Bit 3-1: reserved (read as 0)
	 * Bit 0: enable GEN2 range on LPC I/F
	 */
	tmpword = conf->fir_io & 0xfff8;
	tmpword |= 0x0001;
	IRDA_DEBUG(1, "GEN2_DEC (write): 0x%04x\n", tmpword);
	pci_write_config_word(dev, GEN2_DEC, tmpword);

	/* Pre-configure chip */
	ret = preconfigure_smsc_chip(conf);
	return preconfigure_smsc_chip(conf);
}

	/* Disable LPC */
	pci_read_config_word(dev, LPC_EN, &tmpword); /* LPC_EN register */
	tmpword &= 0xfffc; /* mask bit 1 and bit 0, COMA addr range disable */
	pci_write_config_word(dev, LPC_EN, tmpword);
	return ret;
/*
 * Pre-configure a certain port on the ALi 1533 bridge.
 * This is based on reverse-engineering since ALi does not
 * provide any data sheet for the 1533 chip.
 */
static void __init preconfigure_ali_port(struct pci_dev *dev,
					 unsigned short port)
{
	unsigned char reg;
	/* These bits obviously control the different ports */
	unsigned char mask;
	unsigned char tmpbyte;

	switch(port) {
	case 0x0130:
	case 0x0178:
		reg = 0xb0;
		mask = 0x80;
		break;
	case 0x03f8:
		reg = 0xb4;
		mask = 0x80;
		break;
	case 0x02f8:
		reg = 0xb4;
		mask = 0x30;
		break;
	case 0x02e8:
		reg = 0xb4;
		mask = 0x08;
		break;
	default:
		IRDA_ERROR("Failed to configure unsupported port on ALi 1533 bridge: 0x%04x\n", port);
		return;
	}

	pci_read_config_byte(dev, reg, &tmpbyte);
	/* Turn on the right bits */
	tmpbyte |= mask;
	pci_write_config_byte(dev, reg, tmpbyte);
	IRDA_MESSAGE("Activated ALi 1533 ISA bridge port 0x%04x.\n", port);
	return;
}

static int __init preconfigure_through_ali(struct pci_dev *dev,
					   struct smsc_ircc_subsystem_configuration *conf)
					   struct
					   smsc_ircc_subsystem_configuration
					   *conf)
{
	/* TODO: put in ALi 1533 configuration here. */
	IRDA_MESSAGE("SORRY: %s has an unsupported bridge controller (ALi): not pre-configured.\n", conf->name);
	return -ENODEV;
	/* Configure the two ports on the ALi 1533 */
	preconfigure_ali_port(dev, conf->sir_io);
	preconfigure_ali_port(dev, conf->fir_io);

	/* Pre-configure chip */
	return preconfigure_smsc_chip(conf);
}

static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
@@ -2552,9 +2756,10 @@ static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
	struct smsc_ircc_subsystem_configuration *conf;

	/*
	 * Cache the subsystem vendor/device: some manufacturers fail to set
	 * this for all components, so we save it in case there is just
	 * 0x0000 0x0000 on the device we want to check.
	 * Cache the subsystem vendor/device:
	 * some manufacturers fail to set this for all components,
	 * so we save it in case there is just 0x0000 0x0000 on the
	 * device we want to check.
	 */
	if (dev->subsystem_vendor != 0x0000U) {
		ss_vendor = dev->subsystem_vendor;
@@ -2564,13 +2769,20 @@ static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
	for( ; conf->subvendor; conf++) {
		if(conf->vendor == dev->vendor &&
		   conf->device == dev->device &&
		   conf->subvendor == ss_vendor && /* Sometimes these are cached values */
		   (conf->subdevice == ss_device || conf->subdevice == 0xffff)) {
			struct smsc_ircc_subsystem_configuration tmpconf;
		   conf->subvendor == ss_vendor &&
		   /* Sometimes these are cached values */
		   (conf->subdevice == ss_device ||
		    conf->subdevice == 0xffff)) {
			struct smsc_ircc_subsystem_configuration
				tmpconf;

			memcpy(&tmpconf, conf, sizeof(struct smsc_ircc_subsystem_configuration));
			memcpy(&tmpconf, conf,
			       sizeof(struct smsc_ircc_subsystem_configuration));

			/* Override the default values with anything passed in as parameter */
			/*
			 * Override the default values with anything
			 * passed in as parameter
			 */
			if (ircc_cfg != 0)
				tmpconf.cfg_base = ircc_cfg;
			if (ircc_fir != 0)
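A worked example of the COM_DEC programming above, with a hypothetical starting value: for a table entry with sir_io = 0x2f8 the COMA decode field must become 001 (the COM2 range 0x2f8-0x2ff), so a COM_DEC register that reads back as 0x10 is rewritten as (0x10 & 0xf8) | 0x01 = 0x11.

	/* read 0x10 -> mask COMA bits: 0x10 & 0xf8 = 0x10             */
	/* sir_io 0x2f8 -> decode 001 -> write 0x10 | 0x01 = 0x11      */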
@@ -410,103 +410,6 @@ config WAN_ROUTER_DRIVERS

	  If unsure, say N.

config VENDOR_SANGOMA
	tristate "Sangoma WANPIPE(tm) multiprotocol cards"
	depends on WAN_ROUTER_DRIVERS && WAN_ROUTER && (PCI || ISA) && BROKEN
	---help---
	  Driver for S514-PCI/ISA Synchronous Data Link Adapters (SDLA).

	  WANPIPE from Sangoma Technologies Inc. <http://www.sangoma.com/>
	  is a family of intelligent multiprotocol WAN adapters with data
	  transfer rates up to 4Mbps. Cards support:

	  - X.25, Frame Relay, PPP, Cisco HDLC protocols.

	  - API for protocols like HDLC (LAPB), HDLC Streaming, X.25,
	    Frame Relay and BiSync.

	  - Ethernet Bridging over Frame Relay protocol.

	  - MULTILINK PPP

	  - Async PPP (Modem Dialup)

	  The next questions will ask you about the protocols you want
	  the driver to support.

	  If you have one or more of these cards, say M to this option;
	  and read <file:Documentation/networking/wan-router.txt>.

	  To compile this driver as a module, choose M here: the
	  module will be called wanpipe.

config WANPIPE_CHDLC
	bool "WANPIPE Cisco HDLC support"
	depends on VENDOR_SANGOMA
	---help---
	  Connect a WANPIPE card to a leased line using the Cisco HDLC.

	  - Supports Dual Port Cisco HDLC on the S514-PCI/S508-ISA cards
	    which allows user to build applications using the HDLC streaming API.

	  - CHDLC Streaming MULTILINK PPP that can bind multiple WANPIPE T1
	    cards into a single logical channel.

	  Say Y and the Cisco HDLC support, HDLC streaming API and
	  MULTILINK PPP will be included in the driver.

config WANPIPE_FR
	bool "WANPIPE Frame Relay support"
	depends on VENDOR_SANGOMA
	help
	  Connect a WANPIPE card to a Frame Relay network, or use Frame Relay
	  API to develop custom applications.

	  Contains the Ethernet Bridging over Frame Relay feature, where
	  a WANPIPE frame relay link can be directly connected to the Linux
	  kernel bridge. The Frame Relay option is supported on S514-PCI
	  and S508-ISA cards.

	  Say Y and the Frame Relay support will be included in the driver.

config WANPIPE_X25
	bool "WANPIPE X.25 support"
	depends on VENDOR_SANGOMA
	help
	  Connect a WANPIPE card to an X.25 network.

	  Includes the X.25 API support for custom applications over the
	  X.25 protocol. The X.25 option is supported on S514-PCI and
	  S508-ISA cards.

	  Say Y and the X.25 support will be included in the driver.

config WANPIPE_PPP
	bool "WANPIPE PPP support"
	depends on VENDOR_SANGOMA
	help
	  Connect a WANPIPE card to a leased line using Point-to-Point
	  Protocol (PPP).

	  The PPP option is supported on S514-PCI/S508-ISA cards.

	  Say Y and the PPP support will be included in the driver.

config WANPIPE_MULTPPP
	bool "WANPIPE Multi-Port PPP support"
	depends on VENDOR_SANGOMA
	help
	  Connect a WANPIPE card to a leased line using Point-to-Point
	  Protocol (PPP).

	  Uses in-kernel SyncPPP protocol over the Sangoma HDLC Streaming
	  adapter. In this case each Sangoma adapter port can support an
	  independent PPP connection. For example, a single Quad-Port PCI
	  adapter can support up to four independent PPP links. The PPP
	  option is supported on S514-PCI/S508-ISA cards.

	  Say Y and the Multi-Port PPP support will be included in the driver.

config CYCLADES_SYNC
	tristate "Cyclom 2X(tm) cards (EXPERIMENTAL)"
	depends on WAN_ROUTER_DRIVERS && (PCI || ISA)
@@ -5,14 +5,6 @@
# Rewritten to use lists instead of if-statements.
#

wanpipe-y := sdlamain.o sdla_ft1.o
wanpipe-$(CONFIG_WANPIPE_X25) += sdla_x25.o
wanpipe-$(CONFIG_WANPIPE_FR) += sdla_fr.o
wanpipe-$(CONFIG_WANPIPE_CHDLC) += sdla_chdlc.o
wanpipe-$(CONFIG_WANPIPE_PPP) += sdla_ppp.o
wanpipe-$(CONFIG_WANPIPE_MULTPPP) += wanpipe_multppp.o
wanpipe-objs := $(wanpipe-y)

cyclomx-y := cycx_main.o
cyclomx-$(CONFIG_CYCLOMX_X25) += cycx_x25.o
cyclomx-objs := $(cyclomx-y)
@@ -43,11 +35,6 @@ obj-$(CONFIG_LANMEDIA) += lmc/

obj-$(CONFIG_DLCI) += dlci.o
obj-$(CONFIG_SDLA) += sdla.o
ifeq ($(CONFIG_WANPIPE_MULTPPP),y)
obj-$(CONFIG_VENDOR_SANGOMA) += sdladrv.o wanpipe.o syncppp.o
else
obj-$(CONFIG_VENDOR_SANGOMA) += sdladrv.o wanpipe.o
endif
obj-$(CONFIG_CYCLADES_SYNC) += cycx_drv.o cyclomx.o
obj-$(CONFIG_LAPBETHER) += lapbether.o
obj-$(CONFIG_SBNI) += sbni.o
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,345 +0,0 @@
/*****************************************************************************
 * sdla_chdlc.c	WANPIPE(tm) Multiprotocol WAN Link Driver. Cisco HDLC module.
 *
 * Authors:	Nenad Corbic <ncorbic@sangoma.com>
 *		Gideon Hack
 *
 * Copyright:	(c) 1995-1999 Sangoma Technologies Inc.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 * ============================================================================
 * Sep 30, 1999	Nenad Corbic	Fixed dynamic IP and route setup.
 * Sep 23, 1999	Nenad Corbic	Added SMP support, fixed tracing
 * Sep 13, 1999	Nenad Corbic	Split up Port 0 and 1 into separate devices.
 * Jun 02, 1999	Gideon Hack	Added support for the S514 adapter.
 * Oct 30, 1998	Jaspreet Singh	Added Support for CHDLC API (HDLC STREAMING).
 * Oct 28, 1998	Jaspreet Singh	Added Support for Dual Port CHDLC.
 * Aug 07, 1998	David Fong	Initial version.
 *****************************************************************************/

#include <linux/module.h>
#include <linux/kernel.h>	/* printk(), and other useful stuff */
#include <linux/stddef.h>	/* offsetof(), etc. */
#include <linux/errno.h>	/* return codes */
#include <linux/string.h>	/* inline memset(), etc. */
#include <linux/slab.h>		/* kmalloc(), kfree() */
#include <linux/wanrouter.h>	/* WAN router definitions */
#include <linux/wanpipe.h>	/* WANPIPE common user API definitions */
#include <linux/if_arp.h>	/* ARPHRD_* defines */
#include <linux/jiffies.h>	/* time_after() macro */

#include <linux/inetdevice.h>
#include <asm/uaccess.h>

#include <linux/in.h>		/* sockaddr_in */
#include <linux/inet.h>
#include <linux/if.h>
#include <asm/byteorder.h>	/* htons(), etc. */
#include <linux/sdlapci.h>
#include <asm/io.h>

#include <linux/sdla_chdlc.h>	/* CHDLC firmware API definitions */

/****** Defines & Macros ****************************************************/

/* reasons for enabling the timer interrupt on the adapter */
#define TMR_INT_ENABLED_UDP	0x0001
#define TMR_INT_ENABLED_UPDATE	0x0002

#define CHDLC_DFLT_DATA_LEN	1500	/* default MTU */
#define CHDLC_HDR_LEN		1

#define IFF_POINTTOPOINT 0x10

#define WANPIPE 0x00
#define API	0x01
#define CHDLC_API 0x01

#define PORT(x)	(x == 0 ? "PRIMARY" : "SECONDARY" )


/******Data Structures*****************************************************/

/* This structure is placed in the private data area of the device structure.
 * The card structure used to occupy the private area but now the following
 * structure will incorporate the card structure along with CHDLC specific data
 */

typedef struct chdlc_private_area
{
	struct net_device *slave;
	sdla_t		*card;
	int		TracingEnabled;		/* For enabling Tracing */
	unsigned long	curr_trace_addr;	/* Used for Tracing */
	unsigned long	start_trace_addr;
	unsigned long	end_trace_addr;
	unsigned long	base_addr_trace_buffer;
	unsigned long	end_addr_trace_buffer;
	unsigned short	number_trace_elements;
	unsigned	available_buffer_space;
	unsigned long	router_start_time;
	unsigned char	route_status;
	unsigned char	route_removed;
	unsigned long	tick_counter;		/* For 5s timeout counter */
	unsigned long	router_up_time;
	u32		IP_address;		/* IP addressing */
	u32		IP_netmask;
	unsigned char	mc;			/* Multicast support on/off */
	unsigned short	udp_pkt_lgth;		/* udp packet processing */
	char		udp_pkt_src;
	char		udp_pkt_data[MAX_LGTH_UDP_MGNT_PKT];
	unsigned short	timer_int_enabled;
	char		update_comms_stats;	/* updating comms stats */
	//FIXME: add driver stats as per frame relay!

} chdlc_private_area_t;

/* Route Status options */
#define NO_ROUTE	0x00
#define ADD_ROUTE	0x01
#define ROUTE_ADDED	0x02
#define REMOVE_ROUTE	0x03


/****** Function Prototypes *************************************************/
/* WAN link driver entry points. These are called by the WAN router module. */
static int wpft1_exec (struct sdla *card, void *u_cmd, void *u_data);
static int chdlc_read_version (sdla_t* card, char* str);
static int chdlc_error (sdla_t *card, int err, CHDLC_MAILBOX_STRUCT *mb);

/****** Public Functions ****************************************************/

/*============================================================================
 * Cisco HDLC protocol initialization routine.
 *
 * This routine is called by the main WANPIPE module during setup. At this
 * point adapter is completely initialized and firmware is running.
 *  o read firmware version (to make sure it's alive)
 *  o configure adapter
 *  o initialize protocol-specific fields of the adapter data space.
 *
 * Return:	0	o.k.
 *		< 0	failure.
 */
int wpft1_init (sdla_t* card, wandev_conf_t* conf)
{
	unsigned char port_num;
	int err;

	union
	{
		char str[80];
	} u;
	volatile CHDLC_MAILBOX_STRUCT* mb;
	CHDLC_MAILBOX_STRUCT* mb1;
	unsigned long timeout;

	/* Verify configuration ID */
	if (conf->config_id != WANCONFIG_CHDLC) {
		printk(KERN_INFO "%s: invalid configuration ID %u!\n",
			card->devname, conf->config_id);
		return -EINVAL;
	}

	/* Use primary port */
	card->u.c.comm_port = 0;


	/* Initialize protocol-specific fields */
	if(card->hw.type != SDLA_S514){
		card->mbox = (void *) card->hw.dpmbase;
	}else{
		card->mbox = (void *) card->hw.dpmbase + PRI_BASE_ADDR_MB_STRUCT;
	}

	mb = mb1 = card->mbox;

	if (!card->configured){

		/* The board will place an 'I' in the return code to indicate that it is
		   ready to accept commands. We expect this to be completed in less
		   than 1 second. */

		timeout = jiffies;
		while (mb->return_code != 'I')	/* Wait 1s for board to initialize */
			if (time_after(jiffies, timeout + 1*HZ)) break;

		if (mb->return_code != 'I') {
			printk(KERN_INFO
				"%s: Initialization not completed by adapter\n",
				card->devname);
			printk(KERN_INFO "Please contact Sangoma representative.\n");
			return -EIO;
		}
	}

	/* Read firmware version. Note that when adapter initializes, it
	 * clears the mailbox, so it may appear that the first command was
	 * executed successfully when in fact it was merely erased. To work
	 * around this, we execute the first command twice.
	 */

	if (chdlc_read_version(card, u.str))
		return -EIO;

	printk(KERN_INFO "%s: Running FT1 Configuration firmware v%s\n",
		card->devname, u.str);

	card->isr		= NULL;
	card->poll		= NULL;
	card->exec		= &wpft1_exec;
	card->wandev.update	= NULL;
	card->wandev.new_if	= NULL;
	card->wandev.del_if	= NULL;
	card->wandev.state	= WAN_DUALPORT;
	card->wandev.udp_port	= conf->udp_port;

	card->wandev.new_if_cnt = 0;

	/* This is for the ports link state */
	card->u.c.state = WAN_DISCONNECTED;

	/* reset the number of times the 'update()' proc has been called */
	card->u.c.update_call_count = 0;

	card->wandev.ttl = 0x7F;
	card->wandev.interface = 0;

	card->wandev.clocking = 0;

	port_num = card->u.c.comm_port;

	/* Setup Port Bps */

	card->wandev.bps = 0;

	card->wandev.mtu = MIN_LGTH_CHDLC_DATA_CFG;

	/* Set up the interrupt status area */
	/* Read the CHDLC Configuration and obtain:
	 *	Ptr to shared memory info struct
	 * Use this pointer to calculate the value of card->u.c.flags !
	 */
	mb1->buffer_length = 0;
	mb1->command = READ_CHDLC_CONFIGURATION;
	err = sdla_exec(mb1) ? mb1->return_code : CMD_TIMEOUT;
	if(err != COMMAND_OK) {
		chdlc_error(card, err, mb1);
		return -EIO;
	}

	if(card->hw.type == SDLA_S514){
		card->u.c.flags = (void *)(card->hw.dpmbase +
			(((CHDLC_CONFIGURATION_STRUCT *)mb1->data)->
			ptr_shared_mem_info_struct));
	}else{
		card->u.c.flags = (void *)(card->hw.dpmbase +
			(((CHDLC_CONFIGURATION_STRUCT *)mb1->data)->
			ptr_shared_mem_info_struct % SDLA_WINDOWSIZE));
	}

	card->wandev.state = WAN_FT1_READY;
	printk(KERN_INFO "%s: FT1 Config Ready !\n",card->devname);

	return 0;
}

static int wpft1_exec(sdla_t *card, void *u_cmd, void *u_data)
{
	CHDLC_MAILBOX_STRUCT* mbox = card->mbox;
	int len;

	if (copy_from_user((void*)&mbox->command, u_cmd, sizeof(ft1_exec_cmd_t))){
		return -EFAULT;
	}

	len = mbox->buffer_length;

	if (len) {
		if( copy_from_user((void*)&mbox->data, u_data, len)){
			return -EFAULT;
		}
	}

	/* execute command */
	if (!sdla_exec(mbox)){
		return -EIO;
	}

	/* return result */
	if( copy_to_user(u_cmd, (void*)&mbox->command, sizeof(ft1_exec_cmd_t))){
		return -EFAULT;
	}

	len = mbox->buffer_length;

	if (len && u_data && copy_to_user(u_data, (void*)&mbox->data, len)){
		return -EFAULT;
	}

	return 0;

}

/*============================================================================
 * Read firmware code version.
 *	Put code version as ASCII string in str.
 */
static int chdlc_read_version (sdla_t* card, char* str)
{
	CHDLC_MAILBOX_STRUCT* mb = card->mbox;
	int len;
	char err;
	mb->buffer_length = 0;
	mb->command = READ_CHDLC_CODE_VERSION;
	err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;

	if(err != COMMAND_OK) {
		chdlc_error(card,err,mb);
	}
	else if (str) {  /* is not null */
		len = mb->buffer_length;
		memcpy(str, mb->data, len);
		str[len] = '\0';
	}
	return (err);
}

/*============================================================================
 * Firmware error handler.
 *	This routine is called whenever firmware command returns non-zero
 *	return code.
 *
 * Return zero if previous command has to be cancelled.
 */
static int chdlc_error (sdla_t *card, int err, CHDLC_MAILBOX_STRUCT *mb)
{
	unsigned cmd = mb->command;

	switch (err) {

	case CMD_TIMEOUT:
		printk(KERN_ERR "%s: command 0x%02X timed out!\n",
			card->devname, cmd);
		break;

	case S514_BOTH_PORTS_SAME_CLK_MODE:
		if(cmd == SET_CHDLC_CONFIGURATION) {
			printk(KERN_INFO
			 "%s: Configure both ports for the same clock source\n",
				card->devname);
			break;
		}

	default:
		printk(KERN_INFO "%s: command 0x%02X returned 0x%02X!\n",
			card->devname, cmd, err);
	}

	return 0;
}

MODULE_LICENSE("GPL");
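The wpft1_exec() entry point in the deleted module above follows the classic copy-in / execute / copy-out shape for a user-driven command path. A minimal sketch of that pattern, assuming a hypothetical command structure and with my_exec() standing in for the board-specific sdla_exec() call:

/* Hedged sketch only; struct my_cmd and my_exec() are hypothetical. */
struct my_cmd {
	unsigned short len;		/* payload length */
	unsigned char  data[128];	/* payload buffer */
};

static int my_ioctl_exec(void __user *u_cmd, void __user *u_data)
{
	struct my_cmd cmd;

	if (copy_from_user(&cmd, u_cmd, sizeof(cmd)))
		return -EFAULT;			/* command header in */
	if (cmd.len > sizeof(cmd.data))
		return -EINVAL;			/* sanity-check the size */
	if (cmd.len && copy_from_user(cmd.data, u_data, cmd.len))
		return -EFAULT;			/* payload in, if any */
	if (my_exec(&cmd))
		return -EIO;			/* run the command */
	if (copy_to_user(u_cmd, &cmd, sizeof(cmd)))
		return -EFAULT;			/* updated header out */
	if (cmd.len && u_data && copy_to_user(u_data, cmd.data, cmd.len))
		return -EFAULT;			/* result payload out */
	return 0;
}

Note that the sketch validates the user-supplied length before copying, where the original trusted mbox->buffer_length; the copy_{from,to}_user() return-value checks are the same convention the original uses.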
[Five more file diffs suppressed because they are too large.]
@@ -360,9 +360,6 @@ static int __init rpaphp_init(void)
	while ((dn = of_find_node_by_type(dn, "pci")))
		rpaphp_add_slot(dn);

	if (!num_slots)
		return -ENODEV;

	return 0;
}

@@ -504,6 +504,201 @@ void pci_scan_msi_device(struct pci_dev *dev)
		nr_reserved_vectors++;
}

#ifdef CONFIG_PM
int pci_save_msi_state(struct pci_dev *dev)
{
	int pos, i = 0;
	u16 control;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos <= 0 || dev->no_msi)
		return 0;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSI_FLAGS_ENABLE))
		return 0;

	save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u32) * 5,
		GFP_KERNEL);
	if (!save_state) {
		printk(KERN_ERR "Out of memory in pci_save_msi_state\n");
		return -ENOMEM;
	}
	cap = &save_state->data[0];

	pci_read_config_dword(dev, pos, &cap[i++]);
	control = cap[0] >> 16;
	pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, &cap[i++]);
	if (control & PCI_MSI_FLAGS_64BIT) {
		pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, &cap[i++]);
		pci_read_config_dword(dev, pos + PCI_MSI_DATA_64, &cap[i++]);
	} else
		pci_read_config_dword(dev, pos + PCI_MSI_DATA_32, &cap[i++]);
	if (control & PCI_MSI_FLAGS_MASKBIT)
		pci_read_config_dword(dev, pos + PCI_MSI_MASK_BIT, &cap[i++]);
	disable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
	save_state->cap_nr = PCI_CAP_ID_MSI;
	pci_add_saved_cap(dev, save_state);
	return 0;
}

void pci_restore_msi_state(struct pci_dev *dev)
{
	int i = 0, pos;
	u16 control;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSI);
	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!save_state || pos <= 0)
		return;
	cap = &save_state->data[0];

	control = cap[i++] >> 16;
	pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, cap[i++]);
	if (control & PCI_MSI_FLAGS_64BIT) {
		pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, cap[i++]);
		pci_write_config_dword(dev, pos + PCI_MSI_DATA_64, cap[i++]);
	} else
		pci_write_config_dword(dev, pos + PCI_MSI_DATA_32, cap[i++]);
	if (control & PCI_MSI_FLAGS_MASKBIT)
		pci_write_config_dword(dev, pos + PCI_MSI_MASK_BIT, cap[i++]);
	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
	pci_remove_saved_cap(save_state);
	kfree(save_state);
}

int pci_save_msix_state(struct pci_dev *dev)
{
	int pos;
	u16 control;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos <= 0 || dev->no_msi)
		return 0;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSIX_FLAGS_ENABLE))
		return 0;
	save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u16),
		GFP_KERNEL);
	if (!save_state) {
		printk(KERN_ERR "Out of memory in pci_save_msix_state\n");
		return -ENOMEM;
	}
	*((u16 *)&save_state->data[0]) = control;

	disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
	save_state->cap_nr = PCI_CAP_ID_MSIX;
	pci_add_saved_cap(dev, save_state);
	return 0;
}

void pci_restore_msix_state(struct pci_dev *dev)
{
	u16 save;
	int pos;
	int vector, head, tail = 0;
	void __iomem *base;
	int j;
	struct msg_address address;
	struct msg_data data;
	struct msi_desc *entry;
	int temp;
	struct pci_cap_saved_state *save_state;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSIX);
	if (!save_state)
		return;
	save = *((u16 *)&save_state->data[0]);
	pci_remove_saved_cap(save_state);
	kfree(save_state);

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos <= 0)
		return;

	/* route the table */
	temp = dev->irq;
	if (msi_lookup_vector(dev, PCI_CAP_ID_MSIX))
		return;
	vector = head = dev->irq;
	while (head != tail) {
		entry = msi_desc[vector];
		base = entry->mask_base;
		j = entry->msi_attrib.entry_nr;

		msi_address_init(&address);
		msi_data_init(&data, vector);

		address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK;
		address.lo_address.value |= entry->msi_attrib.current_cpu <<
			MSI_TARGET_CPU_SHIFT;

		writel(address.lo_address.value,
			base + j * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		writel(address.hi_address,
			base + j * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		writel(*(u32*)&data,
			base + j * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_DATA_OFFSET);

		tail = msi_desc[vector]->link.tail;
		vector = tail;
	}
	dev->irq = temp;

	pci_write_config_word(dev, msi_control_reg(pos), save);
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
}
#endif

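The new helpers above serialize a device's MSI/MSI-X capability before power-down and replay it on resume, and the drivers/pci/pci.c hunk further below wires them into pci_save_state()/pci_restore_state(). An ordinary PCI driver suspend path of this era therefore picks the MSI state up without extra work. A minimal sketch, assuming a hypothetical mydrv driver:

/* Hedged sketch: a suspend/resume pair relying on the change above. */
static int mydrv_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);		/* config space + MSI/MSI-X state */
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int mydrv_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);	/* config space + MSI/MSI-X state */
	return pci_enable_device(pdev);
}
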
static void msi_register_init(struct pci_dev *dev, struct msi_desc *entry)
{
	struct msg_address address;
	struct msg_data data;
	int pos, vector = dev->irq;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	/* Configure MSI capability structure */
	msi_address_init(&address);
	msi_data_init(&data, vector);
	entry->msi_attrib.current_cpu = ((address.lo_address.u.dest_id >>
		MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK);
	pci_write_config_dword(dev, msi_lower_address_reg(pos),
		address.lo_address.value);
	if (is_64bit_address(control)) {
		pci_write_config_dword(dev,
			msi_upper_address_reg(pos), address.hi_address);
		pci_write_config_word(dev,
			msi_data_reg(pos, 1), *((u32*)&data));
	} else
		pci_write_config_word(dev,
			msi_data_reg(pos, 0), *((u32*)&data));
	if (entry->msi_attrib.maskbit) {
		unsigned int maskbits, temp;
		/* All MSIs are unmasked by default, Mask them all */
		pci_read_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			&maskbits);
		temp = (1 << multi_msi_capable(control));
		temp = ((temp - 1) & ~temp);
		maskbits |= temp;
		pci_write_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			maskbits);
	}
}

/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
@@ -516,8 +711,6 @@ void pci_scan_msi_device(struct pci_dev *dev)
static int msi_capability_init(struct pci_dev *dev)
{
	struct msi_desc *entry;
	struct msg_address address;
	struct msg_data data;
	int pos, vector;
	u16 control;

@@ -549,33 +742,8 @@ static int msi_capability_init(struct pci_dev *dev)
	/* Replace with MSI handler */
	irq_handler_init(PCI_CAP_ID_MSI, vector, entry->msi_attrib.maskbit);
	/* Configure MSI capability structure */
	msi_address_init(&address);
	msi_data_init(&data, vector);
	entry->msi_attrib.current_cpu = ((address.lo_address.u.dest_id >>
		MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK);
	pci_write_config_dword(dev, msi_lower_address_reg(pos),
		address.lo_address.value);
	if (is_64bit_address(control)) {
		pci_write_config_dword(dev,
			msi_upper_address_reg(pos), address.hi_address);
		pci_write_config_word(dev,
			msi_data_reg(pos, 1), *((u32*)&data));
	} else
		pci_write_config_word(dev,
			msi_data_reg(pos, 0), *((u32*)&data));
	if (entry->msi_attrib.maskbit) {
		unsigned int maskbits, temp;
		/* All MSIs are unmasked by default, Mask them all */
		pci_read_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			&maskbits);
		temp = (1 << multi_msi_capable(control));
		temp = ((temp - 1) & ~temp);
		maskbits |= temp;
		pci_write_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			maskbits);
	}
	msi_register_init(dev, entry);

	attach_msi_entry(entry, vector);
	/* Set MSI enabled bits */
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
@@ -731,6 +899,7 @@ int pci_enable_msi(struct pci_dev* dev)
		vector_irq[dev->irq] = -1;
		nr_released_vectors--;
		spin_unlock_irqrestore(&msi_lock, flags);
		msi_register_init(dev, msi_desc[dev->irq]);
		enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
		return 0;
	}

@@ -271,10 +271,12 @@ static int pci_device_suspend(struct device * dev, pm_message_t state)
	struct pci_driver * drv = pci_dev->driver;
	int i = 0;

	if (drv && drv->suspend)
	if (drv && drv->suspend) {
		i = drv->suspend(pci_dev, state);
	else
		suspend_report_result(drv->suspend, i);
	} else {
		pci_save_state(pci_dev);
	}
	return i;
}

@@ -307,9 +307,11 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state)
	 * Can enter D0 from any state, but if we can only go deeper
	 * to sleep if we're already in a low power state
	 */
	if (state != PCI_D0 && dev->current_state > state)
	if (state != PCI_D0 && dev->current_state > state) {
		printk(KERN_ERR "%s(): %s: state=%d, current state=%d\n",
			__FUNCTION__, pci_name(dev), state, dev->current_state);
		return -EINVAL;
	else if (dev->current_state == state)
	} else if (dev->current_state == state)
		return 0;	/* we're already there */

	/* find PCI PM capability in list */
@@ -444,6 +446,10 @@ pci_save_state(struct pci_dev *dev)
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4,&dev->saved_config_space[i]);
	if ((i = pci_save_msi_state(dev)) != 0)
		return i;
	if ((i = pci_save_msix_state(dev)) != 0)
		return i;
	return 0;
}

@@ -458,6 +464,8 @@ pci_restore_state(struct pci_dev *dev)

	for (i = 0; i < 16; i++)
		pci_write_config_dword(dev,i * 4, dev->saved_config_space[i]);
	pci_restore_msi_state(dev);
	pci_restore_msix_state(dev);
	return 0;
}

@@ -55,6 +55,17 @@ void pci_no_msi(void);
static inline void disable_msi_mode(struct pci_dev *dev, int pos, int type) { }
static inline void pci_no_msi(void) { }
#endif
#if defined(CONFIG_PCI_MSI) && defined(CONFIG_PM)
int pci_save_msi_state(struct pci_dev *dev);
int pci_save_msix_state(struct pci_dev *dev);
void pci_restore_msi_state(struct pci_dev *dev);
void pci_restore_msix_state(struct pci_dev *dev);
#else
static inline int pci_save_msi_state(struct pci_dev *dev) { return 0; }
static inline int pci_save_msix_state(struct pci_dev *dev) { return 0; }
static inline void pci_restore_msi_state(struct pci_dev *dev) {}
static inline void pci_restore_msix_state(struct pci_dev *dev) {}
#endif

extern int pcie_mch_quirk;
extern struct device_attribute pci_dev_attrs[];
@@ -592,7 +592,7 @@ static void __init quirk_amd_8131_ioapic(struct pci_dev *dev)
		pci_write_config_byte( dev, AMD8131_MISC, tmp);
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_APIC, quirk_amd_8131_ioapic );
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic);

static void __init quirk_svw_msi(struct pci_dev *dev)
{
@@ -921,6 +921,7 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
	if (dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB) {
		switch (dev->subsystem_device) {
		case 0x1882: /* M6V notebook */
		case 0x1977: /* A6VA notebook */
			asus_hides_smbus = 1;
		}
	}
@@ -999,6 +1000,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asu
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc );
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc );
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc );
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc );

static void __init asus_hides_smbus_lpc_ich6(struct pci_dev *dev)
{
@@ -65,6 +65,7 @@
   2.26.02.005 - Fix use_sg == 0 mapping on systems with 4GB or higher.
   2.26.02.006 - Fix 9550SX pchip reset timeout.
                 Add big endian support.
   2.26.02.007 - Disable local interrupts during kmap/unmap_atomic().
*/

#include <linux/module.h>
@@ -88,7 +89,7 @@
#include "3w-9xxx.h"

/* Globals */
#define TW_DRIVER_VERSION "2.26.02.006"
#define TW_DRIVER_VERSION "2.26.02.007"
static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
static unsigned int twa_device_extension_count;
static int twa_major = -1;
@@ -1942,9 +1943,13 @@ static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int re
		}
		if (tw_dev->srb[request_id]->use_sg == 1) {
			struct scatterlist *sg = (struct scatterlist *)tw_dev->srb[request_id]->request_buffer;
			char *buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
			char *buf;
			unsigned long flags = 0;
			local_irq_save(flags);
			buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
			memcpy(buf, tw_dev->generic_buffer_virt[request_id], sg->length);
			kunmap_atomic(buf - sg->offset, KM_IRQ0);
			local_irq_restore(flags);
		}
	}
} /* End twa_scsiop_execute_scsi_complete() */
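The fix above (the 2.26.02.007 changelog entry) exists because the KM_IRQ0 kmap slot is shared with interrupt context: a process-context user must disable local interrupts around the map/unmap pair, or an interrupt on the same CPU can reuse the slot underneath it. A minimal sketch of the safe pattern, using the same pre-sg_page()-era scatterlist fields as the hunk above:

/* Hedged sketch of the local_irq_save() + kmap_atomic(KM_IRQ0) idiom. */
static void copy_to_sg_page(struct scatterlist *sg, const void *src, size_t len)
{
	unsigned long flags;
	char *buf;

	local_irq_save(flags);			/* keep IRQs off this CPU */
	buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
	memcpy(buf, src, len);			/* touch the highmem page */
	kunmap_atomic(buf - sg->offset, KM_IRQ0);
	local_irq_restore(flags);
}
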
@@ -1079,7 +1079,7 @@ config SCSI_SYM53C8XX_DMA_ADDRESSING_MODE
	  memory using PCI DAC cycles.

config SCSI_SYM53C8XX_DEFAULT_TAGS
	int "default tagged command queue depth"
	int "Default tagged command queue depth"
	depends on SCSI_SYM53C8XX_2
	default "16"
	help
@@ -1090,7 +1090,7 @@ config SCSI_SYM53C8XX_DEFAULT_TAGS
	  exceed CONFIG_SCSI_SYM53C8XX_MAX_TAGS.

config SCSI_SYM53C8XX_MAX_TAGS
	int "maximum number of queued commands"
	int "Maximum number of queued commands"
	depends on SCSI_SYM53C8XX_2
	default "64"
	help
@@ -1099,13 +1099,14 @@ config SCSI_SYM53C8XX_MAX_TAGS
	  possible. The driver supports up to 256 queued commands per device.
	  This value is used as a compiled-in hard limit.

config SCSI_SYM53C8XX_IOMAPPED
	bool "use port IO"
config SCSI_SYM53C8XX_MMIO
	bool "Use memory mapped IO"
	depends on SCSI_SYM53C8XX_2
	default y
	help
	  If you say Y here, the driver will use port IO to access
	  the card. This is significantly slower then using memory
	  mapped IO. Most people should answer N.
	  Memory mapped IO is faster than Port IO. Most people should
	  answer Y here, but some machines may have problems. If you have
	  to answer N here, please report the problem to the maintainer.

config SCSI_IPR
	tristate "IBM Power Linux RAID adapter support"
@@ -1309,15 +1310,6 @@ config SCSI_QLOGIC_FAS
	  To compile this driver as a module, choose M here: the
	  module will be called qlogicfas.

config SCSI_QLOGIC_FC
	tristate "Qlogic ISP FC SCSI support"
	depends on PCI && SCSI
	help
	  This is a driver for the QLogic ISP2100 SCSI-FCP host adapter.

	  To compile this driver as a module, choose M here: the
	  module will be called qlogicfc.

config SCSI_QLOGIC_FC_FIRMWARE
	bool "Include loadable firmware in driver"
	depends on SCSI_QLOGIC_FC
@@ -78,7 +78,6 @@ obj-$(CONFIG_SCSI_NCR_Q720)	+= NCR_Q720_mod.o
obj-$(CONFIG_SCSI_SYM53C416)	+= sym53c416.o
obj-$(CONFIG_SCSI_QLOGIC_FAS)	+= qlogicfas408.o qlogicfas.o
obj-$(CONFIG_PCMCIA_QLOGIC)	+= qlogicfas408.o
obj-$(CONFIG_SCSI_QLOGIC_FC)	+= qlogicfc.o
obj-$(CONFIG_SCSI_QLOGIC_1280)	+= qla1280.o
obj-$(CONFIG_SCSI_QLA_FC)	+= qla2xxx/
obj-$(CONFIG_SCSI_LPFC)		+= lpfc/
@@ -149,20 +149,20 @@ static int dacmode = -1;

static int commit = -1;

module_param(nondasd, int, 0);
module_param(nondasd, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices. 0=off, 1=on");
module_param(dacmode, int, 0);
module_param(dacmode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC. 0=off, 1=on");
module_param(commit, int, 0);
module_param(commit, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(commit, "Control whether a COMMIT_CONFIG is issued to the adapter for foreign arrays.\nThis is typically needed in systems that do not have a BIOS. 0=off, 1=on");

int numacb = -1;
module_param(numacb, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control blocks (FIB) allocated. Valid\nvalues are 512 and down. Default is to use suggestion from Firmware.");
MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control blocks (FIB) allocated. Valid values are 512 and down. Default is to use suggestion from Firmware.");

int acbsize = -1;
module_param(acbsize, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB) size. Valid values are 512,\n2048, 4096 and 8192. Default is to use suggestion from Firmware.");
MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB) size. Valid values are 512, 2048, 4096 and 8192. Default is to use suggestion from Firmware.");
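The third argument of module_param() is a sysfs permission mode: 0 keeps the parameter invisible, while S_IRUGO|S_IWUSR (as switched to above) exposes it world-readable and root-writable under /sys/module/aacraid/parameters/. A minimal sketch with a hypothetical parameter:

/* Hedged sketch; example_param is illustrative only. */
static int example_param = -1;
module_param(example_param, int, S_IRUGO | S_IWUSR);	/* 0444 | 0200 */
MODULE_PARM_DESC(example_param, "A runtime-tunable option, visible in sysfs");
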
/**
 * aac_get_config_status - check the adapter configuration
 * @common: adapter to query
@@ -387,6 +387,7 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
	struct scsi_cmnd * scsicmd;

	scsicmd = (struct scsi_cmnd *) context;
	scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;

	dprintk((KERN_DEBUG "get_container_name_callback[cpu %d]: t = %ld.\n", smp_processor_id(), jiffies));
	if (fibptr == NULL)
@@ -453,8 +454,10 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid)
	/*
	 *	Check that the command queued to the controller
	 */
	if (status == -EINPROGRESS)
	if (status == -EINPROGRESS) {
		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
		return 0;
	}

	printk(KERN_WARNING "aac_get_container_name: aac_fib_send failed with status: %d.\n", status);
	aac_fib_complete(cmd_fibcontext);
@@ -907,9 +910,10 @@ static void io_callback(void *context, struct fib * fibptr)
	u32 cid;

	scsicmd = (struct scsi_cmnd *) context;
	scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;

	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
	cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);
	cid = scmd_id(scsicmd);

	if (nblank(dprintk(x))) {
		u64 lba;
@@ -1151,8 +1155,10 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
	/*
	 *	Check that the command queued to the controller
	 */
	if (status == -EINPROGRESS)
	if (status == -EINPROGRESS) {
		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
		return 0;
	}

	printk(KERN_WARNING "aac_read: aac_fib_send failed with status: %d.\n", status);
	/*
@@ -1318,8 +1324,8 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
	/*
	 *	Check that the command queued to the controller
	 */
	if (status == -EINPROGRESS)
	{
	if (status == -EINPROGRESS) {
		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
		return 0;
	}

@@ -1341,6 +1347,7 @@ static void synchronize_callback(void *context, struct fib *fibptr)
	struct scsi_cmnd *cmd;

	cmd = context;
	cmd->SCp.phase = AAC_OWNER_MIDLEVEL;

	dprintk((KERN_DEBUG "synchronize_callback[cpu %d]: t = %ld.\n",
				smp_processor_id(), jiffies));
@@ -1354,7 +1361,7 @@ static void synchronize_callback(void *context, struct fib *fibptr)
	else {
		struct scsi_device *sdev = cmd->device;
		struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
		u32 cid = ID_LUN_TO_CONTAINER(sdev->id, sdev->lun);
		u32 cid = sdev_id(sdev);
		printk(KERN_WARNING
			"synchronize_callback: synchronize failed, status = %d\n",
			le32_to_cpu(synchronizereply->status));
@@ -1386,12 +1393,12 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid)
	unsigned long flags;

	/*
	 * Wait for all commands to complete to this specific
	 * target (block).
	 * Wait for all outstanding queued commands to complete to this
	 * specific target (block).
	 */
	spin_lock_irqsave(&sdev->list_lock, flags);
	list_for_each_entry(cmd, &sdev->cmd_list, list)
		if (cmd != scsicmd && cmd->serial_number != 0) {
		if (cmd != scsicmd && cmd->SCp.phase == AAC_OWNER_FIRMWARE) {
			++active;
			break;
		}
@@ -1434,8 +1441,10 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid)
	/*
	 *	Check that the command queued to the controller
	 */
	if (status == -EINPROGRESS)
	if (status == -EINPROGRESS) {
		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
		return 0;
	}

	printk(KERN_WARNING
		"aac_synchronize: aac_fib_send failed with status: %d.\n", status);
@@ -1458,7 +1467,6 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
	struct Scsi_Host *host = scsicmd->device->host;
	struct aac_dev *dev = (struct aac_dev *)host->hostdata;
	struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev;
	int ret;

	/*
	 *	If the bus, id or lun is out of range, return fail
@@ -1466,13 +1474,14 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
	 *	itself.
	 */
	if (scmd_id(scsicmd) != host->this_id) {
		if ((scsicmd->device->channel == CONTAINER_CHANNEL)) {
			if( (scsicmd->device->id >= dev->maximum_num_containers) || (scsicmd->device->lun != 0)){
		if ((scmd_channel(scsicmd) == CONTAINER_CHANNEL)) {
			if((scmd_id(scsicmd) >= dev->maximum_num_containers) ||
					(scsicmd->device->lun != 0)) {
				scsicmd->result = DID_NO_CONNECT << 16;
				scsicmd->scsi_done(scsicmd);
				return 0;
			}
			cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);
			cid = scmd_id(scsicmd);

			/*
			 *	If the target container doesn't exist, it may have
@@ -1548,7 +1557,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
	{
		struct inquiry_data inq_data;

		dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", scsicmd->device->id));
		dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", scmd_id(scsicmd)));
		memset(&inq_data, 0, sizeof (struct inquiry_data));

		inq_data.inqd_ver = 2;	/* claim compliance to SCSI-2 */
@@ -1598,13 +1607,14 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
			cp[11] = 0;
			cp[12] = 0;
			aac_internal_transfer(scsicmd, cp, 0,
				min((unsigned int)scsicmd->cmnd[13], sizeof(cp)));
				min_t(size_t, scsicmd->cmnd[13], sizeof(cp)));
			if (sizeof(cp) < scsicmd->cmnd[13]) {
				unsigned int len, offset = sizeof(cp);

				memset(cp, 0, offset);
				do {
					len = min(scsicmd->cmnd[13]-offset, sizeof(cp));
					len = min_t(size_t, scsicmd->cmnd[13] - offset,
							sizeof(cp));
					aac_internal_transfer(scsicmd, cp, offset, len);
				} while ((offset += len) < scsicmd->cmnd[13]);
			}
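The min() to min_t() conversions above are needed because the kernel's min() macro refuses to compare operands of mismatched types; min_t(type, a, b) first casts both sides to the named type. A hedged illustration of the difference:

/* Hedged sketch; values are illustrative. */
unsigned char requested = 200;	/* e.g. a CDB length byte (u8) */
char cp[64];
/* min(requested, sizeof(cp)) breaks the build: u8 vs size_t */
size_t n = min_t(size_t, requested, sizeof(cp));	/* n == 64 */
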
@@ -1728,24 +1738,19 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
		 *	containers to /dev/sd device names
		 */

		spin_unlock_irq(host->host_lock);
		if (scsicmd->request->rq_disk)
			strlcpy(fsa_dev_ptr[cid].devname,
				scsicmd->request->rq_disk->disk_name,
				min(sizeof(fsa_dev_ptr[cid].devname),
				sizeof(scsicmd->request->rq_disk->disk_name) + 1));
		ret = aac_read(scsicmd, cid);
		spin_lock_irq(host->host_lock);
		return ret;

		return aac_read(scsicmd, cid);

	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
		spin_unlock_irq(host->host_lock);
		ret = aac_write(scsicmd, cid);
		spin_lock_irq(host->host_lock);
		return ret;
		return aac_write(scsicmd, cid);

	case SYNCHRONIZE_CACHE:
		/* Issue FIB to tell Firmware to flush it's cache */
@@ -1778,7 +1783,7 @@ static int query_disk(struct aac_dev *dev, void __user *arg)
	if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))
		return -EFAULT;
	if (qd.cnum == -1)
		qd.cnum = ID_LUN_TO_CONTAINER(qd.id, qd.lun);
		qd.cnum = qd.id;
	else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1))
	{
		if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers)
@@ -1890,6 +1895,7 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
	struct scsi_cmnd *scsicmd;

	scsicmd = (struct scsi_cmnd *) context;
	scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
	dev = (struct aac_dev *)scsicmd->device->host->hostdata;

	if (fibptr == NULL)
@@ -2068,14 +2074,13 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
	u32 timeout;

	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
	if (scsicmd->device->id >= dev->maximum_num_physicals ||
	if (scmd_id(scsicmd) >= dev->maximum_num_physicals ||
			scsicmd->device->lun > 7) {
		scsicmd->result = DID_NO_CONNECT << 16;
		scsicmd->scsi_done(scsicmd);
		return 0;
	}

	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
	switch(scsicmd->sc_data_direction){
	case DMA_TO_DEVICE:
		flag = SRB_DataOut;
@@ -2103,8 +2108,8 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)

	srbcmd = (struct aac_srb*) fib_data(cmd_fibcontext);
	srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
	srbcmd->channel  = cpu_to_le32(aac_logical_to_phys(scsicmd->device->channel));
	srbcmd->id   = cpu_to_le32(scsicmd->device->id);
	srbcmd->channel  = cpu_to_le32(aac_logical_to_phys(scmd_channel(scsicmd)));
	srbcmd->id   = cpu_to_le32(scmd_id(scsicmd));
	srbcmd->lun      = cpu_to_le32(scsicmd->device->lun);
	srbcmd->flags    = cpu_to_le32(flag);
	timeout = scsicmd->timeout_per_command/HZ;
@@ -2161,7 +2166,8 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
	/*
	 *	Check that the command queued to the controller
	 */
	if (status == -EINPROGRESS){
	if (status == -EINPROGRESS) {
		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
		return 0;
	}

@@ -2192,8 +2198,6 @@ static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* psg)
			scsicmd->sc_data_direction);
		psg->count = cpu_to_le32(sg_count);

		byte_count = 0;

		for (i = 0; i < sg_count; i++) {
			psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg));
			psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
@@ -2249,18 +2253,17 @@ static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* p

		sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
			scsicmd->sc_data_direction);
		psg->count = cpu_to_le32(sg_count);

		byte_count = 0;

		for (i = 0; i < sg_count; i++) {
			int count = sg_dma_len(sg);
			addr = sg_dma_address(sg);
			psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
			psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
			psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
			byte_count += sg_dma_len(sg);
			psg->sg[i].count = cpu_to_le32(count);
			byte_count += count;
			sg++;
		}
		psg->count = cpu_to_le32(sg_count);
		/* hba wants the size to be exact */
		if(byte_count > scsicmd->request_bufflen){
			u32 temp = le32_to_cpu(psg->sg[i-1].count) -
@@ -2275,16 +2278,15 @@ static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* p
		}
	}
	else if(scsicmd->request_bufflen) {
		u64 addr;
		addr = pci_map_single(dev->pdev,
		scsicmd->SCp.dma_handle = pci_map_single(dev->pdev,
				scsicmd->request_buffer,
				scsicmd->request_bufflen,
				scsicmd->sc_data_direction);
		addr = scsicmd->SCp.dma_handle;
		psg->count = cpu_to_le32(1);
		psg->sg[0].addr[0] = cpu_to_le32(addr & 0xffffffff);
		psg->sg[0].addr[1] = cpu_to_le32(addr >> 32);
		psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);
		scsicmd->SCp.dma_handle = addr;
		byte_count = scsicmd->request_bufflen;
	}
	return byte_count;
@@ -10,6 +10,10 @@
 * D E F I N E S
 *----------------------------------------------------------------------------*/

#ifndef AAC_DRIVER_BUILD
# define AAC_DRIVER_BUILD 2409
# define AAC_DRIVER_BRANCH "-mh1"
#endif
#define MAXIMUM_NUM_CONTAINERS	32

#define AAC_NUM_MGT_FIB		8
@@ -25,7 +29,6 @@
 * These macros convert from physical channels to virtual channels
 */
#define CONTAINER_CHANNEL		(0)
#define ID_LUN_TO_CONTAINER(id, lun)	(id)
#define CONTAINER_TO_CHANNEL(cont)	(CONTAINER_CHANNEL)
#define CONTAINER_TO_ID(cont)		(cont)
#define CONTAINER_TO_LUN(cont)		(0)
@@ -789,6 +792,7 @@ struct fsa_dev_info {
	u64		size;
	u32		type;
	u32		config_waiting_on;
	unsigned long	config_waiting_stamp;
	u16		queue_depth;
	u8		config_needed;
	u8		valid;
@@ -1771,6 +1775,11 @@ static inline u32 cap_to_cyls(sector_t capacity, u32 divisor)
}

struct scsi_cmnd;
/* SCp.phase values */
#define AAC_OWNER_MIDLEVEL	0x101
#define AAC_OWNER_LOWLEVEL	0x102
#define AAC_OWNER_ERROR_HANDLER	0x103
#define AAC_OWNER_FIRMWARE	0x106

const char *aac_driverinfo(struct Scsi_Host *);
struct fib *aac_fib_alloc(struct aac_dev *dev);
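The new AAC_OWNER_* values give every scsi_cmnd an explicit owner, recorded in SCp.phase at each handoff, so code like the aac_eh_reset() and aac_synchronize() hunks can ask whether a command is still held by the firmware instead of relying on serial_number. A hedged sketch of the intended flow (function names hypothetical):

/* Hedged sketch of the ownership-tagging idiom introduced above. */
static int example_queuecommand(struct scsi_cmnd *cmd)
{
	cmd->SCp.phase = AAC_OWNER_LOWLEVEL;	/* entered the driver */
	/* ... build and send the FIB; on -EINPROGRESS: ... */
	cmd->SCp.phase = AAC_OWNER_FIRMWARE;	/* now owned by the adapter */
	return 0;
}

static void example_completion(struct scsi_cmnd *cmd)
{
	cmd->SCp.phase = AAC_OWNER_MIDLEVEL;	/* handed back to the midlayer */
	cmd->scsi_done(cmd);
}
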
@@ -38,6 +38,8 @@
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>

@@ -293,6 +295,16 @@ return_fib:
		status = 0;
	} else {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		/* If someone killed the AIF aacraid thread, restart it */
		status = !dev->aif_thread;
		if (status && dev->queues && dev->fsa_dev) {
			/* Be paranoid, be very paranoid! */
			kthread_stop(dev->thread);
			ssleep(1);
			dev->aif_thread = 0;
			dev->thread = kthread_run(aac_command_thread, dev, dev->name);
			ssleep(1);
		}
		if (f.wait) {
			if(down_interruptible(&fibctx->wait_sem) < 0) {
				status = -EINTR;
@@ -767,9 +767,9 @@ void aac_printf(struct aac_dev *dev, u32 val)
		if (cp[length] != 0)
			cp[length] = 0;
		if (level == LOG_AAC_HIGH_ERROR)
			printk(KERN_WARNING "aacraid:%s", cp);
			printk(KERN_WARNING "%s:%s", dev->name, cp);
		else
			printk(KERN_INFO "aacraid:%s", cp);
			printk(KERN_INFO "%s:%s", dev->name, cp);
	}
	memset(cp, 0, 256);
}
@@ -784,6 +784,7 @@ void aac_printf(struct aac_dev *dev, u32 val)
 *	dispatches it to the appropriate routine for handling.
 */

#define AIF_SNIFF_TIMEOUT	(30*HZ)
static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
{
	struct hw_fib * hw_fib = fibptr->hw_fib;
@@ -837,6 +838,7 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
			if (device) {
				dev->fsa_dev[container].config_needed = CHANGE;
				dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
				dev->fsa_dev[container].config_waiting_stamp = jiffies;
				scsi_device_put(device);
			}
		}
@@ -849,13 +851,15 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
		if (container != (u32)-1) {
			if (container >= dev->maximum_num_containers)
				break;
			if (dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data))
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		} else for (container = 0;
		    container < dev->maximum_num_containers; ++container) {
			if (dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data))
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		}
		break;
@@ -872,6 +876,7 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
			dev->fsa_dev[container].config_needed = ADD;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		/*
@@ -884,6 +889,7 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
			dev->fsa_dev[container].config_needed = DELETE;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		/*
@@ -894,11 +900,13 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			if (dev->fsa_dev[container].config_waiting_on)
			if (dev->fsa_dev[container].config_waiting_on &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				break;
			dev->fsa_dev[container].config_needed = CHANGE;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		case AifEnConfigChange:
@@ -913,13 +921,15 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
		if (container != (u32)-1) {
			if (container >= dev->maximum_num_containers)
				break;
			if (dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data))
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		} else for (container = 0;
		    container < dev->maximum_num_containers; ++container) {
			if (dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data))
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		}
		break;
@@ -946,6 +956,8 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
					dev->fsa_dev[container].config_waiting_on =
						AifEnContainerChange;
					dev->fsa_dev[container].config_needed = ADD;
					dev->fsa_dev[container].config_waiting_stamp =
						jiffies;
				}
			}
		if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
@@ -961,6 +973,8 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
					dev->fsa_dev[container].config_waiting_on =
						AifEnContainerChange;
					dev->fsa_dev[container].config_needed = DELETE;
					dev->fsa_dev[container].config_waiting_stamp =
						jiffies;
				}
			}
		break;
@@ -969,8 +983,9 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
		device_config_needed = NOTHING;
		for (container = 0; container < dev->maximum_num_containers;
		    ++container) {
			if ((dev->fsa_dev[container].config_waiting_on == 0)
			 && (dev->fsa_dev[container].config_needed != NOTHING)) {
			if ((dev->fsa_dev[container].config_waiting_on == 0) &&
				(dev->fsa_dev[container].config_needed != NOTHING) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
				device_config_needed =
					dev->fsa_dev[container].config_needed;
				dev->fsa_dev[container].config_needed = NOTHING;
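The AIF_SNIFF_TIMEOUT checks added above all use time_before(), the kernel's wrap-safe way to compare jiffies values; a raw '<' comparison would misbehave when the jiffies counter wraps. A hedged illustration of the idiom:

/* Hedged sketch of the wrap-safe jiffies deadline pattern. */
unsigned long stamp = jiffies;			/* when the event was noted */

if (time_before(jiffies, stamp + AIF_SNIFF_TIMEOUT)) {
	/* still inside the 30-second window: honour the pending event */
}
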
@@ -27,12 +27,6 @@
 * Abstract: Linux Driver entry module for Adaptec RAID Array Controller
 */

#define AAC_DRIVER_VERSION		"1.1-4"
#ifndef AAC_DRIVER_BRANCH
#define AAC_DRIVER_BRANCH		""
#endif
#define AAC_DRIVER_BUILD_DATE		__DATE__ " " __TIME__
#define AAC_DRIVERNAME			"aacraid"

#include <linux/compat.h>
#include <linux/blkdev.h>
@@ -62,6 +56,13 @@

#include "aacraid.h"

#define AAC_DRIVER_VERSION		"1.1-5"
#ifndef AAC_DRIVER_BRANCH
#define AAC_DRIVER_BRANCH		""
#endif
#define AAC_DRIVER_BUILD_DATE		__DATE__ " " __TIME__
#define AAC_DRIVERNAME			"aacraid"

#ifdef AAC_DRIVER_BUILD
#define _str(x) #x
#define str(x) _str(x)
@@ -73,7 +74,7 @@
MODULE_AUTHOR("Red Hat Inc and Adaptec");
MODULE_DESCRIPTION("Dell PERC2, 2/Si, 3/Si, 3/Di, "
		   "Adaptec Advanced Raid Products, "
		   "and HP NetRAID-4M SCSI driver");
		   "HP NetRAID-4M, IBM ServeRAID & ICP SCSI driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(AAC_DRIVER_FULL_VERSION);

@@ -243,6 +244,7 @@ static struct aac_driver_ident aac_drivers[] = {
static int aac_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	cmd->scsi_done = done;
	cmd->SCp.phase = AAC_OWNER_LOWLEVEL;
	return (aac_scsi_cmd(cmd) ? FAILED : 0);
}

@@ -471,7 +473,8 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
	__shost_for_each_device(dev, host) {
		spin_lock_irqsave(&dev->list_lock, flags);
		list_for_each_entry(command, &dev->cmd_list, list) {
			if (command->serial_number) {
			if ((command != cmd) &&
			    (command->SCp.phase == AAC_OWNER_FIRMWARE)) {
				active++;
				break;
			}
@@ -569,12 +572,12 @@ static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long

		f = compat_alloc_user_space(sizeof(*f));
		ret = 0;
		if (clear_user(f, sizeof(*f) != sizeof(*f)))
		if (clear_user(f, sizeof(*f)) != sizeof(*f))
			ret = -EFAULT;
		if (copy_in_user(f, (void __user *)arg, sizeof(struct fib_ioctl) - sizeof(u32)))
			ret = -EFAULT;
		if (!ret)
			ret = aac_do_ioctl(dev, cmd, (void __user *)arg);
			ret = aac_do_ioctl(dev, cmd, f);
		break;
	}

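clear_user() returns the number of bytes it could not zero, so 0 means success; the buggy line above passed the comparison sizeof(*f) != sizeof(*f), which is always 0, as the size argument, so nothing was cleared at all. A hedged sketch of the conventional check (note the patched line above tests a different condition):

/* Hedged sketch of the usual clear_user() convention. */
void __user *p = compat_alloc_user_space(sizeof(struct fib_ioctl));

if (clear_user(p, sizeof(struct fib_ioctl)))	/* nonzero => fault */
	return -EFAULT;
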
@@ -687,6 +690,18 @@ static ssize_t aac_show_serial_number(struct class_device *class_dev,
	return len;
}

static ssize_t aac_show_max_channel(struct class_device *class_dev, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
	  class_to_shost(class_dev)->max_channel);
}

static ssize_t aac_show_max_id(struct class_device *class_dev, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
	  class_to_shost(class_dev)->max_id);
}


static struct class_device_attribute aac_model = {
	.attr = {
@@ -730,6 +745,20 @@ static struct class_device_attribute aac_serial_number = {
	},
	.show = aac_show_serial_number,
};
static struct class_device_attribute aac_max_channel = {
	.attr = {
		.name = "max_channel",
		.mode = S_IRUGO,
	},
	.show = aac_show_max_channel,
};
static struct class_device_attribute aac_max_id = {
	.attr = {
		.name = "max_id",
		.mode = S_IRUGO,
	},
	.show = aac_show_max_id,
};

static struct class_device_attribute *aac_attrs[] = {
	&aac_model,
@@ -738,6 +767,8 @@ static struct class_device_attribute *aac_attrs[] = {
	&aac_monitor_version,
	&aac_bios_version,
	&aac_serial_number,
	&aac_max_channel,
	&aac_max_id,
	NULL
};

@@ -775,6 +806,7 @@ static struct scsi_host_template aac_driver_template = {
	.cmd_per_lun    		= AAC_NUM_IO_FIB,
#endif
	.use_clustering			= ENABLE_CLUSTERING,
	.emulated                       = 1,
};


@@ -798,10 +830,11 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
	error = pci_enable_device(pdev);
	if (error)
		goto out;
	error = -ENODEV;

	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) ||
			pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))
		goto out;
		goto out_disable_pdev;
	/*
	 * If the quirk31 bit is set, the adapter needs adapter
	 * to driver communication memory to be allocated below 2gig
@@ -809,7 +842,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
	if (aac_drivers[index].quirks & AAC_QUIRK_31BIT)
		if (pci_set_dma_mask(pdev, DMA_31BIT_MASK) ||
				pci_set_consistent_dma_mask(pdev, DMA_31BIT_MASK))
			goto out;
			goto out_disable_pdev;

	pci_set_master(pdev);

@@ -904,9 +937,9 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
	 * physical channels are address by their actual physical number+1
	 */
	if (aac->nondasd_support == 1)
		shost->max_channel = aac->maximum_num_channels + 1;
		shost->max_channel = aac->maximum_num_channels;
	else
		shost->max_channel = 1;
		shost->max_channel = 0;

	aac_get_config_status(aac);
	aac_get_containers(aac);
@@ -1020,7 +1053,8 @@ static int __init aac_init(void)

static void __exit aac_exit(void)
{
	unregister_chrdev(aac_cfg_major, "aac");
	if (aac_cfg_major > -1)
		unregister_chrdev(aac_cfg_major, "aac");
	pci_unregister_driver(&aac_pci_driver);
}

@ -183,7 +183,7 @@ static int rkt_sync_cmd(struct aac_dev *dev, u32 command,
|
||||
/*
|
||||
* Yield the processor in case we are slow
|
||||
*/
|
||||
schedule_timeout_uninterruptible(1);
|
||||
msleep(1);
|
||||
}
|
||||
if (ok != 1) {
|
||||
/*
|
||||
@ -343,7 +343,7 @@ static int aac_rkt_check_health(struct aac_dev *dev)
|
||||
NULL, NULL, NULL, NULL, NULL);
|
||||
pci_free_consistent(dev->pdev, sizeof(struct POSTSTATUS),
|
||||
post, paddr);
|
||||
if ((buffer[0] == '0') && (buffer[1] == 'x')) {
|
||||
if ((buffer[0] == '0') && ((buffer[1] == 'x') || (buffer[1] == 'X'))) {
|
||||
ret = (buffer[2] <= '9') ? (buffer[2] - '0') : (buffer[2] - 'A' + 10);
|
||||
ret <<= 4;
|
||||
ret += (buffer[3] <= '9') ? (buffer[3] - '0') : (buffer[3] - 'A' + 10);
|
||||
|
@@ -183,7 +183,7 @@ static int rx_sync_cmd(struct aac_dev *dev, u32 command,
		/*
		 *	Yield the processor in case we are slow
		 */
		schedule_timeout_uninterruptible(1);
		msleep(1);
	}
	if (ok != 1) {
		/*
@@ -342,7 +342,7 @@ static int aac_rx_check_health(struct aac_dev *dev)
			NULL, NULL, NULL, NULL, NULL);
		pci_free_consistent(dev->pdev, sizeof(struct POSTSTATUS),
			post, paddr);
		if ((buffer[0] == '0') && (buffer[1] == 'x')) {
		if ((buffer[0] == '0') && ((buffer[1] == 'x') || (buffer[1] == 'X'))) {
			ret = (buffer[2] <= '9') ? (buffer[2] - '0') : (buffer[2] - 'A' + 10);
			ret <<= 4;
			ret += (buffer[3] <= '9') ? (buffer[3] - '0') : (buffer[3] - 'A' + 10);
@@ -189,7 +189,7 @@ static int sa_sync_cmd(struct aac_dev *dev, u32 command,
			ok = 1;
			break;
		}
		schedule_timeout_uninterruptible(1);
		msleep(1);
	}

	if (ok != 1)
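All three sync-command loops above swap schedule_timeout_uninterruptible(1) for msleep(1): one jiffy is HZ-dependent (10ms at HZ=100, 1ms at HZ=1000), whereas msleep() always sleeps at least the requested number of milliseconds. The resulting polling shape, sketched with a hypothetical completion predicate:

	/* Poll for completion, yielding the CPU 1ms at a time. */
	static int wait_for_sync_cmd(struct aac_dev *dev, unsigned int timeout_ms)
	{
		unsigned int i;

		for (i = 0; i < timeout_ms; i++) {
			if (sync_cmd_completed(dev))	/* hypothetical predicate */
				return 1;
			msleep(1);	/* >= 1ms regardless of HZ */
		}
		return 0;
	}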
@@ -372,7 +372,7 @@ typedef enum {
	AHD_CURRENT_SENSING	= 0x40000,
	AHD_SCB_CONFIG_USED	= 0x80000,/* No SEEPROM but SCB had info. */
	AHD_HP_BOARD		= 0x100000,
	AHD_RESET_POLL_ACTIVE	= 0x200000,
	AHD_BUS_RESET_ACTIVE	= 0x200000,
	AHD_UPDATE_PEND_CMDS	= 0x400000,
	AHD_RUNNING_QOUTFIFO	= 0x800000,
	AHD_HAD_FIRST_SEL	= 0x1000000
@@ -589,7 +589,7 @@ typedef enum {
	SCB_PACKETIZED		= 0x00800,
	SCB_EXPECT_PPR_BUSFREE	= 0x01000,
	SCB_PKT_SENSE		= 0x02000,
	SCB_CMDPHASE_ABORT	= 0x04000,
	SCB_EXTERNAL_RESET	= 0x04000,/* Device was reset externally */
	SCB_ON_COL_LIST		= 0x08000,
	SCB_SILENT		= 0x10000 /*
					   * Be quiet about transmission type
@@ -207,7 +207,6 @@ static void ahd_add_scb_to_free_list(struct ahd_softc *ahd,
static u_int		ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid,
				     u_int prev, u_int next, u_int tid);
static void		ahd_reset_current_bus(struct ahd_softc *ahd);
static ahd_callback_t	ahd_reset_poll;
static ahd_callback_t	ahd_stat_timer;
#ifdef AHD_DUMP_SEQ
static void		ahd_dumpseq(struct ahd_softc *ahd);
@@ -1054,12 +1053,10 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
			 * If a target takes us into the command phase
			 * assume that it has been externally reset and
			 * has thus lost our previous packetized negotiation
			 * agreement.  Since we have not sent an identify
			 * message and may not have fully qualified the
			 * connection, we change our command to TUR, assert
			 * ATN and ABORT the task when we go to message in
			 * phase.  The OSM will see the REQUEUE_REQUEST
			 * status and retry the command.
			 * agreement.
			 * Revert to async/narrow transfers until we
			 * can renegotiate with the device and notify
			 * the OSM about the reset.
			 */
			scbid = ahd_get_scbptr(ahd);
			scb = ahd_lookup_scb(ahd, scbid);
@@ -1086,31 +1083,15 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
			ahd_set_syncrate(ahd, &devinfo, /*period*/0,
					 /*offset*/0, /*ppr_options*/0,
					 AHD_TRANS_ACTIVE, /*paused*/TRUE);
			ahd_outb(ahd, SCB_CDB_STORE, 0);
			ahd_outb(ahd, SCB_CDB_STORE+1, 0);
			ahd_outb(ahd, SCB_CDB_STORE+2, 0);
			ahd_outb(ahd, SCB_CDB_STORE+3, 0);
			ahd_outb(ahd, SCB_CDB_STORE+4, 0);
			ahd_outb(ahd, SCB_CDB_STORE+5, 0);
			ahd_outb(ahd, SCB_CDB_LEN, 6);
			scb->hscb->control &= ~(TAG_ENB|SCB_TAG_TYPE);
			scb->hscb->control |= MK_MESSAGE;
			ahd_outb(ahd, SCB_CONTROL, scb->hscb->control);
			ahd_outb(ahd, MSG_OUT, HOST_MSG);
			ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid);
			/*
			 * The lun is 0, regardless of the SCB's lun
			 * as we have not sent an identify message.
			 */
			ahd_outb(ahd, SAVED_LUN, 0);
			ahd_outb(ahd, SEQ_FLAGS, 0);
			ahd_assert_atn(ahd);
			scb->flags &= ~SCB_PACKETIZED;
			scb->flags |= SCB_ABORT|SCB_CMDPHASE_ABORT;
			scb->flags |= SCB_EXTERNAL_RESET;
			ahd_freeze_devq(ahd, scb);
			ahd_set_transaction_status(scb, CAM_REQUEUE_REQ);
			ahd_freeze_scb(scb);

			/* Notify XPT */
			ahd_send_async(ahd, devinfo.channel, devinfo.target,
				       CAM_LUN_WILDCARD, AC_SENT_BDR, NULL);

			/*
			 * Allow the sequencer to continue with
			 * non-pack processing.
@@ -1534,6 +1515,18 @@ ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat)
	lqistat1 = ahd_inb(ahd, LQISTAT1);
	lqostat0 = ahd_inb(ahd, LQOSTAT0);
	busfreetime = ahd_inb(ahd, SSTAT2) & BUSFREETIME;

	/*
	 * Ignore external resets after a bus reset.
	 */
	if (((status & SCSIRSTI) != 0) && (ahd->flags & AHD_BUS_RESET_ACTIVE))
		return;

	/*
	 * Clear bus reset flag
	 */
	ahd->flags &= ~AHD_BUS_RESET_ACTIVE;

	if ((status0 & (SELDI|SELDO)) != 0) {
		u_int simode0;

@@ -2207,22 +2200,6 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
			if (sent_msg == MSG_ABORT_TAG)
				tag = SCB_GET_TAG(scb);

			if ((scb->flags & SCB_CMDPHASE_ABORT) != 0) {
				/*
				 * This abort is in response to an
				 * unexpected switch to command phase
				 * for a packetized connection.  Since
				 * the identify message was never sent,
				 * "saved lun" is 0.  We really want to
				 * abort only the SCB that encountered
				 * this error, which could have a different
				 * lun.  The SCB will be retried so the OS
				 * will see the UA after renegotiating to
				 * packetized.
				 */
				tag = SCB_GET_TAG(scb);
				saved_lun = scb->hscb->lun;
			}
			found = ahd_abort_scbs(ahd, target, 'A', saved_lun,
					       tag, ROLE_INITIATOR,
					       CAM_REQ_ABORTED);
@@ -7847,6 +7824,17 @@ ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset)
	int	found;
	u_int	fifo;
	u_int	next_fifo;
	uint8_t scsiseq;

	/*
	 * Check if the last bus reset is cleared
	 */
	if (ahd->flags & AHD_BUS_RESET_ACTIVE) {
		printf("%s: bus reset still active\n",
		       ahd_name(ahd));
		return 0;
	}
	ahd->flags |= AHD_BUS_RESET_ACTIVE;

	ahd->pending_device = NULL;

@@ -7860,6 +7848,12 @@ ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset)
	/* Make sure the sequencer is in a safe location. */
	ahd_clear_critical_section(ahd);

	/*
	 * Run our command complete fifos to ensure that we perform
	 * completion processing on any commands that 'completed'
	 * before the reset occurred.
	 */
	ahd_run_qoutfifo(ahd);
#ifdef AHD_TARGET_MODE
	if ((ahd->flags & AHD_TARGETROLE) != 0) {
		ahd_run_tqinfifo(ahd, /*paused*/TRUE);
@@ -7924,30 +7918,14 @@ ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset)
	ahd_clear_fifo(ahd, 1);

	/*
	 * Revert to async/narrow transfers until we renegotiate.
	 * Reenable selections
	 */
	ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) | ENSCSIRST);
	scsiseq = ahd_inb(ahd, SCSISEQ_TEMPLATE);
	ahd_outb(ahd, SCSISEQ1, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));

	max_scsiid = (ahd->features & AHD_WIDE) ? 15 : 7;
	for (target = 0; target <= max_scsiid; target++) {

		if (ahd->enabled_targets[target] == NULL)
			continue;
		for (initiator = 0; initiator <= max_scsiid; initiator++) {
			struct ahd_devinfo devinfo;

			ahd_compile_devinfo(&devinfo, target, initiator,
					    CAM_LUN_WILDCARD,
					    'A', ROLE_UNKNOWN);
			ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
				      AHD_TRANS_CUR, /*paused*/TRUE);
			ahd_set_syncrate(ahd, &devinfo, /*period*/0,
					 /*offset*/0, /*ppr_options*/0,
					 AHD_TRANS_CUR, /*paused*/TRUE);
		}
	}

#ifdef AHD_TARGET_MODE
	max_scsiid = (ahd->features & AHD_WIDE) ? 15 : 7;

	/*
	 * Send an immediate notify ccb to all target more peripheral
	 * drivers affected by this action.
@@ -7975,53 +7953,33 @@ ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset)
	/* Notify the XPT that a bus reset occurred */
	ahd_send_async(ahd, devinfo.channel, CAM_TARGET_WILDCARD,
		       CAM_LUN_WILDCARD, AC_BUS_RESET, NULL);
	ahd_restart(ahd);

	/*
	 * Freeze the SIMQ until our poller can determine that
	 * the bus reset has really gone away.  We set the initial
	 * timer to 0 to have the check performed as soon as possible
	 * from the timer context.
	 * Revert to async/narrow transfers until we renegotiate.
	 */
	if ((ahd->flags & AHD_RESET_POLL_ACTIVE) == 0) {
		ahd->flags |= AHD_RESET_POLL_ACTIVE;
		ahd_freeze_simq(ahd);
		ahd_timer_reset(&ahd->reset_timer, 0, ahd_reset_poll, ahd);
	for (target = 0; target <= max_scsiid; target++) {

		if (ahd->enabled_targets[target] == NULL)
			continue;
		for (initiator = 0; initiator <= max_scsiid; initiator++) {
			struct ahd_devinfo devinfo;

			ahd_compile_devinfo(&devinfo, target, initiator,
					    CAM_LUN_WILDCARD,
					    'A', ROLE_UNKNOWN);
			ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
				      AHD_TRANS_CUR, /*paused*/TRUE);
			ahd_set_syncrate(ahd, &devinfo, /*period*/0,
					 /*offset*/0, /*ppr_options*/0,
					 AHD_TRANS_CUR, /*paused*/TRUE);
		}
	}

	ahd_restart(ahd);

	return (found);
}


#define AHD_RESET_POLL_US 1000
static void
ahd_reset_poll(void *arg)
{
	struct ahd_softc *ahd = arg;
	u_int	scsiseq1;
	u_long	s;

	ahd_lock(ahd, &s);
	ahd_pause(ahd);
	ahd_update_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI);
	if ((ahd_inb(ahd, SSTAT1) & SCSIRSTI) != 0) {
		ahd_timer_reset(&ahd->reset_timer, AHD_RESET_POLL_US,
				ahd_reset_poll, ahd);
		ahd_unpause(ahd);
		ahd_unlock(ahd, &s);
		return;
	}

	/* Reset is now low.  Complete chip reinitialization. */
	ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) | ENSCSIRST);
	scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE);
	ahd_outb(ahd, SCSISEQ1, scsiseq1 & (ENSELI|ENRSELI|ENAUTOATNP));
	ahd_unpause(ahd);
	ahd->flags &= ~AHD_RESET_POLL_ACTIVE;
	ahd_unlock(ahd, &s);
	ahd_release_simq(ahd);
}

/**************************** Statistics Processing ***************************/
static void
ahd_stat_timer(void *arg)
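Taken together, the core changes serialize bus resets through a single flag: ahd_reset_channel() refuses to start a new reset while AHD_BUS_RESET_ACTIVE is set, and ahd_handle_scsiint() drops SCSIRSTI interrupts caused by our own reset, clearing the flag on any other interrupt. A condensed sketch of that handshake (function names abbreviated; the real logic is spread across the hunks above):

	static int sketch_reset_channel(struct ahd_softc *ahd)
	{
		if (ahd->flags & AHD_BUS_RESET_ACTIVE)
			return 0;			/* reset already in flight */
		ahd->flags |= AHD_BUS_RESET_ACTIVE;
		/* ... assert SCSI reset, revert to async/narrow ... */
		return 1;
	}

	static void sketch_scsiint(struct ahd_softc *ahd, u_int status)
	{
		if ((status & SCSIRSTI) && (ahd->flags & AHD_BUS_RESET_ACTIVE))
			return;				/* echo of our own reset */
		ahd->flags &= ~AHD_BUS_RESET_ACTIVE;
		/* ... normal interrupt processing ... */
	}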
@@ -782,6 +782,7 @@ ahd_linux_bus_reset(struct scsi_cmnd *cmd)
{
	struct ahd_softc *ahd;
	int    found;
	unsigned long flags;

	ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
#ifdef AHD_DEBUG
@@ -789,8 +790,11 @@ ahd_linux_bus_reset(struct scsi_cmnd *cmd)
		printf("%s: Bus reset called for cmd %p\n",
		       ahd_name(ahd), cmd);
#endif
	ahd_lock(ahd, &flags);

	found = ahd_reset_channel(ahd, scmd_channel(cmd) + 'A',
				  /*initiate reset*/TRUE);
	ahd_unlock(ahd, &flags);

	if (bootverbose)
		printf("%s: SCSI bus reset delivered. "
@@ -168,7 +168,7 @@ static void release_event_pool(struct event_pool *pool,
			++in_use;
		if (pool->events[i].ext_list) {
			dma_free_coherent(hostdata->dev,
				  SG_ALL * sizeof(struct memory_descriptor),
				  SG_ALL * sizeof(struct srp_direct_buf),
				  pool->events[i].ext_list,
				  pool->events[i].ext_list_token);
		}
@@ -284,40 +284,37 @@ static void set_srp_direction(struct scsi_cmnd *cmd,
			      struct srp_cmd *srp_cmd,
			      int numbuf)
{
	u8 fmt;

	if (numbuf == 0)
		return;

	if (numbuf == 1) {
	if (numbuf == 1)
		fmt = SRP_DATA_DESC_DIRECT;
	else {
		fmt = SRP_DATA_DESC_INDIRECT;
		numbuf = min(numbuf, MAX_INDIRECT_BUFS);

		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			srp_cmd->data_out_format = SRP_DIRECT_BUFFER;
		else
			srp_cmd->data_in_format = SRP_DIRECT_BUFFER;
	} else {
		if (cmd->sc_data_direction == DMA_TO_DEVICE) {
			srp_cmd->data_out_format = SRP_INDIRECT_BUFFER;
			srp_cmd->data_out_count =
				numbuf < MAX_INDIRECT_BUFS ?
					numbuf: MAX_INDIRECT_BUFS;
		} else {
			srp_cmd->data_in_format = SRP_INDIRECT_BUFFER;
			srp_cmd->data_in_count =
				numbuf < MAX_INDIRECT_BUFS ?
					numbuf: MAX_INDIRECT_BUFS;
		}
			srp_cmd->data_out_desc_cnt = numbuf;
		else
			srp_cmd->data_in_desc_cnt = numbuf;
	}

	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		srp_cmd->buf_fmt = fmt << 4;
	else
		srp_cmd->buf_fmt = fmt;
}

static void unmap_sg_list(int num_entries,
static void unmap_sg_list(int num_entries,
			  struct device *dev,
			  struct memory_descriptor *md)
{
			  struct srp_direct_buf *md)
{
	int i;

	for (i = 0; i < num_entries; ++i) {
		dma_unmap_single(dev,
				 md[i].virtual_address,
				 md[i].length, DMA_BIDIRECTIONAL);
	}
	for (i = 0; i < num_entries; ++i)
		dma_unmap_single(dev, md[i].va, md[i].len, DMA_BIDIRECTIONAL);
}

/**
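The rewritten set_srp_direction() stops tracking separate data_out_format/data_in_format fields and instead packs both descriptor formats into the single srp_cmd->buf_fmt byte: data-out in the high nibble, data-in in the low nibble. A sketch of the encode/decode pair implied by these hunks:

	/* Encode: out-format in bits 7..4, in-format in bits 3..0. */
	static u8 srp_pack_buf_fmt(u8 out_fmt, u8 in_fmt)
	{
		return (out_fmt << 4) | (in_fmt & 0x0f);
	}

	/* Decode, exactly as unmap_cmd_data() below unpacks it. */
	static void srp_unpack_buf_fmt(u8 buf_fmt, u8 *out_fmt, u8 *in_fmt)
	{
		*out_fmt = buf_fmt >> 4;
		*in_fmt = buf_fmt & ((1U << 4) - 1);
	}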
@@ -330,23 +327,26 @@ static void unmap_cmd_data(struct srp_cmd *cmd,
			   struct srp_event_struct *evt_struct,
			   struct device *dev)
{
	if ((cmd->data_out_format == SRP_NO_BUFFER) &&
	    (cmd->data_in_format == SRP_NO_BUFFER))
	u8 out_fmt, in_fmt;

	out_fmt = cmd->buf_fmt >> 4;
	in_fmt = cmd->buf_fmt & ((1U << 4) - 1);

	if (out_fmt == SRP_NO_DATA_DESC && in_fmt == SRP_NO_DATA_DESC)
		return;
	else if ((cmd->data_out_format == SRP_DIRECT_BUFFER) ||
		 (cmd->data_in_format == SRP_DIRECT_BUFFER)) {
		struct memory_descriptor *data =
			(struct memory_descriptor *)cmd->additional_data;
		dma_unmap_single(dev, data->virtual_address, data->length,
				 DMA_BIDIRECTIONAL);
	else if (out_fmt == SRP_DATA_DESC_DIRECT ||
		 in_fmt == SRP_DATA_DESC_DIRECT) {
		struct srp_direct_buf *data =
			(struct srp_direct_buf *) cmd->add_data;
		dma_unmap_single(dev, data->va, data->len, DMA_BIDIRECTIONAL);
	} else {
		struct indirect_descriptor *indirect =
			(struct indirect_descriptor *)cmd->additional_data;
		int num_mapped = indirect->head.length /
			sizeof(indirect->list[0]);
		struct srp_indirect_buf *indirect =
			(struct srp_indirect_buf *) cmd->add_data;
		int num_mapped = indirect->table_desc.len /
			sizeof(struct srp_direct_buf);

		if (num_mapped <= MAX_INDIRECT_BUFS) {
			unmap_sg_list(num_mapped, dev, &indirect->list[0]);
			unmap_sg_list(num_mapped, dev, &indirect->desc_list[0]);
			return;
		}

@@ -356,17 +356,17 @@ static void unmap_cmd_data(struct srp_cmd *cmd,

static int map_sg_list(int num_entries,
		       struct scatterlist *sg,
		       struct memory_descriptor *md)
		       struct srp_direct_buf *md)
{
	int i;
	u64 total_length = 0;

	for (i = 0; i < num_entries; ++i) {
		struct memory_descriptor *descr = md + i;
		struct srp_direct_buf *descr = md + i;
		struct scatterlist *sg_entry = &sg[i];
		descr->virtual_address = sg_dma_address(sg_entry);
		descr->length = sg_dma_len(sg_entry);
		descr->memory_handle = 0;
		descr->va = sg_dma_address(sg_entry);
		descr->len = sg_dma_len(sg_entry);
		descr->key = 0;
		total_length += sg_dma_len(sg_entry);
	}
	return total_length;
@@ -389,10 +389,10 @@ static int map_sg_data(struct scsi_cmnd *cmd,
	int sg_mapped;
	u64 total_length = 0;
	struct scatterlist *sg = cmd->request_buffer;
	struct memory_descriptor *data =
		(struct memory_descriptor *)srp_cmd->additional_data;
	struct indirect_descriptor *indirect =
		(struct indirect_descriptor *)data;
	struct srp_direct_buf *data =
		(struct srp_direct_buf *) srp_cmd->add_data;
	struct srp_indirect_buf *indirect =
		(struct srp_indirect_buf *) data;

	sg_mapped = dma_map_sg(dev, sg, cmd->use_sg, DMA_BIDIRECTIONAL);

@@ -403,9 +403,9 @@ static int map_sg_data(struct scsi_cmnd *cmd,

	/* special case; we can use a single direct descriptor */
	if (sg_mapped == 1) {
		data->virtual_address = sg_dma_address(&sg[0]);
		data->length = sg_dma_len(&sg[0]);
		data->memory_handle = 0;
		data->va = sg_dma_address(&sg[0]);
		data->len = sg_dma_len(&sg[0]);
		data->key = 0;
		return 1;
	}

@@ -416,25 +416,26 @@ static int map_sg_data(struct scsi_cmnd *cmd,
		return 0;
	}

	indirect->head.virtual_address = 0;
	indirect->head.length = sg_mapped * sizeof(indirect->list[0]);
	indirect->head.memory_handle = 0;
	indirect->table_desc.va = 0;
	indirect->table_desc.len = sg_mapped * sizeof(struct srp_direct_buf);
	indirect->table_desc.key = 0;

	if (sg_mapped <= MAX_INDIRECT_BUFS) {
		total_length = map_sg_list(sg_mapped, sg, &indirect->list[0]);
		indirect->total_length = total_length;
		total_length = map_sg_list(sg_mapped, sg,
					   &indirect->desc_list[0]);
		indirect->len = total_length;
		return 1;
	}

	/* get indirect table */
	if (!evt_struct->ext_list) {
		evt_struct->ext_list =(struct memory_descriptor*)
		evt_struct->ext_list = (struct srp_direct_buf *)
			dma_alloc_coherent(dev,
				   SG_ALL * sizeof(struct memory_descriptor),
				   &evt_struct->ext_list_token, 0);
				   SG_ALL * sizeof(struct srp_direct_buf),
				   &evt_struct->ext_list_token, 0);
		if (!evt_struct->ext_list) {
			printk(KERN_ERR
			       "ibmvscsi: Can't allocate memory for indirect table\n");
			printk(KERN_ERR
			       "ibmvscsi: Can't allocate memory for indirect table\n");
			return 0;

		}
@@ -442,11 +443,11 @@ static int map_sg_data(struct scsi_cmnd *cmd,

	total_length = map_sg_list(sg_mapped, sg, evt_struct->ext_list);

	indirect->total_length = total_length;
	indirect->head.virtual_address = evt_struct->ext_list_token;
	indirect->head.length = sg_mapped * sizeof(indirect->list[0]);
	memcpy(indirect->list, evt_struct->ext_list,
	       MAX_INDIRECT_BUFS * sizeof(struct memory_descriptor));
	indirect->len = total_length;
	indirect->table_desc.va = evt_struct->ext_list_token;
	indirect->table_desc.len = sg_mapped * sizeof(indirect->desc_list[0]);
	memcpy(indirect->desc_list, evt_struct->ext_list,
	       MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf));

	return 1;
}
@@ -463,20 +464,20 @@ static int map_sg_data(struct scsi_cmnd *cmd,
static int map_single_data(struct scsi_cmnd *cmd,
			   struct srp_cmd *srp_cmd, struct device *dev)
{
	struct memory_descriptor *data =
		(struct memory_descriptor *)srp_cmd->additional_data;
	struct srp_direct_buf *data =
		(struct srp_direct_buf *) srp_cmd->add_data;

	data->virtual_address =
	data->va =
		dma_map_single(dev, cmd->request_buffer,
			       cmd->request_bufflen,
			       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(data->virtual_address)) {
	if (dma_mapping_error(data->va)) {
		printk(KERN_ERR
		       "ibmvscsi: Unable to map request_buffer for command!\n");
		return 0;
	}
	data->length = cmd->request_bufflen;
	data->memory_handle = 0;
	data->len = cmd->request_bufflen;
	data->key = 0;

	set_srp_direction(cmd, srp_cmd, 1);

@@ -548,7 +549,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,

	/* Copy the IU into the transfer area */
	*evt_struct->xfer_iu = evt_struct->iu;
	evt_struct->xfer_iu->srp.generic.tag = (u64)evt_struct;
	evt_struct->xfer_iu->srp.rsp.tag = (u64)evt_struct;

	/* Add this to the sent list. We need to do this
	 * before we actually send
@@ -586,27 +587,27 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
	struct srp_rsp *rsp = &evt_struct->xfer_iu->srp.rsp;
	struct scsi_cmnd *cmnd = evt_struct->cmnd;

	if (unlikely(rsp->type != SRP_RSP_TYPE)) {
	if (unlikely(rsp->opcode != SRP_RSP)) {
		if (printk_ratelimit())
			printk(KERN_WARNING
			       "ibmvscsi: bad SRP RSP type %d\n",
			       rsp->type);
			       rsp->opcode);
	}

	if (cmnd) {
		cmnd->result = rsp->status;
		if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
			memcpy(cmnd->sense_buffer,
			       rsp->sense_and_response_data,
			       rsp->sense_data_list_length);
			       rsp->data,
			       rsp->sense_data_len);
		unmap_cmd_data(&evt_struct->iu.srp.cmd,
			       evt_struct,
			       evt_struct->hostdata->dev);

		if (rsp->doover)
			cmnd->resid = rsp->data_out_residual_count;
		else if (rsp->diover)
			cmnd->resid = rsp->data_in_residual_count;
		if (rsp->flags & SRP_RSP_FLAG_DOOVER)
			cmnd->resid = rsp->data_out_res_cnt;
		else if (rsp->flags & SRP_RSP_FLAG_DIOVER)
			cmnd->resid = rsp->data_in_res_cnt;
	}

	if (evt_struct->cmnd_done)
@@ -633,10 +634,11 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
{
	struct srp_cmd *srp_cmd;
	struct srp_event_struct *evt_struct;
	struct indirect_descriptor *indirect;
	struct srp_indirect_buf *indirect;
	struct ibmvscsi_host_data *hostdata =
		(struct ibmvscsi_host_data *)&cmnd->device->host->hostdata;
	u16 lun = lun_from_dev(cmnd->device);
	u8 out_fmt, in_fmt;

	evt_struct = get_event_struct(&hostdata->pool);
	if (!evt_struct)
@@ -644,8 +646,8 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,

	/* Set up the actual SRP IU */
	srp_cmd = &evt_struct->iu.srp.cmd;
	memset(srp_cmd, 0x00, sizeof(*srp_cmd));
	srp_cmd->type = SRP_CMD_TYPE;
	memset(srp_cmd, 0x00, SRP_MAX_IU_LEN);
	srp_cmd->opcode = SRP_CMD;
	memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(cmnd->cmnd));
	srp_cmd->lun = ((u64) lun) << 48;

@@ -664,13 +666,15 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
	evt_struct->cmnd_done = done;

	/* Fix up dma address of the buffer itself */
	indirect = (struct indirect_descriptor *)srp_cmd->additional_data;
	if (((srp_cmd->data_out_format == SRP_INDIRECT_BUFFER) ||
	    (srp_cmd->data_in_format == SRP_INDIRECT_BUFFER)) &&
	    (indirect->head.virtual_address == 0)) {
		indirect->head.virtual_address = evt_struct->crq.IU_data_ptr +
		offsetof(struct srp_cmd, additional_data) +
		offsetof(struct indirect_descriptor, list);
	indirect = (struct srp_indirect_buf *) srp_cmd->add_data;
	out_fmt = srp_cmd->buf_fmt >> 4;
	in_fmt = srp_cmd->buf_fmt & ((1U << 4) - 1);
	if ((in_fmt == SRP_DATA_DESC_INDIRECT ||
	     out_fmt == SRP_DATA_DESC_INDIRECT) &&
	    indirect->table_desc.va == 0) {
		indirect->table_desc.va = evt_struct->crq.IU_data_ptr +
			offsetof(struct srp_cmd, add_data) +
			offsetof(struct srp_indirect_buf, desc_list);
	}

	return ibmvscsi_send_srp_event(evt_struct, hostdata);
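The queuecommand fix-up above computes the bus address of the indirect descriptor table when it lives inside the IU itself: the IU's DMA address plus the offset of add_data within struct srp_cmd plus the offset of desc_list within struct srp_indirect_buf. The same arithmetic in isolation (iu_dma is a hypothetical DMA handle for the transferred IU):

	#include <linux/stddef.h>	/* offsetof() */

	static u64 inline_desc_list_addr(u64 iu_dma)
	{
		/* desc_list sits at a fixed offset inside the srp_cmd IU, so
		 * its bus address is the IU address plus two field offsets. */
		return iu_dma + offsetof(struct srp_cmd, add_data)
			      + offsetof(struct srp_indirect_buf, desc_list);
	}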
@@ -780,10 +784,10 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
static void login_rsp(struct srp_event_struct *evt_struct)
{
	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
	switch (evt_struct->xfer_iu->srp.generic.type) {
	case SRP_LOGIN_RSP_TYPE:	/* it worked! */
	switch (evt_struct->xfer_iu->srp.login_rsp.opcode) {
	case SRP_LOGIN_RSP:	/* it worked! */
		break;
	case SRP_LOGIN_REJ_TYPE:	/* refused! */
	case SRP_LOGIN_REJ:	/* refused! */
		printk(KERN_INFO "ibmvscsi: SRP_LOGIN_REJ reason %u\n",
		       evt_struct->xfer_iu->srp.login_rej.reason);
		/* Login failed.  */
@@ -792,7 +796,7 @@ static void login_rsp(struct srp_event_struct *evt_struct)
	default:
		printk(KERN_ERR
		       "ibmvscsi: Invalid login response typecode 0x%02x!\n",
		       evt_struct->xfer_iu->srp.generic.type);
		       evt_struct->xfer_iu->srp.login_rsp.opcode);
		/* Login failed.  */
		atomic_set(&hostdata->request_limit, -1);
		return;
@@ -800,17 +804,17 @@ static void login_rsp(struct srp_event_struct *evt_struct)

	printk(KERN_INFO "ibmvscsi: SRP_LOGIN succeeded\n");

	if (evt_struct->xfer_iu->srp.login_rsp.request_limit_delta >
	if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta >
	    (max_requests - 2))
		evt_struct->xfer_iu->srp.login_rsp.request_limit_delta =
		evt_struct->xfer_iu->srp.login_rsp.req_lim_delta =
		    max_requests - 2;

	/* Now we know what the real request-limit is */
	atomic_set(&hostdata->request_limit,
		   evt_struct->xfer_iu->srp.login_rsp.request_limit_delta);
		   evt_struct->xfer_iu->srp.login_rsp.req_lim_delta);

	hostdata->host->can_queue =
	    evt_struct->xfer_iu->srp.login_rsp.request_limit_delta - 2;
	    evt_struct->xfer_iu->srp.login_rsp.req_lim_delta - 2;

	if (hostdata->host->can_queue < 1) {
		printk(KERN_ERR "ibmvscsi: Invalid request_limit_delta\n");
@@ -849,18 +853,19 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)

	login = &evt_struct->iu.srp.login_req;
	memset(login, 0x00, sizeof(struct srp_login_req));
	login->type = SRP_LOGIN_REQ_TYPE;
	login->max_requested_initiator_to_target_iulen = sizeof(union srp_iu);
	login->required_buffer_formats = 0x0006;
	login->opcode = SRP_LOGIN_REQ;
	login->req_it_iu_len = sizeof(union srp_iu);
	login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;

	spin_lock_irqsave(hostdata->host->host_lock, flags);
	/* Start out with a request limit of 1, since this is negotiated in
	 * the login request we are just sending
	 */
	atomic_set(&hostdata->request_limit, 1);

	spin_lock_irqsave(hostdata->host->host_lock, flags);
	rc = ibmvscsi_send_srp_event(evt_struct, hostdata);
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
	printk("ibmvscsic: sent SRP login\n");
	return rc;
};

@@ -928,13 +933,13 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)

	/* Set up an abort SRP command */
	memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
	tsk_mgmt->type = SRP_TSK_MGMT_TYPE;
	tsk_mgmt->opcode = SRP_TSK_MGMT;
	tsk_mgmt->lun = ((u64) lun) << 48;
	tsk_mgmt->task_mgmt_flags = 0x01;	/* ABORT TASK */
	tsk_mgmt->managed_task_tag = (u64) found_evt;
	tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
	tsk_mgmt->task_tag = (u64) found_evt;

	printk(KERN_INFO "ibmvscsi: aborting command. lun 0x%lx, tag 0x%lx\n",
	       tsk_mgmt->lun, tsk_mgmt->managed_task_tag);
	       tsk_mgmt->lun, tsk_mgmt->task_tag);

	evt->sync_srp = &srp_rsp;
	init_completion(&evt->comp);
@@ -948,25 +953,25 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
	wait_for_completion(&evt->comp);

	/* make sure we got a good response */
	if (unlikely(srp_rsp.srp.generic.type != SRP_RSP_TYPE)) {
	if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
		if (printk_ratelimit())
			printk(KERN_WARNING
			       "ibmvscsi: abort bad SRP RSP type %d\n",
			       srp_rsp.srp.generic.type);
			       srp_rsp.srp.rsp.opcode);
		return FAILED;
	}

	if (srp_rsp.srp.rsp.rspvalid)
		rsp_rc = *((int *)srp_rsp.srp.rsp.sense_and_response_data);
	if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
		rsp_rc = *((int *)srp_rsp.srp.rsp.data);
	else
		rsp_rc = srp_rsp.srp.rsp.status;

	if (rsp_rc) {
		if (printk_ratelimit())
			printk(KERN_WARNING
			       "ibmvscsi: abort code %d for task tag 0x%lx\n",
			       "ibmvscsi: abort code %d for task tag 0x%lx\n",
			       rsp_rc,
			       tsk_mgmt->managed_task_tag);
			       tsk_mgmt->task_tag);
		return FAILED;
	}

@@ -987,13 +992,13 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
		printk(KERN_INFO
		       "ibmvscsi: aborted task tag 0x%lx completed\n",
		       tsk_mgmt->managed_task_tag);
		       tsk_mgmt->task_tag);
		return SUCCESS;
	}

	printk(KERN_INFO
	       "ibmvscsi: successfully aborted task tag 0x%lx\n",
	       tsk_mgmt->managed_task_tag);
	       tsk_mgmt->task_tag);

	cmd->result = (DID_ABORT << 16);
	list_del(&found_evt->list);
@@ -1040,9 +1045,9 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)

	/* Set up a lun reset SRP command */
	memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
	tsk_mgmt->type = SRP_TSK_MGMT_TYPE;
	tsk_mgmt->opcode = SRP_TSK_MGMT;
	tsk_mgmt->lun = ((u64) lun) << 48;
	tsk_mgmt->task_mgmt_flags = 0x08;	/* LUN RESET */
	tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;

	printk(KERN_INFO "ibmvscsi: resetting device. lun 0x%lx\n",
	       tsk_mgmt->lun);
@@ -1059,16 +1064,16 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
	wait_for_completion(&evt->comp);

	/* make sure we got a good response */
	if (unlikely(srp_rsp.srp.generic.type != SRP_RSP_TYPE)) {
	if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
		if (printk_ratelimit())
			printk(KERN_WARNING
			       "ibmvscsi: reset bad SRP RSP type %d\n",
			       srp_rsp.srp.generic.type);
			       srp_rsp.srp.rsp.opcode);
		return FAILED;
	}

	if (srp_rsp.srp.rsp.rspvalid)
		rsp_rc = *((int *)srp_rsp.srp.rsp.sense_and_response_data);
	if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
		rsp_rc = *((int *)srp_rsp.srp.rsp.data);
	else
		rsp_rc = srp_rsp.srp.rsp.status;

@@ -1076,8 +1081,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
		if (printk_ratelimit())
			printk(KERN_WARNING
			       "ibmvscsi: reset code %d for task tag 0x%lx\n",
			       rsp_rc,
			       tsk_mgmt->managed_task_tag);
			       rsp_rc, tsk_mgmt->task_tag);
		return FAILED;
	}

@@ -1179,6 +1183,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
			/* We need to re-setup the interpartition connection */
			printk(KERN_INFO
			       "ibmvscsi: Re-enabling adapter!\n");
			atomic_set(&hostdata->request_limit, -1);
			purge_requests(hostdata, DID_REQUEUE);
			if (ibmvscsi_reenable_crq_queue(&hostdata->queue,
							hostdata) == 0)
@@ -1226,7 +1231,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
	}

	if (crq->format == VIOSRP_SRP_FORMAT)
		atomic_add(evt_struct->xfer_iu->srp.rsp.request_limit_delta,
		atomic_add(evt_struct->xfer_iu->srp.rsp.req_lim_delta,
			   &hostdata->request_limit);

	if (evt_struct->done)
@@ -68,7 +68,7 @@ struct srp_event_struct {
	void (*cmnd_done) (struct scsi_cmnd *);
	struct completion comp;
	union viosrp_iu *sync_srp;
	struct memory_descriptor *ext_list;
	struct srp_direct_buf *ext_list;
	dma_addr_t ext_list_token;
};
@@ -34,7 +34,6 @@
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include "ibmvscsi.h"
#include "srp.h"

static char partition_name[97] = "UNKNOWN";
static unsigned int partition_number = -1;
@@ -1,227 +0,0 @@
/*****************************************************************************/
/* srp.h -- SCSI RDMA Protocol definitions */
/* */
/* Written By: Colin Devilbis, IBM Corporation */
/* */
/* Copyright (C) 2003 IBM Corporation */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 2 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
/* */
/* */
/* This file contains structures and definitions for the SCSI RDMA Protocol */
/* (SRP) as defined in the T10 standard available at www.t10.org.  This */
/* file was based on the 16a version of the standard */
/* */
/*****************************************************************************/
#ifndef SRP_H
#define SRP_H

#define SRP_VERSION "16.a"

#define PACKED __attribute__((packed))

enum srp_types {
	SRP_LOGIN_REQ_TYPE = 0x00,
	SRP_LOGIN_RSP_TYPE = 0xC0,
	SRP_LOGIN_REJ_TYPE = 0xC2,
	SRP_I_LOGOUT_TYPE = 0x03,
	SRP_T_LOGOUT_TYPE = 0x80,
	SRP_TSK_MGMT_TYPE = 0x01,
	SRP_CMD_TYPE = 0x02,
	SRP_RSP_TYPE = 0xC1,
	SRP_CRED_REQ_TYPE = 0x81,
	SRP_CRED_RSP_TYPE = 0x41,
	SRP_AER_REQ_TYPE = 0x82,
	SRP_AER_RSP_TYPE = 0x42
};

enum srp_descriptor_formats {
	SRP_NO_BUFFER = 0x00,
	SRP_DIRECT_BUFFER = 0x01,
	SRP_INDIRECT_BUFFER = 0x02
};

struct memory_descriptor {
	u64 virtual_address;
	u32 memory_handle;
	u32 length;
};

struct indirect_descriptor {
	struct memory_descriptor head;
	u32 total_length;
	struct memory_descriptor list[1] PACKED;
};

struct srp_generic {
	u8 type;
	u8 reserved1[7];
	u64 tag;
};

struct srp_login_req {
	u8 type;
	u8 reserved1[7];
	u64 tag;
	u32 max_requested_initiator_to_target_iulen;
	u32 reserved2;
	u16 required_buffer_formats;
	u8 reserved3:6;
	u8 multi_channel_action:2;
	u8 reserved4;
	u32 reserved5;
	u8 initiator_port_identifier[16];
	u8 target_port_identifier[16];
};

struct srp_login_rsp {
	u8 type;
	u8 reserved1[3];
	u32 request_limit_delta;
	u64 tag;
	u32 max_initiator_to_target_iulen;
	u32 max_target_to_initiator_iulen;
	u16 supported_buffer_formats;
	u8 reserved2:6;
	u8 multi_channel_result:2;
	u8 reserved3;
	u8 reserved4[24];
};

struct srp_login_rej {
	u8 type;
	u8 reserved1[3];
	u32 reason;
	u64 tag;
	u64 reserved2;
	u16 supported_buffer_formats;
	u8 reserved3[6];
};

struct srp_i_logout {
	u8 type;
	u8 reserved1[7];
	u64 tag;
};

struct srp_t_logout {
	u8 type;
	u8 reserved1[3];
	u32 reason;
	u64 tag;
};

struct srp_tsk_mgmt {
	u8 type;
	u8 reserved1[7];
	u64 tag;
	u32 reserved2;
	u64 lun PACKED;
	u8 reserved3;
	u8 reserved4;
	u8 task_mgmt_flags;
	u8 reserved5;
	u64 managed_task_tag;
	u64 reserved6;
};

struct srp_cmd {
	u8 type;
	u32 reserved1 PACKED;
	u8 data_out_format:4;
	u8 data_in_format:4;
	u8 data_out_count;
	u8 data_in_count;
	u64 tag;
	u32 reserved2;
	u64 lun PACKED;
	u8 reserved3;
	u8 reserved4:5;
	u8 task_attribute:3;
	u8 reserved5;
	u8 additional_cdb_len;
	u8 cdb[16];
	u8 additional_data[0x100 - 0x30];
};

struct srp_rsp {
	u8 type;
	u8 reserved1[3];
	u32 request_limit_delta;
	u64 tag;
	u16 reserved2;
	u8 reserved3:2;
	u8 diunder:1;
	u8 diover:1;
	u8 dounder:1;
	u8 doover:1;
	u8 snsvalid:1;
	u8 rspvalid:1;
	u8 status;
	u32 data_in_residual_count;
	u32 data_out_residual_count;
	u32 sense_data_list_length;
	u32 response_data_list_length;
	u8 sense_and_response_data[18];
};

struct srp_cred_req {
	u8 type;
	u8 reserved1[3];
	u32 request_limit_delta;
	u64 tag;
};

struct srp_cred_rsp {
	u8 type;
	u8 reserved1[7];
	u64 tag;
};

struct srp_aer_req {
	u8 type;
	u8 reserved1[3];
	u32 request_limit_delta;
	u64 tag;
	u32 reserved2;
	u64 lun;
	u32 sense_data_list_length;
	u32 reserved3;
	u8 sense_data[20];
};

struct srp_aer_rsp {
	u8 type;
	u8 reserved1[7];
	u64 tag;
};

union srp_iu {
	struct srp_generic generic;
	struct srp_login_req login_req;
	struct srp_login_rsp login_rsp;
	struct srp_login_rej login_rej;
	struct srp_i_logout i_logout;
	struct srp_t_logout t_logout;
	struct srp_tsk_mgmt tsk_mgmt;
	struct srp_cmd cmd;
	struct srp_rsp rsp;
	struct srp_cred_req cred_req;
	struct srp_cred_rsp cred_rsp;
	struct srp_aer_req aer_req;
	struct srp_aer_rsp aer_rsp;
};

#endif
@@ -33,7 +33,22 @@
/*****************************************************************************/
#ifndef VIOSRP_H
#define VIOSRP_H
#include "srp.h"
#include <scsi/srp.h>

#define SRP_VERSION "16.a"
#define SRP_MAX_IU_LEN	256

union srp_iu {
	struct srp_login_req login_req;
	struct srp_login_rsp login_rsp;
	struct srp_login_rej login_rej;
	struct srp_i_logout i_logout;
	struct srp_t_logout t_logout;
	struct srp_tsk_mgmt tsk_mgmt;
	struct srp_cmd cmd;
	struct srp_rsp rsp;
	u8 reserved[SRP_MAX_IU_LEN];
};

enum viosrp_crq_formats {
	VIOSRP_SRP_FORMAT = 0x01,
@@ -164,29 +164,6 @@ MODULE_PARM_DESC(auto_create, "Auto-create single device RAID 0 arrays when init
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);

static const char *ipr_gpdd_dev_end_states[] = {
	"Command complete",
	"Terminated by host",
	"Terminated by device reset",
	"Terminated by bus reset",
	"Unknown",
	"Command not started"
};

static const char *ipr_gpdd_dev_bus_phases[] = {
	"Bus free",
	"Arbitration",
	"Selection",
	"Message out",
	"Command",
	"Message in",
	"Data out",
	"Data in",
	"Status",
	"Reselection",
	"Unknown"
};

/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
@@ -869,8 +846,8 @@ static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->sdev->hostdata = NULL;
			res->del_from_ml = 1;
			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else
@@ -1356,8 +1333,8 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
		return;

	if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
		ipr_res_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
			    "%s\n", ipr_error_table[error_index].error);
		ipr_ra_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
			   "%s\n", ipr_error_table[error_index].error);
	} else {
		dev_err(&ioa_cfg->pdev->dev, "%s\n",
			ipr_error_table[error_index].error);
@@ -2107,7 +2084,6 @@ restart:
			did_work = 1;
			sdev = res->sdev;
			if (!scsi_device_get(sdev)) {
				res->sdev = NULL;
				list_move_tail(&res->queue, &ioa_cfg->free_res_q);
				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
				scsi_remove_device(sdev);
@@ -2124,6 +2100,7 @@ restart:
			bus = res->cfgte.res_addr.bus;
			target = res->cfgte.res_addr.target;
			lun = res->cfgte.res_addr.lun;
			res->add_to_ml = 0;
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			scsi_add_device(ioa_cfg->host, bus, target, lun);
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
@@ -3214,7 +3191,7 @@ static int ipr_slave_configure(struct scsi_device *sdev)
			sdev->timeout = IPR_VSET_RW_TIMEOUT;
			blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
		}
		if (IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))
		if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
			sdev->allow_restart = 1;
		scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
	}
@@ -3303,6 +3280,44 @@ static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
	return rc;
}

/**
 * ipr_device_reset - Reset the device
 * @ioa_cfg:	ioa config struct
 * @res:	resource entry struct
 *
 * This function issues a device reset to the affected device.
 * If the device is a SCSI device, a LUN reset will be sent
 * to the device first. If that does not work, a target reset
 * will be sent.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
			    struct ipr_resource_entry *res)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmd_pkt *cmd_pkt;
	u32 ioasc;

	ENTER;
	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;
	cmd_pkt = &ioarcb->cmd_pkt;

	ioarcb->res_handle = res->cfgte.res_handle;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;

	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	LEAVE;
	return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
}

/**
 * ipr_eh_dev_reset - Reset the device
 * @scsi_cmd:	scsi command struct
@@ -3319,8 +3334,7 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_cmd_pkt *cmd_pkt;
	u32 ioasc;
	int rc;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
@@ -3347,25 +3361,12 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
	}

	res->resetting_device = 1;

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);

	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;

	ipr_sdev_err(scsi_cmd->device, "Resetting device\n");
	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);

	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
	rc = ipr_device_reset(ioa_cfg, res);
	res->resetting_device = 0;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	LEAVE;
	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
	return (rc ? FAILED : SUCCESS);
}

static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
@@ -3440,7 +3441,7 @@ static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
		return;
	}

	ipr_sdev_err(ipr_cmd->u.sdev, "Abort timed out. Resetting bus\n");
	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->sibling = reset_cmd;
	reset_cmd->sibling = ipr_cmd;
@@ -3504,7 +3505,8 @@ static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
	ipr_cmd->u.sdev = scsi_cmd->device;

	ipr_sdev_err(scsi_cmd->device, "Aborting command: %02X\n", scsi_cmd->cmnd[0]);
	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
		    scsi_cmd->cmnd[0]);
	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

@@ -3815,8 +3817,8 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)

	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
		scsi_cmd->result |= (DID_ERROR << 16);
		ipr_sdev_err(scsi_cmd->device,
			     "Request Sense failed with IOASC: 0x%08X\n", ioasc);
		scmd_printk(KERN_ERR, scsi_cmd,
			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
	} else {
		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
		       SCSI_SENSE_BUFFERSIZE);
@@ -3938,6 +3940,7 @@ static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
 * ipr_dump_ioasa - Dump contents of IOASA
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 * @res:	resource entry struct
 *
 * This function is invoked by the interrupt handler when ops
 * fail. It will log the IOASA if appropriate. Only called
@@ -3947,7 +3950,7 @@ static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
 *	none
 **/
static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd)
			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
{
	int i;
	u16 data_len;
@@ -3975,16 +3978,7 @@ static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
		return;
	}

	ipr_sdev_err(ipr_cmd->scsi_cmd->device, "%s\n",
		     ipr_error_table[error_index].error);

	if ((ioasa->u.gpdd.end_state <= ARRAY_SIZE(ipr_gpdd_dev_end_states)) &&
	    (ioasa->u.gpdd.bus_phase <= ARRAY_SIZE(ipr_gpdd_dev_bus_phases))) {
		ipr_sdev_err(ipr_cmd->scsi_cmd->device,
			     "Device End state: %s Phase: %s\n",
			     ipr_gpdd_dev_end_states[ioasa->u.gpdd.end_state],
			     ipr_gpdd_dev_bus_phases[ioasa->u.gpdd.bus_phase]);
	}
	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);

	if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
		data_len = sizeof(struct ipr_ioasa);
@@ -4141,7 +4135,7 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
	}

	if (ipr_is_gscsi(res))
		ipr_dump_ioasa(ioa_cfg, ipr_cmd);
		ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
	else
		ipr_gen_sense(ipr_cmd);

@@ -4540,7 +4534,7 @@ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
	ipr_cmd->job_step = ipr_ioa_reset_done;

	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
		if (!IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))
		if (!ipr_is_scsi_disk(res))
			continue;

		ipr_cmd->u.res = res;
@@ -4980,7 +4974,7 @@ static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
	list_for_each_entry_safe(res, temp, &old_res, queue) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->sdev->hostdata = NULL;
			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
		} else {
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
@@ -36,8 +36,8 @@
/*
 * Literals
 */
#define IPR_DRIVER_VERSION "2.1.2"
#define IPR_DRIVER_DATE "(February 8, 2006)"
#define IPR_DRIVER_VERSION "2.1.3"
#define IPR_DRIVER_DATE "(March 29, 2006)"

/*
 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -133,6 +133,7 @@
#define IPR_MAX_SCSI_RATE(width) ((320 * 10) / ((width) / 8))

#define IPR_IOA_RES_HANDLE		0xffffffff
#define IPR_INVALID_RES_HANDLE		0
#define IPR_IOA_RES_ADDR		0x00ffffff

/*
@@ -1191,30 +1192,17 @@ struct ipr_ucode_image_header {
 */
#define ipr_err(...) printk(KERN_ERR IPR_NAME ": "__VA_ARGS__)
#define ipr_info(...) printk(KERN_INFO IPR_NAME ": "__VA_ARGS__)
#define ipr_crit(...) printk(KERN_CRIT IPR_NAME ": "__VA_ARGS__)
#define ipr_warn(...) printk(KERN_WARNING IPR_NAME": "__VA_ARGS__)
#define ipr_dbg(...) IPR_DBG_CMD(printk(KERN_INFO IPR_NAME ": "__VA_ARGS__))

#define ipr_sdev_printk(level, sdev, fmt, args...) \
	sdev_printk(level, sdev, fmt, ## args)
#define ipr_ra_printk(level, ioa_cfg, ra, fmt, ...) \
	printk(level IPR_NAME ": %d:%d:%d:%d: " fmt, (ioa_cfg)->host->host_no, \
		(ra).bus, (ra).target, (ra).lun, ##__VA_ARGS__)

#define ipr_sdev_err(sdev, fmt, ...) \
	ipr_sdev_printk(KERN_ERR, sdev, fmt, ##__VA_ARGS__)

#define ipr_sdev_info(sdev, fmt, ...) \
	ipr_sdev_printk(KERN_INFO, sdev, fmt, ##__VA_ARGS__)

#define ipr_sdev_dbg(sdev, fmt, ...) \
	IPR_DBG_CMD(ipr_sdev_printk(KERN_INFO, sdev, fmt, ##__VA_ARGS__))

#define ipr_res_printk(level, ioa_cfg, res, fmt, ...) \
	printk(level IPR_NAME ": %d:%d:%d:%d: " fmt, ioa_cfg->host->host_no, \
		res.bus, res.target, res.lun, ##__VA_ARGS__)
#define ipr_ra_err(ioa_cfg, ra, fmt, ...) \
	ipr_ra_printk(KERN_ERR, ioa_cfg, ra, fmt, ##__VA_ARGS__)

#define ipr_res_err(ioa_cfg, res, fmt, ...) \
	ipr_res_printk(KERN_ERR, ioa_cfg, res, fmt, ##__VA_ARGS__)
#define ipr_res_dbg(ioa_cfg, res, fmt, ...) \
	IPR_DBG_CMD(ipr_res_printk(KERN_INFO, ioa_cfg, res, fmt, ##__VA_ARGS__))
	ipr_ra_err(ioa_cfg, (res)->cfgte.res_addr, fmt, ##__VA_ARGS__)

#define ipr_phys_res_err(ioa_cfg, res, fmt, ...)			\
{									\
@@ -1303,6 +1291,22 @@ static inline int ipr_is_gscsi(struct ipr_resource_entry *res)
	return 0;
}

/**
 * ipr_is_scsi_disk - Determine if a resource is a SCSI disk
 * @res:	resource entry struct
 *
 * Return value:
 *	1 if SCSI disk / 0 if not SCSI disk
 **/
static inline int ipr_is_scsi_disk(struct ipr_resource_entry *res)
{
	if (ipr_is_af_dasd_device(res) ||
	    (ipr_is_gscsi(res) && IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data)))
		return 1;
	else
		return 0;
}

/**
 * ipr_is_naca_model - Determine if a resource is using NACA queueing model
 * @res:	resource entry struct
(File diff suppressed because it is too large; some files were not shown because too many files changed in this diff.)