Merge branches 'tracing/ftrace' and 'linus' into tracing/core
commit f701d35407
58 changed files with 863 additions and 336 deletions
@@ -869,8 +869,10 @@ and is between 256 and 4096 characters. It is defined in the file
        icn=            [HW,ISDN]
                        Format: <io>[,<membase>[,<icn_id>[,<icn_id2>]]]

        ide=            [HW] (E)IDE subsystem
                        Format: ide=nodma or ide=doubler
        ide-core.nodma= [HW] (E)IDE subsystem
                        Format: =0.0 to prevent dma on hda, =0.1 hdb =1.0 hdc
                        .vlb_clock .pci_clock .noflush .noprobe .nowerr .cdrom
                        .chs .ignore_cable are additional options
                        See Documentation/ide/ide.txt.

        idebus=         [HW] (E)IDE subsystem - VLB/PCI bus speed

@@ -641,6 +641,17 @@ config DMAR
          and include PCI device scope covered by these DMA
          remapping devices.

config DMAR_DEFAULT_ON
        def_bool y
        prompt "Enable DMA Remapping Devices by default"
        depends on DMAR
        help
          Selecting this option will enable a DMAR device at boot time if
          one is found. If this option is not selected, DMAR support can
          be enabled by passing intel_iommu=on to the kernel. It is
          recommended you say N here while the DMAR code remains
          experimental.

endmenu

endif
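The new DMAR_DEFAULT_ON option only flips a compile-time default; per the help text above, the intel_iommu= boot parameter still overrides it at runtime. A sketch of how such a Kconfig default is typically consumed in C (modeled on how intel-iommu.c uses it; the variable name here is an assumption, not part of this hunk):

/* The Kconfig symbol selects the built-in default; intel_iommu=on/off
 * on the kernel command line can still override it later. */
#ifdef CONFIG_DMAR_DEFAULT_ON
int dmar_disabled = 0;          /* DMA remapping enabled by default */
#else
int dmar_disabled = 1;          /* disabled unless intel_iommu=on is passed */
#endif
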
@@ -507,7 +507,7 @@ static int iosapic_find_sharable_irq(unsigned long trigger, unsigned long pol)
        if (trigger == IOSAPIC_EDGE)
                return -EINVAL;

        for (i = 0; i <= NR_IRQS; i++) {
        for (i = 0; i < NR_IRQS; i++) {
                info = &iosapic_intr_info[i];
                if (info->trigger == trigger && info->polarity == pol &&
                    (info->dmode == IOSAPIC_FIXED ||

@@ -2149,7 +2149,7 @@ unw_remove_unwind_table (void *handle)

        /* next, remove hash table entries for this table */

        for (index = 0; index <= UNW_HASH_SIZE; ++index) {
        for (index = 0; index < UNW_HASH_SIZE; ++index) {
                tmp = unw.cache + unw.hash[index];
                if (unw.hash[index] >= UNW_CACHE_SIZE
                    || tmp->ip < table->start || tmp->ip >= table->end)

@@ -367,27 +367,24 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg,
                           unsigned int flags)
{
        char *ptr = (char *) &current->thread.TS_FPR(reg);
        int i, ret;
        char *ptr0 = (char *) &current->thread.TS_FPR(reg);
        char *ptr1 = (char *) &current->thread.TS_FPR(reg+1);
        int i, ret, sw = 0;

        if (!(flags & F))
                return 0;
        if (reg & 1)
                return 0;       /* invalid form: FRS/FRT must be even */
        if (!(flags & SW)) {
                /* not byte-swapped - easy */
                if (!(flags & ST))
                        ret = __copy_from_user(ptr, addr, 16);
                else
                        ret = __copy_to_user(addr, ptr, 16);
        } else {
                /* each FPR value is byte-swapped separately */
                ret = 0;
                for (i = 0; i < 16; ++i) {
                        if (!(flags & ST))
                                ret |= __get_user(ptr[i^7], addr + i);
                        else
                                ret |= __put_user(ptr[i^7], addr + i);
        if (flags & SW)
                sw = 7;
        ret = 0;
        for (i = 0; i < 8; ++i) {
                if (!(flags & ST)) {
                        ret |= __get_user(ptr0[i^sw], addr + i);
                        ret |= __get_user(ptr1[i^sw], addr + i + 8);
                } else {
                        ret |= __put_user(ptr0[i^sw], addr + i);
                        ret |= __put_user(ptr1[i^sw], addr + i + 8);
                }
        }
        if (ret)

@@ -62,18 +62,19 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
72:     std r8,8(r3)
        beq+ 3f
        addi r3,r3,16
23:     ld r9,8(r4)
.Ldo_tail:
        bf cr7*4+1,1f
        rotldi r9,r9,32
23:     lwz r9,8(r4)
        addi r4,r4,4
73:     stw r9,0(r3)
        addi r3,r3,4
1:      bf cr7*4+2,2f
        rotldi r9,r9,16
44:     lhz r9,8(r4)
        addi r4,r4,2
74:     sth r9,0(r3)
        addi r3,r3,2
2:      bf cr7*4+3,3f
        rotldi r9,r9,8
45:     lbz r9,8(r4)
75:     stb r9,0(r3)
3:      li r3,0
        blr

@@ -141,11 +142,24 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
6:      cmpwi cr1,r5,8
        addi r3,r3,32
        sld r9,r9,r10
        ble cr1,.Ldo_tail
        ble cr1,7f
34:     ld r0,8(r4)
        srd r7,r0,r11
        or r9,r7,r9
        b .Ldo_tail
7:
        bf cr7*4+1,1f
        rotldi r9,r9,32
94:     stw r9,0(r3)
        addi r3,r3,4
1:      bf cr7*4+2,2f
        rotldi r9,r9,16
95:     sth r9,0(r3)
        addi r3,r3,2
2:      bf cr7*4+3,3f
        rotldi r9,r9,8
96:     stb r9,0(r3)
3:      li r3,0
        blr

.Ldst_unaligned:
        PPC_MTOCRF 0x01,r6 /* put #bytes to 8B bdry into cr7 */

@@ -218,7 +232,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
121:
132:
        addi r3,r3,8
123:
134:
135:
138:

@@ -226,6 +239,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
140:
141:
142:
123:
144:
145:

/*
 * here we have had a fault on a load and r3 points to the first

@@ -309,6 +325,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
187:
188:
189:
194:
195:
196:
1:
        ld r6,-24(r1)
        ld r5,-8(r1)

@@ -329,7 +348,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
        .llong 72b,172b
        .llong 23b,123b
        .llong 73b,173b
        .llong 44b,144b
        .llong 74b,174b
        .llong 45b,145b
        .llong 75b,175b
        .llong 24b,124b
        .llong 25b,125b

@@ -347,6 +368,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
        .llong 79b,179b
        .llong 80b,180b
        .llong 34b,134b
        .llong 94b,194b
        .llong 95b,195b
        .llong 96b,196b
        .llong 35b,135b
        .llong 81b,181b
        .llong 36b,136b

@@ -53,18 +53,19 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
3:      std r8,8(r3)
        beq 3f
        addi r3,r3,16
        ld r9,8(r4)
.Ldo_tail:
        bf cr7*4+1,1f
        rotldi r9,r9,32
        lwz r9,8(r4)
        addi r4,r4,4
        stw r9,0(r3)
        addi r3,r3,4
1:      bf cr7*4+2,2f
        rotldi r9,r9,16
        lhz r9,8(r4)
        addi r4,r4,2
        sth r9,0(r3)
        addi r3,r3,2
2:      bf cr7*4+3,3f
        rotldi r9,r9,8
        lbz r9,8(r4)
        stb r9,0(r3)
3:      ld r3,48(r1)    /* return dest pointer */
        blr

@@ -133,11 +134,24 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
        cmpwi cr1,r5,8
        addi r3,r3,32
        sld r9,r9,r10
        ble cr1,.Ldo_tail
        ble cr1,6f
        ld r0,8(r4)
        srd r7,r0,r11
        or r9,r7,r9
        b .Ldo_tail
6:
        bf cr7*4+1,1f
        rotldi r9,r9,32
        stw r9,0(r3)
        addi r3,r3,4
1:      bf cr7*4+2,2f
        rotldi r9,r9,16
        sth r9,0(r3)
        addi r3,r3,2
2:      bf cr7*4+3,3f
        rotldi r9,r9,8
        stb r9,0(r3)
3:      ld r3,48(r1)    /* return dest pointer */
        blr

.Ldst_unaligned:
        PPC_MTOCRF 0x01,r6      # put #bytes to 8B bdry into cr7

@@ -306,6 +306,7 @@ static int jbusmc_print_dimm(int syndrome_code,
                buf[1] = '?';
                buf[2] = '?';
                buf[3] = '\0';
                return 0;
        }
        p = dp->controller;
        prop = &p->layout;

@@ -38,72 +38,84 @@ void blk_recalc_rq_sectors(struct request *rq, int nsect)
        }
}

void blk_recalc_rq_segments(struct request *rq)
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
                                             struct bio *bio,
                                             unsigned int *seg_size_ptr)
{
        int nr_phys_segs;
        unsigned int phys_size;
        struct bio_vec *bv, *bvprv = NULL;
        int seg_size;
        int cluster;
        struct req_iterator iter;
        int high, highprv = 1;
        struct request_queue *q = rq->q;
        int cluster, i, high, highprv = 1;
        unsigned int seg_size, nr_phys_segs;
        struct bio *fbio;

        if (!rq->bio)
                return;
        if (!bio)
                return 0;

        fbio = bio;
        cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
        seg_size = 0;
        phys_size = nr_phys_segs = 0;
        rq_for_each_segment(bv, rq, iter) {
                /*
                 * the trick here is making sure that a high page is never
                 * considered part of another segment, since that might
                 * change with the bounce page.
                 */
                high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
                if (high || highprv)
                        goto new_segment;
                if (cluster) {
                        if (seg_size + bv->bv_len > q->max_segment_size)
                                goto new_segment;
                        if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
                                goto new_segment;
                        if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
        for_each_bio(bio) {
                bio_for_each_segment(bv, bio, i) {
                        /*
                         * the trick here is making sure that a high page is
                         * never considered part of another segment, since that
                         * might change with the bounce page.
                         */
                        high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
                        if (high || highprv)
                                goto new_segment;
                        if (cluster) {
                                if (seg_size + bv->bv_len > q->max_segment_size)
                                        goto new_segment;
                                if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
                                        goto new_segment;
                                if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
                                        goto new_segment;

                                seg_size += bv->bv_len;
                                bvprv = bv;
                                continue;
                        }
                        seg_size += bv->bv_len;
                        bvprv = bv;
                        continue;
                }
new_segment:
                        if (nr_phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
                                rq->bio->bi_seg_front_size = seg_size;
                        if (nr_phys_segs == 1 && seg_size >
                            fbio->bi_seg_front_size)
                                fbio->bi_seg_front_size = seg_size;

                        nr_phys_segs++;
                        bvprv = bv;
                        seg_size = bv->bv_len;
                        highprv = high;
                        nr_phys_segs++;
                        bvprv = bv;
                        seg_size = bv->bv_len;
                        highprv = high;
                }
        }

        if (nr_phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
        if (seg_size_ptr)
                *seg_size_ptr = seg_size;

        return nr_phys_segs;
}

void blk_recalc_rq_segments(struct request *rq)
{
        unsigned int seg_size = 0, phys_segs;

        phys_segs = __blk_recalc_rq_segments(rq->q, rq->bio, &seg_size);

        if (phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
                rq->bio->bi_seg_front_size = seg_size;
        if (seg_size > rq->biotail->bi_seg_back_size)
                rq->biotail->bi_seg_back_size = seg_size;

        rq->nr_phys_segments = nr_phys_segs;
        rq->nr_phys_segments = phys_segs;
}

void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
        struct request rq;
        struct bio *nxt = bio->bi_next;
        rq.q = q;
        rq.bio = rq.biotail = bio;

        bio->bi_next = NULL;
        blk_recalc_rq_segments(&rq);
        bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, NULL);
        bio->bi_next = nxt;
        bio->bi_phys_segments = rq.nr_phys_segments;
        bio->bi_flags |= (1 << BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

@@ -256,6 +256,22 @@ void blkdev_show(struct seq_file *seqf, off_t offset)
}
#endif /* CONFIG_PROC_FS */

/**
 * register_blkdev - register a new block device
 *
 * @major: the requested major device number [1..255]. If @major=0, try to
 * allocate any unused major number.
 * @name: the name of the new block device as a zero terminated string
 *
 * The @name must be unique within the system.
 *
 * The return value depends on the @major input parameter.
 * - if a major device number was requested in range [1..255] then the
 *   function returns zero on success, or a negative error code
 * - if any unused major number was requested with @major=0 parameter
 *   then the return value is the allocated major number in range
 *   [1..255] or a negative error code otherwise
 */
int register_blkdev(unsigned int major, const char *name)
{
        struct blk_major_name **n, *p;
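The kernel-doc added above documents both calling modes of register_blkdev(). A minimal, hypothetical driver sketch exercising them; the "mydrv" name and the module boilerplate are assumptions for illustration only:

#include <linux/fs.h>
#include <linux/module.h>

static int mydrv_major;

static int __init mydrv_init(void)
{
        /* major == 0: let the block core allocate any unused major;
         * the chosen major (or a negative errno) is the return value */
        mydrv_major = register_blkdev(0, "mydrv");
        if (mydrv_major < 0)
                return mydrv_major;
        return 0;
}

static void __exit mydrv_exit(void)
{
        unregister_blkdev(mydrv_major, "mydrv");
}

module_init(mydrv_init);
module_exit(mydrv_exit);
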
@@ -24,7 +24,7 @@
#include <linux/libata.h>

#define DRV_NAME "pata_amd"
#define DRV_VERSION "0.3.11"
#define DRV_VERSION "0.4.1"

/**
 * timing_setup - shared timing computation and load

@@ -145,6 +145,13 @@ static int amd_pre_reset(struct ata_link *link, unsigned long deadline)
        return ata_sff_prereset(link, deadline);
}

/**
 * amd_cable_detect - report cable type
 * @ap: port
 *
 * AMD controller/BIOS setups record the cable type in word 0x42
 */

static int amd_cable_detect(struct ata_port *ap)
{
        static const u32 bitmask[2] = {0x03, 0x0C};

@@ -157,6 +164,40 @@ static int amd_cable_detect(struct ata_port *ap)
        return ATA_CBL_PATA40;
}

/**
 * amd_fifo_setup - set the PIO FIFO for ATA/ATAPI
 * @ap: ATA interface
 * @adev: ATA device
 *
 * Set the PCI fifo for this device according to the devices present
 * on the bus at this point in time. We need to turn the post write buffer
 * off for ATAPI devices as we may need to issue a word sized write to the
 * device as the final I/O
 */

static void amd_fifo_setup(struct ata_port *ap)
{
        struct ata_device *adev;
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
        static const u8 fifobit[2] = { 0xC0, 0x30};
        u8 fifo = fifobit[ap->port_no];
        u8 r;

        ata_for_each_dev(adev, &ap->link, ENABLED) {
                if (adev->class == ATA_DEV_ATAPI)
                        fifo = 0;
        }
        if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7411) /* FIFO is broken */
                fifo = 0;

        /* On the later chips the read prefetch bits become no-op bits */
        pci_read_config_byte(pdev, 0x41, &r);
        r &= ~fifobit[ap->port_no];
        r |= fifo;
        pci_write_config_byte(pdev, 0x41, r);
}

/**
 * amd33_set_piomode - set initial PIO mode data
 * @ap: ATA interface

@@ -167,21 +208,25 @@ static int amd_cable_detect(struct ata_port *ap)

static void amd33_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
        amd_fifo_setup(ap);
        timing_setup(ap, adev, 0x40, adev->pio_mode, 1);
}

static void amd66_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
        amd_fifo_setup(ap);
        timing_setup(ap, adev, 0x40, adev->pio_mode, 2);
}

static void amd100_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
        amd_fifo_setup(ap);
        timing_setup(ap, adev, 0x40, adev->pio_mode, 3);
}

static void amd133_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
        amd_fifo_setup(ap);
        timing_setup(ap, adev, 0x40, adev->pio_mode, 4);
}

@@ -397,6 +442,16 @@ static struct ata_port_operations nv133_port_ops = {
        .set_dmamode = nv133_set_dmamode,
};

static void amd_clear_fifo(struct pci_dev *pdev)
{
        u8 fifo;
        /* Disable the FIFO, the FIFO logic will re-enable it as
           appropriate */
        pci_read_config_byte(pdev, 0x41, &fifo);
        fifo &= 0x0F;
        pci_write_config_byte(pdev, 0x41, fifo);
}

static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
        static const struct ata_port_info info[10] = {

@@ -503,14 +558,8 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)

        if (type < 3)
                ata_pci_bmdma_clear_simplex(pdev);

        /* Check for AMD7411 */
        if (type == 3)
                /* FIFO is broken */
                pci_write_config_byte(pdev, 0x41, fifo & 0x0F);
        else
                pci_write_config_byte(pdev, 0x41, fifo | 0xF0);

        if (pdev->vendor == PCI_VENDOR_ID_AMD)
                amd_clear_fifo(pdev);
        /* Cable detection on Nvidia chips doesn't work too well,
         * cache BIOS programmed UDMA mode.
         */

@@ -536,18 +585,11 @@ static int amd_reinit_one(struct pci_dev *pdev)
                return rc;

        if (pdev->vendor == PCI_VENDOR_ID_AMD) {
                u8 fifo;
                pci_read_config_byte(pdev, 0x41, &fifo);
                if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7411)
                        /* FIFO is broken */
                        pci_write_config_byte(pdev, 0x41, fifo & 0x0F);
                else
                        pci_write_config_byte(pdev, 0x41, fifo | 0xF0);
                amd_clear_fifo(pdev);
                if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7409 ||
                    pdev->device == PCI_DEVICE_ID_AMD_COBRA_7401)
                        ata_pci_bmdma_clear_simplex(pdev);
        }

        ata_host_resume(host);
        return 0;
}

@@ -557,6 +557,9 @@ static unsigned int it821x_read_id(struct ata_device *adev,
                id[83] |= 0x4400;       /* Word 83 is valid and LBA48 */
                id[86] |= 0x0400;       /* LBA48 on */
                id[ATA_ID_MAJOR_VER] |= 0x1F;
                /* Clear the serial number because it's different each boot
                   which breaks validation on resume */
                memset(&id[ATA_ID_SERNO], 0x20, ATA_ID_SERNO_LEN);
        }
        return err_mask;
}

@@ -283,9 +283,10 @@ static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev)
static unsigned int pdc_data_xfer_vlb(struct ata_device *dev,
                        unsigned char *buf, unsigned int buflen, int rw)
{
        if (ata_id_has_dword_io(dev->id)) {
        int slop = buflen & 3;
        /* 32bit I/O capable *and* we need to write a whole number of dwords */
        if (ata_id_has_dword_io(dev->id) && (slop == 0 || slop == 3)) {
                struct ata_port *ap = dev->link->ap;
                int slop = buflen & 3;
                unsigned long flags;

                local_irq_save(flags);

@@ -735,7 +736,7 @@ static unsigned int vlb32_data_xfer(struct ata_device *adev, unsigned char *buf,
        struct ata_port *ap = adev->link->ap;
        int slop = buflen & 3;

        if (ata_id_has_dword_io(adev->id)) {
        if (ata_id_has_dword_io(adev->id) && (slop == 0 || slop == 3)) {
                if (rw == WRITE)
                        iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
                else

@@ -3114,19 +3114,17 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
                writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
        }

        if (!IS_SOC(hpriv)) {
                /* Clear any currently outstanding host interrupt conditions */
                writelfl(0, mmio + hpriv->irq_cause_ofs);
        /* Clear any currently outstanding host interrupt conditions */
        writelfl(0, mmio + hpriv->irq_cause_ofs);

                /* and unmask interrupt generation for host regs */
                writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
        /* and unmask interrupt generation for host regs */
        writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);

                /*
                 * enable only global host interrupts for now.
                 * The per-port interrupts get done later as ports are set up.
                 */
                mv_set_main_irq_mask(host, 0, PCI_ERR);
        }
        /*
         * enable only global host interrupts for now.
         * The per-port interrupts get done later as ports are set up.
         */
        mv_set_main_irq_mask(host, 0, PCI_ERR);
done:
        return rc;
}

@@ -3611,11 +3611,15 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
                schedule_timeout_uninterruptible(30*HZ);

                /* Now try to get the controller to respond to a no-op */
                for (i=0; i<12; i++) {
                for (i=0; i<30; i++) {
                        if (cciss_noop(pdev) == 0)
                                break;
                        else
                                printk("cciss: no-op failed%s\n", (i < 11 ? "; re-trying" : ""));

                        schedule_timeout_uninterruptible(HZ);
                }
                if (i == 30) {
                        printk(KERN_ERR "cciss: controller seems dead\n");
                        return -EBUSY;
                }
        }

@@ -40,6 +40,7 @@
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/scatterlist.h>

#include <xen/xenbus.h>
#include <xen/grant_table.h>

@@ -82,6 +83,7 @@ struct blkfront_info
        enum blkif_state connected;
        int ring_ref;
        struct blkif_front_ring ring;
        struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int evtchn, irq;
        struct request_queue *rq;
        struct work_struct work;

@@ -204,12 +206,11 @@ static int blkif_queue_request(struct request *req)
        struct blkfront_info *info = req->rq_disk->private_data;
        unsigned long buffer_mfn;
        struct blkif_request *ring_req;
        struct req_iterator iter;
        struct bio_vec *bvec;
        unsigned long id;
        unsigned int fsect, lsect;
        int ref;
        int i, ref;
        grant_ref_t gref_head;
        struct scatterlist *sg;

        if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
                return 1;

@@ -238,12 +239,13 @@ static int blkif_queue_request(struct request *req)
        if (blk_barrier_rq(req))
                ring_req->operation = BLKIF_OP_WRITE_BARRIER;

        ring_req->nr_segments = 0;
        rq_for_each_segment(bvec, req, iter) {
                BUG_ON(ring_req->nr_segments == BLKIF_MAX_SEGMENTS_PER_REQUEST);
                buffer_mfn = pfn_to_mfn(page_to_pfn(bvec->bv_page));
                fsect = bvec->bv_offset >> 9;
                lsect = fsect + (bvec->bv_len >> 9) - 1;
        ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
        BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);

        for_each_sg(info->sg, sg, ring_req->nr_segments, i) {
                buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg)));
                fsect = sg->offset >> 9;
                lsect = fsect + (sg->length >> 9) - 1;
                /* install a grant reference. */
                ref = gnttab_claim_grant_reference(&gref_head);
                BUG_ON(ref == -ENOSPC);

@@ -254,16 +256,12 @@ static int blkif_queue_request(struct request *req)
                                buffer_mfn,
                                rq_data_dir(req) );

                info->shadow[id].frame[ring_req->nr_segments] =
                        mfn_to_pfn(buffer_mfn);

                ring_req->seg[ring_req->nr_segments] =
                info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
                ring_req->seg[i] =
                                (struct blkif_request_segment) {
                                        .gref = ref,
                                        .first_sect = fsect,
                                        .last_sect = lsect };

                ring_req->nr_segments++;
        }

        info->ring.req_prod_pvt++;

@@ -622,6 +620,8 @@ static int setup_blkring(struct xenbus_device *dev,
        SHARED_RING_INIT(sring);
        FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);

        sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);

        err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
        if (err < 0) {
                free_page((unsigned long)sring);

@@ -452,6 +452,59 @@ static void drm_setup_crtcs(struct drm_device *dev)
        kfree(modes);
        kfree(enabled);
}

/**
 * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
 * @encoder: encoder to test
 * @crtc: crtc to test
 *
 * Return false if @encoder can't be driven by @crtc, true otherwise.
 */
static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
                                struct drm_crtc *crtc)
{
        struct drm_device *dev;
        struct drm_crtc *tmp;
        int crtc_mask = 1;

        WARN(!crtc, "checking null crtc?");

        dev = crtc->dev;

        list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
                if (tmp == crtc)
                        break;
                crtc_mask <<= 1;
        }

        if (encoder->possible_crtcs & crtc_mask)
                return true;
        return false;
}

/*
 * Check the CRTC we're going to map each output to vs. its current
 * CRTC. If they don't match, we have to disable the output and the CRTC
 * since the driver will have to re-route things.
 */
static void
drm_crtc_prepare_encoders(struct drm_device *dev)
{
        struct drm_encoder_helper_funcs *encoder_funcs;
        struct drm_encoder *encoder;

        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                encoder_funcs = encoder->helper_private;
                /* Disable unused encoders */
                if (encoder->crtc == NULL)
                        (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
                /* Disable encoders whose CRTC is about to change */
                if (encoder_funcs->get_crtc &&
                    encoder->crtc != (*encoder_funcs->get_crtc)(encoder))
                        (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
        }
}

/**
 * drm_crtc_set_mode - set a mode
 * @crtc: CRTC to program

@@ -547,6 +600,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
                encoder_funcs->prepare(encoder);
        }

        drm_crtc_prepare_encoders(dev);

        crtc_funcs->prepare(crtc);

        /* Set up the DPLL and any encoders state that needs to adjust or depend

@@ -617,7 +672,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
        struct drm_device *dev;
        struct drm_crtc **save_crtcs, *new_crtc;
        struct drm_encoder **save_encoders, *new_encoder;
        struct drm_framebuffer *old_fb;
        struct drm_framebuffer *old_fb = NULL;
        bool save_enabled;
        bool mode_changed = false;
        bool fb_changed = false;

@@ -668,9 +723,10 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
         * and then just flip_or_move it */
        if (set->crtc->fb != set->fb) {
                /* If we have no fb then treat it as a full mode set */
                if (set->crtc->fb == NULL)
                if (set->crtc->fb == NULL) {
                        DRM_DEBUG("crtc has no fb, full mode set\n");
                        mode_changed = true;
                else if ((set->fb->bits_per_pixel !=
                } else if ((set->fb->bits_per_pixel !=
                            set->crtc->fb->bits_per_pixel) ||
                           set->fb->depth != set->crtc->fb->depth)
                        fb_changed = true;

@@ -682,7 +738,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
                fb_changed = true;

        if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
                DRM_DEBUG("modes are different\n");
                DRM_DEBUG("modes are different, full mode set\n");
                drm_mode_debug_printmodeline(&set->crtc->mode);
                drm_mode_debug_printmodeline(set->mode);
                mode_changed = true;

@@ -708,6 +764,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
                }

                if (new_encoder != connector->encoder) {
                        DRM_DEBUG("encoder changed, full mode switch\n");
                        mode_changed = true;
                        connector->encoder = new_encoder;
                }

@@ -734,10 +791,20 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
                        if (set->connectors[ro] == connector)
                                new_crtc = set->crtc;
                }

                /* Make sure the new CRTC will work with the encoder */
                if (new_crtc &&
                    !drm_encoder_crtc_ok(connector->encoder, new_crtc)) {
                        ret = -EINVAL;
                        goto fail_set_mode;
                }
                if (new_crtc != connector->encoder->crtc) {
                        DRM_DEBUG("crtc changed, full mode switch\n");
                        mode_changed = true;
                        connector->encoder->crtc = new_crtc;
                }
                DRM_DEBUG("setting connector %d crtc to %p\n",
                          connector->base.id, new_crtc);
        }

        /* mode_set_base is not a required function */

@@ -781,6 +848,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)

fail_set_mode:
        set->crtc->enabled = save_enabled;
        set->crtc->fb = old_fb;
        count = 0;
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                if (!connector->encoder)

@@ -125,7 +125,7 @@ static bool edid_is_valid(struct edid *edid)
                DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
                goto bad;
        }
        if (edid->revision <= 0 || edid->revision > 3) {
        if (edid->revision > 3) {
                DRM_ERROR("EDID has minor version %d, which is not between 0-3\n", edid->revision);
                goto bad;
        }

@@ -320,10 +320,10 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
        mode->htotal = mode->hdisplay + ((pt->hblank_hi << 8) | pt->hblank_lo);

        mode->vdisplay = (pt->vactive_hi << 8) | pt->vactive_lo;
        mode->vsync_start = mode->vdisplay + ((pt->vsync_offset_hi << 8) |
        mode->vsync_start = mode->vdisplay + ((pt->vsync_offset_hi << 4) |
                                              pt->vsync_offset_lo);
        mode->vsync_end = mode->vsync_start +
                ((pt->vsync_pulse_width_hi << 8) |
                ((pt->vsync_pulse_width_hi << 4) |
                 pt->vsync_pulse_width_lo);
        mode->vtotal = mode->vdisplay + ((pt->vblank_hi << 8) | pt->vblank_lo);
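The second hunk above fixes the vertical sync parsing: in an EDID detailed timing descriptor the vsync-offset and vsync-pulse-width low fields are only 4 bits wide, so their high bits extend from bit 4, not bit 8. A standalone sketch of the corrected unpacking (the helper name is assumed for illustration):

/* Sketch: a 4-bit low field plus high bits means the high part
 * is shifted in by 4, which is exactly what the fix changes. */
static unsigned int unpack_vsync_field(unsigned char hi, unsigned char lo)
{
        return (hi << 4) | (lo & 0x0f);
}
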
@@ -435,6 +435,8 @@ EXPORT_SYMBOL(drm_vblank_get);
 */
void drm_vblank_put(struct drm_device *dev, int crtc)
{
        BUG_ON (atomic_read (&dev->vblank_refcount[crtc]) == 0);

        /* Last user schedules interrupt disable */
        if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
                mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ);

@@ -460,8 +462,9 @@ void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
         * so that interrupts remain enabled in the interim.
         */
        if (!dev->vblank_inmodeset[crtc]) {
                dev->vblank_inmodeset[crtc] = 1;
                drm_vblank_get(dev, crtc);
                dev->vblank_inmodeset[crtc] = 0x1;
                if (drm_vblank_get(dev, crtc) == 0)
                        dev->vblank_inmodeset[crtc] |= 0x2;
        }
}
EXPORT_SYMBOL(drm_vblank_pre_modeset);

@@ -473,9 +476,12 @@ void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
        if (dev->vblank_inmodeset[crtc]) {
                spin_lock_irqsave(&dev->vbl_lock, irqflags);
                dev->vblank_disable_allowed = 1;
                dev->vblank_inmodeset[crtc] = 0;
                spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
                drm_vblank_put(dev, crtc);

                if (dev->vblank_inmodeset[crtc] & 0x2)
                        drm_vblank_put(dev, crtc);

                dev->vblank_inmodeset[crtc] = 0;
        }
}
EXPORT_SYMBOL(drm_vblank_post_modeset);

@@ -811,7 +811,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
        dev_priv->hws_map.flags = 0;
        dev_priv->hws_map.mtrr = 0;

        drm_core_ioremap(&dev_priv->hws_map, dev);
        drm_core_ioremap_wc(&dev_priv->hws_map, dev);
        if (dev_priv->hws_map.handle == NULL) {
                i915_dma_cleanup(dev);
                dev_priv->status_gfx_addr = 0;

@@ -3548,7 +3548,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
        user_data = (char __user *) (uintptr_t) args->data_ptr;
        obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;

        DRM_ERROR("obj_addr %p, %lld\n", obj_addr, args->size);
        DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size);
        ret = copy_from_user(obj_addr, user_data, args->size);
        if (ret)
                return -EFAULT;

@@ -111,6 +111,12 @@ parse_panel_data(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
        panel_fixed_mode->clock = dvo_timing->clock * 10;
        panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;

        /* Some VBTs have bogus h/vtotal values */
        if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
                panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
        if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal)
                panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1;

        drm_mode_set_name(panel_fixed_mode);

        dev_priv->vbt_mode = panel_fixed_mode;

@@ -217,7 +217,7 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
        return false;
}

#define INTELPllInvalid(s) do { DRM_DEBUG(s); return false; } while (0)
#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.

@@ -46,7 +46,7 @@ menuconfig IDE
          SMART parameters from disk drives.

          To compile this driver as a module, choose M here: the
          module will be called ide.
          module will be called ide-core.ko.

          For further information, please read <file:Documentation/ide/ide.txt>.

@@ -166,7 +166,7 @@ static unsigned int init_chipset_amd74xx(struct pci_dev *dev)
         * Check for broken FIFO support.
         */
        if (dev->vendor == PCI_VENDOR_ID_AMD &&
            dev->vendor == PCI_DEVICE_ID_AMD_VIPER_7411)
            dev->device == PCI_DEVICE_ID_AMD_VIPER_7411)
                t &= 0x0f;
        else
                t |= 0xf0;

@@ -52,7 +52,7 @@ static void atiixp_set_pio_mode(ide_drive_t *drive, const u8 pio)
{
        struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
        unsigned long flags;
        int timing_shift = (drive->dn & 2) ? 16 : 0 + (drive->dn & 1) ? 0 : 8;
        int timing_shift = (drive->dn ^ 1) * 8;
        u32 pio_timing_data;
        u16 pio_mode_data;

@@ -85,7 +85,7 @@ static void atiixp_set_dma_mode(ide_drive_t *drive, const u8 speed)
{
        struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
        unsigned long flags;
        int timing_shift = (drive->dn & 2) ? 16 : 0 + (drive->dn & 1) ? 0 : 8;
        int timing_shift = (drive->dn ^ 1) * 8;
        u32 tmp32;
        u16 tmp16;
        u16 udma_ctl = 0;
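The old timing_shift expression was an operator-precedence bug: ?: binds more loosely than +, so it parsed as (drive->dn & 2) ? 16 : ((0 + (drive->dn & 1)) ? 0 : 8) and yielded 16 instead of 24 for drive 2. A standalone comparison of the two expressions (plain C, outside the driver):

#include <stdio.h>

int main(void)
{
        for (int dn = 0; dn < 4; dn++) {
                /* old expression, as the compiler actually parses it */
                int old = (dn & 2) ? 16 : 0 + (dn & 1) ? 0 : 8;
                /* fixed expression from the hunks above */
                int fixed = (dn ^ 1) * 8;
                printf("dn=%d old=%d fixed=%d\n", dn, old, fixed);
        }
        return 0;       /* old yields 8,0,16,16; fixed yields 8,0,24,16 */
}
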
@@ -55,7 +55,7 @@

static DEFINE_MUTEX(idecd_ref_mutex);

static void ide_cd_release(struct kref *);
static void ide_cd_release(struct device *);

static struct cdrom_info *ide_cd_get(struct gendisk *disk)
{

@@ -67,7 +67,7 @@ static struct cdrom_info *ide_cd_get(struct gendisk *disk)
                if (ide_device_get(cd->drive))
                        cd = NULL;
                else
                        kref_get(&cd->kref);
                        get_device(&cd->dev);

        }
        mutex_unlock(&idecd_ref_mutex);

@@ -79,7 +79,7 @@ static void ide_cd_put(struct cdrom_info *cd)
        ide_drive_t *drive = cd->drive;

        mutex_lock(&idecd_ref_mutex);
        kref_put(&cd->kref, ide_cd_release);
        put_device(&cd->dev);
        ide_device_put(drive);
        mutex_unlock(&idecd_ref_mutex);
}

@@ -194,6 +194,14 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive,
                        bio_sectors = max(bio_sectors(failed_command->bio), 4U);
                        sector &= ~(bio_sectors - 1);

                        /*
                         * The SCSI specification allows for the value
                         * returned by READ CAPACITY to be up to 75 2K
                         * sectors past the last readable block.
                         * Therefore, if we hit a medium error within the
                         * last 75 2K sectors, we decrease the saved size
                         * value.
                         */
                        if (sector < get_capacity(info->disk) &&
                            drive->probed_capacity - sector < 4 * 75)
                                set_capacity(info->disk, sector);

@@ -1790,15 +1798,17 @@ static void ide_cd_remove(ide_drive_t *drive)
        ide_debug_log(IDE_DBG_FUNC, "Call %s\n", __func__);

        ide_proc_unregister_driver(drive, info->driver);

        device_del(&info->dev);
        del_gendisk(info->disk);

        ide_cd_put(info);
        mutex_lock(&idecd_ref_mutex);
        put_device(&info->dev);
        mutex_unlock(&idecd_ref_mutex);
}

static void ide_cd_release(struct kref *kref)
static void ide_cd_release(struct device *dev)
{
        struct cdrom_info *info = to_ide_drv(kref, cdrom_info);
        struct cdrom_info *info = to_ide_drv(dev, cdrom_info);
        struct cdrom_device_info *devinfo = &info->devinfo;
        ide_drive_t *drive = info->drive;
        struct gendisk *g = info->disk;

@@ -1997,7 +2007,12 @@ static int ide_cd_probe(ide_drive_t *drive)

        ide_init_disk(g, drive);

        kref_init(&info->kref);
        info->dev.parent = &drive->gendev;
        info->dev.release = ide_cd_release;
        dev_set_name(&info->dev, dev_name(&drive->gendev));

        if (device_register(&info->dev))
                goto out_free_disk;

        info->drive = drive;
        info->driver = &ide_cdrom_driver;

@@ -2011,7 +2026,7 @@ static int ide_cd_probe(ide_drive_t *drive)
        g->driverfs_dev = &drive->gendev;
        g->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE;
        if (ide_cdrom_setup(drive)) {
                ide_cd_release(&info->kref);
                put_device(&info->dev);
                goto failed;
        }

@@ -2021,6 +2036,8 @@ static int ide_cd_probe(ide_drive_t *drive)
        add_disk(g);
        return 0;

out_free_disk:
        put_disk(g);
out_free_cd:
        kfree(info);
failed:

@@ -80,7 +80,7 @@ struct cdrom_info {
        ide_drive_t *drive;
        struct ide_driver *driver;
        struct gendisk *disk;
        struct kref kref;
        struct device dev;

        /* Buffer for table of contents. NULL if we haven't allocated
           a TOC buffer for this device yet. */

@@ -25,7 +25,7 @@ module_param(debug_mask, ulong, 0644);

static DEFINE_MUTEX(ide_disk_ref_mutex);

static void ide_disk_release(struct kref *);
static void ide_disk_release(struct device *);

static struct ide_disk_obj *ide_disk_get(struct gendisk *disk)
{

@@ -37,7 +37,7 @@ static struct ide_disk_obj *ide_disk_get(struct gendisk *disk)
                if (ide_device_get(idkp->drive))
                        idkp = NULL;
                else
                        kref_get(&idkp->kref);
                        get_device(&idkp->dev);
        }
        mutex_unlock(&ide_disk_ref_mutex);
        return idkp;

@@ -48,7 +48,7 @@ static void ide_disk_put(struct ide_disk_obj *idkp)
        ide_drive_t *drive = idkp->drive;

        mutex_lock(&ide_disk_ref_mutex);
        kref_put(&idkp->kref, ide_disk_release);
        put_device(&idkp->dev);
        ide_device_put(drive);
        mutex_unlock(&ide_disk_ref_mutex);
}

@@ -66,17 +66,18 @@ static void ide_gd_remove(ide_drive_t *drive)
        struct gendisk *g = idkp->disk;

        ide_proc_unregister_driver(drive, idkp->driver);

        device_del(&idkp->dev);
        del_gendisk(g);

        drive->disk_ops->flush(drive);

        ide_disk_put(idkp);
        mutex_lock(&ide_disk_ref_mutex);
        put_device(&idkp->dev);
        mutex_unlock(&ide_disk_ref_mutex);
}

static void ide_disk_release(struct kref *kref)
static void ide_disk_release(struct device *dev)
{
        struct ide_disk_obj *idkp = to_ide_drv(kref, ide_disk_obj);
        struct ide_disk_obj *idkp = to_ide_drv(dev, ide_disk_obj);
        ide_drive_t *drive = idkp->drive;
        struct gendisk *g = idkp->disk;

@@ -348,7 +349,12 @@ static int ide_gd_probe(ide_drive_t *drive)

        ide_init_disk(g, drive);

        kref_init(&idkp->kref);
        idkp->dev.parent = &drive->gendev;
        idkp->dev.release = ide_disk_release;
        dev_set_name(&idkp->dev, dev_name(&drive->gendev));

        if (device_register(&idkp->dev))
                goto out_free_disk;

        idkp->drive = drive;
        idkp->driver = &ide_gd_driver;

@@ -373,6 +379,8 @@ static int ide_gd_probe(ide_drive_t *drive)
        add_disk(g);
        return 0;

out_free_disk:
        put_disk(g);
out_free_idkp:
        kfree(idkp);
failed:

@@ -17,7 +17,7 @@ struct ide_disk_obj {
        ide_drive_t *drive;
        struct ide_driver *driver;
        struct gendisk *disk;
        struct kref kref;
        struct device dev;
        unsigned int openers;   /* protected by BKL for now */

        /* Last failed packet command */

@@ -169,7 +169,7 @@ typedef struct ide_tape_obj {
        ide_drive_t *drive;
        struct ide_driver *driver;
        struct gendisk *disk;
        struct kref kref;
        struct device dev;

        /*
         * failed_pc points to the last failed packet command, or contains

@@ -267,7 +267,7 @@ static DEFINE_MUTEX(idetape_ref_mutex);

static struct class *idetape_sysfs_class;

static void ide_tape_release(struct kref *);
static void ide_tape_release(struct device *);

static struct ide_tape_obj *ide_tape_get(struct gendisk *disk)
{

@@ -279,7 +279,7 @@ static struct ide_tape_obj *ide_tape_get(struct gendisk *disk)
                if (ide_device_get(tape->drive))
                        tape = NULL;
                else
                        kref_get(&tape->kref);
                        get_device(&tape->dev);
        }
        mutex_unlock(&idetape_ref_mutex);
        return tape;

@@ -290,7 +290,7 @@ static void ide_tape_put(struct ide_tape_obj *tape)
        ide_drive_t *drive = tape->drive;

        mutex_lock(&idetape_ref_mutex);
        kref_put(&tape->kref, ide_tape_release);
        put_device(&tape->dev);
        ide_device_put(drive);
        mutex_unlock(&idetape_ref_mutex);
}

@@ -308,7 +308,7 @@ static struct ide_tape_obj *ide_tape_chrdev_get(unsigned int i)
        mutex_lock(&idetape_ref_mutex);
        tape = idetape_devs[i];
        if (tape)
                kref_get(&tape->kref);
                get_device(&tape->dev);
        mutex_unlock(&idetape_ref_mutex);
        return tape;
}

@@ -2256,15 +2256,17 @@ static void ide_tape_remove(ide_drive_t *drive)
        idetape_tape_t *tape = drive->driver_data;

        ide_proc_unregister_driver(drive, tape->driver);

        device_del(&tape->dev);
        ide_unregister_region(tape->disk);

        ide_tape_put(tape);
        mutex_lock(&idetape_ref_mutex);
        put_device(&tape->dev);
        mutex_unlock(&idetape_ref_mutex);
}

static void ide_tape_release(struct kref *kref)
static void ide_tape_release(struct device *dev)
{
        struct ide_tape_obj *tape = to_ide_drv(kref, ide_tape_obj);
        struct ide_tape_obj *tape = to_ide_drv(dev, ide_tape_obj);
        ide_drive_t *drive = tape->drive;
        struct gendisk *g = tape->disk;

@@ -2407,7 +2409,12 @@ static int ide_tape_probe(ide_drive_t *drive)

        ide_init_disk(g, drive);

        kref_init(&tape->kref);
        tape->dev.parent = &drive->gendev;
        tape->dev.release = ide_tape_release;
        dev_set_name(&tape->dev, dev_name(&drive->gendev));

        if (device_register(&tape->dev))
                goto out_free_disk;

        tape->drive = drive;
        tape->driver = &idetape_driver;

@@ -2436,6 +2443,8 @@ static int ide_tape_probe(ide_drive_t *drive)

        return 0;

out_free_disk:
        put_disk(g);
out_free_tape:
        kfree(tape);
failed:

@@ -337,6 +337,7 @@ static int ide_set_dev_param_mask(const char *s, struct kernel_param *kp)
        int a, b, i, j = 1;
        unsigned int *dev_param_mask = (unsigned int *)kp->arg;

        /* controller . device (0 or 1) [ : 1 (set) | 0 (clear) ] */
        if (sscanf(s, "%d.%d:%d", &a, &b, &j) != 3 &&
            sscanf(s, "%d.%d", &a, &b) != 2)
                return -EINVAL;

@@ -349,7 +350,7 @@ static int ide_set_dev_param_mask(const char *s, struct kernel_param *kp)
        if (j)
                *dev_param_mask |= (1 << i);
        else
                *dev_param_mask &= (1 << i);
                *dev_param_mask &= ~(1 << i);

        return 0;
}

@@ -392,6 +393,8 @@ static int ide_set_disk_chs(const char *str, struct kernel_param *kp)
{
        int a, b, c = 0, h = 0, s = 0, i, j = 1;

        /* controller . device (0 or 1) : Cylinders , Heads , Sectors */
        /* controller . device (0 or 1) : 1 (use CHS) | 0 (ignore CHS) */
        if (sscanf(str, "%d.%d:%d,%d,%d", &a, &b, &c, &h, &s) != 5 &&
            sscanf(str, "%d.%d:%d", &a, &b, &j) != 3)
                return -EINVAL;

@@ -407,7 +410,7 @@ static int ide_set_disk_chs(const char *str, struct kernel_param *kp)
        if (j)
                ide_disks |= (1 << i);
        else
                ide_disks &= (1 << i);
                ide_disks &= ~(1 << i);

        ide_disks_chs[i].cyl = c;
        ide_disks_chs[i].head = h;

@@ -469,6 +472,8 @@ static int ide_set_ignore_cable(const char *s, struct kernel_param *kp)
{
        int i, j = 1;

        /* controller (ignore) */
        /* controller : 1 (ignore) | 0 (use) */
        if (sscanf(s, "%d:%d", &i, &j) != 2 && sscanf(s, "%d", &i) != 1)
                return -EINVAL;

@@ -478,7 +483,7 @@ static int ide_set_ignore_cable(const char *s, struct kernel_param *kp)
        if (j)
                ide_ignore_cable |= (1 << i);
        else
                ide_ignore_cable &= (1 << i);
                ide_ignore_cable &= ~(1 << i);

        return 0;
}
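All three parameter parsers above shared the same bit-clearing bug: mask &= (1 << i) keeps only bit i instead of dropping it, so the fix ANDs with the complement. A minimal illustration:

#include <stdio.h>

int main(void)
{
        unsigned int mask = 0xff;
        int i = 3;

        printf("%#x\n", mask & (1u << i));      /* 0x8: wrong, keeps only bit 3 */
        printf("%#x\n", mask & ~(1u << i));     /* 0xf7: right, clears bit 3 */
        return 0;
}
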
@@ -5,9 +5,8 @@
 * May be copied or modified under the terms of the GNU General Public License
 * Based in part on the ITE vendor provided SCSI driver.
 *
 * Documentation available from
 * http://www.ite.com.tw/pc/IT8212F_V04.pdf
 * Some other documents are NDA.
 * Documentation:
 *      Datasheet is freely available, some other documents under NDA.
 *
 * The ITE8212 isn't exactly a standard IDE controller. It has two
 * modes. In pass through mode then it is an IDE controller. In its smart

@@ -1275,7 +1275,7 @@ static void __exit ieee1394_cleanup(void)
        unregister_chrdev_region(IEEE1394_CORE_DEV, 256);
}

module_init(ieee1394_init);
fs_initcall(ieee1394_init);
module_exit(ieee1394_cleanup);

/* Exported symbols */

@@ -1237,8 +1237,9 @@ static void end_sync_write(struct bio *bio, int error)
        update_head_pos(mirror, r1_bio);

        if (atomic_dec_and_test(&r1_bio->remaining)) {
                md_done_sync(mddev, r1_bio->sectors, uptodate);
                sector_t s = r1_bio->sectors;
                put_buf(r1_bio);
                md_done_sync(mddev, s, uptodate);
        }
}

@@ -1236,6 +1236,7 @@ static void end_sync_read(struct bio *bio, int error)
        /* for reconstruct, we always reschedule after a read.
         * for resync, only after all reads
         */
        rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
        if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
            atomic_dec_and_test(&r10_bio->remaining)) {
                /* we have read all the blocks,

@@ -1243,7 +1244,6 @@ static void end_sync_read(struct bio *bio, int error)
                 */
                reschedule_retry(r10_bio);
        }
        rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
}

static void end_sync_write(struct bio *bio, int error)

@@ -1264,11 +1264,13 @@ static void end_sync_write(struct bio *bio, int error)

        update_head_pos(i, r10_bio);

        rdev_dec_pending(conf->mirrors[d].rdev, mddev);
        while (atomic_dec_and_test(&r10_bio->remaining)) {
                if (r10_bio->master_bio == NULL) {
                        /* the primary of several recovery bios */
                        md_done_sync(mddev, r10_bio->sectors, 1);
                        sector_t s = r10_bio->sectors;
                        put_buf(r10_bio);
                        md_done_sync(mddev, s, 1);
                        break;
                } else {
                        r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio;

@@ -1276,7 +1278,6 @@ static void end_sync_write(struct bio *bio, int error)
                        r10_bio = r10_bio2;
                }
        }
        rdev_dec_pending(conf->mirrors[d].rdev, mddev);
}

/*

@@ -1749,8 +1750,6 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
        if (!go_faster && conf->nr_waiting)
                msleep_interruptible(1000);

        bitmap_cond_end_sync(mddev->bitmap, sector_nr);

        /* Again, very different code for resync and recovery.
         * Both must result in an r10bio with a list of bios that
         * have bi_end_io, bi_sector, bi_bdev set,

@@ -1886,6 +1885,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
                /* resync. Schedule a read for every block at this virt offset */
                int count = 0;

                bitmap_cond_end_sync(mddev->bitmap, sector_nr);

                if (!bitmap_start_sync(mddev->bitmap, sector_nr,
                                       &sync_blocks, mddev->degraded) &&
                    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {

@@ -2010,13 +2011,13 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
                /* There is nowhere to write, so all non-sync
                 * drives must be failed, so try the next chunk...
                 */
                {
                sector_t sec = max_sector - sector_nr;
                sectors_skipped += sec;
                if (sector_nr + max_sync < max_sector)
                        max_sector = sector_nr + max_sync;

                sectors_skipped += (max_sector - sector_nr);
                chunks_skipped ++;
                sector_nr = max_sector;
                goto skipped;
                }
        }

static int run(mddev_t *mddev)

@@ -148,7 +148,7 @@ static void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
        pci_unmap_single(dev,
                        pci_unmap_addr(&txq->cmd[index]->meta, mapping),
                        pci_unmap_len(&txq->cmd[index]->meta, len),
                        PCI_DMA_TODEVICE);
                        PCI_DMA_BIDIRECTIONAL);

        /* Unmap chunks, if any. */
        for (i = 1; i < num_tbs; i++) {

@@ -964,7 +964,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
         * within command buffer array. */
        txcmd_phys = pci_map_single(priv->pci_dev,
                        out_cmd, sizeof(struct iwl_cmd),
                        PCI_DMA_TODEVICE);
                        PCI_DMA_BIDIRECTIONAL);
        pci_unmap_addr_set(&out_cmd->meta, mapping, txcmd_phys);
        pci_unmap_len_set(&out_cmd->meta, len, sizeof(struct iwl_cmd));
        /* Add buffer containing Tx command and MAC(!) header to TFD's

@@ -1115,7 +1115,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
                        IWL_MAX_SCAN_SIZE : sizeof(struct iwl_cmd);

        phys_addr = pci_map_single(priv->pci_dev, out_cmd,
                        len, PCI_DMA_TODEVICE);
                        len, PCI_DMA_BIDIRECTIONAL);
        pci_unmap_addr_set(&out_cmd->meta, mapping, phys_addr);
        pci_unmap_len_set(&out_cmd->meta, len, len);
        phys_addr += offsetof(struct iwl_cmd, hdr);

@@ -1212,7 +1212,7 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
        pci_unmap_single(priv->pci_dev,
                        pci_unmap_addr(&txq->cmd[cmd_idx]->meta, mapping),
                        pci_unmap_len(&txq->cmd[cmd_idx]->meta, len),
                        PCI_DMA_TODEVICE);
                        PCI_DMA_BIDIRECTIONAL);

        for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
             q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

@@ -330,6 +330,14 @@ parse_dmar_table(void)
        entry_header = (struct acpi_dmar_header *)(dmar + 1);
        while (((unsigned long)entry_header) <
                        (((unsigned long)dmar) + dmar_tbl->length)) {
                /* Avoid looping forever on bad ACPI tables */
                if (entry_header->length == 0) {
                        printk(KERN_WARNING PREFIX
                                "Invalid 0-length structure\n");
                        ret = -EINVAL;
                        break;
                }

                dmar_table_print_dmar_entry(entry_header);

                switch (entry_header->type) {

@@ -491,7 +499,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
        int map_size;
        u32 ver;
        static int iommu_allocated = 0;
        int agaw;
        int agaw = 0;

        iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
        if (!iommu)

@@ -507,6 +515,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
        iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
        iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

#ifdef CONFIG_DMAR
        agaw = iommu_calculate_agaw(iommu);
        if (agaw < 0) {
                printk(KERN_ERR

@@ -514,6 +523,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
                        iommu->seq_id);
                goto error;
        }
#endif
        iommu->agaw = agaw;

        /* the registers might be more than one page */

@@ -571,19 +581,49 @@ static inline void reclaim_free_desc(struct q_inval *qi)
        }
}

static int qi_check_fault(struct intel_iommu *iommu, int index)
{
        u32 fault;
        int head;
        struct q_inval *qi = iommu->qi;
        int wait_index = (index + 1) % QI_LENGTH;

        fault = readl(iommu->reg + DMAR_FSTS_REG);

        /*
         * If IQE happens, the head points to the descriptor associated
         * with the error. No new descriptors are fetched until the IQE
         * is cleared.
         */
        if (fault & DMA_FSTS_IQE) {
                head = readl(iommu->reg + DMAR_IQH_REG);
                if ((head >> 4) == index) {
                        memcpy(&qi->desc[index], &qi->desc[wait_index],
                                        sizeof(struct qi_desc));
                        __iommu_flush_cache(iommu, &qi->desc[index],
                                        sizeof(struct qi_desc));
                        writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
                        return -EINVAL;
                }
        }

        return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
        int rc = 0;
        struct q_inval *qi = iommu->qi;
        struct qi_desc *hw, wait_desc;
        int wait_index, index;
        unsigned long flags;

        if (!qi)
                return;
                return 0;

        hw = qi->desc;

@@ -601,7 +641,8 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)

        hw[index] = *desc;

        wait_desc.low = QI_IWD_STATUS_DATA(2) | QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
        wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
                        QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
        wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

        hw[wait_index] = wait_desc;

@@ -612,13 +653,11 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
        qi->free_head = (qi->free_head + 2) % QI_LENGTH;
        qi->free_cnt -= 2;

        spin_lock(&iommu->register_lock);
        /*
         * update the HW tail register indicating the presence of
         * new descriptors.
         */
        writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
        spin_unlock(&iommu->register_lock);

        while (qi->desc_status[wait_index] != QI_DONE) {
                /*

@@ -628,15 +667,21 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
                 * a deadlock where the interrupt context can wait indefinitely
                 * for free slots in the queue.
                 */
                rc = qi_check_fault(iommu, index);
                if (rc)
                        goto out;

                spin_unlock(&qi->q_lock);
                cpu_relax();
                spin_lock(&qi->q_lock);
        }

        qi->desc_status[index] = QI_DONE;
out:
        qi->desc_status[index] = qi->desc_status[wait_index] = QI_DONE;

        reclaim_free_desc(qi);
        spin_unlock_irqrestore(&qi->q_lock, flags);

        return rc;
}

/*

@@ -649,13 +694,13 @@ void qi_global_iec(struct intel_iommu *iommu)
        desc.low = QI_IEC_TYPE;
        desc.high = 0;

        /* should never fail */
        qi_submit_sync(&desc, iommu);
}

int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
                     u64 type, int non_present_entry_flush)
{
        struct qi_desc desc;

        if (non_present_entry_flush) {

@@ -669,10 +714,7 @@ int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
                | QI_CC_GRAN(type) | QI_CC_TYPE;
        desc.high = 0;

        qi_submit_sync(&desc, iommu);

        return 0;

        return qi_submit_sync(&desc, iommu);
}

int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,

@@ -702,10 +744,7 @@ int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
        desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
                | QI_IOTLB_AM(size_order);

        qi_submit_sync(&desc, iommu);

        return 0;

        return qi_submit_sync(&desc, iommu);
}

/*

@ -207,7 +207,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
    return index;
}

static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
    struct qi_desc desc;

@ -215,7 +215,7 @@ static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
        | QI_IEC_SELECTIVE;
    desc.high = 0;

    qi_submit_sync(&desc, iommu);
    return qi_submit_sync(&desc, iommu);
}

int map_irq_to_irte_handle(int irq, u16 *sub_handle)
@ -283,6 +283,7 @@ int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)

int modify_irte(int irq, struct irte *irte_modified)
{
    int rc;
    int index;
    struct irte *irte;
    struct intel_iommu *iommu;

@ -303,14 +304,15 @@ int modify_irte(int irq, struct irte *irte_modified)
    set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
    __iommu_flush_cache(iommu, irte, sizeof(*irte));

    qi_flush_iec(iommu, index, 0);

    rc = qi_flush_iec(iommu, index, 0);
    spin_unlock(&irq_2_ir_lock);

    return 0;
    return rc;
}

int flush_irte(int irq)
{
    int rc;
    int index;
    struct intel_iommu *iommu;
    struct irq_2_iommu *irq_iommu;
@ -326,10 +328,10 @@ int flush_irte(int irq)

    index = irq_iommu->irte_index + irq_iommu->sub_handle;

    qi_flush_iec(iommu, index, irq_iommu->irte_mask);
    rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
    spin_unlock(&irq_2_ir_lock);

    return 0;
    return rc;
}

struct intel_iommu *map_ioapic_to_ir(int apic)

@ -355,6 +357,7 @@ struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)

int free_irte(int irq)
{
    int rc = 0;
    int index, i;
    struct irte *irte;
    struct intel_iommu *iommu;

@ -375,7 +378,7 @@ int free_irte(int irq)
    if (!irq_iommu->sub_handle) {
        for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
            set_64bit((unsigned long *)irte, 0);
        qi_flush_iec(iommu, index, irq_iommu->irte_mask);
        rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
    }

    irq_iommu->iommu = NULL;

@ -385,7 +388,7 @@ int free_irte(int irq)

    spin_unlock(&irq_2_ir_lock);

    return 0;
    return rc;
}

static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)

fs/bio.c

@ -302,7 +302,7 @@ void bio_init(struct bio *bio)
struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
    struct bio *bio = NULL;
    void *p;
    void *uninitialized_var(p);

    if (bs) {
        p = mempool_alloc(bs->bio_pool, gfp_mask);
@ -66,6 +66,9 @@ struct btrfs_inode {
     */
    struct list_head delalloc_inodes;

    /* the space_info for where this inode's data allocations are done */
    struct btrfs_space_info *space_info;

    /* full 64 bit generation number, struct vfs_inode doesn't have a big
     * enough field for this.
     */

@ -94,6 +97,11 @@ struct btrfs_inode {
     */
    u64 delalloc_bytes;

    /* total number of bytes that may be used for this inode for
     * delalloc
     */
    u64 reserved_bytes;

    /*
     * the size of the file stored in the metadata on disk. data=ordered
     * means the in-memory i_size might be larger than the size on disk

@ -596,13 +596,27 @@ struct btrfs_block_group_item {

struct btrfs_space_info {
    u64 flags;
    u64 total_bytes;
    u64 bytes_used;
    u64 bytes_pinned;
    u64 bytes_reserved;
    u64 bytes_readonly;
    int full;
    int force_alloc;

    u64 total_bytes;    /* total bytes in the space */
    u64 bytes_used;     /* total bytes used on disk */
    u64 bytes_pinned;   /* total bytes pinned, will be freed when the
                           transaction finishes */
    u64 bytes_reserved; /* total bytes the allocator has reserved for
                           current allocations */
    u64 bytes_readonly; /* total bytes that are read only */

    /* delalloc accounting */
    u64 bytes_delalloc; /* number of bytes reserved for allocation,
                           this space is not necessarily reserved yet
                           by the allocator */
    u64 bytes_may_use;  /* number of bytes that may be used for
                           delalloc */

    int full;           /* indicates that we cannot allocate any more
                           chunks for this space */
    int force_alloc;    /* set if we need to force a chunk alloc for
                           this space */

    struct list_head list;

    /* for block groups in our same type */
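Taken together, the new counters partition total_bytes so that "still free" space is whatever none of them claims; btrfs_check_data_free_space() in extent-tree.c below performs exactly this subtraction. As a sketch (helper name hypothetical):

    /* Bytes not yet claimed by any of the counters above. */
    static u64 space_info_unclaimed(struct btrfs_space_info *si)
    {
        return si->total_bytes - si->bytes_used - si->bytes_pinned -
               si->bytes_reserved - si->bytes_readonly -
               si->bytes_delalloc - si->bytes_may_use;
    }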
@ -1782,6 +1796,16 @@ int btrfs_add_dead_reloc_root(struct btrfs_root *root);
int btrfs_cleanup_reloc_trees(struct btrfs_root *root);
int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len);
u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags);
void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode);
int btrfs_check_metadata_free_space(struct btrfs_root *root);
int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode,
                u64 bytes);
void btrfs_free_reserved_data_space(struct btrfs_root *root,
                struct inode *inode, u64 bytes);
void btrfs_delalloc_reserve_space(struct btrfs_root *root, struct inode *inode,
                u64 bytes);
void btrfs_delalloc_free_space(struct btrfs_root *root, struct inode *inode,
                u64 bytes);
/* ctree.c */
int btrfs_previous_item(struct btrfs_root *root,
            struct btrfs_path *path, u64 min_objectid,
@ -2027,8 +2051,6 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
unsigned long btrfs_force_ra(struct address_space *mapping,
                struct file_ra_state *ra, struct file *file,
                pgoff_t offset, pgoff_t last_index);
int btrfs_check_free_space(struct btrfs_root *root, u64 num_required,
                int for_del);
int btrfs_page_mkwrite(struct vm_area_struct *vma, struct page *page);
int btrfs_readpage(struct file *file, struct page *page);
void btrfs_delete_inode(struct inode *inode);

@ -60,6 +60,10 @@ static int update_block_group(struct btrfs_trans_handle *trans,
                u64 bytenr, u64 num_bytes, int alloc,
                int mark_free);

static int do_chunk_alloc(struct btrfs_trans_handle *trans,
              struct btrfs_root *extent_root, u64 alloc_bytes,
              u64 flags, int force);

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
    return (cache->flags & bits) == bits;
@ -1909,6 +1913,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
    found->bytes_pinned = 0;
    found->bytes_reserved = 0;
    found->bytes_readonly = 0;
    found->bytes_delalloc = 0;
    found->full = 0;
    found->force_alloc = 0;
    *space_info = found;
@ -1972,6 +1977,233 @@ u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
    return flags;
}

static u64 btrfs_get_alloc_profile(struct btrfs_root *root, u64 data)
{
    struct btrfs_fs_info *info = root->fs_info;
    u64 alloc_profile;

    if (data) {
        alloc_profile = info->avail_data_alloc_bits &
            info->data_alloc_profile;
        data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
    } else if (root == root->fs_info->chunk_root) {
        alloc_profile = info->avail_system_alloc_bits &
            info->system_alloc_profile;
        data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
    } else {
        alloc_profile = info->avail_metadata_alloc_bits &
            info->metadata_alloc_profile;
        data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
    }

    return btrfs_reduce_alloc_profile(root, data);
}

void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
{
    u64 alloc_target;

    alloc_target = btrfs_get_alloc_profile(root, 1);
    BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
                               alloc_target);
}

/*
 * for now this just makes sure we have at least 5% of our metadata space free
 * for use.
 */
int btrfs_check_metadata_free_space(struct btrfs_root *root)
{
    struct btrfs_fs_info *info = root->fs_info;
    struct btrfs_space_info *meta_sinfo;
    u64 alloc_target, thresh;
    int committed = 0, ret;

    /* get the space info for where the metadata will live */
    alloc_target = btrfs_get_alloc_profile(root, 0);
    meta_sinfo = __find_space_info(info, alloc_target);

again:
    spin_lock(&meta_sinfo->lock);
    if (!meta_sinfo->full)
        thresh = meta_sinfo->total_bytes * 80;
    else
        thresh = meta_sinfo->total_bytes * 95;

    do_div(thresh, 100);

    if (meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
        meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly > thresh) {
        struct btrfs_trans_handle *trans;
        if (!meta_sinfo->full) {
            meta_sinfo->force_alloc = 1;
            spin_unlock(&meta_sinfo->lock);

            trans = btrfs_start_transaction(root, 1);
            if (!trans)
                return -ENOMEM;

            ret = do_chunk_alloc(trans, root->fs_info->extent_root,
                         2 * 1024 * 1024, alloc_target, 0);
            btrfs_end_transaction(trans, root);
            goto again;
        }
        spin_unlock(&meta_sinfo->lock);

        if (!committed) {
            committed = 1;
            trans = btrfs_join_transaction(root, 1);
            if (!trans)
                return -ENOMEM;
            ret = btrfs_commit_transaction(trans, root);
            if (ret)
                return ret;
            goto again;
        }
        return -ENOSPC;
    }
    spin_unlock(&meta_sinfo->lock);

    return 0;
}
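The threshold test above is plain 64-bit integer arithmetic via do_div(). A worked example for the not-yet-full case on 1 GiB of metadata space:

    u64 total = 1024ULL * 1024 * 1024;    /* 1 GiB of metadata space */
    u64 thresh = total * 80;              /* not-full case uses 80% */

    do_div(thresh, 100);                  /* thresh == 858993459 (~819 MiB) */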
/*
 * This will check the space that the inode allocates from to make sure we have
 * enough space for bytes.
 */
int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode,
                u64 bytes)
{
    struct btrfs_space_info *data_sinfo;
    int ret = 0, committed = 0;

    /* make sure bytes are sectorsize aligned */
    bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

    data_sinfo = BTRFS_I(inode)->space_info;
again:
    /* make sure we have enough space to handle the data first */
    spin_lock(&data_sinfo->lock);
    if (data_sinfo->total_bytes - data_sinfo->bytes_used -
        data_sinfo->bytes_delalloc - data_sinfo->bytes_reserved -
        data_sinfo->bytes_pinned - data_sinfo->bytes_readonly -
        data_sinfo->bytes_may_use < bytes) {
        struct btrfs_trans_handle *trans;

        /*
         * if we don't have enough free bytes in this space then we need
         * to alloc a new chunk.
         */
        if (!data_sinfo->full) {
            u64 alloc_target;

            data_sinfo->force_alloc = 1;
            spin_unlock(&data_sinfo->lock);

            alloc_target = btrfs_get_alloc_profile(root, 1);
            trans = btrfs_start_transaction(root, 1);
            if (!trans)
                return -ENOMEM;

            ret = do_chunk_alloc(trans, root->fs_info->extent_root,
                         bytes + 2 * 1024 * 1024,
                         alloc_target, 0);
            btrfs_end_transaction(trans, root);
            if (ret)
                return ret;
            goto again;
        }
        spin_unlock(&data_sinfo->lock);

        /* commit the current transaction and try again */
        if (!committed) {
            committed = 1;
            trans = btrfs_join_transaction(root, 1);
            if (!trans)
                return -ENOMEM;
            ret = btrfs_commit_transaction(trans, root);
            if (ret)
                return ret;
            goto again;
        }

        printk(KERN_ERR "no space left, need %llu, %llu delalloc bytes"
               ", %llu bytes_used, %llu bytes_reserved, "
               "%llu bytes_pinned, %llu bytes_readonly, %llu may use, "
               "%llu total\n", bytes, data_sinfo->bytes_delalloc,
               data_sinfo->bytes_used, data_sinfo->bytes_reserved,
               data_sinfo->bytes_pinned, data_sinfo->bytes_readonly,
               data_sinfo->bytes_may_use, data_sinfo->total_bytes);
        return -ENOSPC;
    }
    data_sinfo->bytes_may_use += bytes;
    BTRFS_I(inode)->reserved_bytes += bytes;
    spin_unlock(&data_sinfo->lock);

    return btrfs_check_metadata_free_space(root);
}
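The alignment expression charges every reservation in whole sectors by rounding up to the next sectorsize multiple (sectorsize is a power of two). For example:

    u64 sectorsize = 4096;
    u64 bytes = 5000;

    /* round up to the next multiple of sectorsize */
    bytes = (bytes + sectorsize - 1) & ~(sectorsize - 1);    /* now 8192 */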

/*
 * if there was an error for whatever reason after calling
 * btrfs_check_data_free_space, call this so we can cleanup the counters.
 */
void btrfs_free_reserved_data_space(struct btrfs_root *root,
                    struct inode *inode, u64 bytes)
{
    struct btrfs_space_info *data_sinfo;

    /* make sure bytes are sectorsize aligned */
    bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

    data_sinfo = BTRFS_I(inode)->space_info;
    spin_lock(&data_sinfo->lock);
    data_sinfo->bytes_may_use -= bytes;
    BTRFS_I(inode)->reserved_bytes -= bytes;
    spin_unlock(&data_sinfo->lock);
}

/* called when we are adding a delalloc extent to the inode's io_tree */
void btrfs_delalloc_reserve_space(struct btrfs_root *root, struct inode *inode,
                u64 bytes)
{
    struct btrfs_space_info *data_sinfo;

    /* get the space info for where this inode will be storing its data */
    data_sinfo = BTRFS_I(inode)->space_info;

    /* make sure we have enough space to handle the data first */
    spin_lock(&data_sinfo->lock);
    data_sinfo->bytes_delalloc += bytes;

    /*
     * we are adding a delalloc extent without calling
     * btrfs_check_data_free_space first. This happens on a weird
     * writepage condition, but shouldn't hurt our accounting
     */
    if (unlikely(bytes > BTRFS_I(inode)->reserved_bytes)) {
        data_sinfo->bytes_may_use -= BTRFS_I(inode)->reserved_bytes;
        BTRFS_I(inode)->reserved_bytes = 0;
    } else {
        data_sinfo->bytes_may_use -= bytes;
        BTRFS_I(inode)->reserved_bytes -= bytes;
    }

    spin_unlock(&data_sinfo->lock);
}

/* called when we are clearing a delalloc extent from the inode's io_tree */
void btrfs_delalloc_free_space(struct btrfs_root *root, struct inode *inode,
                u64 bytes)
{
    struct btrfs_space_info *info;

    info = BTRFS_I(inode)->space_info;

    spin_lock(&info->lock);
    info->bytes_delalloc -= bytes;
    spin_unlock(&info->lock);
}

static int do_chunk_alloc(struct btrfs_trans_handle *trans,
              struct btrfs_root *extent_root, u64 alloc_bytes,
              u64 flags, int force)
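The calling convention these helpers establish is reserve-first, release-on-failure; the btrfs_file_write() changes in file.c below apply it on every error path. A condensed sketch (do_the_write() is a stand-in, not a real function):

    ret = btrfs_check_data_free_space(root, inode, write_bytes);
    if (ret)
        return ret;        /* nothing was reserved yet */

    ret = do_the_write();  /* prepare/copy/dirty the pages */
    if (ret)               /* undo the reservation on failure */
        btrfs_free_reserved_data_space(root, inode, write_bytes);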
@ -3105,6 +3337,10 @@ static void dump_space_info(struct btrfs_space_info *info, u64 bytes)
           (unsigned long long)(info->total_bytes - info->bytes_used -
                    info->bytes_pinned - info->bytes_reserved),
           (info->full) ? "" : "not ");
    printk(KERN_INFO "space_info total=%llu, pinned=%llu, delalloc=%llu,"
           " may_use=%llu, used=%llu\n", info->total_bytes,
           info->bytes_pinned, info->bytes_delalloc, info->bytes_may_use,
           info->bytes_used);

    down_read(&info->groups_sem);
    list_for_each_entry(cache, &info->block_groups, list) {
@ -3131,24 +3367,10 @@ static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans,
{
    int ret;
    u64 search_start = 0;
    u64 alloc_profile;
    struct btrfs_fs_info *info = root->fs_info;

    if (data) {
        alloc_profile = info->avail_data_alloc_bits &
            info->data_alloc_profile;
        data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
    } else if (root == root->fs_info->chunk_root) {
        alloc_profile = info->avail_system_alloc_bits &
            info->system_alloc_profile;
        data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
    } else {
        alloc_profile = info->avail_metadata_alloc_bits &
            info->metadata_alloc_profile;
        data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
    }
    data = btrfs_get_alloc_profile(root, data);
again:
    data = btrfs_reduce_alloc_profile(root, data);
    /*
     * the only place that sets empty_size is btrfs_realloc_node, which
     * is not called recursively on allocations

@ -1091,19 +1091,24 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
        WARN_ON(num_pages > nrptrs);
        memset(pages, 0, sizeof(struct page *) * nrptrs);

        ret = btrfs_check_free_space(root, write_bytes, 0);
        ret = btrfs_check_data_free_space(root, inode, write_bytes);
        if (ret)
            goto out;

        ret = prepare_pages(root, file, pages, num_pages,
                    pos, first_index, last_index,
                    write_bytes);
        if (ret)
        if (ret) {
            btrfs_free_reserved_data_space(root, inode,
                               write_bytes);
            goto out;
        }

        ret = btrfs_copy_from_user(pos, num_pages,
                       write_bytes, pages, buf);
        if (ret) {
            btrfs_free_reserved_data_space(root, inode,
                               write_bytes);
            btrfs_drop_pages(pages, num_pages);
            goto out;
        }

@ -1111,8 +1116,11 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
        ret = dirty_and_release_pages(NULL, root, file, pages,
                          num_pages, pos, write_bytes);
        btrfs_drop_pages(pages, num_pages);
        if (ret)
        if (ret) {
            btrfs_free_reserved_data_space(root, inode,
                               write_bytes);
            goto out;
        }

        if (will_write) {
            btrfs_fdatawrite_range(inode->i_mapping, pos,

@ -1136,6 +1144,8 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
    }
out:
    mutex_unlock(&inode->i_mutex);
    if (ret)
        err = ret;

out_nolock:
    kfree(pages);

@ -101,34 +101,6 @@ static int btrfs_init_inode_security(struct inode *inode, struct inode *dir)
    return err;
}

/*
 * a very lame attempt at stopping writes when the FS is 85% full. There
 * are countless ways this is incorrect, but it is better than nothing.
 */
int btrfs_check_free_space(struct btrfs_root *root, u64 num_required,
                int for_del)
{
    u64 total;
    u64 used;
    u64 thresh;
    int ret = 0;

    spin_lock(&root->fs_info->delalloc_lock);
    total = btrfs_super_total_bytes(&root->fs_info->super_copy);
    used = btrfs_super_bytes_used(&root->fs_info->super_copy);
    if (for_del)
        thresh = total * 90;
    else
        thresh = total * 85;

    do_div(thresh, 100);

    if (used + root->fs_info->delalloc_bytes + num_required > thresh)
        ret = -ENOSPC;
    spin_unlock(&root->fs_info->delalloc_lock);
    return ret;
}

/*
 * this does all the hard work for inserting an inline extent into
 * the btree. The caller should have done a btrfs_drop_extents so that
@ -1190,6 +1162,7 @@ static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
     */
    if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
        struct btrfs_root *root = BTRFS_I(inode)->root;
        btrfs_delalloc_reserve_space(root, inode, end - start + 1);
        spin_lock(&root->fs_info->delalloc_lock);
        BTRFS_I(inode)->delalloc_bytes += end - start + 1;
        root->fs_info->delalloc_bytes += end - start + 1;

@ -1223,9 +1196,12 @@ static int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
                   (unsigned long long)end - start + 1,
                   (unsigned long long)
                   root->fs_info->delalloc_bytes);
            btrfs_delalloc_free_space(root, inode, (u64)-1);
            root->fs_info->delalloc_bytes = 0;
            BTRFS_I(inode)->delalloc_bytes = 0;
        } else {
            btrfs_delalloc_free_space(root, inode,
                          end - start + 1);
            root->fs_info->delalloc_bytes -= end - start + 1;
            BTRFS_I(inode)->delalloc_bytes -= end - start + 1;
        }
@ -2245,10 +2221,6 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)

    root = BTRFS_I(dir)->root;

    ret = btrfs_check_free_space(root, 1, 1);
    if (ret)
        goto fail;

    trans = btrfs_start_transaction(root, 1);

    btrfs_set_trans_block_group(trans, dir);

@ -2261,7 +2233,6 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
    nr = trans->blocks_used;

    btrfs_end_transaction_throttle(trans, root);
fail:
    btrfs_btree_balance_dirty(root, nr);
    return ret;
}

@ -2284,10 +2255,6 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
        return -ENOTEMPTY;
    }

    ret = btrfs_check_free_space(root, 1, 1);
    if (ret)
        goto fail;

    trans = btrfs_start_transaction(root, 1);
    btrfs_set_trans_block_group(trans, dir);

@ -2304,7 +2271,6 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
fail_trans:
    nr = trans->blocks_used;
    ret = btrfs_end_transaction_throttle(trans, root);
fail:
    btrfs_btree_balance_dirty(root, nr);

    if (ret && !err)
@ -2818,7 +2784,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
    if (size <= hole_start)
        return 0;

    err = btrfs_check_free_space(root, 1, 0);
    err = btrfs_check_metadata_free_space(root);
    if (err)
        return err;

@ -3014,6 +2980,7 @@ static noinline void init_btrfs_i(struct inode *inode)
    bi->last_trans = 0;
    bi->logged_trans = 0;
    bi->delalloc_bytes = 0;
    bi->reserved_bytes = 0;
    bi->disk_i_size = 0;
    bi->flags = 0;
    bi->index_cnt = (u64)-1;

@ -3035,6 +3002,7 @@ static int btrfs_init_locked_inode(struct inode *inode, void *p)
    inode->i_ino = args->ino;
    init_btrfs_i(inode);
    BTRFS_I(inode)->root = args->root;
    btrfs_set_inode_space_info(args->root, inode);
    return 0;
}

@ -3455,6 +3423,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
    BTRFS_I(inode)->index_cnt = 2;
    BTRFS_I(inode)->root = root;
    BTRFS_I(inode)->generation = trans->transid;
    btrfs_set_inode_space_info(root, inode);

    if (mode & S_IFDIR)
        owner = 0;
@ -3602,7 +3571,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
    if (!new_valid_dev(rdev))
        return -EINVAL;

    err = btrfs_check_free_space(root, 1, 0);
    err = btrfs_check_metadata_free_space(root);
    if (err)
        goto fail;

@ -3665,7 +3634,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
    u64 objectid;
    u64 index = 0;

    err = btrfs_check_free_space(root, 1, 0);
    err = btrfs_check_metadata_free_space(root);
    if (err)
        goto fail;
    trans = btrfs_start_transaction(root, 1);

@ -3733,7 +3702,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
        return -ENOENT;

    btrfs_inc_nlink(inode);
    err = btrfs_check_free_space(root, 1, 0);
    err = btrfs_check_metadata_free_space(root);
    if (err)
        goto fail;
    err = btrfs_set_inode_index(dir, &index);

@ -3779,7 +3748,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
    u64 index = 0;
    unsigned long nr = 1;

    err = btrfs_check_free_space(root, 1, 0);
    err = btrfs_check_metadata_free_space(root);
    if (err)
        goto out_unlock;

@ -4336,7 +4305,7 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
    u64 page_start;
    u64 page_end;

    ret = btrfs_check_free_space(root, PAGE_CACHE_SIZE, 0);
    ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE);
    if (ret)
        goto out;

@ -4349,6 +4318,7 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)

    if ((page->mapping != inode->i_mapping) ||
        (page_start >= size)) {
        btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
        /* page got truncated out from underneath us */
        goto out_unlock;
    }
@ -4631,7 +4601,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
    if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
        return -EXDEV;

    ret = btrfs_check_free_space(root, 1, 0);
    ret = btrfs_check_metadata_free_space(root);
    if (ret)
        goto out_unlock;

@ -4749,7 +4719,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
    if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
        return -ENAMETOOLONG;

    err = btrfs_check_free_space(root, 1, 0);
    err = btrfs_check_metadata_free_space(root);
    if (err)
        goto out_fail;

@ -70,7 +70,7 @@ static noinline int create_subvol(struct btrfs_root *root,
    u64 index = 0;
    unsigned long nr = 1;

    ret = btrfs_check_free_space(root, 1, 0);
    ret = btrfs_check_metadata_free_space(root);
    if (ret)
        goto fail_commit;

@ -203,7 +203,7 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
    if (!root->ref_cows)
        return -EINVAL;

    ret = btrfs_check_free_space(root, 1, 0);
    ret = btrfs_check_metadata_free_space(root);
    if (ret)
        goto fail_unlock;

@ -374,7 +374,7 @@ static int btrfs_defrag_file(struct file *file)
    unsigned long i;
    int ret;

    ret = btrfs_check_free_space(root, inode->i_size, 0);
    ret = btrfs_check_data_free_space(root, inode, inode->i_size);
    if (ret)
        return -ENOSPC;

@ -76,6 +76,7 @@ struct drm_encoder_helper_funcs {
    void (*mode_set)(struct drm_encoder *encoder,
             struct drm_display_mode *mode,
             struct drm_display_mode *adjusted_mode);
    struct drm_crtc *(*get_crtc)(struct drm_encoder *encoder);
    /* detect for DAC style encoders */
    enum drm_connector_status (*detect)(struct drm_encoder *encoder,
                        struct drm_connector *connector);

@ -58,10 +58,10 @@ struct detailed_pixel_timing {
    u8 hsync_pulse_width_lo;
    u8 vsync_pulse_width_lo:4;
    u8 vsync_offset_lo:4;
    u8 hsync_pulse_width_hi:2;
    u8 hsync_offset_hi:2;
    u8 vsync_pulse_width_hi:2;
    u8 vsync_offset_hi:2;
    u8 hsync_pulse_width_hi:2;
    u8 hsync_offset_hi:2;
    u8 width_mm_lo;
    u8 height_mm_lo;
    u8 height_mm_hi:4;
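These :2 fields are the high bits of larger EDID timing values, so their declaration order decides which bits of the raw byte they map to — that is what the swap above corrects. A decode sketch for one pairing (illustrative; pairing assumed from this header):

    /* Reassemble a 10-bit hsync offset from its lo byte and
     * 2-bit hi field. */
    unsigned int hsync_offset =
        (pt->hsync_offset_hi << 8) | pt->hsync_offset_lo;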
@ -708,6 +708,8 @@ struct req_iterator {
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)        \
    for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)    \
    if ((rq->bio))            \
        for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
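The new comment points drivers at rq_for_each_segment(), which layers on these macros to walk every bio in a request and every bio_vec within each bio. A usage sketch for a driver of this era (iterator shape assumed from this header):

    /* Sum the data length of a request, segment by segment. */
    struct req_iterator iter;
    struct bio_vec *bvec;
    unsigned int total = 0;

    rq_for_each_segment(bvec, rq, iter)
        total += bvec->bv_len;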
@ -663,7 +663,7 @@ typedef struct ide_drive_s ide_drive_t;
#define to_ide_device(dev) container_of(dev, ide_drive_t, gendev)

#define to_ide_drv(obj, cont_type)    \
    container_of(obj, struct cont_type, kref)
    container_of(obj, struct cont_type, dev)

#define ide_drv_g(disk, cont_type) \
    container_of((disk)->private_data, struct cont_type, driver)

@ -194,6 +194,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
/* FSTS_REG */
#define DMA_FSTS_PPF ((u32)2)
#define DMA_FSTS_PFO ((u32)1)
#define DMA_FSTS_IQE (1 << 4)
#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)

/* FRCD_REG, 32 bits access */

@ -328,7 +329,7 @@ extern int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
              unsigned int size_order, u64 type,
              int non_present_entry_flush);

extern void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);

extern void *intel_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
extern void intel_free_coherent(struct device *, size_t, void *, dma_addr_t);


mm/shmem.c

@ -169,13 +169,13 @@ static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
    return (flags & VM_ACCOUNT) ?
        security_vm_enough_memory_kern(VM_ACCT(size)) : 0;
    return (flags & VM_NORESERVE) ?
        0 : security_vm_enough_memory_kern(VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
    if (flags & VM_ACCOUNT)
    if (!(flags & VM_NORESERVE))
        vm_unacct_memory(VM_ACCT(size));
}

@ -187,13 +187,13 @@ static inline void shmem_unacct_size(unsigned long flags, loff_t size)
 */
static inline int shmem_acct_block(unsigned long flags)
{
    return (flags & VM_ACCOUNT) ?
        0 : security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE));
    return (flags & VM_NORESERVE) ?
        security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
    if (!(flags & VM_ACCOUNT))
    if (flags & VM_NORESERVE)
        vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}

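All four helpers flip from testing VM_ACCOUNT to testing VM_NORESERVE, which inverts the default: size accounting now happens unless the caller opts out, and shmem_acct_block is the mirror image (per-block charging applies only when the object was not pre-accounted). Spelled out as a sketch, with account() standing in for the security_vm_enough_memory_kern() calls above:

    old = (flags & VM_ACCOUNT)   ? account(size) : 0;    /* opt-in  */
    new = (flags & VM_NORESERVE) ? 0 : account(size);    /* opt-out */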
@ -1515,8 +1515,8 @@ static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
    return 0;
}

static struct inode *
shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
static struct inode *shmem_get_inode(struct super_block *sb, int mode,
                     dev_t dev, unsigned long flags)
{
    struct inode *inode;
    struct shmem_inode_info *info;

@ -1537,6 +1537,7 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
        info = SHMEM_I(inode);
        memset(info, 0, (char *)inode - (char *)info);
        spin_lock_init(&info->lock);
        info->flags = flags & VM_NORESERVE;
        INIT_LIST_HEAD(&info->swaplist);

        switch (mode & S_IFMT) {

@ -1779,9 +1780,10 @@ static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
    struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev);
    struct inode *inode;
    int error = -ENOSPC;

    inode = shmem_get_inode(dir->i_sb, mode, dev, VM_NORESERVE);
    if (inode) {
        error = security_inode_init_security(inode, dir, NULL, NULL,
                             NULL);

@ -1920,7 +1922,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
    if (len > PAGE_CACHE_SIZE)
        return -ENAMETOOLONG;

    inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
    inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
    if (!inode)
        return -ENOSPC;

@ -2332,7 +2334,7 @@ static int shmem_fill_super(struct super_block *sb,
    sb->s_flags |= MS_POSIXACL;
#endif

    inode = shmem_get_inode(sb, S_IFDIR | sbinfo->mode, 0);
    inode = shmem_get_inode(sb, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
    if (!inode)
        goto failed;
    inode->i_uid = sbinfo->uid;
@ -2574,12 +2576,12 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
    return 0;
}

#define shmem_file_operations ramfs_file_operations
#define shmem_vm_ops generic_file_vm_ops
#define shmem_get_inode ramfs_get_inode
#define shmem_acct_size(a, b) 0
#define shmem_unacct_size(a, b) do {} while (0)
#define SHMEM_MAX_BYTES LLONG_MAX
#define shmem_vm_ops generic_file_vm_ops
#define shmem_file_operations ramfs_file_operations
#define shmem_get_inode(sb, mode, dev, flags) ramfs_get_inode(sb, mode, dev)
#define shmem_acct_size(flags, size) 0
#define shmem_unacct_size(flags, size) do {} while (0)
#define SHMEM_MAX_BYTES LLONG_MAX

#endif /* CONFIG_SHMEM */

@ -2589,7 +2591,7 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
 * shmem_file_setup - get an unlinked file living in tmpfs
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: vm_flags
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
{
@ -2623,13 +2625,10 @@ struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
        goto put_dentry;

    error = -ENOSPC;
    inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
    inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0, flags);
    if (!inode)
        goto close_file;

#ifdef CONFIG_SHMEM
    SHMEM_I(inode)->flags = (flags & VM_NORESERVE) ? 0 : VM_ACCOUNT;
#endif
    d_instantiate(dentry, inode);
    inode->i_size = size;
    inode->i_nlink = 0;    /* It is unlinked */

@ -157,7 +157,7 @@ static void resample_shrink(struct snd_pcm_plugin *plugin,
    while (dst_frames1 > 0) {
        S1 = S2;
        if (src_frames1-- > 0) {
            S1 = *src;
            S2 = *src;
            src += src_step;
        }
        if (pos & ~R_MASK) {

@ -165,7 +165,7 @@ module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable Audiowerk2 soundcard.");

static struct pci_device_id snd_aw2_ids[] = {
    {PCI_VENDOR_ID_SAA7146, PCI_DEVICE_ID_SAA7146, PCI_ANY_ID, PCI_ANY_ID,
    {PCI_VENDOR_ID_SAA7146, PCI_DEVICE_ID_SAA7146, 0, 0,
     0, 0, 0},
    {0}
};

@ -1528,6 +1528,7 @@ static struct snd_emu_chip_details emu_chip_details[] = {
     .ca0151_chip = 1,
     .spk71 = 1,
     .spdif_bug = 1,
     .invert_shared_spdif = 1,    /* digital/analog switch swapped */
     .ac97_chip = 1} ,
    {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x10021102,
     .driver = "Audigy2", .name = "SB Audigy 2 Platinum [SB0240P]",

@ -277,18 +277,19 @@ static ssize_t init_verbs_store(struct device *dev,
{
    struct snd_hwdep *hwdep = dev_get_drvdata(dev);
    struct hda_codec *codec = hwdep->private_data;
    char *p;
    struct hda_verb verb, *v;
    struct hda_verb *v;
    int nid, verb, param;

    verb.nid = simple_strtoul(buf, &p, 0);
    verb.verb = simple_strtoul(p, &p, 0);
    verb.param = simple_strtoul(p, &p, 0);
    if (!verb.nid || !verb.verb || !verb.param)
    if (sscanf(buf, "%i %i %i", &nid, &verb, &param) != 3)
        return -EINVAL;
    if (!nid || !verb)
        return -EINVAL;
    v = snd_array_new(&codec->init_verbs);
    if (!v)
        return -ENOMEM;
    *v = verb;
    v->nid = nid;
    v->verb = verb;
    v->param = param;
    return count;
}

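sscanf() with %i auto-detects the base (0x… hex, 0… octal, otherwise decimal), so the attribute accepts the same inputs the old simple_strtoul(…, 0) parsing did while now rejecting malformed lines outright. A userspace demonstration of the same parse (illustrative only):

    #include <stdio.h>

    int main(void)
    {
        int nid, verb, param;

        /* same format string as init_verbs_store() */
        if (sscanf("0x14 0x707 2", "%i %i %i", &nid, &verb, &param) == 3)
            printf("nid=%d verb=%d param=%d\n", nid, verb, param);
        return 0;    /* prints: nid=20 verb=1799 param=2 */
    }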
@ -7017,6 +7017,7 @@ static int patch_alc882(struct hda_codec *codec)
        case 0x106b3e00: /* iMac 24 Aluminium */
            board_config = ALC885_IMAC24;
            break;
        case 0x106b00a0: /* MacBookPro3,1 - Another revision */
        case 0x106b00a1: /* Macbook (might be wrong - PCI SSID?) */
        case 0x106b00a4: /* MacbookPro4,1 */
        case 0x106b2c00: /* Macbook Pro rev3 */

@ -8469,6 +8470,8 @@ static struct snd_pci_quirk alc883_cfg_tbl[] = {
            ALC888_ACER_ASPIRE_4930G),
    SND_PCI_QUIRK(0x1025, 0x015e, "Acer Aspire 6930G",
            ALC888_ACER_ASPIRE_4930G),
    SND_PCI_QUIRK(0x1025, 0x0166, "Acer Aspire 6530G",
            ALC888_ACER_ASPIRE_4930G),
    SND_PCI_QUIRK(0x1025, 0, "Acer laptop", ALC883_ACER), /* default Acer */
    SND_PCI_QUIRK(0x1028, 0x020d, "Dell Inspiron 530", ALC888_6ST_DELL),
    SND_PCI_QUIRK(0x103c, 0x2a3d, "HP Pavillion", ALC883_6ST_DIG),

@ -97,12 +97,12 @@ struct pcxhr_mgr {
    int capture_chips;
    int fw_file_set;
    int firmware_num;
    int is_hr_stereo:1;
    int board_has_aes1:1;    /* if 1 board has AES1 plug and SRC */
    int board_has_analog:1;  /* if 0 the board is digital only */
    int board_has_mic:1;     /* if 1 the board has microphone input */
    int board_aes_in_192k:1; /* if 1 the aes input plugs do support 192kHz */
    int mono_capture:1;      /* if 1 the board does mono capture */
    unsigned int is_hr_stereo:1;
    unsigned int board_has_aes1:1;    /* if 1 board has AES1 plug and SRC */
    unsigned int board_has_analog:1;  /* if 0 the board is digital only */
    unsigned int board_has_mic:1;     /* if 1 the board has microphone input */
    unsigned int board_aes_in_192k:1; /* if 1 the aes input plugs do support 192kHz */
    unsigned int mono_capture:1;      /* if 1 the board does mono capture */

    struct snd_dma_buffer hostport;
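The type change matters because a signed 1-bit field can hold only 0 and -1 (where plain int bit-fields are signed, as with gcc), so a test like board_has_mic == 1 can never be true. A standalone demonstration:

    #include <stdio.h>

    struct flags {
        int s:1;          /* signed: stores 0 or -1 */
        unsigned int u:1; /* unsigned: stores 0 or 1 */
    };

    int main(void)
    {
        struct flags f = { .s = 1, .u = 1 };

        printf("%d %u\n", f.s, f.u);    /* gcc prints: -1 1 */
        return 0;
    }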