Merge tag 'stable/for-linus-3.12-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull Xen fixes from Konrad Rzeszutek Wilk:
 "Bug-fixes and one update to the kernel-parameters.txt documentation.

  - Fix PV spinlocks triggering a jump_label code bug
  - Remove extraneous code in the tpm front driver
  - Fix ballooning out of pages when non-preemptible
  - Fix deadlock when using a 32-bit initial domain with a large amount of memory
  - Add xen_nopvspin parameter to the documentation"

* tag 'stable/for-linus-3.12-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/spinlock: Document the xen_nopvspin parameter.
  xen/p2m: check MFN is in range before using the m2p table
  xen/balloon: don't alloc page while non-preemptible
  xen: Do not enable spinlocks before jump_label_init() has executed
  tpm: xen-tpmfront: Remove the locality sysfs attribute
  tpm: xen-tpmfront: Fix default durations
commit 4b97280675
6 changed files with 63 additions and 67 deletions
Documentation/kernel-parameters.txt
@@ -3485,6 +3485,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			the unplug protocol
 			never -- do not unplug even if version check succeeds
 
+	xen_nopvspin	[X86,XEN]
+			Disables the ticketlock slowpath using Xen PV
+			optimizations.
+
 	xirc2ps_cs=	[NET,PCMCIA]
 			Format:
 			<irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
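For reference, the new option goes on the guest kernel command line like any other boot parameter; a minimal boot-entry sketch (the kernel path and root device are illustrative, not part of this patch):

    linux /boot/vmlinuz-3.12 root=/dev/xvda1 ro xen_nopvspin

With the option set, a kernel running on Xen keeps the bare ticketlock code rather than patching in the PV slowpath shown in the spinlock.c changes below.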
arch/x86/include/asm/xen/page.h
@@ -79,30 +79,38 @@ static inline int phys_to_machine_mapping_valid(unsigned long pfn)
 	return get_phys_to_machine(pfn) != INVALID_P2M_ENTRY;
 }
 
-static inline unsigned long mfn_to_pfn(unsigned long mfn)
+static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
 {
 	unsigned long pfn;
-	int ret = 0;
+	int ret;
 
 	if (xen_feature(XENFEAT_auto_translated_physmap))
 		return mfn;
 
-	if (unlikely(mfn >= machine_to_phys_nr)) {
-		pfn = ~0;
-		goto try_override;
-	}
-	pfn = 0;
+	if (unlikely(mfn >= machine_to_phys_nr))
+		return ~0;
+
 	/*
 	 * The array access can fail (e.g., device space beyond end of RAM).
 	 * In such cases it doesn't matter what we return (we return garbage),
 	 * but we must handle the fault without crashing!
 	 */
 	ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
-try_override:
-	/* ret might be < 0 if there are no entries in the m2p for mfn */
 	if (ret < 0)
-		pfn = ~0;
-	else if (get_phys_to_machine(pfn) != mfn)
+		return ~0;
+
+	return pfn;
+}
+
+static inline unsigned long mfn_to_pfn(unsigned long mfn)
+{
+	unsigned long pfn;
+
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		return mfn;
+
+	pfn = mfn_to_pfn_no_overrides(mfn);
+	if (get_phys_to_machine(pfn) != mfn) {
 		/*
 		 * If this appears to be a foreign mfn (because the pfn
 		 * doesn't map back to the mfn), then check the local override
@@ -111,6 +119,7 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn)
 		 * m2p_find_override_pfn returns ~0 if it doesn't find anything.
 		 */
 		pfn = m2p_find_override_pfn(mfn, ~0);
+	}
 
 	/*
 	 * pfn is ~0 if there are no entries in the m2p for mfn or if the
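Why the range check has to short-circuit: in the old code an out-of-range mfn set pfn = ~0 and jumped to try_override with ret still 0, so control fell through to get_phys_to_machine(~0), an out-of-bounds p2m lookup with a garbage index. A minimal userspace model of the corrected pattern (the table name and size are illustrative stand-ins, not kernel code):

    #include <stdio.h>

    /* Stand-ins for machine_to_phys_mapping and machine_to_phys_nr. */
    #define M2P_NR 8UL
    static unsigned long m2p[M2P_NR] = {7, 6, 5, 4, 3, 2, 1, 0};

    /* Corrected pattern: validate the index before any table access and
     * return the ~0 sentinel instead of carrying a garbage value into a
     * second table lookup. */
    static unsigned long mfn_to_pfn_model(unsigned long mfn)
    {
        if (mfn >= M2P_NR)
            return ~0UL;
        return m2p[mfn];
    }

    int main(void)
    {
        printf("%lx\n", mfn_to_pfn_model(2));  /* prints 5 */
        printf("%lx\n", mfn_to_pfn_model(99)); /* prints ffffffffffffffff */
        return 0;
    }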
arch/x86/xen/p2m.c
@@ -879,7 +879,6 @@ int m2p_add_override(unsigned long mfn, struct page *page,
 	unsigned long uninitialized_var(address);
 	unsigned level;
 	pte_t *ptep = NULL;
-	int ret = 0;
 
 	pfn = page_to_pfn(page);
 	if (!PageHighMem(page)) {
@@ -926,8 +925,8 @@ int m2p_add_override(unsigned long mfn, struct page *page,
 	 * frontend pages while they are being shared with the backend,
 	 * because mfn_to_pfn (that ends up being called by GUPF) will
 	 * return the backend pfn rather than the frontend pfn. */
-	ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
-	if (ret == 0 && get_phys_to_machine(pfn) == mfn)
+	pfn = mfn_to_pfn_no_overrides(mfn);
+	if (get_phys_to_machine(pfn) == mfn)
 		set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));
 
 	return 0;
@@ -942,7 +941,6 @@ int m2p_remove_override(struct page *page,
 	unsigned long uninitialized_var(address);
 	unsigned level;
 	pte_t *ptep = NULL;
-	int ret = 0;
 
 	pfn = page_to_pfn(page);
 	mfn = get_phys_to_machine(pfn);
@@ -1029,8 +1027,8 @@ int m2p_remove_override(struct page *page,
 	 * the original pfn causes mfn_to_pfn(mfn) to return the frontend
 	 * pfn again. */
 	mfn &= ~FOREIGN_FRAME_BIT;
-	ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
-	if (ret == 0 && get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
+	pfn = mfn_to_pfn_no_overrides(mfn);
+	if (get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
 	    m2p_find_override(mfn) == NULL)
 		set_phys_to_machine(pfn, mfn);
 
arch/x86/xen/spinlock.c
@@ -259,6 +259,14 @@ void xen_uninit_lock_cpu(int cpu)
 }
 
 
+/*
+ * Our init of PV spinlocks is split into two init functions because we
+ * use both paravirt patching and jump label patching and have to do
+ * all of this before SMP code is invoked.
+ *
+ * The paravirt patching needs to be done _before_ the alternative asm code
+ * is started, otherwise we would not patch the core kernel code.
+ */
 void __init xen_init_spinlocks(void)
 {
 
@@ -267,12 +275,26 @@ void __init xen_init_spinlocks(void)
 		return;
 	}
 
-	static_key_slow_inc(&paravirt_ticketlocks_enabled);
-
 	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning);
 	pv_lock_ops.unlock_kick = xen_unlock_kick;
 }
 
+/*
+ * The jump_label init code needs to happen _after_ the jump labels are
+ * enabled and before SMP is started. Hence we use a pre-SMP initcall
+ * level init. We cannot do it in xen_init_spinlocks as that is done
+ * before jump labels are activated.
+ */
+static __init int xen_init_spinlocks_jump(void)
+{
+	if (!xen_pvspin)
+		return 0;
+
+	static_key_slow_inc(&paravirt_ticketlocks_enabled);
+	return 0;
+}
+early_initcall(xen_init_spinlocks_jump);
+
 static __init int xen_parse_nopvspin(char *arg)
 {
 	xen_pvspin = false;
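The ordering constraint the two comments describe can be modeled outside the kernel. A userspace sketch, not kernel code (all names are illustrative stand-ins for static_key_slow_inc() and jump_label_init()): toggling a static key before the jump-label machinery is initialized misbehaves, so the toggle is deferred to an early_initcall, which runs after jump_label_init() and before SMP bring-up.

    #include <stdbool.h>
    #include <stdio.h>

    static bool jump_labels_ready;
    static int paravirt_ticketlocks_enabled;

    static void static_key_slow_inc_model(void)
    {
        if (!jump_labels_ready) {
            puts("BUG: static key touched before jump_label_init()");
            return;
        }
        paravirt_ticketlocks_enabled++;
    }

    static void jump_label_init_model(void)
    {
        jump_labels_ready = true;
    }

    int main(void)
    {
        static_key_slow_inc_model();   /* old placement: too early, the bug */
        jump_label_init_model();
        static_key_slow_inc_model();   /* new placement (early_initcall): safe */
        printf("enabled=%d\n", paravirt_ticketlocks_enabled); /* enabled=1 */
        return 0;
    }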
drivers/char/tpm/xen-tpmfront.c
@@ -142,32 +142,6 @@ static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
 	return length;
 }
 
-ssize_t tpm_show_locality(struct device *dev, struct device_attribute *attr,
-			  char *buf)
-{
-	struct tpm_chip *chip = dev_get_drvdata(dev);
-	struct tpm_private *priv = TPM_VPRIV(chip);
-	u8 locality = priv->shr->locality;
-
-	return sprintf(buf, "%d\n", locality);
-}
-
-ssize_t tpm_store_locality(struct device *dev, struct device_attribute *attr,
-			   const char *buf, size_t len)
-{
-	struct tpm_chip *chip = dev_get_drvdata(dev);
-	struct tpm_private *priv = TPM_VPRIV(chip);
-	u8 val;
-
-	int rv = kstrtou8(buf, 0, &val);
-	if (rv)
-		return rv;
-
-	priv->shr->locality = val;
-
-	return len;
-}
-
 static const struct file_operations vtpm_ops = {
 	.owner = THIS_MODULE,
 	.llseek = no_llseek,
@@ -188,8 +162,6 @@ static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
 static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
 static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
 static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
-static DEVICE_ATTR(locality, S_IRUGO | S_IWUSR, tpm_show_locality,
-		   tpm_store_locality);
 
 static struct attribute *vtpm_attrs[] = {
 	&dev_attr_pubek.attr,
@@ -202,7 +174,6 @@ static struct attribute *vtpm_attrs[] = {
 	&dev_attr_cancel.attr,
 	&dev_attr_durations.attr,
 	&dev_attr_timeouts.attr,
-	&dev_attr_locality.attr,
 	NULL,
 };
 
@@ -210,8 +181,6 @@ static struct attribute_group vtpm_attr_grp = {
 	.attrs = vtpm_attrs,
 };
 
-#define TPM_LONG_TIMEOUT (10 * 60 * HZ)
-
 static const struct tpm_vendor_specific tpm_vtpm = {
 	.status = vtpm_status,
 	.recv = vtpm_recv,
@@ -224,11 +193,6 @@ static const struct tpm_vendor_specific tpm_vtpm = {
 	.miscdev = {
 		.fops = &vtpm_ops,
 	},
-	.duration = {
-		TPM_LONG_TIMEOUT,
-		TPM_LONG_TIMEOUT,
-		TPM_LONG_TIMEOUT,
-	},
 };
 
 static irqreturn_t tpmif_interrupt(int dummy, void *dev_id)
drivers/xen/balloon.c
@@ -398,8 +398,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 	if (nr_pages > ARRAY_SIZE(frame_list))
 		nr_pages = ARRAY_SIZE(frame_list);
 
-	scratch_page = get_balloon_scratch_page();
-
 	for (i = 0; i < nr_pages; i++) {
 		page = alloc_page(gfp);
 		if (page == NULL) {
@@ -413,6 +411,12 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 
 		scrub_page(page);
 
+		/*
+		 * Ballooned out frames are effectively replaced with
+		 * a scratch frame. Ensure direct mappings and the
+		 * p2m are consistent.
+		 */
+		scratch_page = get_balloon_scratch_page();
 #ifdef CONFIG_XEN_HAVE_PVMMU
 		if (xen_pv_domain() && !PageHighMem(page)) {
 			ret = HYPERVISOR_update_va_mapping(
@@ -422,24 +426,19 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 			BUG_ON(ret);
 		}
 #endif
-	}
-
-	/* Ensure that ballooned highmem pages don't have kmaps. */
-	kmap_flush_unused();
-	flush_tlb_all();
-
-	/* No more mappings: invalidate P2M and add to balloon. */
-	for (i = 0; i < nr_pages; i++) {
-		pfn = mfn_to_pfn(frame_list[i]);
 		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
 			unsigned long p;
 			p = page_to_pfn(scratch_page);
 			__set_phys_to_machine(pfn, pfn_to_mfn(p));
 		}
+		put_balloon_scratch_page();
 
 		balloon_append(pfn_to_page(pfn));
 	}
 
-	put_balloon_scratch_page();
+	/* Ensure that ballooned highmem pages don't have kmaps. */
+	kmap_flush_unused();
+	flush_tlb_all();
+
 	set_xen_guest_handle(reservation.extent_start, frame_list);
 	reservation.nr_extents = nr_pages;
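The reordering matters because, as the patch title suggests, get_balloon_scratch_page() opens a non-preemptible window (it takes a per-CPU reference) while alloc_page(gfp) may sleep; the fix allocates first and only then enters the non-preemptible section, once per page. A userspace sketch of the same discipline (all names are illustrative analogues, not the kernel API):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool preempt_disabled;

    static void get_scratch(void) { preempt_disabled = true; }  /* per-CPU ref analogue */
    static void put_scratch(void) { preempt_disabled = false; }

    static void alloc_page_model(void)
    {
        /* A sleeping allocation inside a non-preemptible region is the bug. */
        assert(!preempt_disabled);
    }

    int main(void)
    {
        for (int i = 0; i < 4; i++) {
            alloc_page_model();  /* may sleep: do it first */
            get_scratch();       /* non-preemptible window opens */
            /* ... remap the ballooned frame to the scratch frame ... */
            put_scratch();       /* window closes before the next iteration */
        }
        puts("ok: no allocation while non-preemptible");
        return 0;
    }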