/* drivers/video/xen-fbfront.c */

/*
* Xen para-virtual frame buffer device
*
* Copyright (C) 2005-2006 Anthony Liguori <aliguori@us.ibm.com>
* Copyright (C) 2006-2008 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
*
* Based on linux/drivers/video/q40fb.c
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*/
/*
* TODO:
*
* Switch to grant tables when they become capable of dealing with the
* frame buffer.
*/
#include <linux/console.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fb.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <asm/xen/hypervisor.h>
#include <xen/xen.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/interface/io/fbif.h>
#include <xen/interface/io/protocols.h>
#include <xen/xenbus.h>
/* Per-device state for one Xen virtual framebuffer frontend. */
struct xenfb_info {
unsigned char *fb; /* vzalloc()ed framebuffer memory */
struct fb_info *fb_info; /* registered fbdev device; NULL until probe succeeds */
int x1, y1, x2, y2; /* dirty rectangle,
protected by dirty_lock */
spinlock_t dirty_lock;
int nr_pages; /* number of pages backing fb */
int irq; /* event-channel irq; -1 when disconnected */
struct xenfb_page *page; /* page shared with the backend (rings + pd) */
unsigned long *mfns; /* machine frame numbers of the fb pages */
int update_wanted; /* XENFB_TYPE_UPDATE wanted */
int feature_resize; /* XENFB_TYPE_RESIZE ok */
struct xenfb_resize resize; /* protected by resize_lock */
int resize_dpy; /* ditto */
spinlock_t resize_lock;
struct xenbus_device *xbdev;
};
/* Default framebuffer size in bytes: width * height * bytes per pixel. */
#define XENFB_DEFAULT_FB_LEN (XENFB_WIDTH * XENFB_HEIGHT * XENFB_DEPTH / 8)
/* Indices into the video[] module-parameter array. */
enum { KPARAM_MEM, KPARAM_WIDTH, KPARAM_HEIGHT, KPARAM_CNT };
static int video[KPARAM_CNT] = { 2, XENFB_WIDTH, XENFB_HEIGHT };
module_param_array(video, int, NULL, 0);
MODULE_PARM_DESC(video,
"Video memory size in MB, width, height in pixels (default 2,800,600)");
/* Forward declarations (probe/remove/resume reference each other). */
static void xenfb_make_preferred_console(void);
static int xenfb_remove(struct xenbus_device *);
static void xenfb_init_shared_page(struct xenfb_info *, struct fb_info *);
static int xenfb_connect_backend(struct xenbus_device *, struct xenfb_info *);
static void xenfb_disconnect_backend(struct xenfb_info *);
/*
 * Place one event in the shared out ring and kick the backend.
 * The caller must have checked !xenfb_queue_full(), so the slot at
 * out_prod is known to be free.  Barrier ordering matters: the ring
 * contents must be globally visible before out_prod is advanced.
 */
static void xenfb_send_event(struct xenfb_info *info,
union xenfb_out_event *event)
{
u32 prod;
prod = info->page->out_prod;
/* caller ensures !xenfb_queue_full() */
mb(); /* ensure ring space available */
XENFB_OUT_RING_REF(info->page, prod) = *event;
wmb(); /* ensure ring contents visible */
info->page->out_prod = prod + 1;
notify_remote_via_irq(info->irq);
}
static void xenfb_do_update(struct xenfb_info *info,
int x, int y, int w, int h)
{
union xenfb_out_event event;
memset(&event, 0, sizeof(event));
event.type = XENFB_TYPE_UPDATE;
event.update.x = x;
event.update.y = y;
event.update.width = w;
event.update.height = h;
/* caller ensures !xenfb_queue_full() */
xenfb_send_event(info, &event);
}
static void xenfb_do_resize(struct xenfb_info *info)
{
union xenfb_out_event event;
memset(&event, 0, sizeof(event));
event.resize = info->resize;
/* caller ensures !xenfb_queue_full() */
xenfb_send_event(info, &event);
}
/* Nonzero when the out ring has no free slot left. */
static int xenfb_queue_full(struct xenfb_info *info)
{
	u32 used;

	/* Unsigned subtraction handles producer wrap-around. */
	used = info->page->out_prod - info->page->out_cons;
	return used == XENFB_OUT_RING_LEN;
}
/*
 * If a resize is pending (resize_dpy set by xenfb_set_par()) and the
 * out ring has room, send it now and clear the pending flag.  All
 * under resize_lock so the flag and the resize payload stay in sync.
 */
static void xenfb_handle_resize_dpy(struct xenfb_info *info)
{
	unsigned long flags;

	spin_lock_irqsave(&info->resize_lock, flags);
	if (info->resize_dpy && !xenfb_queue_full(info)) {
		info->resize_dpy = 0;
		xenfb_do_resize(info);
	}
	spin_unlock_irqrestore(&info->resize_lock, flags);
}
/*
 * Ask the backend to repaint the w x h rectangle at (x1, y1), merged
 * with any previously stashed dirty rectangle.  If the out ring is
 * full, the merged rectangle is stashed in info->x1..y2 (under
 * dirty_lock) to be retried on a later call; otherwise the dirty
 * state is cleared and an update event is sent immediately.
 */
static void xenfb_refresh(struct xenfb_info *info,
int x1, int y1, int w, int h)
{
unsigned long flags;
int x2 = x1 + w - 1;
int y2 = y1 + h - 1;
/* First flush any pending resize event. */
xenfb_handle_resize_dpy(info);
if (!info->update_wanted)
return;
spin_lock_irqsave(&info->dirty_lock, flags);
/* Combine with dirty rectangle: */
if (info->y1 < y1)
y1 = info->y1;
if (info->y2 > y2)
y2 = info->y2;
if (info->x1 < x1)
x1 = info->x1;
if (info->x2 > x2)
x2 = info->x2;
if (xenfb_queue_full(info)) {
/* Can't send right now, stash it in the dirty rectangle */
info->x1 = x1;
info->x2 = x2;
info->y1 = y1;
info->y2 = y2;
spin_unlock_irqrestore(&info->dirty_lock, flags);
return;
}
/* Clear dirty rectangle: */
info->x1 = info->y1 = INT_MAX;
info->x2 = info->y2 = 0;
spin_unlock_irqrestore(&info->dirty_lock, flags);
/* An inverted rectangle means nothing to repaint (flush-only call). */
if (x1 <= x2 && y1 <= y2)
xenfb_do_update(info, x1, y1, x2 - x1 + 1, y2 - y1 + 1);
}
/*
 * Deferred-io callback: pagelist holds the framebuffer pages written
 * since the last flush.  Convert each page's byte range into a span
 * of scanlines and refresh the union of those spans (full width,
 * rows miny..maxy).
 */
static void xenfb_deferred_io(struct fb_info *fb_info,
struct list_head *pagelist)
{
struct xenfb_info *info = fb_info->par;
struct page *page;
unsigned long beg, end;
int y1, y2, miny, maxy;
miny = INT_MAX;
maxy = 0;
list_for_each_entry(page, pagelist, lru) {
beg = page->index << PAGE_SHIFT; /* byte offset of page in fb */
end = beg + PAGE_SIZE - 1;
y1 = beg / fb_info->fix.line_length;
y2 = end / fb_info->fix.line_length;
if (y2 >= fb_info->var.yres)
y2 = fb_info->var.yres - 1; /* clamp the partial last line */
if (miny > y1)
miny = y1;
if (maxy < y2)
maxy = y2;
}
xenfb_refresh(info, 0, miny, fb_info->var.xres, maxy - miny + 1);
}
/* Deferred-io descriptor: flush dirty pages at most every HZ/20 ticks. */
static struct fb_deferred_io xenfb_defio = {
.delay = HZ / 20,
.deferred_io = xenfb_deferred_io,
};
/*
 * fb_ops.fb_setcolreg: store one palette entry in the pseudo palette
 * (truecolor visual only).
 *
 * Returns 0 on success, nonzero (fbdev convention) when regno is out
 * of range.  Valid indices are 0 .. cmap.len - 1, so regno must be
 * rejected with ">=": the original "regno > info->cmap.len" check was
 * off by one and allowed a write one slot past the end of the
 * 256-entry pseudo_palette array.
 */
static int xenfb_setcolreg(unsigned regno, unsigned red, unsigned green,
			   unsigned blue, unsigned transp,
			   struct fb_info *info)
{
	u32 v;

	if (regno >= info->cmap.len)
		return 1;

	/* Scale each 16-bit fbdev component down to the width of the
	 * corresponding bitfield, with rounding. */
#define CNVT_TOHW(val, width) ((((val)<<(width))+0x7FFF-(val))>>16)
	red = CNVT_TOHW(red, info->var.red.length);
	green = CNVT_TOHW(green, info->var.green.length);
	blue = CNVT_TOHW(blue, info->var.blue.length);
	transp = CNVT_TOHW(transp, info->var.transp.length);
#undef CNVT_TOHW

	v = (red << info->var.red.offset) |
	    (green << info->var.green.offset) |
	    (blue << info->var.blue.offset);

	switch (info->var.bits_per_pixel) {
	case 16:
	case 24:
	case 32:
		((u32 *)info->pseudo_palette)[regno] = v;
		break;
	}

	return 0;
}
/* fb_ops.fb_fillrect: draw into system memory, then push the
 * touched rectangle to the backend. */
static void xenfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
{
	struct xenfb_info *xfb = p->par;

	sys_fillrect(p, rect);
	xenfb_refresh(xfb, rect->dx, rect->dy, rect->width, rect->height);
}
/* fb_ops.fb_imageblit: blit into system memory, then push the
 * touched rectangle to the backend. */
static void xenfb_imageblit(struct fb_info *p, const struct fb_image *image)
{
	struct xenfb_info *xfb = p->par;

	sys_imageblit(p, image);
	xenfb_refresh(xfb, image->dx, image->dy, image->width, image->height);
}
/* fb_ops.fb_copyarea: copy within system memory, then push the
 * destination rectangle to the backend. */
static void xenfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
{
	struct xenfb_info *xfb = p->par;

	sys_copyarea(p, area);
	xenfb_refresh(xfb, area->dx, area->dy, area->width, area->height);
}
/*
 * fb_ops.fb_write: copy user data into the system-memory framebuffer.
 * The written region is not tracked here, so refresh the whole screen.
 */
static ssize_t xenfb_write(struct fb_info *p, const char __user *buf,
			   size_t count, loff_t *ppos)
{
	struct xenfb_info *xfb = p->par;
	ssize_t written;

	written = fb_sys_write(p, buf, count, ppos);
	xenfb_refresh(xfb, 0, 0, xfb->page->width, xfb->page->height);
	return written;
}
/*
 * fb_ops.fb_check_var: validate a requested video mode.
 *
 * Without backend resize support only the initial mode is accepted.
 * With it, any mode no larger than the initial one is allowed, as
 * long as the depth is unchanged, a row fits in the existing
 * line_length, and the total fits in the allocated memory.
 */
static int
xenfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
struct xenfb_info *xenfb_info;
int required_mem_len;
xenfb_info = info->par;
if (!xenfb_info->feature_resize) {
/* Backend can't resize: only the original mode is valid. */
if (var->xres == video[KPARAM_WIDTH] &&
var->yres == video[KPARAM_HEIGHT] &&
var->bits_per_pixel == xenfb_info->page->depth) {
return 0;
}
return -EINVAL;
}
/* Can't resize past initial width and height */
if (var->xres > video[KPARAM_WIDTH] || var->yres > video[KPARAM_HEIGHT])
return -EINVAL;
required_mem_len = var->xres * var->yres * xenfb_info->page->depth / 8;
if (var->bits_per_pixel == xenfb_info->page->depth &&
var->xres <= info->fix.line_length / (XENFB_DEPTH / 8) &&
required_mem_len <= info->fix.smem_len) {
var->xres_virtual = var->xres;
var->yres_virtual = var->yres;
return 0;
}
return -EINVAL;
}
/*
 * fb_ops.fb_set_par: stash the new geometry in info->resize and mark
 * a resize as pending.  The event itself is sent later from
 * xenfb_handle_resize_dpy() once there is ring space.
 */
static int xenfb_set_par(struct fb_info *info)
{
	struct xenfb_info *xfb = info->par;
	unsigned long flags;

	spin_lock_irqsave(&xfb->resize_lock, flags);
	xfb->resize.type = XENFB_TYPE_RESIZE;
	xfb->resize.width = info->var.xres;
	xfb->resize.height = info->var.yres;
	xfb->resize.stride = info->fix.line_length;
	xfb->resize.depth = info->var.bits_per_pixel;
	xfb->resize.offset = 0;
	xfb->resize_dpy = 1;
	spin_unlock_irqrestore(&xfb->resize_lock, flags);

	return 0;
}
/* fbdev operations: drawing ops wrap the sys_* helpers so every
 * screen modification also triggers a backend refresh. */
static struct fb_ops xenfb_fb_ops = {
.owner = THIS_MODULE,
.fb_read = fb_sys_read,
.fb_write = xenfb_write,
.fb_setcolreg = xenfb_setcolreg,
.fb_fillrect = xenfb_fillrect,
.fb_copyarea = xenfb_copyarea,
.fb_imageblit = xenfb_imageblit,
.fb_check_var = xenfb_check_var,
.fb_set_par = xenfb_set_par,
};
/*
 * Event-channel interrupt handler.
 *
 * No in events recognized, simply ignore them all.
 * If you need to recognize some, see xen-kbdfront's
 * input_handler() for how to do that.
 */
static irqreturn_t xenfb_event_handler(int irq, void *dev_id)
{
	struct xenfb_info *info = dev_id;
	struct xenfb_page *page = info->page;

	if (page->in_cons != page->in_prod) {
		/* Consume everything on the in ring and ack the backend. */
		page->in_cons = page->in_prod;
		notify_remote_via_irq(info->irq);
	}

	/* Flush dirty rectangle: */
	xenfb_refresh(info, INT_MAX, INT_MAX, -INT_MAX, -INT_MAX);

	return IRQ_HANDLED;
}
/*
 * Probe: allocate the framebuffer, MFN table and shared page, set up
 * the fbdev device, connect to the backend and register the
 * framebuffer.  On failure the error path funnels through
 * xenfb_remove(), which tolerates the partially initialised state
 * (info->fb_info is only set on full success).
 */
static int xenfb_probe(struct xenbus_device *dev,
const struct xenbus_device_id *id)
{
struct xenfb_info *info;
struct fb_info *fb_info;
int fb_size;
int val;
int ret = 0;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (info == NULL) {
xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
return -ENOMEM;
}
/* Limit kernel param videoram amount to what is in xenstore */
if (xenbus_scanf(XBT_NIL, dev->otherend, "videoram", "%d", &val) == 1) {
if (val < video[KPARAM_MEM])
video[KPARAM_MEM] = val;
}
/* If requested res does not fit in available memory, use default */
fb_size = video[KPARAM_MEM] * 1024 * 1024;
if (video[KPARAM_WIDTH] * video[KPARAM_HEIGHT] * XENFB_DEPTH / 8
> fb_size) {
video[KPARAM_WIDTH] = XENFB_WIDTH;
video[KPARAM_HEIGHT] = XENFB_HEIGHT;
fb_size = XENFB_DEFAULT_FB_LEN;
}
dev_set_drvdata(&dev->dev, info);
info->xbdev = dev;
info->irq = -1; /* not connected yet */
info->x1 = info->y1 = INT_MAX; /* empty dirty rectangle */
spin_lock_init(&info->dirty_lock);
spin_lock_init(&info->resize_lock);
info->fb = vzalloc(fb_size);
if (info->fb == NULL)
goto error_nomem;
info->nr_pages = (fb_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
info->mfns = vmalloc(sizeof(unsigned long) * info->nr_pages);
if (!info->mfns)
goto error_nomem;
/* set up shared page */
info->page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
if (!info->page)
goto error_nomem;
/* abusing framebuffer_alloc() to allocate pseudo_palette */
fb_info = framebuffer_alloc(sizeof(u32) * 256, NULL);
if (fb_info == NULL)
goto error_nomem;
/* complete the abuse: */
fb_info->pseudo_palette = fb_info->par;
fb_info->par = info;
fb_info->screen_base = info->fb;
fb_info->fbops = &xenfb_fb_ops;
fb_info->var.xres_virtual = fb_info->var.xres = video[KPARAM_WIDTH];
fb_info->var.yres_virtual = fb_info->var.yres = video[KPARAM_HEIGHT];
fb_info->var.bits_per_pixel = XENFB_DEPTH;
fb_info->var.red = (struct fb_bitfield){16, 8, 0}; /* R/G/B at bit offsets 16/8/0 */
fb_info->var.green = (struct fb_bitfield){8, 8, 0};
fb_info->var.blue = (struct fb_bitfield){0, 8, 0};
fb_info->var.activate = FB_ACTIVATE_NOW;
fb_info->var.height = -1; /* physical dimensions unknown */
fb_info->var.width = -1;
fb_info->var.vmode = FB_VMODE_NONINTERLACED;
fb_info->fix.visual = FB_VISUAL_TRUECOLOR;
fb_info->fix.line_length = fb_info->var.xres * XENFB_DEPTH / 8;
fb_info->fix.smem_start = 0;
fb_info->fix.smem_len = fb_size;
strcpy(fb_info->fix.id, "xen");
fb_info->fix.type = FB_TYPE_PACKED_PIXELS;
fb_info->fix.accel = FB_ACCEL_NONE;
fb_info->flags = FBINFO_FLAG_DEFAULT | FBINFO_VIRTFB;
ret = fb_alloc_cmap(&fb_info->cmap, 256, 0);
if (ret < 0) {
framebuffer_release(fb_info);
xenbus_dev_fatal(dev, ret, "fb_alloc_cmap");
goto error;
}
fb_info->fbdefio = &xenfb_defio;
fb_deferred_io_init(fb_info);
xenfb_init_shared_page(info, fb_info);
ret = xenfb_connect_backend(dev, info);
if (ret < 0) {
xenbus_dev_fatal(dev, ret, "xenfb_connect_backend");
goto error_fb;
}
ret = register_framebuffer(fb_info);
if (ret) {
xenbus_dev_fatal(dev, ret, "register_framebuffer");
goto error_fb;
}
info->fb_info = fb_info;
xenfb_make_preferred_console();
return 0;
error_fb:
fb_deferred_io_cleanup(fb_info);
fb_dealloc_cmap(&fb_info->cmap);
framebuffer_release(fb_info);
error_nomem:
/* ret == 0 here means we arrived via an allocation failure above. */
if (!ret) {
ret = -ENOMEM;
xenbus_dev_fatal(dev, ret, "allocating device memory");
}
error:
xenfb_remove(dev);
return ret;
}
/*
 * Make this framebuffer the preferred console: find the registered
 * "tty" console with index 0 and re-register it with CON_CONSDEV set
 * (and without replaying the log buffer).  Skipped when the user
 * chose a console on the kernel command line.
 */
static void xenfb_make_preferred_console(void)
{
struct console *c;
if (console_set_on_cmdline)
return;
console_lock();
for_each_console(c) {
if (!strcmp(c->name, "tty") && c->index == 0)
break;
}
console_unlock();
/* c is NULL if the loop above finished without finding tty0. */
if (c) {
unregister_console(c);
c->flags |= CON_CONSDEV;
c->flags &= ~CON_PRINTBUFFER; /* don't print again */
register_console(c);
}
}
/*
 * Resume after save/restore or migration: drop the old backend
 * connection, rebuild the shared page (the machine frames backing the
 * framebuffer may have changed) and reconnect.
 */
static int xenfb_resume(struct xenbus_device *dev)
{
	struct xenfb_info *xfb = dev_get_drvdata(&dev->dev);

	xenfb_disconnect_backend(xfb);
	xenfb_init_shared_page(xfb, xfb->fb_info);
	return xenfb_connect_backend(dev, xfb);
}
/*
 * Teardown: disconnect from the backend first so no further events
 * are sent, then unregister the fbdev device (if probe got that far;
 * info->fb_info stays NULL otherwise) and release all allocations.
 * Also used by the probe error path, so it must tolerate partially
 * initialised state.
 */
static int xenfb_remove(struct xenbus_device *dev)
{
struct xenfb_info *info = dev_get_drvdata(&dev->dev);
xenfb_disconnect_backend(info);
if (info->fb_info) {
fb_deferred_io_cleanup(info->fb_info);
unregister_framebuffer(info->fb_info);
fb_dealloc_cmap(&info->fb_info->cmap);
framebuffer_release(info->fb_info);
}
free_page((unsigned long)info->page);
vfree(info->mfns);
vfree(info->fb);
kfree(info);
return 0;
}
/* Translate a vmalloc address to its machine frame number. */
static unsigned long vmalloc_to_mfn(void *address)
{
	unsigned long pfn = vmalloc_to_pfn(address);

	return pfn_to_mfn(pfn);
}
/*
 * (Re)build the shared page: record the MFN of every framebuffer page
 * in info->mfns[], store the MFNs of the mfns[] pages themselves in
 * the page directory page->pd[], publish the current mode geometry
 * and reset both rings.  Called at probe time and again on resume,
 * when the underlying machine frames may have changed.
 */
static void xenfb_init_shared_page(struct xenfb_info *info,
struct fb_info *fb_info)
{
int i;
int epd = PAGE_SIZE / sizeof(info->mfns[0]); /* MFN entries per pd page */
for (i = 0; i < info->nr_pages; i++)
info->mfns[i] = vmalloc_to_mfn(info->fb + i * PAGE_SIZE);
for (i = 0; i * epd < info->nr_pages; i++)
info->page->pd[i] = vmalloc_to_mfn(&info->mfns[i * epd]);
info->page->width = fb_info->var.xres;
info->page->height = fb_info->var.yres;
info->page->depth = fb_info->var.bits_per_pixel;
info->page->line_length = fb_info->fix.line_length;
info->page->mem_length = fb_info->fix.smem_len;
info->page->in_cons = info->page->in_prod = 0;
info->page->out_cons = info->page->out_prod = 0;
}
/*
 * Connect to the backend: allocate an event channel, bind its irq
 * handler, publish page-ref / event-channel / protocol /
 * feature-update in xenstore inside a transaction (retried on
 * -EAGAIN), then switch to XenbusStateInitialised.
 *
 * Returns 0 on success or a negative errno; on failure the irq and
 * event channel are released again.
 */
static int xenfb_connect_backend(struct xenbus_device *dev,
				 struct xenfb_info *info)
{
	int ret, evtchn, irq;
	struct xenbus_transaction xbt;

	ret = xenbus_alloc_evtchn(dev, &evtchn);
	if (ret)
		return ret;
	irq = bind_evtchn_to_irqhandler(evtchn, xenfb_event_handler,
					0, dev->devicetype, info);
	if (irq < 0) {
		xenbus_free_evtchn(dev, evtchn);
		/* Report the actual failure code: ret is still 0 here,
		 * the error is in irq. */
		xenbus_dev_fatal(dev, irq, "bind_evtchn_to_irqhandler");
		return irq;
	}
 again:
	ret = xenbus_transaction_start(&xbt);
	if (ret) {
		xenbus_dev_fatal(dev, ret, "starting transaction");
		goto unbind_irq;
	}
	ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
			    virt_to_mfn(info->page));
	if (ret)
		goto error_xenbus;
	ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
			    evtchn);
	if (ret)
		goto error_xenbus;
	ret = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
			    XEN_IO_PROTO_ABI_NATIVE);
	if (ret)
		goto error_xenbus;
	ret = xenbus_printf(xbt, dev->nodename, "feature-update", "1");
	if (ret)
		goto error_xenbus;
	ret = xenbus_transaction_end(xbt, 0);
	if (ret) {
		if (ret == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, ret, "completing transaction");
		goto unbind_irq;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);
	info->irq = irq;
	return 0;

 error_xenbus:
	xenbus_transaction_end(xbt, 1);
	xenbus_dev_fatal(dev, ret, "writing xenstore");
 unbind_irq:
	unbind_from_irqhandler(irq, info);
	return ret;
}
static void xenfb_disconnect_backend(struct xenfb_info *info)
{
xen/fb: fix xenfb suspend/resume race. When migrating guests over a long period we hit this: <1>BUG: unable to handle kernel paging request at 0000000b819fdb98 <1>IP: [<ffffffff812a588f>] notify_remote_via_irq+0x13/0x34 <4>PGD 94b10067 PUD 0 <0>Oops: 0000 [#1] SMP .. snip.. Call Trace: [<ffffffff812712c9>] xenfb_send_event+0x5c/0x5e [<ffffffff8100ea5f>] ? xen_restore_fl_direct_end+0x0/0x1 [<ffffffff81438d80>] ? _spin_unlock_irqrestore+0x16/0x18 [<ffffffff812714ee>] xenfb_refresh+0x1b1/0x1d7 [<ffffffff81270568>] ? sys_imageblit+0x1ac/0x458 [<ffffffff81271786>] xenfb_imageblit+0x2f/0x34 [<ffffffff8126a3e5>] soft_cursor+0x1b5/0x1c8 [<ffffffff8126a137>] bit_cursor+0x4b6/0x4d7 [<ffffffff8100ea5f>] ? xen_restore_fl_direct_end+0x0/0x1 [<ffffffff81438d80>] ? _spin_unlock_irqrestore+0x16/0x18 [<ffffffff81269c81>] ? bit_cursor+0x0/0x4d7 [<ffffffff812656b7>] fb_flashcursor+0xff/0x111 [<ffffffff812655b8>] ? fb_flashcursor+0x0/0x111 [<ffffffff81071812>] worker_thread+0x14d/0x1ed [<ffffffff81075a8c>] ? autoremove_wake_function+0x0/0x3d [<ffffffff81438d80>] ? _spin_unlock_irqrestore+0x16/0x18 [<ffffffff810716c5>] ? worker_thread+0x0/0x1ed [<ffffffff810756e3>] kthread+0x6e/0x76 [<ffffffff81012dea>] child_rip+0xa/0x20 [<ffffffff81011fd1>] ? int_ret_from_sys_call+0x7/0x1b [<ffffffff8101275d>] ? retint_restore_args+0x5/0x6 [<ffffffff81012de0>] ? child_rip+0x0/0x20 Code: 6b ff 0c 8b 87 a4 db 9f 81 66 85 c0 74 08 0f b7 f8 e8 3b ff ff ff c9 c3 55 48 89 e5 48 83 ec 10 0f 1f 44 00 00 89 ff 48 6b ff 0c <8b> 87 a4 db 9f 81 66 85 c0 74 14 48 8d 75 f0 0f b7 c0 bf 04 00 RIP [<ffffffff812a588f>] notify_remote_via_irq+0x13/0x34 RSP <ffff8800e7bf7bd0> CR2: 0000000b819fdb98 ---[ end trace 098b4b74827595d0 ]--- The root cause of the panic is the race between the resume and reconnect to the backend. Clearing the 'update_wanted' flag of xenfb before disconnecting from the backend fixes this issue. 
Signed-off-by: Joe Jin <joe.jin@oracle.com> Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> Tested-by: Gurudas Pai <gurudas.pai@oracle.com> Acked-by: Ian Campbell <ian.campbell@citrix.com> Cc: Jeremy Fitzhardinge <jeremy@goop.org> Cc: Andrew Morton <akpm@linux-foundation.org>
2011-01-07 03:17:17 -07:00
/* Prevent xenfb refresh */
info->update_wanted = 0;
if (info->irq >= 0)
unbind_from_irqhandler(info->irq, info);
info->irq = -1;
}
/*
 * Xenbus state-change callback: react to backend state transitions.
 * On Connected, read the backend's "request-update" and
 * "feature-resize" xenstore keys; on Closing/Closed, close the
 * frontend.
 */
static void xenfb_backend_changed(struct xenbus_device *dev,
enum xenbus_state backend_state)
{
struct xenfb_info *info = dev_get_drvdata(&dev->dev);
int val;
switch (backend_state) {
case XenbusStateInitialising:
case XenbusStateInitialised:
case XenbusStateReconfiguring:
case XenbusStateReconfigured:
case XenbusStateUnknown:
break;
case XenbusStateInitWait:
InitWait:
xenbus_switch_state(dev, XenbusStateConnected);
break;
case XenbusStateConnected:
/*
 * Work around xenbus race condition: If backend goes
 * through InitWait to Connected fast enough, we can
 * get Connected twice here.
 */
if (dev->state != XenbusStateConnected)
goto InitWait; /* no InitWait seen yet, fudge it */
/* Backend asked for update events? */
if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
"request-update", "%d", &val) < 0)
val = 0;
if (val)
info->update_wanted = 1;
/* Backend supports resize events? */
if (xenbus_scanf(XBT_NIL, dev->otherend,
"feature-resize", "%d", &val) < 0)
val = 0;
info->feature_resize = val;
break;
case XenbusStateClosed:
if (dev->state == XenbusStateClosed)
break;
/* Missed the backend's CLOSING state -- fallthrough */
case XenbusStateClosing:
xenbus_frontend_closed(dev);
break;
}
}
/* Xenbus device IDs this frontend binds to ("vfb" devices). */
static const struct xenbus_device_id xenfb_ids[] = {
{ "vfb" },
{ "" }
};
/* Xenbus frontend driver definition wiring up the callbacks above. */
static DEFINE_XENBUS_DRIVER(xenfb, ,
.probe = xenfb_probe,
.remove = xenfb_remove,
.resume = xenfb_resume,
.otherend_changed = xenfb_backend_changed,
);
/*
 * Module init: register the xenbus frontend.  Only useful in a PV
 * domU -- bail out on non-PV domains and in dom0 (nothing to do if
 * running in dom0).
 */
static int __init xenfb_init(void)
{
	if (!xen_pv_domain() || xen_initial_domain())
		return -ENODEV;

	return xenbus_register_frontend(&xenfb_driver);
}
/* Module exit: unregister the xenbus frontend driver. */
static void __exit xenfb_cleanup(void)
{
xenbus_unregister_driver(&xenfb_driver);
}
module_init(xenfb_init);
module_exit(xenfb_cleanup);
MODULE_DESCRIPTION("Xen virtual framebuffer device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:vfb");