diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
index 308e32570d84..38e6d174c497 100644
--- a/arch/x86/mm/pf_in.c
+++ b/arch/x86/mm/pf_in.c
@@ -40,16 +40,16 @@ static unsigned char prefix_codes[] = {
 static unsigned int reg_rop[] = {
 	0x8A, 0x8B, 0xB60F, 0xB70F, 0xBE0F, 0xBF0F
 };
-static unsigned int reg_wop[] = { 0x88, 0x89 };
+static unsigned int reg_wop[] = { 0x88, 0x89, 0xAA, 0xAB };
 static unsigned int imm_wop[] = { 0xC6, 0xC7 };
 /* IA32 Manual 3, 3-432*/
-static unsigned int rw8[] = { 0x88, 0x8A, 0xC6 };
+static unsigned int rw8[] = { 0x88, 0x8A, 0xC6, 0xAA };
 static unsigned int rw32[] = {
-	0x89, 0x8B, 0xC7, 0xB60F, 0xB70F, 0xBE0F, 0xBF0F
+	0x89, 0x8B, 0xC7, 0xB60F, 0xB70F, 0xBE0F, 0xBF0F, 0xAB
 };
-static unsigned int mw8[] = { 0x88, 0x8A, 0xC6, 0xB60F, 0xBE0F };
+static unsigned int mw8[] = { 0x88, 0x8A, 0xC6, 0xB60F, 0xBE0F, 0xAA };
 static unsigned int mw16[] = { 0xB70F, 0xBF0F };
-static unsigned int mw32[] = { 0x89, 0x8B, 0xC7 };
+static unsigned int mw32[] = { 0x89, 0x8B, 0xC7, 0xAB };
 static unsigned int mw64[] = {};
 #else /* not __i386__ */
 static unsigned char prefix_codes[] = {
@@ -63,20 +63,20 @@ static unsigned char prefix_codes[] = {
 static unsigned int reg_rop[] = {
 	0x8A, 0x8B, 0xB60F, 0xB70F, 0xBE0F, 0xBF0F
 };
-static unsigned int reg_wop[] = { 0x88, 0x89 };
+static unsigned int reg_wop[] = { 0x88, 0x89, 0xAA, 0xAB };
 static unsigned int imm_wop[] = { 0xC6, 0xC7 };
-static unsigned int rw8[] = { 0xC6, 0x88, 0x8A };
+static unsigned int rw8[] = { 0xC6, 0x88, 0x8A, 0xAA };
 static unsigned int rw32[] = {
-	0xC7, 0x89, 0x8B, 0xB60F, 0xB70F, 0xBE0F, 0xBF0F
+	0xC7, 0x89, 0x8B, 0xB60F, 0xB70F, 0xBE0F, 0xBF0F, 0xAB
 };
 /* 8 bit only */
-static unsigned int mw8[] = { 0xC6, 0x88, 0x8A, 0xB60F, 0xBE0F };
+static unsigned int mw8[] = { 0xC6, 0x88, 0x8A, 0xB60F, 0xBE0F, 0xAA };
 /* 16 bit only */
 static unsigned int mw16[] = { 0xB70F, 0xBF0F };
 /* 16 or 32 bit */
 static unsigned int mw32[] = { 0xC7 };
 /* 16, 32 or 64 bit */
-static unsigned int mw64[] = { 0x89, 0x8B };
+static unsigned int mw64[] = { 0x89, 0x8B, 0xAB };
 #endif /* not __i386__ */
 
 struct prefix_bits {
@@ -410,7 +410,6 @@ static unsigned long *get_reg_w32(int no, struct pt_regs *regs)
 unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
 {
 	unsigned int opcode;
-	unsigned char mod_rm;
 	int reg;
 	unsigned char *p;
 	struct prefix_bits prf;
@@ -437,8 +436,13 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
 	goto err;
 
 do_work:
-	mod_rm = *p;
-	reg = ((mod_rm >> 3) & 0x7) | (prf.rexr << 3);
+	/* For STOS (0xAA/0xAB), the source register is fixed (AL/AX/EAX/RAX) */
+	if (opcode == 0xAA || opcode == 0xAB) {
+		reg = arg_AX;
+	} else {
+		unsigned char mod_rm = *p;
+		reg = ((mod_rm >> 3) & 0x7) | (prf.rexr << 3);
+	}
 	switch (get_ins_reg_width(ins_addr)) {
 	case 1:
 		return *get_reg_w8(reg, prf.rex, regs);
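
The register-selection change above is easier to follow with a small model of the decoding step: for the MOV forms the source register index comes from bits 3-5 of the ModRM byte, extended by REX.R on x86-64, while STOS (0xAA/0xAB) has no ModRM register field at all and always stores from AL/AX/EAX/RAX. The Python sketch below mirrors that logic; decode_source_reg and ARG_AX are illustrative names only, with ARG_AX standing in for the kernel's arg_AX index (register 0).

# Illustrative sketch of the register selection added above; not kernel code.
ARG_AX = 0	# stand-in for the kernel's arg_AX (AL/AX/EAX/RAX)

def decode_source_reg(opcode, mod_rm, rexr):
	"""Return the register number the trapped write takes its data from."""
	if opcode in (0xAA, 0xAB):	# STOS m8 / STOS m16,m32,m64
		return ARG_AX		# source register is implicit
	# MOV forms: reg field is ModRM bits 3-5, extended by REX.R
	return ((mod_rm >> 3) & 0x7) | (rexr << 3)

# mov %r9b,(%rdi): opcode 0x88, ModRM 0x0f, REX.R set -> register 9
assert decode_source_reg(0x88, 0x0f, 1) == 9
assert decode_source_reg(0xAA, 0x00, 0) == ARG_AX
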
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index fb783d94fc54..a9377c0083ad 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -75,15 +75,12 @@
 #define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
 	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
 
-#undef __cpparg
-#define __cpparg(arg...) arg
-
 /* Callbacks are meaningless to ftrace. */
 #undef TRACE_EVENT_FN
 #define TRACE_EVENT_FN(name, proto, args, tstruct,			\
 		assign, print, reg, unreg)				\
-	TRACE_EVENT(name, __cpparg(proto), __cpparg(args),		\
-		__cpparg(tstruct), __cpparg(assign), __cpparg(print))	\
+	TRACE_EVENT(name, PARAMS(proto), PARAMS(args),			\
+		PARAMS(tstruct), PARAMS(assign), PARAMS(print))		\
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 23751659582e..000e6e85b445 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -131,10 +131,10 @@ void perf_trace_destroy(struct perf_event *p_event)
 	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER);
 
 	/*
-	 * Ensure our callback won't be called anymore. See
-	 * tracepoint_probe_unregister() and __DO_TRACE().
+	 * Ensure our callback won't be called anymore. The buffers
+	 * will be freed after that.
 	 */
-	synchronize_sched();
+	tracepoint_synchronize_unregister();
 
 	free_percpu(tp_event->perf_events);
 	tp_event->perf_events = NULL;
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py
index 1dc464ee2ca8..aad7525bca1d 100644
--- a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py
+++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py
@@ -89,3 +89,33 @@ def trace_flag_str(value):
 	    value &= ~idx
 
     return string
+
+
+def taskState(state):
+	states = {
+		0 : "R",
+		1 : "S",
+		2 : "D",
+		64: "DEAD"
+	}
+
+	if state not in states:
+		return "Unknown"
+
+	return states[state]
+
+
+class EventHeaders:
+	def __init__(self, common_cpu, common_secs, common_nsecs,
+		     common_pid, common_comm):
+		self.cpu = common_cpu
+		self.secs = common_secs
+		self.nsecs = common_nsecs
+		self.pid = common_pid
+		self.comm = common_comm
+
+	def ts(self):
+		return (self.secs * (10 ** 9)) + self.nsecs
+
+	def ts_format(self):
+		return "%d.%06d" % (self.secs, int(self.nsecs / 1000))
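
For reference, the timestamp handling added above is a plain fixed-point conversion: the tracer hands the script separate seconds and nanoseconds fields, ts() folds them into a single nanosecond value, and ts_format() prints seconds followed by a zero-padded microsecond fraction. A minimal standalone check of that arithmetic (the values are made up):

# Standalone check of the EventHeaders timestamp math; values are arbitrary.
secs, nsecs = 5, 123456789

ts = secs * (10 ** 9) + nsecs		# single nanosecond timestamp, as ts() builds it
assert ts == 5123456789

# seconds plus a zero-padded microsecond fraction, the form ts_format() aims for
assert "%d.%06d" % (secs, nsecs // 1000) == "5.123456"
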
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py
new file mode 100644
index 000000000000..ae9a56e43e05
--- /dev/null
+++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py
@@ -0,0 +1,184 @@
+# SchedGui.py - Python extension for perf trace, basic GUI code for
+#		drawing traces and showing an overview.
+#
+# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
+#
+# This software is distributed under the terms of the GNU General
+# Public License ("GPL") version 2 as published by the Free Software
+# Foundation.
+
+
+try:
+	import wx
+except ImportError:
+	raise ImportError, "You need to install the wxpython lib for this script"
+
+
+class RootFrame(wx.Frame):
+	Y_OFFSET = 100
+	RECT_HEIGHT = 100
+	RECT_SPACE = 50
+	EVENT_MARKING_WIDTH = 5
+
+	def __init__(self, sched_tracer, title, parent = None, id = -1):
+		wx.Frame.__init__(self, parent, id, title)
+
+		(self.screen_width, self.screen_height) = wx.GetDisplaySize()
+		self.screen_width -= 10
+		self.screen_height -= 10
+		self.zoom = 0.5
+		self.scroll_scale = 20
+		self.sched_tracer = sched_tracer
+		self.sched_tracer.set_root_win(self)
+		(self.ts_start, self.ts_end) = sched_tracer.interval()
+		self.update_width_virtual()
+		self.nr_rects = sched_tracer.nr_rectangles() + 1
+		self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
+
+		# whole window panel
+		self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
+
+		# scrollable container
+		self.scroll = wx.ScrolledWindow(self.panel)
+		self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
+		self.scroll.EnableScrolling(True, True)
+		self.scroll.SetFocus()
+
+		# scrollable drawing area
+		self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
+		self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
+		self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
+		self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
+		self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
+		self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
+		self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
+
+		self.scroll.Fit()
+		self.Fit()
+
+		self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
+
+		self.txt = None
+
+		self.Show(True)
+
+	def us_to_px(self, val):
+		return val / (10 ** 3) * self.zoom
+
+	def px_to_us(self, val):
+		return (val / self.zoom) * (10 ** 3)
+
+	def scroll_start(self):
+		(x, y) = self.scroll.GetViewStart()
+		return (x * self.scroll_scale, y * self.scroll_scale)
+
+	def scroll_start_us(self):
+		(x, y) = self.scroll_start()
+		return self.px_to_us(x)
+
+	def paint_rectangle_zone(self, nr, color, top_color, start, end):
+		offset_px = self.us_to_px(start - self.ts_start)
+		width_px = self.us_to_px(end - self.ts_start)
+
+		offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
+		width_py = RootFrame.RECT_HEIGHT
+
+		dc = self.dc
+
+		if top_color is not None:
+			(r, g, b) = top_color
+			top_color = wx.Colour(r, g, b)
+			brush = wx.Brush(top_color, wx.SOLID)
+			dc.SetBrush(brush)
+			dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
+			width_py -= RootFrame.EVENT_MARKING_WIDTH
+			offset_py += RootFrame.EVENT_MARKING_WIDTH
+
+		(r, g, b) = color
+		color = wx.Colour(r, g, b)
+		brush = wx.Brush(color, wx.SOLID)
+		dc.SetBrush(brush)
+		dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
+
+	def update_rectangles(self, dc, start, end):
+		start += self.ts_start
+		end += self.ts_start
+		self.sched_tracer.fill_zone(start, end)
+
+	def on_paint(self, event):
+		dc = wx.PaintDC(self.scroll_panel)
+		self.dc = dc
+
+		width = min(self.width_virtual, self.screen_width)
+		(x, y) = self.scroll_start()
+		start = self.px_to_us(x)
+		end = self.px_to_us(x + width)
+		self.update_rectangles(dc, start, end)
+
+	def rect_from_ypixel(self, y):
+		y -= RootFrame.Y_OFFSET
+		rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
+		height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
+
+		if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
+			return -1
+
+		return rect
+
+	def update_summary(self, txt):
+		if self.txt:
+			self.txt.Destroy()
+		self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
+
+
+	def on_mouse_down(self, event):
+		(x, y) = event.GetPositionTuple()
+		rect = self.rect_from_ypixel(y)
+		if rect == -1:
+			return
+
+		t = self.px_to_us(x) + self.ts_start
+
+		self.sched_tracer.mouse_down(rect, t)
+
+
+	def update_width_virtual(self):
+		self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
+
+	def __zoom(self, x):
+		self.update_width_virtual()
+		(xpos, ypos) = self.scroll.GetViewStart()
+		xpos = self.us_to_px(x) / self.scroll_scale
+		self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
+		self.Refresh()
+
+	def zoom_in(self):
+		x = self.scroll_start_us()
+		self.zoom *= 2
+		self.__zoom(x)
+
+	def zoom_out(self):
+		x = self.scroll_start_us()
+		self.zoom /= 2
+		self.__zoom(x)
+
+
+	def on_key_press(self, event):
+		key = event.GetRawKeyCode()
+		if key == ord("+"):
+			self.zoom_in()
+			return
+		if key == ord("-"):
+			self.zoom_out()
+			return
+
+		key = event.GetKeyCode()
+		(x, y) = self.scroll.GetViewStart()
+		if key == wx.WXK_RIGHT:
+			self.scroll.Scroll(x + 1, y)
+		elif key == wx.WXK_LEFT:
+			self.scroll.Scroll(x - 1, y)
+		elif key == wx.WXK_DOWN:
+			self.scroll.Scroll(x, y + 1)
+		elif key == wx.WXK_UP:
+			self.scroll.Scroll(x, y - 1)
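
The core of SchedGui's layout is the time-to-pixel mapping: the timestamps fed to RootFrame are nanoseconds, us_to_px() scales them down to microseconds and applies the zoom factor, and px_to_us() inverts that when a click has to be translated back into a timestamp. A rough standalone model of that round trip (the 0.5 zoom matches the default above; nothing here needs wx):

# Rough model of RootFrame's coordinate mapping; no wx required.
zoom = 0.5				# default zoom factor used above

def us_to_px(val):			# val is a nanosecond delta in practice
	return val / (10 ** 3) * zoom

def px_to_us(val):
	return (val / zoom) * (10 ** 3)

delta_ns = 2000000			# a 2 ms interval
px = us_to_px(delta_ns)			# -> 1000 pixels at zoom 0.5
assert px == 1000
assert px_to_us(px) == delta_ns		# the mapping round-trips
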
diff --git a/tools/perf/scripts/python/bin/sched-migration-record b/tools/perf/scripts/python/bin/sched-migration-record
new file mode 100644
index 000000000000..17a3e9bd9e8f
--- /dev/null
+++ b/tools/perf/scripts/python/bin/sched-migration-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -m 16384 -a -e sched:sched_wakeup -e sched:sched_wakeup_new -e sched:sched_switch -e sched:sched_migrate_task "$@"
diff --git a/tools/perf/scripts/python/bin/sched-migration-report b/tools/perf/scripts/python/bin/sched-migration-report
new file mode 100644
index 000000000000..61d05f72e443
--- /dev/null
+++ b/tools/perf/scripts/python/bin/sched-migration-report
@@ -0,0 +1,3 @@
+#!/bin/bash
+# description: sched migration overview
+perf trace "$@" -s ~/libexec/perf-core/scripts/python/sched-migration.py
diff --git a/tools/perf/scripts/python/sched-migration.py b/tools/perf/scripts/python/sched-migration.py
new file mode 100644
index 000000000000..b934383c3364
--- /dev/null
+++ b/tools/perf/scripts/python/sched-migration.py
@@ -0,0 +1,461 @@
+#!/usr/bin/python
+#
+# Cpu task migration overview toy
+#
+# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
+#
+# perf trace event handlers have been generated by perf trace -g python
+#
+# This software is distributed under the terms of the GNU General
+# Public License ("GPL") version 2 as published by the Free Software
+# Foundation.
+
+
+import os
+import sys
+
+from collections import defaultdict
+from UserList import UserList
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+	'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+from SchedGui import *
+
+
+threads = { 0 : "idle"}
+
+def thread_name(pid):
+	return "%s:%d" % (threads[pid], pid)
+
+class RunqueueEventUnknown:
+	@staticmethod
+	def color():
+		return None
+
+	def __repr__(self):
+		return "unknown"
+
+class RunqueueEventSleep:
+	@staticmethod
+	def color():
+		return (0, 0, 0xff)
+
+	def __init__(self, sleeper):
+		self.sleeper = sleeper
+
+	def __repr__(self):
+		return "%s gone to sleep" % thread_name(self.sleeper)
+
+class RunqueueEventWakeup:
+	@staticmethod
+	def color():
+		return (0xff, 0xff, 0)
+
+	def __init__(self, wakee):
+		self.wakee = wakee
+
+	def __repr__(self):
+		return "%s woke up" % thread_name(self.wakee)
+
+class RunqueueEventFork:
+	@staticmethod
+	def color():
+		return (0, 0xff, 0)
+
+	def __init__(self, child):
+		self.child = child
+
+	def __repr__(self):
+		return "new forked task %s" % thread_name(self.child)
+
+class RunqueueMigrateIn:
+	@staticmethod
+	def color():
+		return (0, 0xf0, 0xff)
+
+	def __init__(self, new):
+		self.new = new
+
+	def __repr__(self):
+		return "task migrated in %s" % thread_name(self.new)
+
+class RunqueueMigrateOut:
+	@staticmethod
+	def color():
+		return (0xff, 0, 0xff)
+
+	def __init__(self, old):
+		self.old = old
+
+	def __repr__(self):
+		return "task migrated out %s" % thread_name(self.old)
+
+class RunqueueSnapshot:
+	def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
+		self.tasks = tuple(tasks)
+		self.event = event
+
+	def sched_switch(self, prev, prev_state, next):
+		event = RunqueueEventUnknown()
+
+		if taskState(prev_state) == "R" and next in self.tasks \
+			and prev in self.tasks:
+			return self
+
+		if taskState(prev_state) != "R":
+			event = RunqueueEventSleep(prev)
+
+		next_tasks = list(self.tasks[:])
+		if prev in self.tasks:
+			if taskState(prev_state) != "R":
+				next_tasks.remove(prev)
+		elif taskState(prev_state) == "R":
+			next_tasks.append(prev)
+
+		if next not in next_tasks:
+			next_tasks.append(next)
+
+		return RunqueueSnapshot(next_tasks, event)
+
+	def migrate_out(self, old):
+		if old not in self.tasks:
+			return self
+		next_tasks = [task for task in self.tasks if task != old]
+
+		return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
+
+	def __migrate_in(self, new, event):
+		if new in self.tasks:
+			self.event = event
+			return self
+		next_tasks = self.tasks[:] + tuple([new])
+
+		return RunqueueSnapshot(next_tasks, event)
+
+	def migrate_in(self, new):
+		return self.__migrate_in(new, RunqueueMigrateIn(new))
+
+	def wake_up(self, new):
+		return self.__migrate_in(new, RunqueueEventWakeup(new))
+
+	def wake_up_new(self, new):
+		return self.__migrate_in(new, RunqueueEventFork(new))
+
+	def load(self):
+		""" Return the number of tasks on the runqueue,
+		    not counting the idle task. """
+		return len(self.tasks) - 1
+
+	def __repr__(self):
+		ret = self.tasks.__repr__()
+		ret += " last event: %s" % self.event.__repr__()
+
+		return ret
+
+class TimeSlice:
+	def __init__(self, start, prev):
+		self.start = start
+		self.prev = prev
+		self.end = start
+		# cpus that triggered the event
+		self.event_cpus = []
+		if prev is not None:
+			self.total_load = prev.total_load
+			self.rqs = prev.rqs.copy()
+		else:
+			self.rqs = defaultdict(RunqueueSnapshot)
+			self.total_load = 0
+
+	def __update_total_load(self, old_rq, new_rq):
+		diff = new_rq.load() - old_rq.load()
+		self.total_load += diff
+
+	def sched_switch(self, ts_list, prev, prev_state, next, cpu):
+		old_rq = self.prev.rqs[cpu]
+		new_rq = old_rq.sched_switch(prev, prev_state, next)
+
+		if old_rq is new_rq:
+			return
+
+		self.rqs[cpu] = new_rq
+		self.__update_total_load(old_rq, new_rq)
+		ts_list.append(self)
+		self.event_cpus = [cpu]
+
+	def migrate(self, ts_list, new, old_cpu, new_cpu):
+		if old_cpu == new_cpu:
+			return
+		old_rq = self.prev.rqs[old_cpu]
+		out_rq = old_rq.migrate_out(new)
+		self.rqs[old_cpu] = out_rq
+		self.__update_total_load(old_rq, out_rq)
+
+		new_rq = self.prev.rqs[new_cpu]
+		in_rq = new_rq.migrate_in(new)
+		self.rqs[new_cpu] = in_rq
+		self.__update_total_load(new_rq, in_rq)
+
+		ts_list.append(self)
+
+		if old_rq is not out_rq:
+			self.event_cpus.append(old_cpu)
+		self.event_cpus.append(new_cpu)
+
+	def wake_up(self, ts_list, pid, cpu, fork):
+		old_rq = self.prev.rqs[cpu]
+		if fork:
+			new_rq = old_rq.wake_up_new(pid)
+		else:
+			new_rq = old_rq.wake_up(pid)
+
+		if new_rq is old_rq:
+			return
+		self.rqs[cpu] = new_rq
+		self.__update_total_load(old_rq, new_rq)
+		ts_list.append(self)
+		self.event_cpus = [cpu]
+
+	def next(self, t):
+		self.end = t
+		return TimeSlice(t, self)
+
+class TimeSliceList(UserList):
+	def __init__(self, arg = []):
+		self.data = arg
+
+	def get_time_slice(self, ts):
+		if len(self.data) == 0:
+			slice = TimeSlice(ts, TimeSlice(-1, None))
+		else:
+			slice = self.data[-1].next(ts)
+		return slice
+
+	def find_time_slice(self, ts):
+		start = 0
+		end = len(self.data)
+		found = -1
+		searching = True
+		while searching:
+			if start == end or start == end - 1:
+				searching = False
+
+			i = (end + start) / 2
+			if self.data[i].start <= ts and self.data[i].end >= ts:
+				found = i
+				end = i
+				continue
+
+			if self.data[i].end < ts:
+				start = i
+
+			elif self.data[i].start > ts:
+				end = i
+
+		return found
+
+	def set_root_win(self, win):
+		self.root_win = win
+
+	def mouse_down(self, cpu, t):
+		idx = self.find_time_slice(t)
+		if idx == -1:
+			return
+
+		ts = self[idx]
+		rq = ts.rqs[cpu]
+		raw = "CPU: %d\n" % cpu
+		raw += "Last event : %s\n" % rq.event.__repr__()
+		raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
+		raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 3))
+		raw += "Load = %d\n" % rq.load()
+		for t in rq.tasks:
+			raw += "%s \n" % thread_name(t)
+
+		self.root_win.update_summary(raw)
+
+	def update_rectangle_cpu(self, slice, cpu):
+		rq = slice.rqs[cpu]
+
+		if slice.total_load != 0:
+			load_rate = rq.load() / float(slice.total_load)
+		else:
+			load_rate = 0
+
+		red_power = int(0xff - (0xff * load_rate))
+		color = (0xff, red_power, red_power)
+
+		top_color = None
+
+		if cpu in slice.event_cpus:
+			top_color = rq.event.color()
+
+		self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
+
+	def fill_zone(self, start, end):
+		i = self.find_time_slice(start)
+		if i == -1:
+			return
+
+		for i in xrange(i, len(self.data)):
+			timeslice = self.data[i]
+			if timeslice.start > end:
+				return
+
+			for cpu in timeslice.rqs:
+				self.update_rectangle_cpu(timeslice, cpu)
+
+	def interval(self):
+		if len(self.data) == 0:
+			return (0, 0)
+
+		return (self.data[0].start, self.data[-1].end)
+
+	def nr_rectangles(self):
+		last_ts = self.data[-1]
+		max_cpu = 0
+		for cpu in last_ts.rqs:
+			if cpu > max_cpu:
+				max_cpu = cpu
+		return max_cpu
+
+
+class SchedEventProxy:
+	def __init__(self):
+		self.current_tsk = defaultdict(lambda : -1)
+		self.timeslices = TimeSliceList()
+
+	def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
+			 next_comm, next_pid, next_prio):
+		""" Ensure the task we are scheduling out of this cpu is really
+		    the one we logged. Otherwise we may have missed some traces. """
+
+		on_cpu_task = self.current_tsk[headers.cpu]
+
+		if on_cpu_task != -1 and on_cpu_task != prev_pid:
+			print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
+				(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
+
+		threads[prev_pid] = prev_comm
+		threads[next_pid] = next_comm
+		self.current_tsk[headers.cpu] = next_pid
+
+		ts = self.timeslices.get_time_slice(headers.ts())
+		ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
+
+	def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
+		ts = self.timeslices.get_time_slice(headers.ts())
+		ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
+
+	def wake_up(self, headers, comm, pid, success, target_cpu, fork):
+		if success == 0:
+			return
+		ts = self.timeslices.get_time_slice(headers.ts())
+		ts.wake_up(self.timeslices, pid, target_cpu, fork)
+
+
+def trace_begin():
+	global parser
+	parser = SchedEventProxy()
+
+def trace_end():
+	app = wx.App(False)
+	timeslices = parser.timeslices
+	frame = RootFrame(timeslices, "Migration")
+	app.MainLoop()
+
+def sched__sched_stat_runtime(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	comm, pid, runtime, vruntime):
+	pass
+
+def sched__sched_stat_iowait(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	comm, pid, delay):
+	pass
+
+def sched__sched_stat_sleep(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	comm, pid, delay):
+	pass
+
+def sched__sched_stat_wait(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	comm, pid, delay):
+	pass
+
+def sched__sched_process_fork(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	parent_comm, parent_pid, child_comm, child_pid):
+	pass
+
+def sched__sched_process_wait(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	comm, pid, prio):
+	pass
+
+def sched__sched_process_exit(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	comm, pid, prio):
+	pass
+
+def sched__sched_process_free(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	comm, pid, prio):
+	pass
+
+def sched__sched_migrate_task(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	comm, pid, prio, orig_cpu,
+	dest_cpu):
+	headers = EventHeaders(common_cpu, common_secs, common_nsecs,
+				common_pid, common_comm)
+	parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
+
+def sched__sched_switch(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	prev_comm, prev_pid, prev_prio, prev_state,
+	next_comm, next_pid, next_prio):
+
+	headers = EventHeaders(common_cpu, common_secs, common_nsecs,
+				common_pid, common_comm)
+	parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
+			 next_comm, next_pid, next_prio)
+
+def sched__sched_wakeup_new(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	comm, pid, prio, success,
+	target_cpu):
+	headers = EventHeaders(common_cpu, common_secs, common_nsecs,
+				common_pid, common_comm)
+	parser.wake_up(headers, comm, pid, success, target_cpu, 1)
+
+def sched__sched_wakeup(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	comm, pid, prio, success,
+	target_cpu):
+	headers = EventHeaders(common_cpu, common_secs, common_nsecs,
+				common_pid, common_comm)
+	parser.wake_up(headers, comm, pid, success, target_cpu, 0)
+
+def sched__sched_wait_task(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	comm, pid, prio):
+	pass
+
+def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	ret):
+	pass
+
+def sched__sched_kthread_stop(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	comm, pid):
+	pass
+
+def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
+		common_pid, common_comm):
+	pass
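
As a closing note on the data model: the script is built around immutable RunqueueSnapshot objects. Every event handler either hands back the existing snapshot (so no new TimeSlice gets recorded) or builds a fresh one tagged with the event that produced it, and each TimeSlice copies the previous per-cpu map so older slices keep their history. The "old_rq is new_rq" checks in TimeSlice rely on exactly that: an unchanged runqueue comes back as the very same object. A toy walk-through of the pattern, independent of the perf machinery (simplified class, made-up pids):

# Toy illustration of the copy-on-update pattern used by RunqueueSnapshot.
class Snapshot(object):
	def __init__(self, tasks = (0,), event = "unknown"):
		self.tasks = tuple(tasks)
		self.event = event

	def wake_up(self, pid):
		if pid in self.tasks:		# nothing to do: hand back the same object
			return self
		return Snapshot(self.tasks + (pid,), "wakeup %d" % pid)

rq0 = Snapshot()			# only the idle task (pid 0)
rq1 = rq0.wake_up(1234)			# new snapshot: idle + pid 1234
rq2 = rq1.wake_up(1234)			# already queued -> same object back

assert rq1 is not rq0 and rq2 is rq1
assert rq0.tasks == (0,)		# the old snapshot is untouched
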