diff -urN linux-2.6.19/arch/i386/Kconfig linux-2.6.19-mync/arch/i386/Kconfig --- linux-2.6.19/arch/i386/Kconfig 2006-12-01 03:09:26.000000000 +0300 +++ linux-2.6.19-mync/arch/i386/Kconfig 2006-12-11 22:19:44.000000000 +0300 @@ -182,6 +182,42 @@ endchoice +config SDEBUG + bool "Serial port debugging" + default n + depends on !(SERIAL_8250) + help + Enable debugging through serial port + +config PF + bool "Performance tracing (\"-pf\")" + default y + help + Enable Performance Tracing Toolkit ("-pf") support + +config PF_BUFFER_ORDER + int "Trace buffer size order" + default 12 + depends on PF + help + The count of pages in a trace buffer as the power of two. + The default value is 12 (16 Mb). + +config RTI + bool "Real-time interrupts" + depends on X86_LOCAL_APIC + default y + help + Emulate cli/sti commands through interrupt controller hardware. + This allows the developer to make some interrupt vectors + privileged. Privileged interrupt handlers can preempt conventional + Linux code at any time. 
+ +config WRAP_IRQ + bool + default y + depends on (RTI || PF) + config ACPI_SRAT bool default y diff -urN linux-2.6.19/arch/i386/kernel/apic.c linux-2.6.19-mync/arch/i386/kernel/apic.c --- linux-2.6.19/arch/i386/kernel/apic.c 2006-12-01 03:09:26.000000000 +0300 +++ linux-2.6.19-mync/arch/i386/kernel/apic.c 2006-12-02 01:20:35.000000000 +0300 @@ -43,6 +43,35 @@ #include "io_ports.h" +#ifdef CONFIG_RTI +#undef spin_lock_irqsave +#undef spin_unlock_irqrestore + +#define spin_lock_irqsave(a, b) \ + do { \ + direct_irq_save (b); \ + spin_lock (a); \ + } while (0) + +#define spin_unlock_irqrestore(a, b) \ + do { \ + spin_unlock (a); \ + direct_irq_restore (b); \ + } while (0) + +#undef local_irq_save +#undef local_irq_restore + +#define local_irq_save(a) direct_irq_save (a) +#define local_irq_restore(a) direct_irq_restore (a) + +#undef local_irq_enable +#undef local_irq_disable + +#define local_irq_enable(a) direct_irq_enable (a) +#define local_irq_disable(a) direct_irq_disable (a) +#endif + /* * cpu_mask that denotes the CPUs that needs timer interrupt coming in as * IPIs in place of local APIC timers @@ -1198,6 +1227,10 @@ profile_tick(CPU_PROFILING); #ifdef CONFIG_SMP update_process_times(user_mode_vm(get_irq_regs())); +#else +#ifdef CONFIG_RTI + update_process_times(user_mode_vm(get_irq_regs())); +#endif #endif /* diff -urN linux-2.6.19/arch/i386/kernel/cpu/common.c linux-2.6.19-mync/arch/i386/kernel/cpu/common.c --- linux-2.6.19/arch/i386/kernel/cpu/common.c 2006-12-01 03:09:26.000000000 +0300 +++ linux-2.6.19-mync/arch/i386/kernel/cpu/common.c 2006-12-01 15:37:05.000000000 +0300 @@ -431,7 +431,9 @@ } /* SEP disabled? 
*/ +#ifndef CONFIG_WRAP_IRQ if (disable_x86_sep) +#endif clear_bit(X86_FEATURE_SEP, c->x86_capability); if (disable_pse) diff -urN linux-2.6.19/arch/i386/kernel/cpu/mtrr/generic.c linux-2.6.19-mync/arch/i386/kernel/cpu/mtrr/generic.c --- linux-2.6.19/arch/i386/kernel/cpu/mtrr/generic.c 2006-12-01 03:09:26.000000000 +0300 +++ linux-2.6.19-mync/arch/i386/kernel/cpu/mtrr/generic.c 2006-12-02 01:21:14.000000000 +0300 @@ -11,6 +11,35 @@ #include #include "mtrr.h" +#ifdef CONFIG_RTI +#undef spin_lock_irqsave +#undef spin_unlock_irqrestore + +#define spin_lock_irqsave(a, b) \ + do { \ + direct_irq_save (b); \ + spin_lock (a); \ + } while (0) + +#define spin_unlock_irqrestore(a, b) \ + do { \ + spin_unlock (a); \ + direct_irq_restore (b); \ + } while (0) + +#undef local_irq_save +#undef local_irq_restore + +#define local_irq_save(a) direct_irq_save (a) +#define local_irq_restore(a) direct_irq_restore (a) + +#undef local_irq_enable +#undef local_irq_disable + +#define local_irq_enable(a) direct_irq_enable (a) +#define local_irq_disable(a) direct_irq_disable (a) +#endif + struct mtrr_state { struct mtrr_var_range *var_ranges; mtrr_type fixed_ranges[NUM_FIXED_RANGES]; diff -urN linux-2.6.19/arch/i386/kernel/cpu/mtrr/main.c linux-2.6.19-mync/arch/i386/kernel/cpu/mtrr/main.c --- linux-2.6.19/arch/i386/kernel/cpu/mtrr/main.c 2006-10-04 00:11:45.000000000 +0400 +++ linux-2.6.19-mync/arch/i386/kernel/cpu/mtrr/main.c 2006-12-02 03:09:46.000000000 +0300 @@ -45,6 +45,35 @@ #include #include "mtrr.h" +#ifdef CONFIG_RTI +#undef spin_lock_irqsave +#undef spin_unlock_irqrestore + +#define spin_lock_irqsave(a, b) \ + do { \ + direct_irq_save (b); \ + spin_lock (a); \ + } while (0) + +#define spin_unlock_irqrestore(a, b) \ + do { \ + spin_unlock (a); \ + direct_irq_restore (b); \ + } while (0) + +#undef local_irq_save +#undef local_irq_restore + +#define local_irq_save(a) direct_irq_save (a) +#define local_irq_restore(a) direct_irq_restore (a) + +#undef local_irq_enable +#undef 
local_irq_disable + +#define local_irq_enable(a) direct_irq_enable (a) +#define local_irq_disable(a) direct_irq_disable (a) +#endif + u32 num_var_ranges = 0; unsigned int *usage_table; diff -urN linux-2.6.19/arch/i386/kernel/crash.c linux-2.6.19-mync/arch/i386/kernel/crash.c --- linux-2.6.19/arch/i386/kernel/crash.c 2006-12-01 03:09:26.000000000 +0300 +++ linux-2.6.19-mync/arch/i386/kernel/crash.c 2006-12-02 03:13:04.000000000 +0300 @@ -27,6 +27,34 @@ #include +#ifdef CONFIG_RTI +#undef spin_lock_irqsave +#undef spin_unlock_irqrestore + +#define spin_lock_irqsave(a, b) \ + do { \ + direct_irq_save (b); \ + spin_lock (a); \ + } while (0) + +#define spin_unlock_irqrestore(a, b) \ + do { \ + spin_unlock (a); \ + direct_irq_restore (b); \ + } while (0) + +#undef local_irq_save +#undef local_irq_restore + +#define local_irq_save(a) direct_irq_save (a) +#define local_irq_restore(a) direct_irq_restore (a) + +#undef local_irq_enable +#undef local_irq_disable + +#define local_irq_enable(a) direct_irq_enable (a) +#define local_irq_disable(a) direct_irq_disable (a) +#endif /* This keeps a track of which one is crashing cpu. 
*/ static int crashing_cpu; diff -urN linux-2.6.19/arch/i386/kernel/entry.S linux-2.6.19-mync/arch/i386/kernel/entry.S --- linux-2.6.19/arch/i386/kernel/entry.S 2006-12-01 03:09:26.000000000 +0300 +++ linux-2.6.19-mync/arch/i386/kernel/entry.S 2006-12-10 17:46:04.000000000 +0300 @@ -77,8 +77,8 @@ VM_MASK = 0x00020000 /* These are replaces for paravirtualization */ -#define DISABLE_INTERRUPTS cli -#define ENABLE_INTERRUPTS sti +#define DISABLE_INTERRUPTS rti_cli +#define ENABLE_INTERRUPTS rti_sti #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit #define INTERRUPT_RETURN iret #define GET_CR0_INTO_EAX movl %cr0, %eax @@ -181,6 +181,8 @@ .long 2b,4b; \ .previous +#include "wrap_irq.inc.S" + #define RING0_INT_FRAME \ CFI_STARTPROC simple;\ CFI_SIGNAL_FRAME;\ @@ -390,6 +392,7 @@ TRACE_IRQS_IRET restore_nocheck_notrace: RESTORE_REGS + RESTORE_WRAP_IRQ addl $4, %esp CFI_ADJUST_CFA_OFFSET -4 1: INTERRUPT_RETURN @@ -429,6 +432,7 @@ CFI_ADJUST_CFA_OFFSET -8 # frame has moved TRACE_IRQS_IRET RESTORE_REGS + RESTORE_WRAP_IRQ lss 20+4(%esp), %esp # switch to 16bit stack 1: INTERRUPT_RETURN .section __ex_table,"a" @@ -821,6 +825,7 @@ xorl %edx,%edx # zero error code call do_nmi RESTORE_REGS + RESTORE_WRAP_IRQ lss 12+4(%esp), %esp # back to 16bit stack 1: INTERRUPT_RETURN CFI_ENDPROC diff -urN linux-2.6.19/arch/i386/kernel/head.S linux-2.6.19-mync/arch/i386/kernel/head.S --- linux-2.6.19/arch/i386/kernel/head.S 2006-12-01 03:09:26.000000000 +0300 +++ linux-2.6.19-mync/arch/i386/kernel/head.S 2006-12-02 03:12:46.000000000 +0300 @@ -433,6 +433,9 @@ /* This is the default interrupt "handler" :-) */ ALIGN ignore_int: +#ifdef CONFIG_RTI + iret +#else cld #ifdef CONFIG_PRINTK pushl %eax @@ -464,6 +467,7 @@ popl %eax #endif iret +#endif /* * Real beginning of normal "text" segment diff -urN linux-2.6.19/arch/i386/kernel/i8259.c linux-2.6.19-mync/arch/i386/kernel/i8259.c --- linux-2.6.19/arch/i386/kernel/i8259.c 2006-12-01 03:09:26.000000000 +0300 +++ 
linux-2.6.19-mync/arch/i386/kernel/i8259.c 2006-12-12 19:34:54.000000000 +0300 @@ -25,6 +25,35 @@ #include +#ifdef CONFIG_RTI +#undef spin_lock_irqsave +#undef spin_unlock_irqrestore + +#define spin_lock_irqsave(a, b) \ + do { \ + direct_irq_save (b); \ + spin_lock (a); \ + } while (0) + +#define spin_unlock_irqrestore(a, b) \ + do { \ + spin_unlock (a); \ + direct_irq_restore (b); \ + } while (0) + +#undef local_irq_save +#undef local_irq_restore + +#define local_irq_save(a) direct_irq_save (a) +#define local_irq_restore(a) direct_irq_restore (a) + +#undef local_irq_enable +#undef local_irq_disable + +#define local_irq_enable(a) direct_irq_enable (a) +#define local_irq_disable(a) direct_irq_disable (a) +#endif + /* * This is the 'legacy' 8259A Programmable Interrupt Controller, * present in the majority of PC/AT boxes. @@ -65,7 +94,11 @@ */ unsigned long io_apic_irqs; +#ifndef CONFIG_RTI void disable_8259A_irq(unsigned int irq) +#else +asmlinkage void disable_8259A_irq(unsigned int irq) +#endif { unsigned int mask = 1 << irq; unsigned long flags; diff -urN linux-2.6.19/arch/i386/kernel/io_apic.c linux-2.6.19-mync/arch/i386/kernel/io_apic.c --- linux-2.6.19/arch/i386/kernel/io_apic.c 2006-12-01 03:09:26.000000000 +0300 +++ linux-2.6.19-mync/arch/i386/kernel/io_apic.c 2006-12-12 19:40:30.000000000 +0300 @@ -49,6 +49,35 @@ #include "io_ports.h" +#ifdef CONFIG_RTI +#undef spin_lock_irqsave +#undef spin_unlock_irqrestore + +#define spin_lock_irqsave(a, b) \ + do { \ + direct_irq_save (b); \ + spin_lock (a); \ + } while (0) + +#define spin_unlock_irqrestore(a, b) \ + do { \ + spin_unlock (a); \ + direct_irq_restore (b); \ + } while (0) + +#undef local_irq_save +#undef local_irq_restore + +#define local_irq_save(a) direct_irq_save (a) +#define local_irq_restore(a) direct_irq_restore (a) + +#undef local_irq_enable +#undef local_irq_disable + +#define local_irq_enable(a) direct_irq_enable (a) +#define local_irq_disable(a) direct_irq_disable (a) +#endif + int 
(*ioapic_renumber_irq)(int ioapic, int irq); atomic_t irq_mis_count; @@ -266,7 +295,11 @@ __modify_IO_APIC_irq(irq, 0x00008000, 0x00010000); } +#ifndef CONFIG_RTI static void mask_IO_APIC_irq (unsigned int irq) +#else +asmlinkage void mask_IO_APIC_irq (unsigned int irq) +#endif { unsigned long flags; @@ -1237,6 +1270,10 @@ /* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */ u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 }; +#ifdef CONFIG_RTI +extern volatile unsigned long rti_vector_irq [NR_VECTORS]; +#endif + static int __assign_irq_vector(int irq) { static int current_vector = FIRST_DEVICE_VECTOR, offset = 0; @@ -1260,6 +1297,9 @@ vector = current_vector; irq_vector[irq] = vector; +#ifdef CONFIG_RTI + rti_vector_irq [vector] = irq; +#endif return vector; } @@ -1295,6 +1335,10 @@ set_intr_gate(vector, interrupt[irq]); } +#ifdef CONFIG_RTI +extern unsigned long rti_io_apic_enabled; +#endif + static void __init setup_IO_APIC_irqs(void) { struct IO_APIC_route_entry entry; @@ -1303,6 +1347,8 @@ apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); + rti_io_apic_enabled = 1; + for (apic = 0; apic < nr_ioapics; apic++) { for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { @@ -2000,12 +2046,17 @@ static void ack_ioapic_irq(unsigned int irq) { +#ifndef CONFIG_RTI +// SIMPLE move_native_irq(irq); +#endif ack_APIC_irq(); } static void ack_ioapic_quirk_irq(unsigned int irq) { +#ifndef CONFIG_RTI +// SIMPLE unsigned long v; int i; @@ -2032,9 +2083,12 @@ i = irq_vector[irq]; v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1)); +#endif ack_APIC_irq(); +#ifndef CONFIG_RTI +// SIMPLE if (!(v & (1 << (i & 0x1f)))) { atomic_inc(&irq_mis_count); spin_lock(&ioapic_lock); @@ -2042,6 +2096,9 @@ __unmask_and_level_IO_APIC_irq(irq); spin_unlock(&ioapic_lock); } +#else + unmask_IO_APIC_irq (irq); +#endif } static int ioapic_retrigger_irq(unsigned int irq) diff -urN linux-2.6.19/arch/i386/kernel/Makefile 
linux-2.6.19-mync/arch/i386/kernel/Makefile --- linux-2.6.19/arch/i386/kernel/Makefile 2006-12-01 03:09:26.000000000 +0300 +++ linux-2.6.19-mync/arch/i386/kernel/Makefile 2006-12-11 22:00:29.000000000 +0300 @@ -39,6 +39,9 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_HPET_TIMER) += hpet.o obj-$(CONFIG_K8_NB) += k8.o +obj-$(CONFIG_PF) += pf.o +obj-$(CONFIG_RTI) += rti.o +obj-$(CONFIG_SDEBUG) += sdebug.o EXTRA_AFLAGS := -traditional diff -urN linux-2.6.19/arch/i386/kernel/pf.c linux-2.6.19-mync/arch/i386/kernel/pf.c --- linux-2.6.19/arch/i386/kernel/pf.c 1970-01-01 03:00:00.000000000 +0300 +++ linux-2.6.19-mync/arch/i386/kernel/pf.c 2006-12-01 21:03:33.000000000 +0300 @@ -0,0 +1,171 @@ +#include +#include +#include +#include +#include + +unsigned char *pf_trace_buffer = NULL; +unsigned char *pf_trace_map = NULL; + +unsigned char *pf_pinfo_pool = NULL; +unsigned char *pf_pinfo_limit = 0; +unsigned char *pf_pinfo_pointer = NULL; + +LIST_HEAD (pf_pid_list); + +/* This is the length of process info field in the trace file that + * comes out of /proc/trace */ +unsigned long pf_pid_data_len = 0; +unsigned long pf_pid_count = 0; + +//extern void rti_debug_beep (void); + +struct pf_process_info* +pf_find_pid (unsigned long pid) +{ + struct list_head *pid_cur; + struct pf_process_info *pinfo; + + list_for_each (pid_cur, &pf_pid_list) { + pinfo = list_entry (pid_cur, + struct pf_process_info, + list_head); + if (pinfo->pid == pid) + return pinfo; + } + + return NULL; +} + +static void* +pf_allocate_pinfo (void) +{ + void *ptr; + + if (pf_pinfo_pointer + sizeof (struct pf_process_info) < + pf_pinfo_pointer) + return NULL; + + if (pf_pinfo_pointer + sizeof (struct pf_process_info) >= + pf_pinfo_limit) + return NULL; + + ptr = pf_pinfo_pointer; + pf_pinfo_pointer += sizeof (struct pf_process_info); + + return ptr; +} + +void +pf_iret_to_userspace (unsigned char *trace_frame) +{ + struct pf_process_info *pinfo; + unsigned long pid; + unsigned long len; + + pid = 
(unsigned long) current->pid; + ((unsigned long*) trace_frame) [0] = pid; + + pinfo = pf_find_pid (pid); + + if (pinfo == NULL) { + pinfo = pf_allocate_pinfo (); + if (pinfo == NULL) { + /* FIXME Better to stop tracing */ + /*rti_debug_beep ();*/ + printk ("Pinfo OOM\n"); + return; + } + + pinfo->pid = pid; + list_add (&(pinfo->list_head), &pf_pid_list); + + /* process name */ + len = strlen (current->comm) + 1; + memcpy (pinfo->name, current->comm, len); + + /* PID */ + len += 4; + + pf_pid_data_len += len; + pf_pid_count ++; + } else { + if (strcmp (pinfo->name, current->comm)) { + pf_pid_data_len -= strlen (pinfo->name) + 1; + len = strlen (current->comm) + 1; + memcpy (pinfo->name, current->comm, len); + pf_pid_data_len += len; + } + } +} + +void +pf_prepare (void) +{ + /* TODO Clean up buffers */ +} + +/* Tracing must be stopped during the call */ +void +pf_clear_pid_tree (void) +{ + pf_pinfo_pointer = pf_pinfo_pool; + + INIT_LIST_HEAD (&pf_pid_list); + + pf_pid_data_len = 0; + pf_pid_count = 0; +} + +void __init +pf_init (void) +{ + printk ("Initializing Pf tracing\n"); + + INIT_LIST_HEAD (&pf_pid_list); + + pf_trace_buffer = (unsigned char*) + __get_free_pages (GFP_ATOMIC | __GFP_REPEAT/* | __GFP_HIGHMEM*/, + CONFIG_PF_BUFFER_ORDER); + + if (pf_trace_buffer == NULL) { + printk ("Pf trace buffer allocation failed\n"); + return; + } + + pf_trace_map = (unsigned char*) + __get_free_pages ( + GFP_ATOMIC | __GFP_REPEAT, + (1 << (CONFIG_PF_BUFFER_ORDER - PF_TRACE_ALIGNMENT)) * + sizeof (unsigned long)); + if (pf_trace_map == NULL) { + printk ("Pf trace map allocation failed\n"); + return; + } + + /* FIXME ROUGH: 4 Mb for process info */ + pf_pinfo_pool = (unsigned char*) + __get_free_pages (GFP_ATOMIC | __GFP_REPEAT, 10); + if (pf_pinfo_pool == NULL) { + printk ("Pf info pool allocation failed\n"); + return; + } + + pf_pinfo_limit = pf_pinfo_pool + PAGE_SIZE * 10; + pf_pinfo_pointer = pf_pinfo_pool; + + pf_trace_info.trace_offset = 0; + /* There is one guard page 
at the end */ + pf_trace_info.trace_size = + PAGE_SIZE * ((1 << CONFIG_PF_BUFFER_ORDER) - 1); +} + +EXPORT_SYMBOL (pf_trace_buffer); +EXPORT_SYMBOL (pf_trace_info); + +EXPORT_SYMBOL (pf_pid_list); +EXPORT_SYMBOL (pf_pid_data_len); +EXPORT_SYMBOL (pf_pid_count); + +EXPORT_SYMBOL (pf_clear_pid_tree); + diff -urN linux-2.6.19/arch/i386/kernel/pf.inc.S linux-2.6.19-mync/arch/i386/kernel/pf.inc.S --- linux-2.6.19/arch/i386/kernel/pf.inc.S 1970-01-01 03:00:00.000000000 +0300 +++ linux-2.6.19-mync/arch/i386/kernel/pf.inc.S 2006-12-01 21:06:55.000000000 +0300 @@ -0,0 +1,310 @@ +#ifdef CONFIG_PF +#include + +.data +/* +ENTRY (pf_spinlock) + .long 1 +*/ +/* Shared trace information page */ +.align 4096 +.globl pf_trace_info +pf_trace_info: +pf_trace_info__trace_offset: + .long 0 +pf_trace_info__trace_size: + .long 0 +pf_trace_info__trace_enabled: + .long 0 +.align 4096 +.previous + +#define PF_SAVE_REGS \ + cld; \ + pushl %eax; \ + pushl %ebp; \ + pushl %edi; \ + pushl %esi; \ + pushl %edx; \ + pushl %ecx; \ + pushl %ebx + +#define PF_RESTORE_REGS \ + popl %ebx; \ + popl %ecx; \ + popl %edx; \ + popl %esi; \ + popl %edi; \ + popl %ebp; \ + popl %eax + + /* Pushes 16 bytes on the stack + * One trace packet consists of 14 bytes + * On return %ebx contains adress of a packet to be filled + * Label '13' is forbidden within TRACE_BEGIN/TRACE_END pair */ +#define PF_TRACE_BEGIN(label) \ + pushfl; \ + cli; \ + pushl %eax; \ + pushl %ebx; \ + pushl %edx; \ + /* call pf_lock_buffer; */ \ + cmpl $0, pf_trace_info__trace_enabled; \ + je label; \ + movl pf_trace_info__trace_size, %eax; \ + cmpl pf_trace_info__trace_offset, %eax; \ + jbe label + +#define PF_TRACE_END(label) \ +label: /* call pf_unlock_buffer; */ \ + popl %edx; \ + popl %ebx; \ + popl %eax; \ + popfl +/* +pf_lock_buffer: + pushfl +1: lock + decl pf_spinlock + jns 3f +2: rep + nop + cmpl $0, pf_spinlock + jle 2b + jmp 1b +3: popfl + ret + +pf_unlock_buffer: + movl $1, pf_spinlock + ret +*/ +pf_is_ec_vector: + cmpl $8, 
%eax; je 1f + cmpl $10, %eax; je 1f + cmpl $11, %eax; je 1f + cmpl $12, %eax; je 1f + cmpl $13, %eax; je 1f + cmpl $14, %eax; je 1f + cmpl $17, %eax; je 1f + movl $0, %eax + ret +1: movl $1, %eax + ret + + /* Traced event types and formats: + * A) Interrupt/trap gate entrance + * Occurs when the CPU enters an interrupt or a trap gate. + * + * 0 1 2 10 11 + * +----+---+---------+------+ + * |type|cpu|timestamp|vector| + * +----+---+---------+------+ + * + * type = 7 + * cpu - CPU that the event belongs to + * vector - interrupt/trap gate vector No. + * timestamp - TSC value at the moment + * + * B) iret to kernelspace (CPL = 0) + * Is triggered before each iret when CS on the stack + * indicate that CPL after iret will be 0. + * This is a very simple event. We have almost nothing to trace, + * e.g. there's no process context. + * + * 0 1 2 10 + * +----+---+---------+ + * |type|cpu|timestamp| + * +----+---+---------+ + * + * type = 2 + * cpu - CPU that the event belongs to + * timestamp - TSC value at the moment + * + * C) iret to userspace (CPL != 0) + * Is triggered before each iret when CS on the stack + * indicate that CPL after iret will be nonzero. 
+ * + * 0 1 2 10 14 + * +----+---+---------+---+ + * |type|cpu|timestamp|PID| + * +----+---+---------+---+ + * + * type = 1 + * cpu - CPU that the event belongs to + * timestamp - TSC value at the moment + * PID - current process's PID + * + *********************************************************************** + * + * Non-structural information: + * + * *) EIP before an interrupt: + * + * 0 1 5 + * +----+---+ + * |type|EIP| + * +----+---+ + * + * type = 3 + * EIP - EIP value saved in the stack + * + */ + + /* After PF_TRACE_BEGIN: + * 4 bytes is EIP pushed by `call' instruction + * 16 bytes are pushed on the stack by TRACE_BEGIN + * the next 4 bytes on the stack is $vector */ +pf_irq_entry: + PF_TRACE_BEGIN (_pf_irq_entry) + + /* 11 bytes for the iret + 5 bytes for EIP */ + movl $16, %eax + lock xaddl %eax, pf_trace_info__trace_offset + + cmpl %eax, pf_trace_info__trace_size + jbe 3f + + /* Keep trace map valid */ + movl %eax, %ebx + andl $PF_TRACE_ALIGNMENT_MASK, %ebx + jz pf_irq_entry__fill_trace_map + movl %eax, %edx + movl %eax, %ebx + addl $15, %edx + andl $(~PF_TRACE_ALIGNMENT_MASK), %ebx + andl $(~PF_TRACE_ALIGNMENT_MASK), %edx + cmpl %edx, %ebx + jb pf_irq_entry__fill_trace_map + jmp pf_irq_entry__fill_event + +pf_irq_entry__fill_trace_map: + movl %eax, %ebx + movl pf_trace_map, %edx + shrl $(PF_TRACE_ALIGNMENT - 2), %ebx + movl %eax, (%ebx) + +pf_irq_entry__fill_event: + movl %eax, %ebx + + pushl %eax + GET_THREAD_INFO (%eax) + movl TI_cpu (%eax), %eax + movb %al, 1 (%ebx) + popl %eax + rdtsc + movl %eax, 2 (%ebx) + movl %edx, 6 (%ebx) + movl 20 (%esp), %eax + movb %al, 10 (%ebx) + + /* Trace EIP event */ + movb $3, 11 (%ebx) + movl 20 (%esp), %eax + andl $0xff, %eax + call pf_is_ec_vector + cmpl $1, %eax + je pf_ie_ec + /* Remember of 16 bytes pushed in WRAP_IRQ_GATE */ + movl /* 24 + 16 */ 40 (%esp), %eax + jmp pf_ie_go +pf_ie_ec: + /* Remember of 16 bytes pushed in WRAP_IRQ_GATE */ + movl /* 28 + 16 */ 44 (%esp), %eax +pf_ie_go: + movl %eax, 12 (%ebx) 
+ + /* Finalize the event by setting its type field */ + movb $7, (%ebx) + + jmp 1f +3: movl $0, pf_trace_info__trace_enabled +1: PF_TRACE_END (_pf_irq_entry) + ret + + /* After PF_TRACE_BEGIN: + * 4 bytes is EIP pushed by `call' instruction + * 16 bytes are pushed to the stack by TRACE_BEGIN + * 20 more bytes are pushed in the beginning of RESTORE_WRAP_IRQ + * then come: + * EIP (offset 40) + * CS (offset 44) + * EFLAGS (offset 48) + * Event type: + * 1 - iret to CPL != 0 + * In this case we have access to current task_struct + * 2 - iret to CPL == 0 (FIGUREMEOUT VM_MASK?) + */ +pf_iret: + PF_TRACE_BEGIN (_pf_iret) + + movl 44 (%esp), %eax + testl $(VM_MASK | 3), %eax + jz 1f + movl $14, %edx + jmp 2f +1: movl $10, %edx +2: movl %edx, %eax + lock xaddl %eax, pf_trace_info__trace_offset + + cmpl %eax, pf_trace_info__trace_size + jbe 4f + + /* Keep trace map valid */ + movl %eax, %ebx + andl $PF_TRACE_ALIGNMENT_MASK, %ebx + jz pf_iret__fill_trace_map + movl %eax, %ebx + addl %eax, %edx + andl $(~PF_TRACE_ALIGNMENT_MASK), %ebx + andl $(~PF_TRACE_ALIGNMENT_MASK), %edx + cmpl %edx, %ebx + jb pf_iret__fill_trace_map + jmp pf_iret__fill_event + +pf_iret__fill_trace_map: + movl %eax, %ebx + movl pf_trace_map, %edx + shrl $(PF_TRACE_ALIGNMENT - 2), %ebx + movl %eax, (%ebx) + +pf_iret__fill_event: + movl %eax, %ebx + + pushl %eax + GET_THREAD_INFO (%eax) + movl TI_cpu (%eax), %eax + movb %al, 1 (%ebx) + popl %eax + rdtsc + movl %eax, 2 (%ebx) + movl %edx, 6 (%ebx) + + movl 44 (%esp), %edx + testl $(VM_MASK | 3), %edx + jz 1f + + PF_SAVE_REGS + pushl %eax + call pf_iret_to_userspace + addl $4, %esp + PF_RESTORE_REGS + movb $1, (%ebx) /* Event finalization */ + jmp 4f + +1: movb $2, (%ebx) /* Event finalization */ + + jmp 3f +4: movl $0, pf_trace_info__trace_enabled +3: PF_TRACE_END (_pf_iret) + ret + +#define PF_IRQ_ENTRY call pf_irq_entry + +#define PF_IRET call pf_iret + +#else +#define PF_IRQ_ENTRY +#define PF_IRET +#endif /* CONFIG_PF */ + diff -urN 
linux-2.6.19/arch/i386/kernel/rti.c linux-2.6.19-mync/arch/i386/kernel/rti.c --- linux-2.6.19/arch/i386/kernel/rti.c 1970-01-01 03:00:00.000000000 +0300 +++ linux-2.6.19-mync/arch/i386/kernel/rti.c 2006-12-12 19:42:00.000000000 +0300 @@ -0,0 +1,81 @@ +#include +#include +#include +#include + +unsigned long rti_if [NR_CPUS]; + +extern void rti_timer_interrupt (void); +extern void rti_set_privileged_intr_gate (unsigned int n, void *addr); + +volatile unsigned long rti_vector_irq [NR_VECTORS]; + +#ifndef CONFIG_X86_IO_APIC +asmlinkage void mask_IO_APIC_irq (unsigned int irq) +{ + /* Must not get here */ +} +#endif + +static void rti_init_smi (void) +{ + unsigned char b0, + b1; + unsigned short smi_en; + struct pci_dev *dev = NULL; + struct pci_device_id id = + { PCI_DEVICE (PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_ICH7_0) }; + + dev = pci_get_device (id.vendor, id.device, NULL); + + if (dev == NULL || dev->bus->number || dev->devfn != 0xf8) { + pci_dev_put (dev); + printk ("[RTI] init_smi: 945P not found\n"); + return; + } + + pci_read_config_byte (dev, 0x40, &b0); + pci_read_config_byte (dev, 0x41, &b1); + + smi_en = (((b1 << 1) | (b0 >> 7)) << 7) + 0x30; + + outl (inl (smi_en) & ~1, smi_en); + + printk ("[RTI] init_smi: SMI disabled globally in ICH7\n"); +} + +void rti_init_timer (void) +{ + printk ("Setting rti local APIC irq handler\n"); + rti_set_privileged_intr_gate (LOCAL_TIMER_VECTOR, rti_timer_interrupt); + rti_init_smi (); + +/* apic_write_around (APIC_TDCR, 0xb); + apic_write_around (APIC_TMICT, 5000); +*/ +} + +void rti_init (void) +{ + unsigned i; + + for (i = 0; i < NR_CPUS; i++) + rti_if [i] = 0x200; + +// /* Allocate 4 Mb for cache cleaning */ +// rti_cache_bulk = (void*) __get_free_pages (GFP_ATOMIC | __GFP_REPEAT, 10); +} + +extern void rti_save_flags (void); +extern void rti_irq_restore (void); +extern void (*rti_timer_callback) (void); + +EXPORT_SYMBOL (rti_irq_enable); +EXPORT_SYMBOL (rti_irq_disable); +EXPORT_SYMBOL (rti_save_flags); 
+EXPORT_SYMBOL (rti_irq_restore); +EXPORT_SYMBOL (rti_safe_halt); + +EXPORT_SYMBOL (rti_timer_callback); + diff -urN linux-2.6.19/arch/i386/kernel/rti.inc.S linux-2.6.19-mync/arch/i386/kernel/rti.inc.S --- linux-2.6.19/arch/i386/kernel/rti.inc.S 1970-01-01 03:00:00.000000000 +0300 +++ linux-2.6.19-mync/arch/i386/kernel/rti.inc.S 2006-12-13 00:05:05.000000000 +0300 @@ -0,0 +1,1088 @@ +#ifdef CONFIG_RTI +#include +#include +#include +#include +#include + +.text + +#define RTI_TEST_IF \ + pushl %eax; \ + pushl %ebx; \ + pushl %ebp; \ + pushfl; \ + cmpl $1, rti_if_on; \ + je 21f; \ + pushfl; \ + popl %eax; \ + andl $0x200, %eax; \ + GET_THREAD_INFO (%ebp); \ + movl TI_cpu (%ebp), %ebp; \ + shll $2, %ebp; \ + movl rti_if (%ebp), %ebx; \ + andl $0x200, %ebx; \ + cmpl %ebx, %eax; \ + je 21f; \ + cli; \ + call rti_switch_rt_off; \ + ud2; \ + .word __LINE__; \ + .long . + 4; \ + .string __FILE__; \ + cli; \ +22: jmp 22b; \ +21: popfl; \ + popl %ebp; \ + popl %ebx; \ + popl %eax; + +/* Speaker sound generation */ +#define DEBUG_SPEAKER \ + pushfl; \ + pushl %eax; \ + inb $0x61, %al; \ + xorb $2, %al; \ + outb %al, $0x61; \ + popl %eax; \ + popfl; + + /* Hang on current CPU and make some noise from the speaker */ +#define DEBUG_BEEP \ + cli; \ + movl $0, %ebx; \ +5: cmpl $0, %ebx; \ + jne 6f; \ + inb $0x61, %al; \ + xorb $2, %al; \ + outb %al, $0x61; \ + movl $300000, %ebx; \ + jmp 5b; \ +6: decl %ebx; \ + jmp 5b + +#define rti_cli call rti_irq_disable +#define rti_sti call rti_irq_enable + +ENTRY (rti_debug_beep) + cli + DEBUG_BEEP + ret + +#define RTI_CPUS_ORDER 11 +/* Per-CPU shift for rti_lapic_mask */ +#define RTI_LAPIC_MASK_ORDER 10 + +/* This value must exactly match the one + * in the enum at asm/fixmap.h */ +#define RTI_FIX_APIC_BASE 2 +#define RTI_APIC_BASE (__FIXADDR_TOP - (RTI_FIX_APIC_BASE \ + << PAGE_SHIFT)) +#define RTI_APIC_REGISTER(x) (RTI_APIC_BASE + (x)) + +.data + +/* Virtual local APIC masks are separate for each interrupt. 
+ * The purpose is to prevent extra interrupts right after issuing EOI + * and till conventional kernel code performs ack_APIC_irq thus + * emulating usual system behaivour */ +ENTRY (rti_lapic_mask) +.rept NR_CPUS +.rept 256 + .long 0 +.endr +.endr + +/* "Processing pending interrupts" flag */ +ENTRY (rti_pp) +.rept NR_CPUS + .long 0 +.endr + +/* RTI hooks are active */ +ENTRY (rti_rt_on) + .long 0 + +/* Use virtual IF flag when in RTI mode */ +ENTRY (rti_if_on) + .long 0 + +ENTRY (rti_timer_callback) + .long 0 + +rti_save_flags_vptr: + .long rti_save_flags_nonrt +rti_irq_restore_vptr: + .long rti_irq_restore_nonrt +rti_irq_disable_vptr: + .long rti_irq_disable_nonrt +rti_irq_enable_vptr: + .long rti_irq_enable_nonrt +rti_safe_halt_vptr: + .long rti_safe_halt_nonrt + +rti_irq_gate_vptr: + .long rti_irq_gate_nonrt +rti_irq_gate_ec_vptr: + .long rti_irq_gate_ec_nonrt +rti_managed_irq_gate_vptr: +.long rti_managed_irq_gate_nonrt +rti_managed_irq_noack_vptr: +.long rti_managed_irq_noack_nonrt +rti_managed_irq_ext_vptr: +.long rti_managed_irq_ext_nonrt + +rti_trap_gate_vptr: + .long rti_trap_gate_nonrt +rti_trap_gate_ec_vptr: + .long rti_trap_gate_ec_nonrt +rti_managed_trap_gate_vptr: + .long rti_managed_trap_gate_nonrt +rti_managed_trap_noack_vptr: + .long rti_managed_trap_noack_nonrt +rti_managed_trap_ext_vptr: + .long rti_managed_trap_ext_nonrt + +rti_iret_vptr: + .long rti_iret_nonrt + +.text + +/* Return value in EAX */ +rti_save_flags_nonrt: + pushfl + popl %eax + ret + +.data +rti_flags: + .long 0 +.previous + +/* Parameter in EAX */ +rti_irq_restore_nonrt: + pushl %eax + popfl + ret + +.data +rti_disable_addr: + .long 0 +.previous + +rti_irq_disable_nonrt: + cli + ret + +rti_irq_enable_nonrt: + sti + ret + +rti_safe_halt_nonrt: + sti + hlt + ret + +rti_trap_gate_nonrt: + ret + +rti_trap_gate_ec_nonrt: + ret + +#define RTI_IRQ_GATE call rti_irq_gate +#define RTI_IRQ_GATE_EC call rti_irq_gate_ec +#define RTI_MANAGED_IRQ_GATE call rti_managed_irq_gate +#define 
RTI_MANAGED_IRQ_NOACK call rti_managed_irq_noack +#define RTI_MANAGED_IRQ_EXT call rti_managed_irq_ext +#define RTI_TRAP_GATE call rti_trap_gate +#define RTI_TRAP_GATE_EC call rti_trap_gate_ec +#define RTI_MANAGED_TRAP_GATE call rti_managed_trap_gate +#define RTI_MANAGED_TRAP_NOACK call rti_managed_trap_noack +#define RTI_MANAGED_TRAP_EXT call rti_managed_trap_ext +#define RTI_IRET call rti_iret + +rti_managed_trap_ext_nonrt: +/* cmpl $0, rti_io_apic_enabled */ + /* TODO Mask LVT0 */ +/* je rti_managed_trap_noack_nonrt */ +rti_managed_trap_gate_nonrt: + /* INCORRECT! */ +/* movl $0, RTI_APIC_REGISTER (APIC_EOI) */ +rti_managed_trap_noack_nonrt: + ret + +.data +rti_msg: + .string "[RTI] Last op: %d, f:%x, se:%x, da:%x\n" +.previous + +rti_managed_irq_ext_nonrt: +/* cmpl $0, rti_io_apic_enabled */ + /* TODO Mask LVT0 */ +/* je rti_managed_irq_noack_nonrt */ +rti_managed_irq_gate_nonrt: + /* INCORRECT! */ +/* movl $0, RTI_APIC_REGISTER (APIC_EOI) */ +rti_managed_irq_noack_nonrt: + ret + +rti_irq_gate_nonrt: + ret + +rti_irq_gate_ec_nonrt: + ret + +rti_iret_nonrt: + ret + +ENTRY (rti_timer_interrupt) + pushfl + pushl %eax + pushl %ds + pushl %es + + movl $(__USER_DS), %eax + movl %eax, %ds; + movl %eax, %es + + pushl %ebp + GET_THREAD_INFO (%ebp) + movl TI_cpu (%ebp), %ebp + cmpl $0, %ebp + popl %ebp + je rti_timer_bsp +rti_timer_nonbsp: + pushl $0xef + PF_IRQ_ENTRY + RTI_MANAGED_IRQ_GATE + addl $4, %esp + + popl %es + popl %ds + popl %eax + popfl + + pushl $(0xef - 256) + SAVE_ALL_NOCFI + movl %esp, %eax + call smp_apic_timer_interrupt + jmp ret_from_intr + +rti_timer_bsp: + pushl $0xef + PF_IRQ_ENTRY + addl $4, %esp + + movl $0, RTI_APIC_REGISTER (APIC_EOI) + + popl %es + popl %ds + popl %eax + popfl + SAVE_ALL_NOCFI + cmpl $0, rti_timer_callback + je 1f + call *rti_timer_callback + +1: RESTORE_REGS_NOCFI + pushl $0xef + RESTORE_WRAP_PRIVILEGED_IRQ + addl $4, %esp + + cli +/* cmpl $0, rti_wbinvd_pending + je 2f + decl rti_wbinvd_pending + wbinvd +*/ +2: iret + 
+#define RTI_NO_IRQ 666 + +.data +rti_first_pending_irq: +.rept NR_CPUS + .long RTI_NO_IRQ +.endr +rti_last_pending_irq: +.rept NR_CPUS + .long RTI_NO_IRQ +.endr +rti_pending_irqs_list: +.rept NR_CPUS +.rept 256 + .long RTI_NO_IRQ # next pending irq + .long 0 # still pending? +.endr +.endr +.previous + +/* Accepts pending IRQ vector in %eax */ +rti_add_pending_irq: + pushfl + cli + pushl %ebp + pushl %edx + GET_THREAD_INFO (%ebp) + movl TI_cpu (%ebp), %ebp + movl %ebp, %edx + shll $ RTI_CPUS_ORDER, %ebp + shll $2, %edx + + movl $1, rti_pending_irqs_list + 4 (%ebp, %eax, 8) + # Mark irq as pending. + # irq is in the list if it has + # a non-null link or is the last + # element in the list of pending irqs + cmpl %eax, rti_last_pending_irq (%edx) + # If irq is already in the list + je 1f # then do nothing + cmpl $RTI_NO_IRQ, rti_pending_irqs_list (%ebp, %eax, 8) + jne 1f # same for sign #2 + cmpl $RTI_NO_IRQ, rti_last_pending_irq (%edx) + # If the list is not empty + jne 2f # then perform appending + movl %eax, rti_first_pending_irq (%edx) + # else initialize + # the first element + movl %eax, rti_last_pending_irq (%edx) + jmp 1f +2: pushl %ebx + movl rti_last_pending_irq (%edx), %ebx + movl %eax, rti_pending_irqs_list (%ebp, %ebx, 8) + movl %eax, rti_last_pending_irq (%edx) + popl %ebx +1: popl %edx + popl %ebp + popfl + ret + +rti_process_tlb_flush_ipi: + pushfl + cli # I take this serious + pushl %eax + pushl %ebx + movl $INVALIDATE_TLB_VECTOR, %eax + cmpl $0, rti_pending_irqs_list + 4 (%ebp, %eax, 8) + je 1f + movl $0, rti_pending_irqs_list + 4 (%ebp, %eax, 8) + movl wrap_irq_interrupt_entry_points (, %eax, 4), %ebx + pushfl + pushl %cs + + call *%ebx # Emulate an interrupt +1: popl %ebx + popl %eax + popfl + ret + +rti_pp_test: +.byte 'A + +ENTRY (rti_process_pending_irqs) + SDBG_SEND_CHAR (n, rti_process_pending_irqs2) + pushfl + pushl %edx + pushl %eax + pushl %ebp + GET_THREAD_INFO (%ebp) + movl TI_cpu (%ebp), %ebp + movl %ebp, %edx + shll $RTI_CPUS_ORDER, 
%ebp + shll $2, %edx + movl $0, rti_if (%edx) + cli + cmpl $0, rti_pp (%edx) + je 1f + /* TEST !!! */ + jmp 1f + SDBG_SEND_CHAR (x, rti_process_pending_irqs3) + SDBG_SEND_MEM (rti_pp_test, rti_process_pending_irqs4) + /* There is no pending interrupts for this CPU */ + popl %ebp + popl %eax + popl %edx + popfl + ret +1: /* There are prending interrupts, process them */ + incb rti_pp_test + movl $1, rti_pp (%edx) + + pushl %ebx + pushl %ecx + cli + movl rti_first_pending_irq (%edx), %eax + movl $RTI_NO_IRQ, rti_first_pending_irq (%edx) +2: SDBG_SEND_CHAR (+, rti_process_pending_irqs5) + cmpl $1, rti_if_on + jne 7f + sti +7: call rti_process_tlb_flush_ipi + cli + cmpl $RTI_NO_IRQ, %eax # If the list is empty + jne 6f # then there's nothing to do + movl rti_first_pending_irq (%edx), %eax + movl $RTI_NO_IRQ, rti_first_pending_irq (%edx) + cmpl $RTI_NO_IRQ, %eax + je 1f +6: movl $RTI_NO_IRQ, %ecx + cmpl $1, rti_pending_irqs_list + 4 (%ebp, %eax, 8) + # If the interrupt is not marked as pending + jne 4f # then skip it + movl %eax, %ecx +4: movl %eax, %ebx + movl rti_pending_irqs_list (%ebp, %eax, 8), %eax + # Proceed to the next pending interrupt + movl $RTI_NO_IRQ, rti_pending_irqs_list (%ebp, %ebx, 8) + # Mark current interrupt + movl $0, rti_pending_irqs_list + 4 (%ebp, %ebx, 8) + # as not pending any more + movl $0, rti_if (%edx) + cmpl %ebx, rti_last_pending_irq (%edx) + jne 5f + movl $RTI_NO_IRQ, rti_last_pending_irq (%edx) +5: cmpl $1, rti_if_on + jne 7f + sti +7: + cmpl $RTI_NO_IRQ, %ecx + je 3f + + movl wrap_irq_interrupt_entry_points (, %ecx, 4), %ebx + pushfl + pushl %cs + SDBG_SEND_CHAR (l, rti_process_pending_irqs) + call *%ebx # Emulate an interrupt + +3: cli + movl $0, rti_if (%edx) + jmp 2b # Next + +1: movl $0, rti_pp (%edx) + decb rti_pp_test + movl $RTI_NO_IRQ, rti_first_pending_irq (%edx) # Clear the list + movl $RTI_NO_IRQ, rti_last_pending_irq (%edx) + movl $0x200, rti_if (%edx) + popl %ecx + popl %ebx + popl %ebp + popl %eax + popl %edx + popfl + 
ret + +rti_irq_disable_rt: + SDBG_SEND_CHAR (d, rti_irq_disable_rt) + cli + pushfl + pushl %ebp + GET_THREAD_INFO (%ebp) + movl TI_cpu (%ebp), %ebp + shll $2, %ebp + movl $0, rti_if (%ebp) + popl %ebp + cmpl $1, rti_if_on + jne 1f + /* Virtual IF flag */ + popfl + sti + ret +1: /* Non-virtual IF flag */ + popfl + ret + +rti_irq_enable_rt: + SDBG_SEND_CHAR (e, rti_irq_enable_rt) + pushl %ebp + cli + pushfl + GET_THREAD_INFO (%ebp) + movl TI_cpu (%ebp), %ebp + shll $2, %ebp + /* TEST */ + call rti_process_pending_irqs +/* !!! TEST TEMPORAL */ + GET_THREAD_INFO (%ebp) + movl TI_cpu (%ebp), %ebp + shll $2, %ebp +/* (!!!) */ + popfl + movl $0x200, rti_if (%ebp) + sti + popl %ebp + RTI_TEST_IF + ret + +rti_safe_halt_rt: + SDBG_SEND_CHAR (h, rti_safe_halt_rt) + pushl %ebp + cli + pushfl + GET_THREAD_INFO (%ebp) + movl TI_cpu (%ebp), %ebp + shll $2, %ebp + cmpl $RTI_NO_IRQ, rti_last_pending_irq (%ebp) + je rti_safe_halt_hlt + call rti_process_pending_irqs + popfl + movl $0x200, rti_if (%ebp) + popl %ebp + sti + RTI_TEST_IF + ret +rti_safe_halt_hlt: + movl $0x200, rti_if (%ebp) + popfl + popl %ebp + sti + hlt + RTI_TEST_IF + ret + +/* Return value in EAX */ +rti_save_flags_rt: + SDBG_SEND_CHAR (s, rti_save_flags_rt) + RTI_TEST_IF + pushl %ebp + pushfl + GET_THREAD_INFO (%ebp) + movl TI_cpu (%ebp), %ebp + shll $2, %ebp + pushfl + popl %eax + andl $0xfffffdff, %eax + orl rti_if (%ebp), %eax + popfl + popl %ebp + ret + +/* Parameter in EAX */ +rti_irq_restore_rt: + SDBG_SEND_CHAR (r, rti_irq_restore_rt) + RTI_TEST_IF + pushfl + andl $0x200, %eax + cmpl $0, %eax + je 1f + popfl + call rti_irq_enable_rt + jmp 2f +1: popfl + call rti_irq_disable_rt +2: pushfl + cmpl $1, rti_if_on + jne 1f + popfl + sti + ret +1: popfl + RTI_TEST_IF + ret + +ENTRY (rti_switch_rt_on) + pushfl + cli + + movl $1, rti_rt_on + /* MEGATEST */ + movl $1, rti_if_on + + /* save_flags_rt depends on all other hooks being enabled */ + movl $rti_save_flags_rt , rti_save_flags_vptr + + movl $rti_irq_restore_rt 
, rti_irq_restore_vptr + movl $rti_irq_disable_rt , rti_irq_disable_vptr + movl $rti_irq_enable_rt , rti_irq_enable_vptr + movl $rti_safe_halt_rt , rti_safe_halt_vptr + + movl $rti_irq_gate_rt , rti_irq_gate_vptr + movl $rti_irq_gate_ec_rt , rti_irq_gate_ec_vptr + movl $rti_managed_irq_gate_rt , rti_managed_irq_gate_vptr + movl $rti_managed_irq_noack_rt , rti_managed_irq_noack_vptr + movl $rti_managed_irq_ext_rt , rti_managed_irq_ext_vptr + movl $rti_trap_gate_rt , rti_trap_gate_vptr + movl $rti_trap_gate_ec_rt , rti_trap_gate_ec_vptr + movl $rti_managed_trap_gate_rt , rti_managed_trap_gate_vptr + movl $rti_managed_trap_noack_rt , rti_managed_trap_noack_vptr + movl $rti_managed_trap_ext_rt , rti_managed_trap_ext_vptr + + movl $rti_iret_rt , rti_iret_vptr + + popfl + ret + +ENTRY (rti_switch_rt_off) +/* pushfl + * cli */ + + movl $rti_save_flags_nonrt , rti_save_flags_vptr + movl $rti_irq_restore_nonrt , rti_irq_restore_vptr + movl $rti_irq_disable_nonrt , rti_irq_disable_vptr + movl $rti_irq_enable_nonrt , rti_irq_enable_vptr + movl $rti_safe_halt_nonrt , rti_safe_halt_vptr + + movl $rti_irq_gate_nonrt , rti_irq_gate_vptr + movl $rti_irq_gate_ec_nonrt , rti_irq_gate_ec_vptr + movl $rti_managed_irq_gate_nonrt , rti_managed_irq_gate_vptr + movl $rti_managed_irq_noack_nonrt, rti_managed_irq_noack_vptr + movl $rti_managed_irq_ext_nonrt, rti_managed_irq_ext_vptr + movl $rti_trap_gate_nonrt , rti_trap_gate_vptr + movl $rti_trap_gate_ec_nonrt , rti_trap_gate_ec_vptr + movl $rti_managed_trap_gate_nonrt, rti_managed_trap_gate_vptr + movl $rti_managed_trap_noack_nonrt, rti_managed_trap_noack_vptr + movl $rti_managed_trap_ext_nonrt , rti_managed_trap_ext_vptr + + movl $rti_iret_nonrt , rti_iret_vptr + + movl $0, rti_rt_on + +/* popfl */ + ret + +ENTRY (rti_save_flags) + call *%cs:rti_save_flags_vptr + ret + +ENTRY (rti_irq_restore) + call *%cs:rti_irq_restore_vptr + ret + +ENTRY (rti_irq_disable) + call *%cs:rti_irq_disable_vptr + ret + +ENTRY (rti_irq_enable) + call 
*%cs:rti_irq_enable_vptr + ret + +ENTRY (rti_safe_halt) + call *%cs:rti_safe_halt_vptr + ret + +rti_irq_gate: + call *%cs:rti_irq_gate_vptr + ret + +rti_irq_gate_ec: + call *%cs:rti_irq_gate_ec_vptr + ret + +rti_managed_irq_gate: + call *%cs:rti_managed_irq_gate_vptr + ret + +rti_managed_irq_noack: + call *%cs:rti_managed_irq_noack_vptr + ret + +rti_managed_irq_ext: + call *%cs:rti_managed_irq_ext_vptr + ret + +rti_trap_gate: + call *%cs:rti_trap_gate_vptr + ret + +rti_trap_gate_ec: + call *%cs:rti_trap_gate_ec_vptr + ret + +rti_managed_trap_gate: + call *%cs:rti_managed_trap_gate_vptr + ret + +rti_managed_trap_noack: + call *%cs:rti_managed_trap_noack_vptr + ret + +rti_managed_trap_ext: + call *%cs:rti_managed_trap_ext_vptr + ret + +rti_iret: + call *%cs:rti_iret_vptr + ret + + /* 16 bytes are pushed onto the stack in WRAP_IRQ_GATE + * 4 bytes for interrupt vector + * 'sti' at the end determines interrupt latency + * 8 more bytes for a _vptr call */ +rti_irq_gate_rt: + SDBG_SEND_CHAR (1, rti_irq_gate_rt) + pushl %edx + pushl %ebp + GET_THREAD_INFO (%ebp) + movl TI_cpu (%ebp), %ebp + movl %ebp, %edx + shll $RTI_CPUS_ORDER, %ebp + shll $2, %edx + + cmpl $1, rti_if_on + jne 1f /* (INCOMPLETE IF TEST) */ + movl 44 (%esp), %eax # EFLAGS + andl $0xfffffdff, %eax + orl rti_if (%edx), %eax + movl %eax, 44 (%esp) +1: + movl $0, rti_if (%edx) + movl 16 (%esp), %eax # interrupt vector + movl $0, rti_pending_irqs_list + 4 (%ebp, %eax, 8) + cmpl $1, rti_if_on + jne 1f + sti +1: popl %ebp + popl %edx + ret + + /* 4 more bytes for an error code */ +rti_irq_gate_ec_rt: + SDBG_SEND_CHAR (2, rti_irq_ec_rt) + pushl %edx + pushl %ebp + GET_THREAD_INFO (%ebp) + movl TI_cpu (%ebp), %ebp + movl %ebp, %edx + shll $ RTI_CPUS_ORDER, %ebp + shll $2, %edx + + cmpl $1, rti_if_on + jne 1f /* (INCOMPLETE IF TEST) */ + movl 48 (%esp), %eax # EFLAGS + andl $0xfffffdff, %eax + orl rti_if (%edx), %eax + movl %eax, 48 (%esp) +1: + movl $0, rti_if (%edx) + movl 16 (%esp), %eax # interrupt vector + 
movl $0, rti_pending_irqs_list + 4 (%ebp, %eax, 8) + cmpl $1, rti_if_on + jne 1f + sti +1: popl %ebp + popl %edx + ret + +rti_managed_irq_noack_rt: + SDBG_SEND_CHAR (4, rti_managed_irq_noack_rt) +/* pushl %edx + pushl %ebp + GET_THREAD_INFO (%ebp) + movl TI_cpu (%ebp), %ebp +*/ jmp 1f + +rti_managed_irq_ext_rt: + SDBG_SEND_CHAR (3, rti_managed_irq_ext_rt) +/* cmpl $1, rti_io_apic_enabled + je rti_managed_irq_gate_rt +*/ + /* Check for a bit in TMR: + * only level-triggered interrupts are those of interest */ + movl 8 (%esp), %eax + andl $0xff, %eax + pushl %ebx + pushl %eax + shrl $5, %eax + shll $4, %eax + movl RTI_APIC_REGISTER (APIC_TMR) (%eax), %ebx + popl %eax + pushl %eax + andl $0x1f, %eax + pushl %ecx + movl %eax, %ecx + shrl %cl, %ebx + popl %ecx + popl %eax + andl $1, %ebx + cmpl $1, %ebx + popl %ebx + jne 1f + /* (checked for a bit in TMR) */ + pushl %ecx + pushl %edx + cmpl $1, rti_io_apic_enabled + jne 2f + shll $2, %eax + movl rti_vector_irq (%eax), %eax + pushl %eax + call mask_IO_APIC_irq + jmp 3f +2: subl $FIRST_EXTERNAL_VECTOR, %eax + pushl %eax + call disable_8259A_irq +3: popl %eax + popl %edx + popl %ecx + jmp 1f + +rti_managed_irq_gate_rt: + SDBG_SEND_CHAR (5, rti_managed_irq_gate_rt) + movl $0, RTI_APIC_REGISTER (APIC_EOI) + +1: pushl %edx + pushl %ebp + GET_THREAD_INFO (%ebp) + movl TI_cpu (%ebp), %ebp + movl %ebp, %edx + shll $RTI_CPUS_ORDER, %ebp + shll $2, %edx + + cmpl $1, rti_if_on + jne 1f /* (INCOMPLETE IF TEST) */ + movl 44 (%esp), %eax + andl $0xfffffdff, %eax + orl rti_if (%edx), %eax + movl %eax, 44 (%esp) + + cmpl $1, rti_pp (%edx) + je 2f + cmpl $0, rti_if (%edx) + jne 1f +2: movl 16 (%esp), %eax # interrupt vector + + call rti_add_pending_irq + + popl %ebp + popl %edx + addl $12, %esp + popl %es + popl %ds + popl %eax + cmpl $1, rti_if_on + jne 2f + orl $0x200, 12 (%esp) # Set IF on the stack to 1 + andl $~0x200, (%esp) # Avoiding nested interrupts - REDUNDAND? + /* TODO No need to do pushfl just before iret! 
*/ +2: popfl + SDBG_SEND_CHAR (p, rti_managed_irq_gate_rt2) + iret + +1: movl $0, rti_if (%edx) + movl 16 (%esp), %eax # interrupt vector + movl $0, rti_pending_irqs_list + 4 (%ebp, %eax, 8) + cmpl $1, rti_if_on + jne 1f + SDBG_SEND_CHAR (m, rti_managed_irq_gate_rt3) + sti +1: popl %ebp + popl %edx + ret + +rti_trap_gate_rt: + SDBG_SEND_CHAR (6, rti_trap_gate_rt) + pushl %edx + pushl %ebp + pushfl + cli + GET_THREAD_INFO (%ebp) + movl TI_cpu (%ebp), %ebp + movl %ebp, %edx + shll $ RTI_CPUS_ORDER, %ebp + shll $2, %edx + + cmpl $1, rti_if_on + jne 1f /* (INCOMPLETE IF TEST) */ + movl 48 (%esp), %eax + andl $0xfffffdff, %eax + orl rti_if (%edx), %eax + movl %eax, 48 (%esp) + movl 20 (%esp), %eax # interrupt vector +1: + movl $0, rti_pending_irqs_list + 4 (%ebp, %eax, 8) + popfl + cmpl $1, rti_if_on + jne 1f + sti +1: popl %ebp + popl %edx + ret + +rti_trap_gate_ec_rt: + SDBG_SEND_CHAR (7, rti_trap_gate_ec_rt) + pushl %edx + pushl %ebp + pushfl + cli + GET_THREAD_INFO (%ebp) + movl TI_cpu (%ebp), %ebp + movl %ebp, %edx + shll $ RTI_CPUS_ORDER, %ebp + shll $2, %edx + + cmpl $1, rti_if_on + jne 1f /* (INCOMPLETE IF TEST) */ + movl 52 (%esp), %eax + andl $0xfffffdff, %eax + orl rti_if (%edx), %eax + movl %eax, 52 (%esp) + movl 20 (%esp), %eax # interrupt vector +1: + movl $0, rti_pending_irqs_list + 4 (%ebp, %eax, 8) + popfl + cmpl $1, rti_if_on + jne 1f + sti +1: popl %ebp + popl %edx + ret + +rti_managed_trap_noack_rt: + SDBG_SEND_CHAR (9, rti_managed_trap_noack_rt) +/* pushfl + pushl %edx + pushl %ebp + cli + GET_THREAD_INFO (%ebp) + movl TI_cpu (%ebp), %ebp +*/ jmp 1f + +rti_managed_trap_ext_rt: + SDBG_SEND_CHAR (8, rti_managed_trap_ext_rt) +/* cmpl $1, rti_io_apic_enabled + je rti_managed_trap_gate_rt +*/ + pushfl + cli + /* Check for a bit in TMR: + * only level-triggered interrupts are those of interest */ + movl 12 (%esp), %eax + andl $0xff, %eax + pushl %ebx + pushl %eax + shrl $5, %eax + shll $4, %eax + movl RTI_APIC_REGISTER (APIC_TMR) (%eax), %ebx + popl %eax 
+ pushl %eax + andl $0x1f, %eax + pushl %ecx + movl %eax, %ecx + shrl %cl, %ebx + popl %ecx + popl %eax + andl $1, %ebx + cmpl $1, %ebx + popl %ebx + jne 1f + /* (checked for a bit in TMR) */ + pushl %ecx + pushl %edx + cmpl $1, rti_io_apic_enabled + jne 2f + shll $2, %eax + movl rti_vector_irq (%eax), %eax + pushl %eax + call mask_IO_APIC_irq + jmp 3f +2: subl $FIRST_EXTERNAL_VECTOR, %eax + pushl %eax + call disable_8259A_irq +3: popl %eax + popl %edx + popl %ecx + jmp 1f + +rti_managed_trap_gate_rt: + SDBG_SEND_CHAR (a, rti_managed_trap_gate_rt) + movl $0, RTI_APIC_REGISTER (APIC_EOI) + +1: pushl %edx + pushl %ebp + GET_THREAD_INFO (%ebp) + movl TI_cpu (%ebp), %ebp + movl %ebp, %edx + shll $ RTI_CPUS_ORDER, %ebp + shll $2, %edx + + cmpl $1, rti_if_on + jne 1f /* (INCOMPLETE IF TEST) */ + movl 48 (%esp), %eax # EFLAGS + andl $0xfffffdff, %eax + orl rti_if (%edx), %eax + movl %eax, 48 (%esp) + + cmpl $1, rti_pp (%edx) + je 2f + cmpl $0, rti_if (%edx) + jne 1f +2: movl 20 (%esp), %eax # interrupt vector + + call rti_add_pending_irq + + popl %ebp + popl %edx + popfl + addl $12, %esp + popl %es + popl %ds + popl %eax + cmpl $1, rti_if_on + jne 2f + orl $0x200, 12 (%esp) # Set IF on the stack to 1 + /* TODO No need to pop flags just before iret! 
*/ + andl $~0x200, (%esp) # Avoiding nested interrupts +2: popfl + iret + +1: movl 16 (%esp), %eax # interrupt vector + movl $0, rti_pending_irqs_list + 4 (%ebp, %eax, 8) + popl %ebp + popl %edx + popfl + cmpl $1, rti_if_on + jne 1f + sti +1: ret + + /* 16 bytes are pushed in the beginning of RESTORE_WRAP_IRQ + * 8 more bytes for a _vptr call + * then come: + * vector (offset 24) + * EIP (offset 28) + * CS (offset 32) + * EFLAGS (offset 36) */ +rti_iret_rt: + SDBG_SEND_CHAR (i, rti_iret_rt) + RTI_TEST_IF + cli +/* NOEXTRAIRQ + GET_THREAD_INFO (%eax) + movl TI_cpu (%eax), %eax + shll $RTI_CPUS_ORDER, %eax + pushl %ecx + movl 24 (%esp), %ecx # vector + andl $0xff, %ecx + # NOEXTRAIRQ movl $0, rti_pending_irqs_list + 4 (%eax, %ecx, 8) + popl %ecx +*/ + movl 36 (%esp), %eax # EFLAGS + andl $0x200, %eax + cmpl $0, %eax + je 1f + call rti_process_pending_irqs + call rti_irq_enable_rt + jmp 2f +1: call rti_irq_disable_rt +2: cmpl $1, rti_if_on + jne 1f + orl $0x200, 36 (%esp) # RT!! +1: ret + +#else /* "#ifndef CONFIG_RTI" case */ + +#define rti_cli cli +#define rti_sti sti + +#define RTI_IRQ_GATE +#define RTI_IRQ_GATE_EC +#define RTI_MANAGED_IRQ_GATE +#define RTI_MANAGED_IRQ_NOACK +#define RTI_MANAGED_IRQ_EXT +#define RTI_TRAP_GATE +#define RTI_TRAP_GATE_EC +#define RTI_MANAGED_TRAP_GATE +#define RTI_MANAGED_TRAP_NOACK +#define RTI_MANAGED_TRAP_EXT +#define RTI_IRET + +#endif /* CONFIG_RTI */ + diff -urN linux-2.6.19/arch/i386/kernel/sdebug.c linux-2.6.19-mync/arch/i386/kernel/sdebug.c --- linux-2.6.19/arch/i386/kernel/sdebug.c 1970-01-01 03:00:00.000000000 +0300 +++ linux-2.6.19-mync/arch/i386/kernel/sdebug.c 2006-12-11 22:00:45.000000000 +0300 @@ -0,0 +1,85 @@ +#include +#include + +#include + +#include +#include + +/* 0x3 means COM1, 0x2 for COM2*/ +#define PORT(a) 0x3 ## a + +/* Example of seial port setup on a receiving machine: + * setserial /dev/ttyS0 baud_base 38400 + */ + +union port_mode { + struct { + unsigned char + len : 2, /* word length */ + stop : 1, /* 
stop bits */ + parity : 2, /* parity */ + fix_parity : 1, /* parity fixation */ + set_break : 1, /* break */ + div_load : 1; /* divisor loading */ + }; + + unsigned char mode; /* the whole register */ +}; + +void sdbg_init(void) +{ + union port_mode mode; + unsigned short div = 4; + char ctl; + + /* frequency divisor setup */ + ctl = inb(PORT(fb)); + outb(ctl | 0x80, PORT(fb)); + outb((char)((div >> 8) & 0xff), PORT(f9)); + outb((char)(div & 0xff), PORT(f8)); + mode.len = 3; /* 8 data bits */ + mode.stop = 0; /* 1 stop bit */ + mode.parity = 0; /* no parity control */ + mode.fix_parity = 0; + mode.set_break = 0; + mode.div_load = 0; + outb(0, PORT(f9)); /* disable interrupts */ + outb(mode.mode, PORT(fb)); /* port mode setup*/ + /* FIFO initialization: + * use FIFO and report on 8 bytes */ + outb(0x81, PORT(fa)); +} + +static void sdbg_send_byte(char byte) +{ + while(!(inb(PORT(fd)) & 0x20)); + outb(byte, PORT(f8)); +} + +static void sdbg_send_data(char *buf, int len) +{ + int i; + + for(i = 0; i < len; i++) + sdbg_send_byte(buf[i]); +} + +void sdbg_printf(const char *fmt, ...) 
+{ + va_list args; + int i; + static char buf[1024]; + unsigned long flags; + + local_irq_save(flags); + va_start(args, fmt); + i = vscnprintf(buf, sizeof buf, fmt, args); + va_end(args); + sdbg_send_data(buf, i); + local_irq_restore(flags); +} + +EXPORT_SYMBOL(sdbg_init); +EXPORT_SYMBOL(sdbg_printf); + diff -urN linux-2.6.19/arch/i386/kernel/traps.c linux-2.6.19-mync/arch/i386/kernel/traps.c --- linux-2.6.19/arch/i386/kernel/traps.c 2006-12-01 03:09:26.000000000 +0300 +++ linux-2.6.19-mync/arch/i386/kernel/traps.c 2006-12-12 20:07:26.000000000 +0300 @@ -1181,6 +1181,7 @@ } #endif +#ifndef CONFIG_WRAP_IRQ /* * This needs to use 'idt_table' rather than 'idt', and * thus use the _nonmapped_ version of the IDT, as the @@ -1210,6 +1211,46 @@ _set_gate(n, DESCTYPE_TRAP | DESCTYPE_DPL3, addr, __KERNEL_CS); } +#define rti_set_privileged_intr_gate(n, addr) set_intr_gate (n, addr) + +#else +void* wrap_irq_interrupt_entry_points [256]; + +extern void (*wrap_irq_interrupt [256]) (void); +extern void (*wrap_irq_trap [256]) (void); + +void set_intr_gate (unsigned int n, void *addr) +{ + wrap_irq_interrupt_entry_points [n] = addr; + _set_gate(n, DESCTYPE_INT, wrap_irq_interrupt [n], __KERNEL_CS); +} + +void rti_set_privileged_intr_gate (unsigned int n, void *addr) +{ + _set_gate(n, DESCTYPE_INT, addr, __KERNEL_CS); +} + +static inline void set_system_intr_gate (unsigned int n, void *addr) +{ + wrap_irq_interrupt_entry_points [n] = addr; + _set_gate(n, DESCTYPE_INT | DESCTYPE_DPL3, wrap_irq_interrupt [n], __KERNEL_CS); +} + +static void __init set_trap_gate (unsigned int n, void *addr) +{ + wrap_irq_interrupt_entry_points [n] = addr; + _set_gate(n, DESCTYPE_TRAP, wrap_irq_trap [n], __KERNEL_CS); +} + +static void __init set_system_gate (unsigned int n, void *addr) +{ + wrap_irq_interrupt_entry_points [n] = addr; + _set_gate(n, DESCTYPE_TRAP | DESCTYPE_DPL3, wrap_irq_trap [n], __KERNEL_CS); +} + +unsigned long rti_io_apic_enabled = 0; +#endif /* CONFIG_WRAP_IRQ */ + static void 
__init set_task_gate(unsigned int n, unsigned int gdt_entry) { _set_gate(n, DESCTYPE_TASK, (void *)0, (gdt_entry<<3)); @@ -1232,7 +1273,9 @@ set_trap_gate(0,÷_error); set_intr_gate(1,&debug); +#ifndef CONFIG_RTI set_intr_gate(2,&nmi); +#endif set_system_intr_gate(3, &int3); /* int3/4 can be called from all */ set_system_gate(4,&overflow); set_trap_gate(5,&bounds); diff -urN linux-2.6.19/arch/i386/kernel/wrap_irq.inc.S linux-2.6.19-mync/arch/i386/kernel/wrap_irq.inc.S --- linux-2.6.19/arch/i386/kernel/wrap_irq.inc.S 1970-01-01 03:00:00.000000000 +0300 +++ linux-2.6.19-mync/arch/i386/kernel/wrap_irq.inc.S 2006-12-10 14:20:44.000000000 +0300 @@ -0,0 +1,273 @@ +#define SAVE_ALL_NOCFI \ + cld; \ + pushl %es; \ + pushl %ds; \ + pushl %eax; \ + pushl %ebp; \ + pushl %edi; \ + pushl %esi; \ + pushl %edx; \ + pushl %ecx; \ + pushl %ebx; \ + movl $(__USER_DS), %edx;\ + movl %edx, %ds; \ + movl %edx, %es; + +#define RESTORE_INT_REGS_NOCFI \ + popl %ebx; \ + popl %ecx; \ + popl %edx; \ + popl %esi; \ + popl %edi; \ + popl %ebp; \ + popl %eax; + +#define RESTORE_REGS_NOCFI \ + RESTORE_INT_REGS_NOCFI \ +1: popl %ds; \ +2: popl %es; \ +.section .fixup,"ax"; \ +3: movl $0,(%esp); \ + jmp 1b; \ +4: movl $0,(%esp); \ + jmp 2b; \ +.previous; \ +.section __ex_table,"a"; \ + .align 4; \ + .long 1b,3b; \ + .long 2b,4b; \ +.previous + +#include "pf.inc.S" + +#if 0 +#define RTI_TRAP_GATE +#define RTI_TRAP_GATE_EC +#define RTI_IRQ_GATE +#define RTI_IRQ_GATE_EC +#define RTI_MANAGED_IRQ_GATE +#define RTI_MANAGED_IRQ_EXT +#define RTI_MANAGED_IRQ_NOACK +#define RTI_MANAGED_TRAP_NOACK +#endif + +#ifdef CONFIG_WRAP_IRQ + /* TODO Just test WRAP_IRQ with 16-bit code in the background + * and say it is fine */ +#define WRAP_IRQ_FORBIDDEN \ + cli; \ + pushl $wrap_irq_forbidden_fs; \ + call printk; \ + DEBUG_BEEP; \ +1: jmp 1b + + /* This block is better be finished without interrupts + * to save stack space */ + /* The interrupts are guaranteed to be disabled + * after RESTORE_WRAP_IRQ */ + /* 
Pushes 16 bytes on the stack, plus 4 bytes for + * an interrupt vector, totals to 20 bytes */ +#define RESTORE_WRAP_PRIVILEGED_IRQ \ + pushfl; \ + pushl %eax; \ + pushl %ds; \ + pushl %es; \ + \ + movl $(__USER_DS), %eax; \ + movl %eax, %ds; \ + movl %eax, %es; \ + \ + PF_IRET; \ + \ + popl %es; \ + popl %ds; \ + popl %eax; \ + popfl + +#define RESTORE_WRAP_IRQ \ + pushfl; \ + andl $~0x200, (%esp); \ + pushl %eax; \ + pushl %ds; \ + pushl %es; \ + \ + movl $(__USER_DS), %eax; \ + movl %eax, %ds; \ + movl %eax, %es; \ + \ + PF_IRET; \ + RTI_IRET; \ + \ + popl %es; \ + popl %ds; \ + popl %eax; \ + popfl +#else +#define WRAP_IRQ_FORBIDDEN +#define RESTORE_WRAP_IRQ +#endif /* CONFIG_WRAP_IRQ */ + +#include "rti.inc.S" + +#ifdef CONFIG_WRAP_IRQ +.data +wrap_irq_forbidden_fs: + .string "Forbidden irq wrapper's execution path\n" +.previous + +#define WRAP_IRQ_GATE_EARLY_ENTRANCE \ + pushfl; \ + pushl %eax; \ + pushl %ds; \ + pushl %es; \ + \ + movl $(__USER_DS), %eax; \ + movl %eax, %ds; \ + movl %eax, %es + +#define WRAP_IRQ_GATE(entrance_code) \ + ALIGN; \ +1: WRAP_IRQ_GATE_EARLY_ENTRANCE; \ + pushl $vector; \ + PF_IRQ_ENTRY; \ + entrance_code; \ + jmp wrap_irq_common_interrupt; \ +.data; \ + .long 1b; \ +.previous; \ +vector = vector + 1 + +.data +ENTRY (wrap_irq_interrupt) +.text +ALIGN +wrap_irq_interrupt_text: +vector = 0 +.rept 2 + WRAP_IRQ_GATE (RTI_IRQ_GATE) # 0-1 +.endr + WRAP_IRQ_GATE (RTI_MANAGED_IRQ_NOACK) # 2 (NMI) +.rept 5 + WRAP_IRQ_GATE (RTI_IRQ_GATE) # 3-7 +.endr + WRAP_IRQ_GATE (RTI_IRQ_GATE_EC) # 8 + WRAP_IRQ_GATE (RTI_IRQ_GATE) # 9 +.rept 5 + WRAP_IRQ_GATE (RTI_IRQ_GATE_EC) # 10-14 +.endr + WRAP_IRQ_GATE (RTI_IRQ_GATE) # 15 + WRAP_IRQ_GATE (RTI_IRQ_GATE) # 16 + WRAP_IRQ_GATE (RTI_IRQ_GATE_EC) # 17 + WRAP_IRQ_GATE (RTI_IRQ_GATE) # 18 + WRAP_IRQ_GATE (RTI_IRQ_GATE) # 19 +.rept 12 + WRAP_IRQ_GATE (RTI_IRQ_GATE) # 20-31 +.endr +.rept 96 + WRAP_IRQ_GATE (RTI_MANAGED_IRQ_EXT) # 32-127 +.endr + WRAP_IRQ_GATE (RTI_IRQ_GATE) # 128 (syscall) +.rept 110 + 
 WRAP_IRQ_GATE (RTI_MANAGED_IRQ_EXT) # 129-238 +.endr + WRAP_IRQ_GATE (RTI_MANAGED_IRQ_GATE) # 239 (local APIC timer) +.rep 10 + WRAP_IRQ_GATE (RTI_MANAGED_IRQ_GATE) # 240-249 +.endr +.rept 5 + WRAP_IRQ_GATE (RTI_MANAGED_IRQ_GATE) # 250-254 +.endr + WRAP_IRQ_GATE (RTI_MANAGED_IRQ_NOACK) # Spurious vector + +.data +ENTRY (wrap_irq_trap) +.text +ALIGN +wrap_irq_trap_text: +vector = 0 +.rept 2 + WRAP_IRQ_GATE (RTI_TRAP_GATE) # 0-1 +.endr + WRAP_IRQ_GATE (RTI_MANAGED_TRAP_NOACK) # 2 (NMI) +.rept 5 + WRAP_IRQ_GATE (RTI_TRAP_GATE) # 3-7 +.endr + WRAP_IRQ_GATE (RTI_TRAP_GATE_EC) # 8 + WRAP_IRQ_GATE (RTI_TRAP_GATE) # 9 +.rept 5 + WRAP_IRQ_GATE (RTI_TRAP_GATE_EC) # 10-14 +.endr + WRAP_IRQ_GATE (RTI_TRAP_GATE) # 15 + WRAP_IRQ_GATE (RTI_TRAP_GATE) # 16 + WRAP_IRQ_GATE (RTI_TRAP_GATE_EC) # 17 + WRAP_IRQ_GATE (RTI_TRAP_GATE) # 18 + WRAP_IRQ_GATE (RTI_TRAP_GATE) # 19 +.rept 12 + WRAP_IRQ_GATE (RTI_TRAP_GATE) # 20-31 +.endr +.rept 96 +# TSYSCALL WRAP_IRQ_GATE (RTI_MANAGED_TRAP_EXT) # 32-127 + WRAP_IRQ_GATE (RTI_TRAP_GATE) +.endr + WRAP_IRQ_GATE (RTI_TRAP_GATE) # 128 (syscall) +.rept 110 +# TSYSCALL WRAP_IRQ_GATE (RTI_MANAGED_TRAP_EXT) # 129-238 + WRAP_IRQ_GATE (RTI_TRAP_GATE) +.endr +# TSYSCALL WRAP_IRQ_GATE (RTI_MANAGED_TRAP_GATE) # 239 (local APIC timer) + WRAP_IRQ_GATE (RTI_TRAP_GATE) +.rep 10 +# TSYSCALL WRAP_IRQ_GATE (RTI_MANAGED_TRAP_GATE) # 240-249 + WRAP_IRQ_GATE (RTI_TRAP_GATE) +.endr +.rept 5 +# TSYSCALL WRAP_IRQ_GATE (RTI_MANAGED_IRQ_GATE) # 250-254 + WRAP_IRQ_GATE (RTI_TRAP_GATE) +.endr + WRAP_IRQ_GATE (RTI_MANAGED_TRAP_NOACK) # Spurious vector + + ALIGN +wrap_irq_common_interrupt: + movl (%esp), %eax + shll $2, %eax + addl $wrap_irq_interrupt_entry_points, %eax + movl (%eax), %eax + + addl $4, %esp + popl %es + + testl $IF_MASK, 8 (%esp) + jz wrap_irq_if_cleared_entry + + # sti shadows the next instruction from being interrupted. + # We're saved. Let's pray for this. 
+ +1: // xorl $IF_MASK, 8 (%esp) + andl $(~IF_MASK), 8 (%esp) + cli + popl %ds + pushl %eax + movl /*%cs:*/ 8 (%esp), %eax + pushl %eax + popfl + popl %eax + movl %eax, /*%cs:*/ 4 (%esp) + popl %eax + sti + ret + +wrap_irq_if_cleared_entry: +// TEST!! rti_cli +// Irrelevant (non-rt path) + cli + popl %ds + pushl %eax + movl /*%cs:*/ 8 (%esp), %eax + pushl %eax + popfl + popl %eax + movl %eax, /*%cs:*/ 4 (%esp) + popl %eax + ret +#endif /* CONFIG_WRAP_IRQ */ + diff -urN linux-2.6.19/include/asm-i386/apicdef.h linux-2.6.19-mync/include/asm-i386/apicdef.h --- linux-2.6.19/include/asm-i386/apicdef.h 2006-10-04 00:13:06.000000000 +0400 +++ linux-2.6.19-mync/include/asm-i386/apicdef.h 2006-12-02 20:40:33.000000000 +0300 @@ -109,6 +109,8 @@ #define APIC_TDR_DIV_64 0x9 #define APIC_TDR_DIV_128 0xA +#ifndef __ASSEMBLY__ + #define APIC_BASE (fix_to_virt(FIX_APIC_BASE)) #define MAX_IO_APICS 64 @@ -372,4 +374,5 @@ #undef u32 +#endif /* __ASSEMBLY__ */ #endif diff -urN linux-2.6.19/include/asm-i386/apic.h linux-2.6.19-mync/include/asm-i386/apic.h --- linux-2.6.19/include/asm-i386/apic.h 2006-12-01 03:09:31.000000000 +0300 +++ linux-2.6.19-mync/include/asm-i386/apic.h 2006-12-11 12:12:10.000000000 +0300 @@ -71,6 +71,10 @@ # define apic_write_around(x,y) apic_write_atomic((x),(y)) #endif +#ifdef CONFIG_RTI +extern unsigned long rti_rt_on; +#endif + static inline void ack_APIC_irq(void) { /* @@ -81,7 +85,12 @@ */ /* Docs say use 0 for future compatibility */ +#ifdef CONFIG_RTI + if (rti_rt_on == 0) + apic_write_around(APIC_EOI, 0); +#else + apic_write_around(APIC_EOI, 0); +#endif } extern void (*wait_timer_tick)(void); diff -urN linux-2.6.19/include/asm-i386/irqflags.h linux-2.6.19-mync/include/asm-i386/irqflags.h --- linux-2.6.19/include/asm-i386/irqflags.h 2006-10-04 00:13:06.000000000 +0400 +++ linux-2.6.19-mync/include/asm-i386/irqflags.h 2006-12-10 12:50:05.000000000 +0300 @@ -12,6 +12,43 @@ #ifndef __ASSEMBLY__ +#ifndef CONFIG_RTI +#define direct_irq_disable() 
local_irq_disable() +#define direct_irq_enable() local_irq_enable() +#define direct_save_flags(x) local_save_flags(x) +#define direct_irq_save(x) local_irq_save(x) +#define direct_irq_restore(x) local_irq_restore(x) +#else +#define direct_save_flags(x) \ + do { \ + typecheck (unsigned long, x); \ + __asm__ __volatile__ ( \ + "pushfl; popl %0" : \ + "=g" (x) : : \ + "memory"); \ + } while (0) +#define direct_irq_restore(x) \ + do { \ + typecheck (unsigned long, x); \ + __asm__ __volatile__ ( \ + "pushl %0; popfl" : \ + : "g" (x) : \ + "cc"); \ + } while (0) +#define direct_irq_disable() __asm__ __volatile__ ("cli") +#define direct_irq_enable() __asm__ __volatile__ ("sti") +#define direct_irq_save(x) \ + __asm__ __volatile__ ( \ + "pushfl; popl %0; cli" : \ + "=g" (x) : : \ + "memory") + +extern void rti_irq_disable (void); +extern void rti_irq_enable (void); +extern void rti_safe_halt (void); +#endif + +#ifndef CONFIG_RTI static inline unsigned long __raw_local_save_flags(void) { unsigned long flags; @@ -24,38 +61,85 @@ return flags; } +#else +static inline unsigned long __raw_local_save_flags(void) +{ + unsigned long flags; + + __asm__ __volatile__ ( + "call rti_save_flags ;" + "movl %%eax, %0 ;" + : "=g" (flags) : + : "eax", "memory" + ); + + return flags; +} +#endif #define raw_local_save_flags(flags) \ do { (flags) = __raw_local_save_flags(); } while (0) +#ifndef CONFIG_RTI static inline void raw_local_irq_restore(unsigned long flags) { __asm__ __volatile__( "pushl %0 ; popfl" - : /* no output */ - :"g" (flags) + : : "g" (flags) :"memory", "cc" ); } +#else +static inline void raw_local_irq_restore(unsigned long flags) +{ + __asm__ __volatile__( + "movl %0, %%eax ;" + "call rti_irq_restore ;" + : : "g" (flags) + : "eax", "memory" + ); +} +#endif +#ifndef CONFIG_RTI static inline void raw_local_irq_disable(void) { __asm__ __volatile__("cli" : : : "memory"); } +#else +static inline void raw_local_irq_disable(void) +{ + rti_irq_disable (); +} +#endif +#ifndef 
CONFIG_RTI static inline void raw_local_irq_enable(void) { __asm__ __volatile__("sti" : : : "memory"); } +#else +static inline void raw_local_irq_enable(void) +{ + rti_irq_enable (); +} +#endif /* * Used in the idle loop; sti takes one instruction cycle * to complete: */ +#ifndef CONFIG_RTI static inline void raw_safe_halt(void) { __asm__ __volatile__("sti; hlt" : : : "memory"); } +#else +static inline void raw_safe_halt(void) +{ + rti_safe_halt (); +} +#endif /* * Used when interrupts are already enabled or to diff -urN linux-2.6.19/include/asm-i386/mach-default/do_timer.h linux-2.6.19-mync/include/asm-i386/mach-default/do_timer.h --- linux-2.6.19/include/asm-i386/mach-default/do_timer.h 2006-12-01 03:09:31.000000000 +0300 +++ linux-2.6.19-mync/include/asm-i386/mach-default/do_timer.h 2006-12-04 16:44:19.000000000 +0300 @@ -17,6 +17,9 @@ static inline void do_timer_interrupt_hook(void) { do_timer(1); +#ifdef CONFIG_RTI + smp_local_timer_interrupt (); +#else #ifndef CONFIG_SMP update_process_times(user_mode_vm(get_irq_regs())); #endif @@ -31,6 +34,7 @@ if (!using_apic_timer) smp_local_timer_interrupt(); #endif +#endif /* CONFIG_RTI */ } diff -urN linux-2.6.19/include/asm-i386/sdebug.h linux-2.6.19-mync/include/asm-i386/sdebug.h --- linux-2.6.19/include/asm-i386/sdebug.h 1970-01-01 03:00:00.000000000 +0300 +++ linux-2.6.19-mync/include/asm-i386/sdebug.h 2006-12-12 13:17:34.000000000 +0300 @@ -0,0 +1,40 @@ +#ifndef __ASM_I386_SDEBUG_H__ +#define __ASM_I386_SDEBUG_H__ + +#ifdef CONFIG_SDEBUG + +#define SDBG_SEND_CHAR(char, label) \ + pushl %eax; \ + pushl %edx; \ +label/**/_SDBG_SEND_CHAR: \ + movl $0x3fd, %edx; \ + inb %dx, %al; \ + andl $0x20, %eax; \ + jz label/**/_SDBG_SEND_CHAR; \ + movl $'char, %eax; \ + movl $0x3f8, %edx; \ + outb %al, %dx; \ + popl %edx; \ + popl %eax; + +#define SDBG_SEND_MEM(byte, label) \ + pushl %eax; \ + pushl %edx; \ +label/**/_SDBG_SEND_MEM: \ + movl $0x3fd, %edx; \ + inb %dx, %al; \ + andl $0x20, %eax; \ + jz label/**/_SDBG_SEND_MEM; \ 
+ movb byte, %al; \ + movl $0x3f8, %edx; \ + outb %al, %dx; \ + popl %edx; \ + popl %eax; + +#else +#define SDBG_SEND_CHAR(char, label) +#define SDBG_SEND_MEM(char, label) +#endif + +#endif /* __ASM_I386_SDEBUG_H__ */ + diff -urN linux-2.6.19/include/asm-i386/spinlock.h linux-2.6.19-mync/include/asm-i386/spinlock.h --- linux-2.6.19/include/asm-i386/spinlock.h 2006-12-01 03:09:31.000000000 +0300 +++ linux-2.6.19-mync/include/asm-i386/spinlock.h 2006-12-02 20:45:34.000000000 +0300 @@ -7,8 +7,13 @@ #include #include +#ifndef CONFIG_RTI #define CLI_STRING "cli" #define STI_STRING "sti" +#else +#define CLI_STRING "call rti_irq_disable" +#define STI_STRING "call rti_irq_enable" +#endif /* * Your basic SMP spinlocks, allowing only a single CPU anywhere diff -urN linux-2.6.19/include/linux/pf.h linux-2.6.19-mync/include/linux/pf.h --- linux-2.6.19/include/linux/pf.h 1970-01-01 03:00:00.000000000 +0300 +++ linux-2.6.19-mync/include/linux/pf.h 2006-12-01 21:09:47.000000000 +0300 @@ -0,0 +1,37 @@ +#ifndef __LINUX_PF_H__ +#define __LINUX_PF_H__ + +#define PF_TRACE_ALIGNMENT 12 +#define PF_TRACE_ALIGNMENT_MASK 0xfff +#define PF_EVENT_INVALID 0xff + +#ifndef __ASSEMBLY__ +#include + +struct pf_trace_info { + unsigned char *trace_offset; + unsigned long trace_size; + long trace_enabled; +} __attribute__ ((packed)); + +extern struct pf_trace_info pf_trace_info; + +extern unsigned char *pf_trace_buffer; +extern unsigned char *pf_trace_limit; + +extern struct list_head pf_pid_list; +extern unsigned long pf_pid_data_len; +extern unsigned long pf_pid_count; + +struct pf_process_info { + struct list_head list_head; + unsigned long pid; + char name [TASK_COMM_LEN]; +}; + +extern void pf_clear_pid_tree (void); +extern void pf_init (void); +#endif /* __ASSEMBLY__ */ + +#endif /* __LINUX_PF_H__ */ + diff -urN linux-2.6.19/include/linux/sdebug.h linux-2.6.19-mync/include/linux/sdebug.h --- linux-2.6.19/include/linux/sdebug.h 1970-01-01 03:00:00.000000000 +0300 +++ 
linux-2.6.19-mync/include/linux/sdebug.h 2006-12-11 22:21:23.000000000 +0300 @@ -0,0 +1,15 @@ +#ifndef _ASM_SDEBUG_H +#define _ASM_SDEBUG_H + +#include + +#ifdef CONFIG_SDEBUG +extern void sdbg_init(void); +extern void sdbg_printf(const char *fmt, ...); +#else +static inline void sdbg_init(void) {} +static inline void sdbg_printf(const char *fmt, ...) {} +#endif + +#endif /* _ASM_SDEBUG_H */ + diff -urN linux-2.6.19/init/main.c linux-2.6.19-mync/init/main.c --- linux-2.6.19/init/main.c 2006-12-01 03:09:32.000000000 +0300 +++ linux-2.6.19-mync/init/main.c 2006-12-12 19:26:49.000000000 +0300 @@ -49,6 +49,8 @@ #include #include #include +#include +#include #include #include @@ -60,6 +62,18 @@ #include #endif +#ifdef CONFIG_RTI +extern void rti_switch_rt_on(void); +extern void rti_switch_rt_off(void); +extern void rti_init_timer (void); +extern void rti_init (void); +#else +#define rti_switch_rt_on() +#define rti_switch_rt_off() +#define rti_init_timer() +#define rti_init() +#endif + /* * This is one of the first .c files built. Error out early if we have compiler * trouble. @@ -502,6 +516,9 @@ page_address_init(); printk(KERN_NOTICE); printk(linux_banner); + + sdbg_init (); + setup_arch(&command_line); unwind_setup(); setup_per_cpu_areas(); @@ -606,6 +623,10 @@ acpi_early_init(); /* before LAPIC and SMP init */ +#ifdef CONFIG_PF + pf_init (); +#endif + /* Do the rest non-__init'ed, we're now alive */ rest_init(); } @@ -765,6 +786,16 @@ system_state = SYSTEM_RUNNING; numa_default_policy(); + printk ("RTI INIT\n"); + rti_init (); + printk ("RTI RT ON\n"); + rti_switch_rt_on (); + printk ("RTI INIT TIMER\n"); + rti_init_timer (); + printk ("RTI IRQ ENABLE\n"); + local_irq_enable (); + printk ("RTI INITIALIZED\n"); + if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0) printk(KERN_WARNING "Warning: unable to open an initial console.\n");