Linux-2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
diff --git a/arch/h8300/kernel/Makefile b/arch/h8300/kernel/Makefile
new file mode 100644
index 0000000..71b6131
--- /dev/null
+++ b/arch/h8300/kernel/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for the linux kernel.
+#
+
+extra-y := vmlinux.lds
+
+obj-y := process.o traps.o ptrace.o ints.o \
+ sys_h8300.o time.o semaphore.o signal.o \
+ setup.o gpio.o init_task.o syscalls.o
+
+obj-$(CONFIG_MODULES) += module.o h8300_ksyms.o
diff --git a/arch/h8300/kernel/asm-offsets.c b/arch/h8300/kernel/asm-offsets.c
new file mode 100644
index 0000000..b78b82a
--- /dev/null
+++ b/arch/h8300/kernel/asm-offsets.c
@@ -0,0 +1,65 @@
+/*
+ * This program is used to generate definitions needed by
+ * assembly language modules.
+ *
+ * We use the technique used in the OSF Mach kernel code:
+ * generate asm statements containing #defines,
+ * compile this file to assembler, and then extract the
+ * #defines from the assembly-language output.
+ */
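+/*
+ * Illustrative sketch (offsets hypothetical): DEFINE(TASK_FLAGS,
+ * offsetof(struct task_struct, flags)) below makes the compiler emit a
+ * marker line roughly like "->TASK_FLAGS 12 offsetof(struct task_struct,
+ * flags)" into the generated assembly, which the build machinery then
+ * rewrites into "#define TASK_FLAGS 12" for use from assembler sources.
+ */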
+
+#include <linux/stddef.h>
+#include <linux/sched.h>
+#include <linux/kernel_stat.h>
+#include <linux/ptrace.h>
+#include <linux/hardirq.h>
+#include <asm/bootinfo.h>
+#include <asm/irq.h>
+#include <asm/ptrace.h>
+
+#define DEFINE(sym, val) \
+ asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+
+#define BLANK() asm volatile("\n->" : : )
+
+int main(void)
+{
+ /* offsets into the task struct */
+ DEFINE(TASK_STATE, offsetof(struct task_struct, state));
+ DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
+ DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
+ DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked));
+ DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
+ DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, thread_info));
+ DEFINE(TASK_MM, offsetof(struct task_struct, mm));
+ DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
+
+ /* offsets into the irq_cpustat_t struct */
+ DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
+
+ /* offsets into the thread struct */
+ DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
+ DEFINE(THREAD_USP, offsetof(struct thread_struct, usp));
+ DEFINE(THREAD_CCR, offsetof(struct thread_struct, ccr));
+
+ /* offsets into the pt_regs struct */
+ DEFINE(LER0, offsetof(struct pt_regs, er0) - sizeof(long));
+ DEFINE(LER1, offsetof(struct pt_regs, er1) - sizeof(long));
+ DEFINE(LER2, offsetof(struct pt_regs, er2) - sizeof(long));
+ DEFINE(LER3, offsetof(struct pt_regs, er3) - sizeof(long));
+ DEFINE(LER4, offsetof(struct pt_regs, er4) - sizeof(long));
+ DEFINE(LER5, offsetof(struct pt_regs, er5) - sizeof(long));
+ DEFINE(LER6, offsetof(struct pt_regs, er6) - sizeof(long));
+ DEFINE(LORIG, offsetof(struct pt_regs, orig_er0) - sizeof(long));
+ DEFINE(LCCR, offsetof(struct pt_regs, ccr) - sizeof(long));
+ DEFINE(LVEC, offsetof(struct pt_regs, vector) - sizeof(long));
+#if defined(__H8300S__)
+ DEFINE(LEXR, offsetof(struct pt_regs, exr) - sizeof(long));
+#endif
+ DEFINE(LRET, offsetof(struct pt_regs, pc) - sizeof(long));
+
+ DEFINE(PT_PTRACED, PT_PTRACED);
+ DEFINE(PT_DTRACE, PT_DTRACE);
+
+ return 0;
+}
diff --git a/arch/h8300/kernel/gpio.c b/arch/h8300/kernel/gpio.c
new file mode 100644
index 0000000..795682b
--- /dev/null
+++ b/arch/h8300/kernel/gpio.c
@@ -0,0 +1,174 @@
+/*
+ * linux/arch/h8300/kernel/gpio.c
+ *
+ * Yoshinori Sato <ysato@users.sourceforge.jp>
+ *
+ */
+
+/*
+ * Internal I/O Port Management
+ */
+
+#include <linux/config.h>
+#include <linux/stddef.h>
+#include <linux/proc_fs.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+
+#define _(addr) (volatile unsigned char *)(addr)
+#if defined(CONFIG_H83007) || defined(CONFIG_H83068)
+#include <asm/regs306x.h>
+static volatile unsigned char *ddrs[] = {
+ _(P1DDR),_(P2DDR),_(P3DDR),_(P4DDR),_(P5DDR),_(P6DDR),
+ NULL, _(P8DDR),_(P9DDR),_(PADDR),_(PBDDR),
+};
+#define MAX_PORT 11
+#endif
+
+#if defined(CONFIG_H83002) || defined(CONFIG_H8048)
+/* Fix me!! */
+#include <asm/regs306x.h>
+static volatile unsigned char *ddrs[] = {
+ _(P1DDR),_(P2DDR),_(P3DDR),_(P4DDR),_(P5DDR),_(P6DDR),
+ NULL, _(P8DDR),_(P9DDR),_(PADDR),_(PBDDR),
+};
+#define MAX_PORT 11
+#endif
+
+#if defined(CONFIG_H8S2678)
+#include <asm/regs267x.h>
+static volatile unsigned char *ddrs[] = {
+ _(P1DDR),_(P2DDR),_(P3DDR),NULL ,_(P5DDR),_(P6DDR),
+ _(P7DDR),_(P8DDR),NULL, _(PADDR),_(PBDDR),_(PCDDR),
+ _(PDDDR),_(PEDDR),_(PFDDR),_(PGDDR),_(PHDDR),
+ _(PADDR),_(PBDDR),_(PCDDR),_(PDDDR),_(PEDDR),_(PFDDR),
+ _(PGDDR),_(PHDDR)
+};
+#define MAX_PORT 17
+#endif
+#undef _
+
+#if !defined(P1DDR)
+#error Unsupported CPU Selection
+#endif
+
+static struct {
+ unsigned char used;
+ unsigned char ddr;
+} gpio_regs[MAX_PORT];
+
+extern char *_platform_gpio_table(int length);
+
+int h8300_reserved_gpio(int port, unsigned int bits)
+{
+ unsigned char *used;
+
+ if (port < 0 || port >= MAX_PORT)
+ return -1;
+ used = &(gpio_regs[port].used);
+ if ((*used & bits) != 0)
+ return 0;
+ *used |= bits;
+ return 1;
+}
+
+int h8300_free_gpio(int port, unsigned int bits)
+{
+ unsigned char *used;
+
+ if (port < 0 || port >= MAX_PORT)
+ return -1;
+ used = &(gpio_regs[port].used);
+ if ((*used & bits) != bits)
+ return 0;
+ *used &= (~bits);
+ return 1;
+}
+
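+/*
+ * Note: in the functions below, port_bit packs the port index into bits
+ * 15..8 and the pin mask into bits 7..0 (e.g. a hypothetical (1 << 8) | 0x04
+ * would address bit 2 of the second port in the ddrs[] table).
+ */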
+int h8300_set_gpio_dir(int port_bit,int dir)
+{
+ int port = (port_bit >> 8) & 0xff;
+ int bit = port_bit & 0xff;
+
+ if (ddrs[port] == NULL)
+ return 0;
+ if (gpio_regs[port].used & bit) {
+ if (dir)
+ gpio_regs[port].ddr |= bit;
+ else
+ gpio_regs[port].ddr &= ~bit;
+ *ddrs[port] = gpio_regs[port].ddr;
+ return 1;
+ } else
+ return 0;
+}
+
+int h8300_get_gpio_dir(int port_bit)
+{
+ int port = (port_bit >> 8) & 0xff;
+ int bit = port_bit & 0xff;
+
+ if (ddrs[port] == NULL)
+ return 0;
+ if (gpio_regs[port].used & bit) {
+ return (gpio_regs[port].ddr & bit) != 0;
+ } else
+ return -1;
+}
+
+#if defined(CONFIG_PROC_FS)
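+/*
+ * Render one port as an eight-character string for /proc/gpio, most
+ * significant bit first: 'I' = reserved input pin, 'O' = reserved output
+ * pin, '-' = unclaimed pin (e.g. a port might read as "O---IIII").
+ */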
+static char *port_status(int portno)
+{
+ static char result[10];
+ static const char io[2] = {'I','O'};
+ char *rp;
+ int c;
+ unsigned char used,ddr;
+
+ used = gpio_regs[portno].used;
+ ddr = gpio_regs[portno].ddr;
+ result[8]='\0';
+ rp = result + 7;
+ for (c = 8; c > 0; c--,rp--,used >>= 1, ddr >>= 1)
+ if (used & 0x01)
+ *rp = io[ ddr & 0x01];
+ else
+ *rp = '-';
+ return result;
+}
+
+static int gpio_proc_read(char *buf, char **start, off_t offset,
+ int len, int *unused_i, void *unused_v)
+{
+ int c,outlen;
+ static const char port_name[] = "123456789ABCDEFGH";
+ outlen = 0;
+ for (c = 0; c < MAX_PORT; c++) {
+ if (ddrs[c] == NULL)
+ continue ;
+ len = sprintf(buf,"P%c: %s\n",port_name[c],port_status(c));
+ buf += len;
+ outlen += len;
+ }
+ return outlen;
+}
+
+static __init int register_proc(void)
+{
+ struct proc_dir_entry *proc_gpio;
+
+ proc_gpio = create_proc_entry("gpio", S_IRUGO, NULL);
+ if (proc_gpio)
+ proc_gpio->read_proc = gpio_proc_read;
+ return proc_gpio != NULL;
+}
+
+__initcall(register_proc);
+#endif
+
+void __init h8300_gpio_init(void)
+{
+ memcpy(gpio_regs,_platform_gpio_table(sizeof(gpio_regs)),sizeof(gpio_regs));
+}
diff --git a/arch/h8300/kernel/h8300_ksyms.c b/arch/h8300/kernel/h8300_ksyms.c
new file mode 100644
index 0000000..5a63023
--- /dev/null
+++ b/arch/h8300/kernel/h8300_ksyms.c
@@ -0,0 +1,112 @@
+#include <linux/module.h>
+#include <linux/linkage.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/user.h>
+#include <linux/elfcore.h>
+#include <linux/in6.h>
+#include <linux/interrupt.h>
+#include <linux/config.h>
+
+#include <asm/setup.h>
+#include <asm/pgalloc.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/semaphore.h>
+#include <asm/checksum.h>
+#include <asm/current.h>
+#include <asm/gpio.h>
+
+//asmlinkage long long __ashrdi3 (long long, int);
+//asmlinkage long long __lshrdi3 (long long, int);
+extern char h8300_debug_device[];
+
+extern void dump_thread(struct pt_regs *, struct user *);
+
+/* platform dependent support */
+
+EXPORT_SYMBOL(dump_thread);
+EXPORT_SYMBOL(strnlen);
+EXPORT_SYMBOL(strrchr);
+EXPORT_SYMBOL(strstr);
+EXPORT_SYMBOL(strchr);
+EXPORT_SYMBOL(strcat);
+EXPORT_SYMBOL(strlen);
+EXPORT_SYMBOL(strcmp);
+EXPORT_SYMBOL(strncmp);
+
+EXPORT_SYMBOL(ip_fast_csum);
+
+EXPORT_SYMBOL(kernel_thread);
+EXPORT_SYMBOL(enable_irq);
+EXPORT_SYMBOL(disable_irq);
+
+/* Networking helper routines. */
+EXPORT_SYMBOL(csum_partial_copy);
+
+/* The following are special because they're not called
+ explicitly (the C compiler generates them). Fortunately,
+ their interface isn't gonna change any time soon now, so
+ it's OK to leave it out of version control. */
+//EXPORT_SYMBOL(__ashrdi3);
+//EXPORT_SYMBOL(__lshrdi3);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memcmp);
+EXPORT_SYMBOL(memscan);
+EXPORT_SYMBOL(memmove);
+
+EXPORT_SYMBOL(get_wchan);
+
+/*
+ * libgcc functions - functions that are used internally by the
+ * compiler... (prototypes are not correct though, but that
+ * doesn't really matter since they're not versioned).
+ */
+extern void __gcc_bcmp(void);
+extern void __ashldi3(void);
+extern void __ashrdi3(void);
+extern void __cmpdi2(void);
+extern void __divdi3(void);
+extern void __divsi3(void);
+extern void __lshrdi3(void);
+extern void __moddi3(void);
+extern void __modsi3(void);
+extern void __muldi3(void);
+extern void __mulsi3(void);
+extern void __negdi2(void);
+extern void __ucmpdi2(void);
+extern void __udivdi3(void);
+extern void __udivmoddi4(void);
+extern void __udivsi3(void);
+extern void __umoddi3(void);
+extern void __umodsi3(void);
+
+ /* gcc lib functions */
+EXPORT_SYMBOL(__gcc_bcmp);
+EXPORT_SYMBOL(__ashldi3);
+EXPORT_SYMBOL(__ashrdi3);
+EXPORT_SYMBOL(__cmpdi2);
+EXPORT_SYMBOL(__divdi3);
+EXPORT_SYMBOL(__divsi3);
+EXPORT_SYMBOL(__lshrdi3);
+EXPORT_SYMBOL(__moddi3);
+EXPORT_SYMBOL(__modsi3);
+EXPORT_SYMBOL(__muldi3);
+EXPORT_SYMBOL(__mulsi3);
+EXPORT_SYMBOL(__negdi2);
+EXPORT_SYMBOL(__ucmpdi2);
+EXPORT_SYMBOL(__udivdi3);
+EXPORT_SYMBOL(__udivmoddi4);
+EXPORT_SYMBOL(__udivsi3);
+EXPORT_SYMBOL(__umoddi3);
+EXPORT_SYMBOL(__umodsi3);
+
+#ifdef MAGIC_ROM_PTR
+EXPORT_SYMBOL(is_in_rom);
+#endif
+
+EXPORT_SYMBOL(h8300_reserved_gpio);
+EXPORT_SYMBOL(h8300_free_gpio);
+EXPORT_SYMBOL(h8300_set_gpio_dir);
diff --git a/arch/h8300/kernel/init_task.c b/arch/h8300/kernel/init_task.c
new file mode 100644
index 0000000..19272c2
--- /dev/null
+++ b/arch/h8300/kernel/init_task.c
@@ -0,0 +1,43 @@
+/*
+ * linux/arch/h8300/kernel/init_task.c
+ */
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/init_task.h>
+#include <linux/fs.h>
+#include <linux/mqueue.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+
+static struct fs_struct init_fs = INIT_FS;
+static struct files_struct init_files = INIT_FILES;
+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+struct mm_struct init_mm = INIT_MM(init_mm);
+
+EXPORT_SYMBOL(init_mm);
+
+/*
+ * Initial task structure.
+ *
+ * All other task structs will be allocated on slabs in fork.c
+ */
+__asm__(".align 4");
+struct task_struct init_task = INIT_TASK(init_task);
+
+EXPORT_SYMBOL(init_task);
+
+/*
+ * Initial thread structure.
+ *
+ * We need to make sure that this is 8192-byte aligned due to the
+ * way process stacks are handled. This is done by having a special
+ * "init_task" linker map entry..
+ */
+union thread_union init_thread_union
+ __attribute__((__section__(".data.init_task"))) =
+ { INIT_THREAD_INFO(init_task) };
+
diff --git a/arch/h8300/kernel/ints.c b/arch/h8300/kernel/ints.c
new file mode 100644
index 0000000..edb3c41
--- /dev/null
+++ b/arch/h8300/kernel/ints.c
@@ -0,0 +1,255 @@
+/*
+ * linux/arch/h8300/platform/h8300h/ints.c
+ *
+ * Yoshinori Sato <ysato@users.sourceforge.jp>
+ *
+ * Based on linux/arch/$(ARCH)/platform/$(PLATFORM)/ints.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ * Copyright 1996 Roman Zippel
+ * Copyright 1999 D. Jeff Dionne <jeff@rt-control.com>
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/kernel_stat.h>
+#include <linux/seq_file.h>
+#include <linux/init.h>
+#include <linux/random.h>
+#include <linux/bootmem.h>
+#include <linux/hardirq.h>
+
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <asm/traps.h>
+#include <asm/io.h>
+#include <asm/setup.h>
+#include <asm/errno.h>
+
+/*
+ * This structure is kept deliberately small for speed reasons
+ */
+typedef struct irq_handler {
+ irqreturn_t (*handler)(int, void *, struct pt_regs *);
+ int flags;
+ int count;
+ void *dev_id;
+ const char *devname;
+} irq_handler_t;
+
+static irq_handler_t *irq_list[NR_IRQS];
+static int use_kmalloc;
+
+extern unsigned long *interrupt_redirect_table;
+extern const int h8300_saved_vectors[];
+extern const unsigned long h8300_trap_table[];
+int h8300_enable_irq_pin(unsigned int irq);
+void h8300_disable_irq_pin(unsigned int irq);
+
+#define CPU_VECTOR ((unsigned long *)0x000000)
+#define ADDR_MASK (0xffffff)
+
+#if defined(CONFIG_RAMKERNEL)
+static unsigned long __init *get_vector_address(void)
+{
+ unsigned long *rom_vector = CPU_VECTOR;
+ unsigned long base,tmp;
+ int vec_no;
+
+ base = rom_vector[EXT_IRQ0] & ADDR_MASK;
+
+ /* check romvector format */
+ for (vec_no = EXT_IRQ1; vec_no <= EXT_IRQ0+EXT_IRQS; vec_no++) {
+ if ((base+(vec_no - EXT_IRQ0)*4) != (rom_vector[vec_no] & ADDR_MASK))
+ return NULL;
+ }
+
+ /* ramvector base address */
+ base -= EXT_IRQ0*4;
+
+ /* writable check */
+ tmp = ~(*(volatile unsigned long *)base);
+ (*(volatile unsigned long *)base) = tmp;
+ if ((*(volatile unsigned long *)base) != tmp)
+ return NULL;
+ return (unsigned long *)base;
+}
+#endif
+
+void __init init_IRQ(void)
+{
+#if defined(CONFIG_RAMKERNEL)
+ int i;
+ unsigned long *ramvec,*ramvec_p;
+ const unsigned long *trap_entry;
+ const int *saved_vector;
+
+ ramvec = get_vector_address();
+ if (ramvec == NULL)
+ panic("interrupt vector serup failed.");
+ else
+ printk(KERN_INFO "virtual vector at 0x%08lx\n",(unsigned long)ramvec);
+
+ /* create redirect table */
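+ /*
+ * Entries listed in h8300_saved_vectors are left for the platform code,
+ * the first NR_TRAPS entries keep their trap handlers, and every other
+ * vector is pointed at interrupt_entry so process_int() can dispatch it
+ * by IRQ number.
+ */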
+ ramvec_p = ramvec;
+ trap_entry = h8300_trap_table;
+ saved_vector = h8300_saved_vectors;
+ for ( i = 0; i < NR_IRQS; i++) {
+ if (i == *saved_vector) {
+ ramvec_p++;
+ saved_vector++;
+ } else {
+ if ( i < NR_TRAPS ) {
+ if (*trap_entry)
+ *ramvec_p = VECTOR(*trap_entry);
+ ramvec_p++;
+ trap_entry++;
+ } else
+ *ramvec_p++ = REDIRECT(interrupt_entry);
+ }
+ }
+ interrupt_redirect_table = ramvec;
+#ifdef DUMP_VECTOR
+ ramvec_p = ramvec;
+ for (i = 0; i < NR_IRQS; i++) {
+ if ((i % 8) == 0)
+ printk(KERN_DEBUG "\n%p: ",ramvec_p);
+ printk(KERN_DEBUG "%p ",*ramvec_p);
+ ramvec_p++;
+ }
+ printk(KERN_DEBUG "\n");
+#endif
+#endif
+}
+
+int request_irq(unsigned int irq,
+ irqreturn_t (*handler)(int, void *, struct pt_regs *),
+ unsigned long flags, const char *devname, void *dev_id)
+{
+ irq_handler_t *irq_handle;
+ if (irq < 0 || irq >= NR_IRQS) {
+ printk(KERN_ERR "Incorrect IRQ %d from %s\n", irq, devname);
+ return -EINVAL;
+ }
+
+ if (irq_list[irq] || (h8300_enable_irq_pin(irq) == -EBUSY))
+ return -EBUSY;
+
+ if (use_kmalloc)
+ irq_handle = (irq_handler_t *)kmalloc(sizeof(irq_handler_t), GFP_ATOMIC);
+ else {
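+ /*
+ * Bootmem allocations are never kfree()d; tag the pointer with bit 31
+ * so free_irq() can tell them apart from kmalloc()ed handlers.
+ */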
+ /* use bootmem allocator */
+ irq_handle = (irq_handler_t *)alloc_bootmem(sizeof(irq_handler_t));
+ irq_handle = (irq_handler_t *)((unsigned long)irq_handle | 0x80000000);
+ }
+
+ if (irq_handle == NULL)
+ return -ENOMEM;
+
+ irq_handle->handler = handler;
+ irq_handle->flags = flags;
+ irq_handle->count = 0;
+ irq_handle->dev_id = dev_id;
+ irq_handle->devname = devname;
+ irq_list[irq] = irq_handle;
+
+ if (irq_handle->flags & SA_SAMPLE_RANDOM)
+ rand_initialize_irq(irq);
+
+ enable_irq(irq);
+ return 0;
+}
+
+EXPORT_SYMBOL(request_irq);
+
+void free_irq(unsigned int irq, void *dev_id)
+{
+ if (irq >= NR_IRQS)
+ return;
+
+ if (!irq_list[irq] || irq_list[irq]->dev_id != dev_id)
+ printk(KERN_WARNING "Removing probably wrong IRQ %d from %s\n",
+ irq, irq_list[irq]->devname);
+ disable_irq(irq);
+ h8300_disable_irq_pin(irq);
+ if (((unsigned long)irq_list[irq] & 0x80000000) == 0) {
+ kfree(irq_list[irq]);
+ irq_list[irq] = NULL;
+ }
+}
+
+EXPORT_SYMBOL(free_irq);
+
+/*
+ * Do we need these probe functions on the H8/300?
+ */
+unsigned long probe_irq_on (void)
+{
+ return 0;
+}
+
+EXPORT_SYMBOL(probe_irq_on);
+
+int probe_irq_off (unsigned long irqs)
+{
+ return 0;
+}
+
+EXPORT_SYMBOL(probe_irq_off);
+
+void enable_irq(unsigned int irq)
+{
+ if (irq >= EXT_IRQ0 && irq <= (EXT_IRQ0 + EXT_IRQS))
+ IER_REGS |= 1 << (irq - EXT_IRQ0);
+}
+
+void disable_irq(unsigned int irq)
+{
+ if (irq >= EXT_IRQ0 && irq <= (EXT_IRQ0 + EXT_IRQS))
+ IER_REGS &= ~(1 << (irq - EXT_IRQ0));
+}
+
+asmlinkage void process_int(int irq, struct pt_regs *fp)
+{
+ irq_enter();
+ h8300_clear_isr(irq);
+ if (irq >= NR_TRAPS && irq < NR_IRQS) {
+ if (irq_list[irq]) {
+ irq_list[irq]->handler(irq, irq_list[irq]->dev_id, fp);
+ irq_list[irq]->count++;
+ if (irq_list[irq]->flags & SA_SAMPLE_RANDOM)
+ add_interrupt_randomness(irq);
+ }
+ } else {
+ BUG();
+ }
+ irq_exit();
+}
+
+int show_interrupts(struct seq_file *p, void *v)
+{
+ int i = *(loff_t *) v;
+
+ if ((i < NR_IRQS) && (irq_list[i]!=NULL)) {
+ seq_printf(p, "%3d: %10u ",i,irq_list[i]->count);
+ seq_printf(p, "%s\n", irq_list[i]->devname);
+ }
+
+ return 0;
+}
+
+void init_irq_proc(void)
+{
+}
+
+static int __init enable_kmalloc(void)
+{
+ use_kmalloc = 1;
+ return 0;
+}
+core_initcall(enable_kmalloc);
diff --git a/arch/h8300/kernel/module.c b/arch/h8300/kernel/module.c
new file mode 100644
index 0000000..4fd7138
--- /dev/null
+++ b/arch/h8300/kernel/module.c
@@ -0,0 +1,122 @@
+#include <linux/moduleloader.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(fmt...)
+#endif
+
+void *module_alloc(unsigned long size)
+{
+ if (size == 0)
+ return NULL;
+ return vmalloc(size);
+}
+
+
+/* Free memory returned from module_alloc */
+void module_free(struct module *mod, void *module_region)
+{
+ vfree(module_region);
+ /* FIXME: If module_region == mod->init_region, trim exception
+ table entries. */
+}
+
+/* We don't need anything special. */
+int module_frob_arch_sections(Elf_Ehdr *hdr,
+ Elf_Shdr *sechdrs,
+ char *secstrings,
+ struct module *mod)
+{
+ return 0;
+}
+
+int apply_relocate(Elf32_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex,
+ unsigned int relsec,
+ struct module *me)
+{
+ printk(KERN_ERR "module %s: RELOCATION unsupported\n",
+ me->name);
+ return -ENOEXEC;
+}
+
+int apply_relocate_add(Elf32_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex,
+ unsigned int relsec,
+ struct module *me)
+{
+ unsigned int i;
+ Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
+
+ DEBUGP("Applying relocate section %u to %u\n", relsec,
+ sechdrs[relsec].sh_info);
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
+ /* This is where to make the change */
+ uint32_t *loc = (uint32_t *)(sechdrs[sechdrs[relsec].sh_info].sh_addr
+ + rela[i].r_offset);
+ /* This is the symbol it is referring to. Note that all
+ undefined symbols have been resolved. */
+ Elf32_Sym *sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ + ELF32_R_SYM(rela[i].r_info);
+ uint32_t v = sym->st_value + rela[i].r_addend;
+
+ switch (ELF32_R_TYPE(rela[i].r_info)) {
+ case R_H8_DIR24R8:
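+ /*
+ * The instruction's 24-bit operand lives in the low three bytes of
+ * the 32-bit word that starts one byte before loc; back up and add v
+ * to those three bytes while preserving the opcode byte above them.
+ */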
+ loc = (uint32_t *)((uint32_t)loc - 1);
+ *loc = (*loc & 0xff000000) | ((*loc & 0xffffff) + v);
+ break;
+ case R_H8_DIR24A8:
+ if (ELF32_R_SYM(rela[i].r_info))
+ *loc += v;
+ break;
+ case R_H8_DIR32:
+ case R_H8_DIR32A16:
+ *loc += v;
+ break;
+ case R_H8_PCREL16:
+ v -= (unsigned long)loc + 2;
+ if ((Elf32_Sword)v > 0x7fff ||
+ (Elf32_Sword)v < -(Elf32_Sword)0x8000)
+ goto overflow;
+ else
+ *(unsigned short *)loc = v;
+ break;
+ case R_H8_PCREL8:
+ v -= (unsigned long)loc + 1;
+ if ((Elf32_Sword)v > 0x7f ||
+ (Elf32_Sword)v < -(Elf32_Sword)0x80)
+ goto overflow;
+ else
+ *(unsigned char *)loc = v;
+ break;
+ default:
+ printk(KERN_ERR "module %s: Unknown relocation: %u\n",
+ me->name, ELF32_R_TYPE(rela[i].r_info));
+ return -ENOEXEC;
+ }
+ }
+ return 0;
+ overflow:
+ printk(KERN_ERR "module %s: relocation offset overflow: %08x\n",
+ me->name, rela[i].r_offset);
+ return -ENOEXEC;
+}
+
+int module_finalize(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ struct module *me)
+{
+ return 0;
+}
+
+void module_arch_cleanup(struct module *mod)
+{
+}
diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c
new file mode 100644
index 0000000..134aec1
--- /dev/null
+++ b/arch/h8300/kernel/process.c
@@ -0,0 +1,288 @@
+/*
+ * linux/arch/h8300/kernel/process.c
+ *
+ * Yoshinori Sato <ysato@users.sourceforge.jp>
+ *
+ * Based on:
+ *
+ * linux/arch/m68knommu/kernel/process.c
+ *
+ * Copyright (C) 1998 D. Jeff Dionne <jeff@ryeham.ee.ryerson.ca>,
+ * Kenneth Albanowski <kjahds@kjahds.com>,
+ * The Silver Hammer Group, Ltd.
+ *
+ * linux/arch/m68k/kernel/process.c
+ *
+ * Copyright (C) 1995 Hamish Macdonald
+ *
+ * 68060 fixes by Jesper Skov
+ */
+
+/*
+ * This file handles the architecture-dependent parts of process handling..
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/interrupt.h>
+#include <linux/reboot.h>
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/traps.h>
+#include <asm/setup.h>
+#include <asm/pgtable.h>
+
+asmlinkage void ret_from_fork(void);
+
+/*
+ * The idle loop on an H8/300..
+ */
+#if !defined(CONFIG_H8300H_SIM) && !defined(CONFIG_H8S_SIM)
+void default_idle(void)
+{
+ while(1) {
+ if (need_resched()) {
+ local_irq_enable();
+ __asm__("sleep");
+ local_irq_disable();
+ }
+ schedule();
+ }
+}
+#else
+void default_idle(void)
+{
+ while(1) {
+ if (need_resched())
+ schedule();
+ }
+}
+#endif
+void (*idle)(void) = default_idle;
+
+/*
+ * The idle thread. There's no useful work to be
+ * done, so just try to conserve power and have a
+ * low exit latency (ie sit in a loop waiting for
+ * somebody to say that they'd like to reschedule)
+ */
+void cpu_idle(void)
+{
+ idle();
+}
+
+void machine_restart(char * __unused)
+{
+ local_irq_disable();
+ __asm__("jmp @@0");
+}
+
+EXPORT_SYMBOL(machine_restart);
+
+void machine_halt(void)
+{
+ local_irq_disable();
+ __asm__("sleep");
+ for (;;);
+}
+
+EXPORT_SYMBOL(machine_halt);
+
+void machine_power_off(void)
+{
+ local_irq_disable();
+ __asm__("sleep");
+ for (;;);
+}
+
+EXPORT_SYMBOL(machine_power_off);
+
+void show_regs(struct pt_regs * regs)
+{
+ printk("\nPC: %08lx Status: %02x",
+ regs->pc, regs->ccr);
+ printk("\nORIG_ER0: %08lx ER0: %08lx ER1: %08lx",
+ regs->orig_er0, regs->er0, regs->er1);
+ printk("\nER2: %08lx ER3: %08lx ER4: %08lx ER5: %08lx",
+ regs->er2, regs->er3, regs->er4, regs->er5);
+ printk("\nER6' %08lx ",regs->er6);
+ if (user_mode(regs))
+ printk("USP: %08lx\n", rdusp());
+ else
+ printk("\n");
+}
+
+/*
+ * Create a kernel thread
+ */
+int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+{
+ long retval;
+ long clone_arg;
+ mm_segment_t fs;
+
+ fs = get_fs();
+ set_fs (KERNEL_DS);
+ clone_arg = flags | CLONE_VM;
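+ /*
+ * Sketch of the inline asm below: save sp in er3, issue __NR_clone via
+ * trapa #0, then compare sp with er3. The parent still runs on the old
+ * stack (equal, branch to 1: and return the child's pid); the child is
+ * on a new stack (not equal), so it calls fn(arg) and then __NR_exit.
+ */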
+ __asm__("mov.l sp,er3\n\t"
+ "sub.l er2,er2\n\t"
+ "mov.l %2,er1\n\t"
+ "mov.l %1,er0\n\t"
+ "trapa #0\n\t"
+ "cmp.l sp,er3\n\t"
+ "beq 1f\n\t"
+ "mov.l %4,er0\n\t"
+ "mov.l %3,er1\n\t"
+ "jsr @er1\n\t"
+ "mov.l %5,er0\n\t"
+ "trapa #0\n"
+ "1:\n\t"
+ "mov.l er0,%0"
+ :"=r"(retval)
+ :"i"(__NR_clone),"g"(clone_arg),"g"(fn),"g"(arg),"i"(__NR_exit)
+ :"er0","er1","er2","er3");
+ set_fs (fs);
+ return retval;
+}
+
+void flush_thread(void)
+{
+}
+
+/*
+ * "h8300_fork()".. By the time we get here, the
+ * non-volatile registers have also been saved on the
+ * stack. We do some ugly pointer stuff here.. (see
+ * also copy_thread)
+ */
+
+asmlinkage int h8300_fork(struct pt_regs *regs)
+{
+ return -EINVAL;
+}
+
+asmlinkage int h8300_vfork(struct pt_regs *regs)
+{
+ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0, NULL, NULL);
+}
+
+asmlinkage int h8300_clone(struct pt_regs *regs)
+{
+ unsigned long clone_flags;
+ unsigned long newsp;
+
+ /* syscall2 puts clone_flags in er1 and usp in er2 */
+ clone_flags = regs->er1;
+ newsp = regs->er2;
+ if (!newsp)
+ newsp = rdusp();
+ return do_fork(clone_flags, newsp, regs, 0, NULL, NULL);
+
+}
+
+int copy_thread(int nr, unsigned long clone_flags,
+ unsigned long usp, unsigned long topstk,
+ struct task_struct * p, struct pt_regs * regs)
+{
+ struct pt_regs * childregs;
+
+ childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
+
+ *childregs = *regs;
+ childregs->retpc = (unsigned long) ret_from_fork;
+ childregs->er0 = 0;
+
+ p->thread.usp = usp;
+ p->thread.ksp = (unsigned long)childregs;
+
+ return 0;
+}
+
+/*
+ * fill in the user structure for a core dump..
+ */
+void dump_thread(struct pt_regs * regs, struct user * dump)
+{
+/* changed the size calculations - should hopefully work better. lbt */
+ dump->magic = CMAGIC;
+ dump->start_code = 0;
+ dump->start_stack = rdusp() & ~(PAGE_SIZE - 1);
+ dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
+ dump->u_dsize = ((unsigned long) (current->mm->brk +
+ (PAGE_SIZE-1))) >> PAGE_SHIFT;
+ dump->u_dsize -= dump->u_tsize;
+ dump->u_ssize = 0;
+
+ dump->u_ar0 = (struct user_regs_struct *)(((int)(&dump->regs)) -((int)(dump)));
+ dump->regs.er0 = regs->er0;
+ dump->regs.er1 = regs->er1;
+ dump->regs.er2 = regs->er2;
+ dump->regs.er3 = regs->er3;
+ dump->regs.er4 = regs->er4;
+ dump->regs.er5 = regs->er5;
+ dump->regs.er6 = regs->er6;
+ dump->regs.orig_er0 = regs->orig_er0;
+ dump->regs.ccr = regs->ccr;
+ dump->regs.pc = regs->pc;
+}
+
+/*
+ * sys_execve() executes a new program.
+ */
+asmlinkage int sys_execve(char *name, char **argv, char **envp,int dummy,...)
+{
+ int error;
+ char * filename;
+ struct pt_regs *regs = (struct pt_regs *) ((unsigned char *)&dummy-4);
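+ /*
+ * Assumption: the syscall entry code saves the pt_regs just below the
+ * last named argument on the kernel stack; the cast above relies on
+ * that layout to hand the saved registers to do_execve().
+ */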
+
+ lock_kernel();
+ filename = getname(name);
+ error = PTR_ERR(filename);
+ if (IS_ERR(filename))
+ goto out;
+ error = do_execve(filename, argv, envp, regs);
+ putname(filename);
+out:
+ unlock_kernel();
+ return error;
+}
+
+unsigned long thread_saved_pc(struct task_struct *tsk)
+{
+ return ((struct pt_regs *)tsk->thread.esp0)->pc;
+}
+
+unsigned long get_wchan(struct task_struct *p)
+{
+ unsigned long fp, pc;
+ unsigned long stack_page;
+ int count = 0;
+ if (!p || p == current || p->state == TASK_RUNNING)
+ return 0;
+
+ stack_page = (unsigned long)p;
+ fp = ((struct pt_regs *)p->thread.ksp)->er6;
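+ /*
+ * er6 is used as the frame pointer: fp[0] holds the caller's fp and
+ * fp[1] the return address. The 8184 bound below appears to be
+ * THREAD_SIZE (8192) minus the 8 bytes of one such frame.
+ */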
+ do {
+ if (fp < stack_page+sizeof(struct thread_info) ||
+ fp >= 8184+stack_page)
+ return 0;
+ pc = ((unsigned long *)fp)[1];
+ if (!in_sched_functions(pc))
+ return pc;
+ fp = *(unsigned long *) fp;
+ } while (count++ < 16);
+ return 0;
+}
diff --git a/arch/h8300/kernel/ptrace.c b/arch/h8300/kernel/ptrace.c
new file mode 100644
index 0000000..5f19d77
--- /dev/null
+++ b/arch/h8300/kernel/ptrace.c
@@ -0,0 +1,277 @@
+/*
+ * linux/arch/h8300/kernel/ptrace.c
+ *
+ * Yoshinori Sato <ysato@users.sourceforge.jp>
+ *
+ * Based on:
+ * linux/arch/m68k/kernel/ptrace.c
+ *
+ * Copyright (C) 1994 by Hamish Macdonald
+ * Taken from linux/kernel/ptrace.c and modified for M680x0.
+ * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of
+ * this archive for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/user.h>
+#include <linux/config.h>
+
+#include <asm/uaccess.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/processor.h>
+#include <asm/signal.h>
+
+/* cpu depend functions */
+extern long h8300_get_reg(struct task_struct *task, int regno);
+extern int h8300_put_reg(struct task_struct *task, int regno, unsigned long data);
+extern void h8300_disable_trace(struct task_struct *child);
+extern void h8300_enable_trace(struct task_struct *child);
+
+/*
+ * does not yet catch signals sent when the child dies.
+ * in exit.c or in signal.c.
+ */
+
+static inline int read_long(struct task_struct *tsk, unsigned long addr,
+ unsigned long * result)
+{
+ *result = *(unsigned long *)addr;
+ return 0;
+}
+
+void ptrace_disable(struct task_struct *child)
+{
+ h8300_disable_trace(child);
+}
+
+asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
+{
+ struct task_struct *child;
+ int ret;
+
+ lock_kernel();
+ ret = -EPERM;
+ if (request == PTRACE_TRACEME) {
+ /* are we already being traced? */
+ if (current->ptrace & PT_PTRACED)
+ goto out;
+ /* set the ptrace bit in the process flags. */
+ current->ptrace |= PT_PTRACED;
+ ret = 0;
+ goto out;
+ }
+ ret = -ESRCH;
+ read_lock(&tasklist_lock);
+ child = find_task_by_pid(pid);
+ if (child)
+ get_task_struct(child);
+ read_unlock(&tasklist_lock);
+ if (!child)
+ goto out;
+
+ ret = -EPERM;
+ if (pid == 1) /* you may not mess with init */
+ goto out_tsk;
+
+ if (request == PTRACE_ATTACH) {
+ ret = ptrace_attach(child);
+ goto out_tsk;
+ }
+ ret = ptrace_check_attach(child, request == PTRACE_KILL);
+ if (ret < 0)
+ goto out_tsk;
+
+ switch (request) {
+ case PTRACE_PEEKTEXT: /* read word at location addr. */
+ case PTRACE_PEEKDATA: {
+ unsigned long tmp;
+
+ ret = read_long(child, addr, &tmp);
+ if (ret < 0)
+ break ;
+ ret = put_user(tmp, (unsigned long *) data);
+ break ;
+ }
+
+ /* read the word at location addr in the USER area. */
+ case PTRACE_PEEKUSR: {
+ unsigned long tmp = 0;
+
+ if ((addr & 3) || addr < 0 || addr >= sizeof(struct user)) {
+ ret = -EIO;
+ break ;
+ }
+
+ ret = 0; /* Default return condition */
+ addr = addr >> 2; /* temporary hack. */
+
+ if (addr < H8300_REGS_NO)
+ tmp = h8300_get_reg(child, addr);
+ else {
+ switch(addr) {
+ case 49:
+ tmp = child->mm->start_code;
+ break ;
+ case 50:
+ tmp = child->mm->start_data;
+ break ;
+ case 51:
+ tmp = child->mm->end_code;
+ break ;
+ case 52:
+ tmp = child->mm->end_data;
+ break ;
+ default:
+ ret = -EIO;
+ }
+ }
+ if (!ret)
+ ret = put_user(tmp,(unsigned long *) data);
+ break ;
+ }
+
+ /* when I and D space are separate, this will have to be fixed. */
+ case PTRACE_POKETEXT: /* write the word at location addr. */
+ case PTRACE_POKEDATA:
+ ret = 0;
+ if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
+ break;
+ ret = -EIO;
+ break;
+
+ case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
+ if ((addr & 3) || addr < 0 || addr >= sizeof(struct user)) {
+ ret = -EIO;
+ break ;
+ }
+ addr = addr >> 2; /* temporary hack. */
+
+ if (addr == PT_ORIG_ER0) {
+ ret = -EIO;
+ break ;
+ }
+ if (addr < H8300_REGS_NO) {
+ ret = h8300_put_reg(child, addr, data);
+ break ;
+ }
+ ret = -EIO;
+ break ;
+ case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
+ case PTRACE_CONT: { /* restart after signal. */
+ ret = -EIO;
+ if ((unsigned long) data >= _NSIG)
+ break ;
+ if (request == PTRACE_SYSCALL)
+ set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ else
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ child->exit_code = data;
+ wake_up_process(child);
+ /* make sure the single step bit is not set. */
+ h8300_disable_trace(child);
+ ret = 0;
+ break;
+ }
+
+/*
+ * make the child exit. Best I can do is send it a sigkill.
+ * perhaps it should be put in the status that it wants to
+ * exit.
+ */
+ case PTRACE_KILL: {
+
+ ret = 0;
+ if (child->exit_state == EXIT_ZOMBIE) /* already dead */
+ break;
+ child->exit_code = SIGKILL;
+ h8300_disable_trace(child);
+ wake_up_process(child);
+ break;
+ }
+
+ case PTRACE_SINGLESTEP: { /* set the trap flag. */
+ ret = -EIO;
+ if ((unsigned long) data > _NSIG)
+ break;
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ child->exit_code = data;
+ h8300_enable_trace(child);
+ wake_up_process(child);
+ ret = 0;
+ break;
+ }
+
+ case PTRACE_DETACH: /* detach a process that was attached. */
+ ret = ptrace_detach(child, data);
+ break;
+
+ case PTRACE_GETREGS: { /* Get all gp regs from the child. */
+ int i;
+ unsigned long tmp;
+ for (i = 0; i < H8300_REGS_NO; i++) {
+ tmp = h8300_get_reg(child, i);
+ if (put_user(tmp, (unsigned long *) data)) {
+ ret = -EFAULT;
+ break;
+ }
+ data += sizeof(long);
+ }
+ ret = 0;
+ break;
+ }
+
+ case PTRACE_SETREGS: { /* Set all gp regs in the child. */
+ int i;
+ unsigned long tmp;
+ for (i = 0; i < H8300_REGS_NO; i++) {
+ if (get_user(tmp, (unsigned long *) data)) {
+ ret = -EFAULT;
+ break;
+ }
+ h8300_put_reg(child, i, tmp);
+ data += sizeof(long);
+ }
+ ret = 0;
+ break;
+ }
+
+ default:
+ ret = -EIO;
+ break;
+ }
+out_tsk:
+ put_task_struct(child);
+out:
+ unlock_kernel();
+ return ret;
+}
+
+asmlinkage void syscall_trace(void)
+{
+ if (!test_thread_flag(TIF_SYSCALL_TRACE))
+ return;
+ if (!(current->ptrace & PT_PTRACED))
+ return;
+ ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
+ ? 0x80 : 0));
+ /*
+ * this isn't the same as continuing with a signal, but it will do
+ * for normal use. strace only continues with a signal if the
+ * stopping signal is not SIGTRAP. -brl
+ */
+ if (current->exit_code) {
+ send_sig(current->exit_code, current, 1);
+ current->exit_code = 0;
+ }
+}
diff --git a/arch/h8300/kernel/semaphore.c b/arch/h8300/kernel/semaphore.c
new file mode 100644
index 0000000..1ebb79b
--- /dev/null
+++ b/arch/h8300/kernel/semaphore.c
@@ -0,0 +1,133 @@
+/*
+ * Generic semaphore code. Buyer beware. Do your own
+ * specific changes in <asm/semaphore-helper.h>
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <asm/semaphore-helper.h>
+
+#ifndef CONFIG_RMW_INSNS
+spinlock_t semaphore_wake_lock;
+#endif
+
+/*
+ * Semaphores are implemented using a two-way counter:
+ * The "count" variable is decremented for each process
+ * that tries to sleep, while the "waking" variable is
+ * incremented when the "up()" code goes to wake up waiting
+ * processes.
+ *
+ * Notably, the inline "up()" and "down()" functions can
+ * efficiently test if they need to do any extra work (up
+ * needs to do something only if count was negative before
+ * the increment operation).
+ *
+ * waking_non_zero() (from asm/semaphore.h) must execute
+ * atomically.
+ *
+ * When __up() is called, the count was negative before
+ * incrementing it, and we need to wake up somebody.
+ *
+ * This routine adds one to the count of processes that need to
+ * wake up and exit. ALL waiting processes actually wake up but
+ * only the one that gets to the "waking" field first will gate
+ * through and acquire the semaphore. The others will go back
+ * to sleep.
+ *
+ * Note that these functions are only called when there is
+ * contention on the lock, and as such all this is the
+ * "non-critical" part of the whole semaphore business. The
+ * critical part is the inline stuff in <asm/semaphore.h>
+ * where we want to avoid any extra jumps and calls.
+ */
+void __up(struct semaphore *sem)
+{
+ wake_one_more(sem);
+ wake_up(&sem->wait);
+}
+
+/*
+ * Perform the "down" function. Return zero for semaphore acquired,
+ * return negative for signalled out of the function.
+ *
+ * If called from __down, the return is ignored and the wait loop is
+ * not interruptible. This means that a task waiting on a semaphore
+ * using "down()" cannot be killed until someone does an "up()" on
+ * the semaphore.
+ *
+ * If called from __down_interruptible, the return value gets checked
+ * upon return. If the return value is negative then the task continues
+ * with the negative value in the return register (it can be tested by
+ * the caller).
+ *
+ * Either form may be used in conjunction with "up()".
+ *
+ */
+
+
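+/*
+ * DOWN_HEAD opens a wait loop and DOWN_TAIL closes it; each caller supplies
+ * the loop body (its wake-up test plus schedule()) in between, so __down()
+ * and __down_interruptible() share one sleep/wake skeleton.
+ */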
+#define DOWN_HEAD(task_state) \
+ \
+ \
+ current->state = (task_state); \
+ add_wait_queue(&sem->wait, &wait); \
+ \
+ /* \
+ * Ok, we're set up. sem->count is known to be less than zero \
+ * so we must wait. \
+ * \
+ * We can let go the lock for purposes of waiting. \
+ * We re-acquire it after awaking so as to protect \
+ * all semaphore operations. \
+ * \
+ * If "up()" is called before we call waking_non_zero() then \
+ * we will catch it right away. If it is called later then \
+ * we will have to go through a wakeup cycle to catch it. \
+ * \
+ * Multiple waiters contend for the semaphore lock to see \
+ * who gets to gate through and who has to wait some more. \
+ */ \
+ for (;;) {
+
+#define DOWN_TAIL(task_state) \
+ current->state = (task_state); \
+ } \
+ current->state = TASK_RUNNING; \
+ remove_wait_queue(&sem->wait, &wait);
+
+void __sched __down(struct semaphore * sem)
+{
+ DECLARE_WAITQUEUE(wait, current);
+
+ DOWN_HEAD(TASK_UNINTERRUPTIBLE)
+ if (waking_non_zero(sem))
+ break;
+ schedule();
+ DOWN_TAIL(TASK_UNINTERRUPTIBLE)
+}
+
+int __sched __down_interruptible(struct semaphore * sem)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ int ret = 0;
+
+ DOWN_HEAD(TASK_INTERRUPTIBLE)
+
+ ret = waking_non_zero_interruptible(sem, current);
+ if (ret)
+ {
+ if (ret == 1)
+ /* ret != 0 only if we get interrupted -arca */
+ ret = 0;
+ break;
+ }
+ schedule();
+ DOWN_TAIL(TASK_INTERRUPTIBLE)
+ return ret;
+}
+
+int __down_trylock(struct semaphore * sem)
+{
+ return waking_non_zero_trylock(sem);
+}
diff --git a/arch/h8300/kernel/setup.c b/arch/h8300/kernel/setup.c
new file mode 100644
index 0000000..f469d91
--- /dev/null
+++ b/arch/h8300/kernel/setup.c
@@ -0,0 +1,248 @@
+/*
+ * linux/arch/h8300/kernel/setup.c
+ *
+ * Copyleft ()) 2000 James D. Schettine {james@telos-systems.com}
+ * Copyright (C) 1999,2000 Greg Ungerer (gerg@snapgear.com)
+ * Copyright (C) 1998,1999 D. Jeff Dionne <jeff@lineo.ca>
+ * Copyright (C) 1998 Kenneth Albanowski <kjahds@kjahds.com>
+ * Copyright (C) 1995 Hamish Macdonald
+ * Copyright (C) 2000 Lineo Inc. (www.lineo.com)
+ * Copyright (C) 2001 Lineo, Inc. <www.lineo.com>
+ *
+ * H8/300 porting Yoshinori Sato <ysato@users.sourceforge.jp>
+ */
+
+/*
+ * This file handles the architecture-dependent parts of system setup
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/fs.h>
+#include <linux/fb.h>
+#include <linux/console.h>
+#include <linux/genhd.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/major.h>
+#include <linux/bootmem.h>
+#include <linux/seq_file.h>
+#include <linux/init.h>
+
+#include <asm/setup.h>
+#include <asm/irq.h>
+
+#ifdef CONFIG_BLK_DEV_INITRD
+#include <asm/pgtable.h>
+#endif
+
+#if defined(__H8300H__)
+#define CPU "H8/300H"
+#include <asm/regs306x.h>
+#endif
+
+#if defined(__H8300S__)
+#define CPU "H8S"
+#include <asm/regs267x.h>
+#endif
+
+#define STUBSIZE 0xc000
+
+unsigned long rom_length;
+unsigned long memory_start;
+unsigned long memory_end;
+
+char command_line[COMMAND_LINE_SIZE];
+
+extern int _stext, _etext, _sdata, _edata, _sbss, _ebss, _end;
+extern int _ramstart, _ramend;
+extern char _target_name[];
+extern void h8300_gpio_init(void);
+
+#if (defined(CONFIG_H8300H_SIM) || defined(CONFIG_H8S_SIM)) \
+ && defined(CONFIG_GDB_MAGICPRINT)
+/* printk with gdb service */
+static void gdb_console_output(struct console *c, const char *msg, unsigned len)
+{
+ for (; len > 0; len--) {
+ asm("mov.w %0,r2\n\t"
+ "jsr @0xc4"::"r"(*msg++):"er2");
+ }
+}
+
+/*
+ * Setup initial baud/bits/parity. We do two things here:
+ * - construct a cflag setting for the first rs_open()
+ * - initialize the serial port
+ * Return non-zero if we didn't find a serial port.
+ */
+static int __init gdb_console_setup(struct console *co, char *options)
+{
+ return 0;
+}
+
+static const struct console gdb_console = {
+ .name = "gdb_con",
+ .write = gdb_console_output,
+ .device = NULL,
+ .setup = gdb_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
+#endif
+
+void __init setup_arch(char **cmdline_p)
+{
+ int bootmap_size;
+
+ memory_start = (unsigned long) &_ramstart;
+
+ /* allow for ROMFS on the end of the kernel */
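+ /*
+ * A romfs image starts with the 8-byte magic "-rom1fs-" followed by the
+ * full image size as a big-endian 32-bit word, hence word index 2 below.
+ */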
+ if (memcmp((void *)memory_start, "-rom1fs-", 8) == 0) {
+#if defined(CONFIG_BLK_DEV_INITRD)
+ initrd_start = memory_start;
+ initrd_end = memory_start += be32_to_cpu(((unsigned long *) (memory_start))[2]);
+#else
+ memory_start += be32_to_cpu(((unsigned long *) memory_start)[2]);
+#endif
+ }
+ memory_start = PAGE_ALIGN(memory_start);
+#if !defined(CONFIG_BLKDEV_RESERVE)
+ memory_end = (unsigned long) &_ramend; /* by now the stack is part of the init task */
+#if defined(CONFIG_GDB_DEBUG)
+ memory_end -= STUBSIZE;
+#endif
+#else
+ if (memory_end > CONFIG_BLKDEV_RESERVE_ADDRESS)
+ /* don't let kernel memory overlap the reserved block-device area */
+ memory_end = CONFIG_BLKDEV_RESERVE_ADDRESS;
+#endif
+
+ init_mm.start_code = (unsigned long) &_stext;
+ init_mm.end_code = (unsigned long) &_etext;
+ init_mm.end_data = (unsigned long) &_edata;
+ init_mm.brk = (unsigned long) 0;
+
+#if (defined(CONFIG_H8300H_SIM) || defined(CONFIG_H8S_SIM)) && defined(CONFIG_GDB_MAGICPRINT)
+ register_console((struct console *)&gdb_console);
+#endif
+
+ printk(KERN_INFO "\r\n\nuClinux " CPU "\n");
+ printk(KERN_INFO "Target Hardware: %s\n",_target_name);
+ printk(KERN_INFO "Flat model support (C) 1998,1999 Kenneth Albanowski, D. Jeff Dionne\n");
+ printk(KERN_INFO "H8/300 series support by Yoshinori Sato <ysato@users.sourceforge.jp>\n");
+
+#ifdef DEBUG
+ printk(KERN_DEBUG "KERNEL -> TEXT=0x%06x-0x%06x DATA=0x%06x-0x%06x "
+ "BSS=0x%06x-0x%06x\n", (int) &_stext, (int) &_etext,
+ (int) &_sdata, (int) &_edata,
+ (int) &_sbss, (int) &_ebss);
+ printk(KERN_DEBUG "KERNEL -> ROMFS=0x%06x-0x%06x MEM=0x%06x-0x%06x "
+ "STACK=0x%06x-0x%06x\n",
+ (int) &_ebss, (int) memory_start,
+ (int) memory_start, (int) memory_end,
+ (int) memory_end, (int) &_ramend);
+#endif
+
+#ifdef CONFIG_DEFAULT_CMDLINE
+ /* set from default command line */
+ if (*command_line == '\0')
+ strcpy(command_line,CONFIG_KERNEL_COMMAND);
+#endif
+ /* Keep a copy of command line */
+ *cmdline_p = &command_line[0];
+ memcpy(saved_command_line, command_line, COMMAND_LINE_SIZE);
+ saved_command_line[COMMAND_LINE_SIZE-1] = 0;
+
+#ifdef DEBUG
+ if (strlen(*cmdline_p))
+ printk(KERN_DEBUG "Command line: '%s'\n", *cmdline_p);
+#endif
+
+ /*
+ * give all the memory to the bootmap allocator, tell it to put the
+ * boot mem_map at the start of memory
+ */
+ bootmap_size = init_bootmem_node(
+ NODE_DATA(0),
+ memory_start >> PAGE_SHIFT, /* map goes here */
+ PAGE_OFFSET >> PAGE_SHIFT, /* 0 on coldfire */
+ memory_end >> PAGE_SHIFT);
+ /*
+ * free the usable memory, we have to make sure we do not free
+ * the bootmem bitmap so we then reserve it after freeing it :-)
+ */
+ free_bootmem(memory_start, memory_end - memory_start);
+ reserve_bootmem(memory_start, bootmap_size);
+ /*
+ * get kmalloc into gear
+ */
+ paging_init();
+ h8300_gpio_init();
+#if defined(CONFIG_H8300_AKI3068NET) && defined(CONFIG_IDE)
+ {
+#define AREABIT(addr) (1 << (((addr) >> 21) & 7))
+ /* setup BSC */
+ volatile unsigned char *abwcr = (volatile unsigned char *)ABWCR;
+ volatile unsigned char *cscr = (volatile unsigned char *)CSCR;
+ *abwcr &= ~(AREABIT(CONFIG_H8300_IDE_BASE) | AREABIT(CONFIG_H8300_IDE_ALT));
+ *cscr |= (AREABIT(CONFIG_H8300_IDE_BASE) | AREABIT(CONFIG_H8300_IDE_ALT)) | 0x0f;
+ }
+#endif
+#ifdef DEBUG
+ printk(KERN_DEBUG "Done setup_arch\n");
+#endif
+}
+
+/*
+ * Get CPU information for use by the procfs.
+ */
+
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+ char *cpu;
+ int mode;
+ u_long clockfreq;
+
+ cpu = CPU;
+ mode = *(volatile unsigned char *)MDCR & 0x07;
+
+ clockfreq = CONFIG_CPU_CLOCK;
+
+ seq_printf(m, "CPU:\t\t%s (mode:%d)\n"
+ "Clock:\t\t%lu.%1luMHz\n"
+ "BogoMips:\t%lu.%02lu\n"
+ "Calibration:\t%lu loops\n",
+ cpu,mode,
+ clockfreq/1000,clockfreq%1000,
+ (loops_per_jiffy*HZ)/500000,((loops_per_jiffy*HZ)/5000)%100,
+ (loops_per_jiffy*HZ));
+
+ return 0;
+}
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
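+ /* any non-NULL cookie will do here; show_cpuinfo() never looks at v */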
+ return *pos < NR_CPUS ? ((void *) 0x12345678) : NULL;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ ++*pos;
+ return c_start(m, pos);
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+struct seq_operations cpuinfo_op = {
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = show_cpuinfo,
+};
diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c
new file mode 100644
index 0000000..a4799d6
--- /dev/null
+++ b/arch/h8300/kernel/signal.c
@@ -0,0 +1,552 @@
+/*
+ * linux/arch/h8300/kernel/signal.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+/*
+ * uClinux H8/300 support by Yoshinori Sato <ysato@users.sourceforge.jp>
+ * and David McCullough <davidm@snapgear.com>
+ *
+ * Based on
+ * Linux/m68k by Hamish Macdonald
+ */
+
+/*
+ * ++roman (07/09/96): implemented signal stacks (specially for tosemu on
+ * Atari :-) Current limitation: Only one sigstack can be active at one time.
+ * If a second signal with SA_ONSTACK set arrives while working on a sigstack,
+ * SA_ONSTACK is ignored. This behaviour avoids lots of trouble with nested
+ * signal handlers!
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/syscalls.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+#include <linux/highuid.h>
+#include <linux/personality.h>
+#include <linux/tty.h>
+#include <linux/binfmts.h>
+#include <linux/suspend.h>
+
+#include <asm/setup.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/traps.h>
+#include <asm/ucontext.h>
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
+
+/*
+ * Atomically swap in the new signal mask, and wait for a signal.
+ */
+asmlinkage int do_sigsuspend(struct pt_regs *regs)
+{
+ old_sigset_t mask = regs->er3;
+ sigset_t saveset;
+
+ mask &= _BLOCKABLE;
+ spin_lock_irq(&current->sighand->siglock);
+ saveset = current->blocked;
+ siginitset(&current->blocked, mask);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ regs->er0 = -EINTR;
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ if (do_signal(regs, &saveset))
+ return -EINTR;
+ }
+}
+
+asmlinkage int
+do_rt_sigsuspend(struct pt_regs *regs)
+{
+ sigset_t *unewset = (sigset_t *)regs->er1;
+ size_t sigsetsize = (size_t)regs->er2;
+ sigset_t saveset, newset;
+
+ /* XXX: Don't preclude handling different sized sigset_t's. */
+ if (sigsetsize != sizeof(sigset_t))
+ return -EINVAL;
+
+ if (copy_from_user(&newset, unewset, sizeof(newset)))
+ return -EFAULT;
+ sigdelsetmask(&newset, ~_BLOCKABLE);
+
+ spin_lock_irq(&current->sighand->siglock);
+ saveset = current->blocked;
+ current->blocked = newset;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ regs->er0 = -EINTR;
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ if (do_signal(regs, &saveset))
+ return -EINTR;
+ }
+}
+
+asmlinkage int
+sys_sigaction(int sig, const struct old_sigaction *act,
+ struct old_sigaction *oact)
+{
+ struct k_sigaction new_ka, old_ka;
+ int ret;
+
+ if (act) {
+ old_sigset_t mask;
+ if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
+ __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
+ __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+ return -EFAULT;
+ __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+ __get_user(mask, &act->sa_mask);
+ siginitset(&new_ka.sa.sa_mask, mask);
+ }
+
+ ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+ if (!ret && oact) {
+ if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
+ __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
+ __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+ return -EFAULT;
+ __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+ __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+ }
+
+ return ret;
+}
+
+asmlinkage int
+sys_sigaltstack(const stack_t *uss, stack_t *uoss)
+{
+ return do_sigaltstack(uss, uoss, rdusp());
+}
+
+
+/*
+ * Do a signal return; undo the signal stack.
+ *
+ * Keep the return code on the stack quadword aligned!
+ * That makes the cache flush below easier.
+ */
+
+struct sigframe
+{
+ long dummy_er0;
+ long dummy_vector;
+#if defined(CONFIG_CPU_H8S)
+ short dummy_exr;
+#endif
+ long dummy_pc;
+ char *pretcode;
+ unsigned char retcode[8];
+ unsigned long extramask[_NSIG_WORDS-1];
+ struct sigcontext sc;
+ int sig;
+} __attribute__((aligned(2),packed));
+
+struct rt_sigframe
+{
+ long dummy_er0;
+ long dummy_vector;
+#if defined(CONFIG_CPU_H8S)
+ short dummy_exr;
+#endif
+ long dummy_pc;
+ char *pretcode;
+ struct siginfo *pinfo;
+ void *puc;
+ unsigned char retcode[8];
+ struct siginfo info;
+ struct ucontext uc;
+ int sig;
+} __attribute__((aligned(2),packed));
+
+static inline int
+restore_sigcontext(struct pt_regs *regs, struct sigcontext *usc,
+ int *pd0)
+{
+ int err = 0;
+ unsigned int ccr;
+ unsigned int usp;
+ unsigned int er0;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+#define COPY(r) err |= __get_user(regs->r, &usc->sc_##r) /* restore passed registers */
+ COPY(er1);
+ COPY(er2);
+ COPY(er3);
+ COPY(er5);
+ COPY(pc);
+ ccr = regs->ccr & 0x10;
+ COPY(ccr);
+#undef COPY
+ regs->ccr &= 0xef;
+ regs->ccr |= ccr;
+ regs->orig_er0 = -1; /* disable syscall checks */
+ err |= __get_user(usp, &usc->sc_usp);
+ wrusp(usp);
+
+ err |= __get_user(er0, &usc->sc_er0);
+ *pd0 = er0;
+ return err;
+}
+
+asmlinkage int do_sigreturn(unsigned long __unused,...)
+{
+ struct pt_regs *regs = (struct pt_regs *) (&__unused - 1);
+ unsigned long usp = rdusp();
+ struct sigframe *frame = (struct sigframe *)(usp - 4);
+ sigset_t set;
+ int er0;
+
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+ if (__get_user(set.sig[0], &frame->sc.sc_mask) ||
+ (_NSIG_WORDS > 1 &&
+ __copy_from_user(&set.sig[1], &frame->extramask,
+ sizeof(frame->extramask))))
+ goto badframe;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sighand->siglock);
+ current->blocked = set;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ if (restore_sigcontext(regs, &frame->sc, &er0))
+ goto badframe;
+ return er0;
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+asmlinkage int do_rt_sigreturn(unsigned long __unused,...)
+{
+ struct pt_regs *regs = (struct pt_regs *) &__unused;
+ unsigned long usp = rdusp();
+ struct rt_sigframe *frame = (struct rt_sigframe *)(usp - 4);
+ sigset_t set;
+ int er0;
+
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ goto badframe;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sighand->siglock);
+ current->blocked = set;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &er0))
+ goto badframe;
+
+ if (do_sigaltstack(&frame->uc.uc_stack, NULL, usp) == -EFAULT)
+ goto badframe;
+
+ return er0;
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+static int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
+ unsigned long mask)
+{
+ int err = 0;
+
+ err |= __put_user(regs->er0, &sc->sc_er0);
+ err |= __put_user(regs->er1, &sc->sc_er1);
+ err |= __put_user(regs->er2, &sc->sc_er2);
+ err |= __put_user(regs->er3, &sc->sc_er3);
+ err |= __put_user(regs->er4, &sc->sc_er4);
+ err |= __put_user(regs->er5, &sc->sc_er5);
+ err |= __put_user(regs->er6, &sc->sc_er6);
+ err |= __put_user(rdusp(), &sc->sc_usp);
+ err |= __put_user(regs->pc, &sc->sc_pc);
+ err |= __put_user(regs->ccr, &sc->sc_ccr);
+ err |= __put_user(mask, &sc->sc_mask);
+
+ return err;
+}
+
+static inline void *
+get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
+{
+ unsigned long usp;
+
+ /* Default to using normal stack. */
+ usp = rdusp();
+
+ /* This is the X/Open sanctioned signal stack switching. */
+ if (ka->sa.sa_flags & SA_ONSTACK) {
+ if (!on_sig_stack(usp))
+ usp = current->sas_ss_sp + current->sas_ss_size;
+ }
+ return (void *)((usp - frame_size) & -8UL);
+}
+
+static void setup_frame (int sig, struct k_sigaction *ka,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct sigframe *frame;
+ int err = 0;
+ int usig;
+ unsigned char *ret;
+
+ frame = get_sigframe(ka, regs, sizeof(*frame));
+
+ if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+ goto give_sigsegv;
+
+ usig = current_thread_info()->exec_domain
+ && current_thread_info()->exec_domain->signal_invmap
+ && sig < 32
+ ? current_thread_info()->exec_domain->signal_invmap[sig]
+ : sig;
+
+ err |= __put_user(usig, &frame->sig);
+ if (err)
+ goto give_sigsegv;
+
+ err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);
+ if (err)
+ goto give_sigsegv;
+
+ if (_NSIG_WORDS > 1) {
+ err |= copy_to_user(frame->extramask, &set->sig[1],
+ sizeof(frame->extramask));
+ if (err)
+ goto give_sigsegv;
+ }
+
+ ret = frame->retcode;
+ if (ka->sa.sa_flags & SA_RESTORER)
+ ret = (unsigned char *)(ka->sa.sa_restorer);
+ else {
+ /* sub.l er0,er0; mov.b #__NR_sigreturn,r0l; trapa #0 */
+ err |= __put_user(0x1a80f800 + (__NR_sigreturn & 0xff),
+ (unsigned long *)(frame->retcode + 0));
+ err |= __put_user(0x5700, (unsigned short *)(frame->retcode + 4));
+ }
+
+ /* Set up to return from userspace. */
+ err |= __put_user(ret, &frame->pretcode);
+
+ if (err)
+ goto give_sigsegv;
+
+ /* Set up registers for signal handler */
+ wrusp ((unsigned long) frame);
+ regs->pc = (unsigned long) ka->sa.sa_handler;
+ regs->er0 = (current_thread_info()->exec_domain
+ && current_thread_info()->exec_domain->signal_invmap
+ && sig < 32
+ ? current_thread_info()->exec_domain->signal_invmap[sig]
+ : sig);
+ regs->er1 = (unsigned long)&(frame->sc);
+ regs->er5 = current->mm->start_data; /* GOT base */
+
+ return;
+
+give_sigsegv:
+ force_sigsegv(sig, current);
+}
+
+static void setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct rt_sigframe *frame;
+ int err = 0;
+ int usig;
+ unsigned char *ret;
+
+ frame = get_sigframe(ka, regs, sizeof(*frame));
+
+ if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+ goto give_sigsegv;
+
+ usig = current_thread_info()->exec_domain
+ && current_thread_info()->exec_domain->signal_invmap
+ && sig < 32
+ ? current_thread_info()->exec_domain->signal_invmap[sig]
+ : sig;
+
+ err |= __put_user(usig, &frame->sig);
+ if (err)
+ goto give_sigsegv;
+
+ err |= __put_user(&frame->info, &frame->pinfo);
+ err |= __put_user(&frame->uc, &frame->puc);
+ err |= copy_siginfo_to_user(&frame->info, info);
+ if (err)
+ goto give_sigsegv;
+
+ /* Create the ucontext. */
+ err |= __put_user(0, &frame->uc.uc_flags);
+ err |= __put_user(0, &frame->uc.uc_link);
+ err |= __put_user((void *)current->sas_ss_sp,
+ &frame->uc.uc_stack.ss_sp);
+ err |= __put_user(sas_ss_flags(rdusp()),
+ &frame->uc.uc_stack.ss_flags);
+ err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]);
+ err |= copy_to_user (&frame->uc.uc_sigmask, set, sizeof(*set));
+ if (err)
+ goto give_sigsegv;
+
+ /* Set up to return from userspace. */
+ ret = frame->retcode;
+ if (ka->sa.sa_flags & SA_RESTORER)
+ ret = (unsigned char *)(ka->sa.sa_restorer);
+ else {
+ /* sub.l er0,er0; mov.b #__NR_rt_sigreturn,r0l; trapa #0 */
+ err |= __put_user(0x1a80f800 + (__NR_rt_sigreturn & 0xff),
+ (unsigned long *)(frame->retcode + 0));
+ err |= __put_user(0x5700, (unsigned short *)(frame->retcode + 4));
+ }
+ err |= __put_user(ret, &frame->pretcode);
+
+ if (err)
+ goto give_sigsegv;
+
+ /* Set up registers for signal handler */
+ wrusp ((unsigned long) frame);
+ regs->pc = (unsigned long) ka->sa.sa_handler;
+ regs->er0 = (current_thread_info()->exec_domain
+ && current_thread_info()->exec_domain->signal_invmap
+ && sig < 32
+ ? current_thread_info()->exec_domain->signal_invmap[sig]
+ : sig);
+ regs->er1 = (unsigned long)&(frame->info);
+ regs->er2 = (unsigned long)&frame->uc;
+ regs->er5 = current->mm->start_data; /* GOT base */
+
+ return;
+
+give_sigsegv:
+ force_sigsegv(sig, current);
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+static void
+handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
+ sigset_t *oldset, struct pt_regs * regs)
+{
+ /* are we from a system call? */
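+ /* er0 holds the system call's return value here; depending on that
+ * value and SA_RESTART we either report -EINTR to the handler or
+ * arrange for the interrupted call to be restarted below. */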
+ if (regs->orig_er0 >= 0) {
+ switch (regs->er0) {
+ case -ERESTART_RESTARTBLOCK:
+ case -ERESTARTNOHAND:
+ regs->er0 = -EINTR;
+ break;
+
+ case -ERESTARTSYS:
+ if (!(ka->sa.sa_flags & SA_RESTART)) {
+ regs->er0 = -EINTR;
+ break;
+ }
+ /* fallthrough */
+ case -ERESTARTNOINTR:
+ regs->er0 = regs->orig_er0;
+ regs->pc -= 2;
+ }
+ }
+
+ /* set up the stack frame */
+ if (ka->sa.sa_flags & SA_SIGINFO)
+ setup_rt_frame(sig, ka, info, oldset, regs);
+ else
+ setup_frame(sig, ka, oldset, regs);
+
+ if (!(ka->sa.sa_flags & SA_NODEFER)) {
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ sigaddset(&current->blocked,sig);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+ }
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ */
+asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset)
+{
+ siginfo_t info;
+ int signr;
+ struct k_sigaction ka;
+
+ /*
+ * We want the common case to go fast, which
+ * is why we may in certain cases get here from
+ * kernel mode. Just return without doing anything
+ * if so.
+ */
+ if ((regs->ccr & 0x10))
+ return 1;
+
+ if (current->flags & PF_FREEZE) {
+ refrigerator(0);
+ goto no_signal;
+ }
+
+ current->thread.esp0 = (unsigned long) regs;
+
+ if (!oldset)
+ oldset = &current->blocked;
+
+ signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+ if (signr > 0) {
+ /* Whee! Actually deliver the signal. */
+ handle_signal(signr, &info, &ka, oldset, regs);
+ return 1;
+ }
+ no_signal:
+ /* Did we come from a system call? */
+ if (regs->orig_er0 >= 0) {
+ /* Restart the system call - no handlers present */
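+ /* Backing the PC up over the two-byte trap instruction makes the
+ * system call execute again once we return to user space. */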
+ if (regs->er0 == -ERESTARTNOHAND ||
+ regs->er0 == -ERESTARTSYS ||
+ regs->er0 == -ERESTARTNOINTR) {
+ regs->er0 = regs->orig_er0;
+ regs->pc -= 2;
+ }
+ if (regs->er0 == -ERESTART_RESTARTBLOCK){
+ regs->er0 = __NR_restart_syscall;
+ regs->pc -= 2;
+ }
+ }
+ return 0;
+}
diff --git a/arch/h8300/kernel/sys_h8300.c b/arch/h8300/kernel/sys_h8300.c
new file mode 100644
index 0000000..0f61b7a
--- /dev/null
+++ b/arch/h8300/kernel/sys_h8300.c
@@ -0,0 +1,282 @@
+/*
+ * linux/arch/h8300/kernel/sys_h8300.c
+ *
+ * This file contains various random system calls that
+ * have a non-standard calling sequence on the H8/300
+ * platform.
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/stat.h>
+#include <linux/syscalls.h>
+#include <linux/mman.h>
+#include <linux/file.h>
+#include <linux/utsname.h>
+
+#include <asm/setup.h>
+#include <asm/uaccess.h>
+#include <asm/cachectl.h>
+#include <asm/traps.h>
+#include <asm/ipc.h>
+
+/*
+ * sys_pipe() is the normal C calling standard for creating
+ * a pipe. It's not the way unix traditionally does this, though.
+ */
+asmlinkage int sys_pipe(unsigned long * fildes)
+{
+ int fd[2];
+ int error;
+
+ error = do_pipe(fd);
+ if (!error) {
+ if (copy_to_user(fildes, fd, 2*sizeof(int)))
+ error = -EFAULT;
+ }
+ return error;
+}
+
+/* common code for old and new mmaps */
+static inline long do_mmap2(
+ unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff)
+{
+ int error = -EBADF;
+ struct file * file = NULL;
+
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+ goto out;
+ }
+
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+ up_write(&current->mm->mmap_sem);
+
+ if (file)
+ fput(file);
+out:
+ return error;
+}
+
+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff)
+{
+ return do_mmap2(addr, len, prot, flags, fd, pgoff);
+}
+
+/*
+ * Perform the select(nd, in, out, ex, tv) and mmap() system
+ * calls. Linux/m68k cloned Linux/i386, which historically could not
+ * handle more than 4 system call parameters, so these system calls
+ * used a memory block for parameter passing.
+ */
+
+struct mmap_arg_struct {
+ unsigned long addr;
+ unsigned long len;
+ unsigned long prot;
+ unsigned long flags;
+ unsigned long fd;
+ unsigned long offset;
+};
+
+asmlinkage int old_mmap(struct mmap_arg_struct *arg)
+{
+ struct mmap_arg_struct a;
+ int error = -EFAULT;
+
+ if (copy_from_user(&a, arg, sizeof(a)))
+ goto out;
+
+ error = -EINVAL;
+ if (a.offset & ~PAGE_MASK)
+ goto out;
+
+ a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+
+ error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
+out:
+ return error;
+}
+
+#if 0 /* DAVIDM - do we want this */
+struct mmap_arg_struct64 {
+ __u32 addr;
+ __u32 len;
+ __u32 prot;
+ __u32 flags;
+ __u64 offset; /* 64 bits */
+ __u32 fd;
+};
+
+asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg)
+{
+ int error = -EFAULT;
+ struct file * file = NULL;
+ struct mmap_arg_struct64 a;
+ unsigned long pgoff;
+
+ if (copy_from_user(&a, arg, sizeof(a)))
+ return -EFAULT;
+
+ if ((long)a.offset & ~PAGE_MASK)
+ return -EINVAL;
+
+ pgoff = a.offset >> PAGE_SHIFT;
+ if ((a.offset >> PAGE_SHIFT) != pgoff)
+ return -EINVAL;
+
+ if (!(a.flags & MAP_ANONYMOUS)) {
+ error = -EBADF;
+ file = fget(a.fd);
+ if (!file)
+ goto out;
+ }
+ a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff);
+ up_write(&current->mm->mmap_sem);
+ if (file)
+ fput(file);
+out:
+ return error;
+}
+#endif
+
+struct sel_arg_struct {
+ unsigned long n;
+ fd_set *inp, *outp, *exp;
+ struct timeval *tvp;
+};
+
+asmlinkage int old_select(struct sel_arg_struct *arg)
+{
+ struct sel_arg_struct a;
+
+ if (copy_from_user(&a, arg, sizeof(a)))
+ return -EFAULT;
+ /* sys_select() does the appropriate kernel locking */
+ return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
+}
+
+/*
+ * sys_ipc() is the de-multiplexer for the SysV IPC calls.
+ *
+ * This is really horribly ugly.
+ */
+asmlinkage int sys_ipc (uint call, int first, int second,
+ int third, void *ptr, long fifth)
+{
+ int version, ret;
+
+ version = call >> 16; /* hack for backward compatibility */
+ call &= 0xffff;
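+ /* The low 16 bits select the IPC operation; the high 16 bits carry
+ * an interface version, used below to choose between the old and
+ * new MSGRCV argument-passing conventions. */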
+
+ if (call <= SEMCTL)
+ switch (call) {
+ case SEMOP:
+ return sys_semop (first, (struct sembuf *)ptr, second);
+ case SEMGET:
+ return sys_semget (first, second, third);
+ case SEMCTL: {
+ union semun fourth;
+ if (!ptr)
+ return -EINVAL;
+ if (get_user(fourth.__pad, (void **) ptr))
+ return -EFAULT;
+ return sys_semctl (first, second, third, fourth);
+ }
+ default:
+ return -EINVAL;
+ }
+ if (call <= MSGCTL)
+ switch (call) {
+ case MSGSND:
+ return sys_msgsnd (first, (struct msgbuf *) ptr,
+ second, third);
+ case MSGRCV:
+ switch (version) {
+ case 0: {
+ struct ipc_kludge tmp;
+ if (!ptr)
+ return -EINVAL;
+ if (copy_from_user (&tmp,
+ (struct ipc_kludge *)ptr,
+ sizeof (tmp)))
+ return -EFAULT;
+ return sys_msgrcv (first, tmp.msgp, second,
+ tmp.msgtyp, third);
+ }
+ default:
+ return sys_msgrcv (first,
+ (struct msgbuf *) ptr,
+ second, fifth, third);
+ }
+ case MSGGET:
+ return sys_msgget ((key_t) first, second);
+ case MSGCTL:
+ return sys_msgctl (first, second,
+ (struct msqid_ds *) ptr);
+ default:
+ return -EINVAL;
+ }
+ if (call <= SHMCTL)
+ switch (call) {
+ case SHMAT:
+ switch (version) {
+ default: {
+ ulong raddr;
+ ret = do_shmat (first, (char *) ptr,
+ second, &raddr);
+ if (ret)
+ return ret;
+ return put_user (raddr, (ulong *) third);
+ }
+ }
+ case SHMDT:
+ return sys_shmdt ((char *)ptr);
+ case SHMGET:
+ return sys_shmget (first, second, third);
+ case SHMCTL:
+ return sys_shmctl (first, second,
+ (struct shmid_ds *) ptr);
+ default:
+ return -EINVAL;
+ }
+
+ return -EINVAL;
+}
+
+/* sys_cacheflush -- no support. */
+asmlinkage int
+sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
+{
+ return -EINVAL;
+}
+
+asmlinkage int sys_getpagesize(void)
+{
+ return PAGE_SIZE;
+}
+
+#if defined(CONFIG_SYSCALL_PRINT)
+asmlinkage void syscall_print(void *dummy,...)
+{
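+ /* This assumes the pt_regs saved on kernel entry sit immediately
+ * below the argument area, hence stepping back 4 bytes from the
+ * first (dummy) argument. */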
+ struct pt_regs *regs = (struct pt_regs *) ((unsigned char *)&dummy-4);
+ printk("call %06lx:%ld 1:%08lx,2:%08lx,3:%08lx,ret:%08lx\n",
+ ((regs->pc)&0xffffff)-2,regs->orig_er0,regs->er1,regs->er2,regs->er3,regs->er0);
+}
+#endif
diff --git a/arch/h8300/kernel/syscalls.S b/arch/h8300/kernel/syscalls.S
new file mode 100644
index 0000000..79b3bda
--- /dev/null
+++ b/arch/h8300/kernel/syscalls.S
@@ -0,0 +1,340 @@
+/* Systemcall Entry Table */
+#include <linux/config.h>
+#include <linux/sys.h>
+#include <asm/linkage.h>
+#include <asm/unistd.h>
+
+.globl SYMBOL_NAME(sys_call_table)
+
+#if defined(CONFIG_CPU_H8300H)
+ .h8300h
+#endif
+#if defined(CONFIG_CPU_H8S)
+ .h8300s
+#endif
+ .section .text
+ .align 2
+SYMBOL_NAME_LABEL(sys_call_table)
+ .long SYMBOL_NAME(sys_ni_syscall) /* 0 - old "setup()" system call*/
+ .long SYMBOL_NAME(sys_exit)
+ .long SYMBOL_NAME(sys_fork)
+ .long SYMBOL_NAME(sys_read)
+ .long SYMBOL_NAME(sys_write)
+ .long SYMBOL_NAME(sys_open) /* 5 */
+ .long SYMBOL_NAME(sys_close)
+ .long SYMBOL_NAME(sys_waitpid)
+ .long SYMBOL_NAME(sys_creat)
+ .long SYMBOL_NAME(sys_link)
+ .long SYMBOL_NAME(sys_unlink) /* 10 */
+ .long SYMBOL_NAME(sys_execve)
+ .long SYMBOL_NAME(sys_chdir)
+ .long SYMBOL_NAME(sys_time)
+ .long SYMBOL_NAME(sys_mknod)
+ .long SYMBOL_NAME(sys_chmod) /* 15 */
+ .long SYMBOL_NAME(sys_chown16)
+ .long SYMBOL_NAME(sys_ni_syscall) /* old break syscall holder */
+ .long SYMBOL_NAME(sys_stat)
+ .long SYMBOL_NAME(sys_lseek)
+ .long SYMBOL_NAME(sys_getpid) /* 20 */
+ .long SYMBOL_NAME(sys_mount)
+ .long SYMBOL_NAME(sys_oldumount)
+ .long SYMBOL_NAME(sys_setuid16)
+ .long SYMBOL_NAME(sys_getuid16)
+ .long SYMBOL_NAME(sys_stime) /* 25 */
+ .long SYMBOL_NAME(sys_ptrace)
+ .long SYMBOL_NAME(sys_alarm)
+ .long SYMBOL_NAME(sys_fstat)
+ .long SYMBOL_NAME(sys_pause)
+ .long SYMBOL_NAME(sys_utime) /* 30 */
+ .long SYMBOL_NAME(sys_ni_syscall) /* old stty syscall holder */
+ .long SYMBOL_NAME(sys_ni_syscall) /* old gtty syscall holder */
+ .long SYMBOL_NAME(sys_access)
+ .long SYMBOL_NAME(sys_nice)
+ .long SYMBOL_NAME(sys_ni_syscall) /* 35 */ /* old ftime syscall holder */
+ .long SYMBOL_NAME(sys_sync)
+ .long SYMBOL_NAME(sys_kill)
+ .long SYMBOL_NAME(sys_rename)
+ .long SYMBOL_NAME(sys_mkdir)
+ .long SYMBOL_NAME(sys_rmdir) /* 40 */
+ .long SYMBOL_NAME(sys_dup)
+ .long SYMBOL_NAME(sys_pipe)
+ .long SYMBOL_NAME(sys_times)
+ .long SYMBOL_NAME(sys_ni_syscall) /* old prof syscall holder */
+ .long SYMBOL_NAME(sys_brk) /* 45 */
+ .long SYMBOL_NAME(sys_setgid16)
+ .long SYMBOL_NAME(sys_getgid16)
+ .long SYMBOL_NAME(sys_signal)
+ .long SYMBOL_NAME(sys_geteuid16)
+ .long SYMBOL_NAME(sys_getegid16) /* 50 */
+ .long SYMBOL_NAME(sys_acct)
+ .long SYMBOL_NAME(sys_umount) /* recycled never used phys() */
+ .long SYMBOL_NAME(sys_ni_syscall) /* old lock syscall holder */
+ .long SYMBOL_NAME(sys_ioctl)
+ .long SYMBOL_NAME(sys_fcntl) /* 55 */
+ .long SYMBOL_NAME(sys_ni_syscall) /* old mpx syscall holder */
+ .long SYMBOL_NAME(sys_setpgid)
+ .long SYMBOL_NAME(sys_ni_syscall) /* old ulimit syscall holder */
+ .long SYMBOL_NAME(sys_ni_syscall)
+ .long SYMBOL_NAME(sys_umask) /* 60 */
+ .long SYMBOL_NAME(sys_chroot)
+ .long SYMBOL_NAME(sys_ustat)
+ .long SYMBOL_NAME(sys_dup2)
+ .long SYMBOL_NAME(sys_getppid)
+ .long SYMBOL_NAME(sys_getpgrp) /* 65 */
+ .long SYMBOL_NAME(sys_setsid)
+ .long SYMBOL_NAME(sys_sigaction)
+ .long SYMBOL_NAME(sys_sgetmask)
+ .long SYMBOL_NAME(sys_ssetmask)
+ .long SYMBOL_NAME(sys_setreuid16) /* 70 */
+ .long SYMBOL_NAME(sys_setregid16)
+ .long SYMBOL_NAME(sys_sigsuspend)
+ .long SYMBOL_NAME(sys_sigpending)
+ .long SYMBOL_NAME(sys_sethostname)
+ .long SYMBOL_NAME(sys_setrlimit) /* 75 */
+ .long SYMBOL_NAME(sys_old_getrlimit)
+ .long SYMBOL_NAME(sys_getrusage)
+ .long SYMBOL_NAME(sys_gettimeofday)
+ .long SYMBOL_NAME(sys_settimeofday)
+ .long SYMBOL_NAME(sys_getgroups16) /* 80 */
+ .long SYMBOL_NAME(sys_setgroups16)
+ .long SYMBOL_NAME(old_select)
+ .long SYMBOL_NAME(sys_symlink)
+ .long SYMBOL_NAME(sys_lstat)
+ .long SYMBOL_NAME(sys_readlink) /* 85 */
+ .long SYMBOL_NAME(sys_uselib)
+ .long SYMBOL_NAME(sys_swapon)
+ .long SYMBOL_NAME(sys_reboot)
+ .long SYMBOL_NAME(old_readdir)
+ .long SYMBOL_NAME(old_mmap) /* 90 */
+ .long SYMBOL_NAME(sys_munmap)
+ .long SYMBOL_NAME(sys_truncate)
+ .long SYMBOL_NAME(sys_ftruncate)
+ .long SYMBOL_NAME(sys_fchmod)
+ .long SYMBOL_NAME(sys_fchown16) /* 95 */
+ .long SYMBOL_NAME(sys_getpriority)
+ .long SYMBOL_NAME(sys_setpriority)
+ .long SYMBOL_NAME(sys_ni_syscall) /* old profil syscall holder */
+ .long SYMBOL_NAME(sys_statfs)
+ .long SYMBOL_NAME(sys_fstatfs) /* 100 */
+ .long SYMBOL_NAME(sys_ni_syscall) /* ioperm for i386 */
+ .long SYMBOL_NAME(sys_socketcall)
+ .long SYMBOL_NAME(sys_syslog)
+ .long SYMBOL_NAME(sys_setitimer)
+ .long SYMBOL_NAME(sys_getitimer) /* 105 */
+ .long SYMBOL_NAME(sys_newstat)
+ .long SYMBOL_NAME(sys_newlstat)
+ .long SYMBOL_NAME(sys_newfstat)
+ .long SYMBOL_NAME(sys_ni_syscall)
+ .long SYMBOL_NAME(sys_ni_syscall) /* iopl for i386 */ /* 110 */
+ .long SYMBOL_NAME(sys_vhangup)
+ .long SYMBOL_NAME(sys_ni_syscall) /* obsolete idle() syscall */
+ .long SYMBOL_NAME(sys_ni_syscall) /* vm86old for i386 */
+ .long SYMBOL_NAME(sys_wait4)
+ .long SYMBOL_NAME(sys_swapoff) /* 115 */
+ .long SYMBOL_NAME(sys_sysinfo)
+ .long SYMBOL_NAME(sys_ipc)
+ .long SYMBOL_NAME(sys_fsync)
+ .long SYMBOL_NAME(sys_sigreturn)
+ .long SYMBOL_NAME(sys_clone) /* 120 */
+ .long SYMBOL_NAME(sys_setdomainname)
+ .long SYMBOL_NAME(sys_newuname)
+ .long SYMBOL_NAME(sys_cacheflush) /* modify_ldt for i386 */
+ .long SYMBOL_NAME(sys_adjtimex)
+ .long SYMBOL_NAME(sys_ni_syscall) /* 125 sys_mprotect */
+ .long SYMBOL_NAME(sys_sigprocmask)
+ .long SYMBOL_NAME(sys_ni_syscall) /* sys_create_module */
+ .long SYMBOL_NAME(sys_init_module)
+ .long SYMBOL_NAME(sys_delete_module)
+ .long SYMBOL_NAME(sys_ni_syscall) /* 130 sys_get_kernel_syms */
+ .long SYMBOL_NAME(sys_quotactl)
+ .long SYMBOL_NAME(sys_getpgid)
+ .long SYMBOL_NAME(sys_fchdir)
+ .long SYMBOL_NAME(sys_bdflush)
+ .long SYMBOL_NAME(sys_sysfs) /* 135 */
+ .long SYMBOL_NAME(sys_personality)
+ .long SYMBOL_NAME(sys_ni_syscall) /* for afs_syscall */
+ .long SYMBOL_NAME(sys_setfsuid16)
+ .long SYMBOL_NAME(sys_setfsgid16)
+ .long SYMBOL_NAME(sys_llseek) /* 140 */
+ .long SYMBOL_NAME(sys_getdents)
+ .long SYMBOL_NAME(sys_select)
+ .long SYMBOL_NAME(sys_flock)
+ .long SYMBOL_NAME(sys_ni_syscall) /* sys_msync */
+ .long SYMBOL_NAME(sys_readv) /* 145 */
+ .long SYMBOL_NAME(sys_writev)
+ .long SYMBOL_NAME(sys_getsid)
+ .long SYMBOL_NAME(sys_fdatasync)
+ .long SYMBOL_NAME(sys_sysctl)
+ .long SYMBOL_NAME(sys_ni_syscall) /* 150 sys_mlock */
+ .long SYMBOL_NAME(sys_ni_syscall) /* sys_munlock */
+ .long SYMBOL_NAME(sys_ni_syscall) /* sys_mlockall */
+ .long SYMBOL_NAME(sys_ni_syscall) /* sys_munlockall */
+ .long SYMBOL_NAME(sys_sched_setparam)
+ .long SYMBOL_NAME(sys_sched_getparam) /* 155 */
+ .long SYMBOL_NAME(sys_sched_setscheduler)
+ .long SYMBOL_NAME(sys_sched_getscheduler)
+ .long SYMBOL_NAME(sys_sched_yield)
+ .long SYMBOL_NAME(sys_sched_get_priority_max)
+ .long SYMBOL_NAME(sys_sched_get_priority_min) /* 160 */
+ .long SYMBOL_NAME(sys_sched_rr_get_interval)
+ .long SYMBOL_NAME(sys_nanosleep)
+ .long SYMBOL_NAME(sys_ni_syscall) /* sys_mremap */
+ .long SYMBOL_NAME(sys_setresuid16)
+ .long SYMBOL_NAME(sys_getresuid16) /* 165 */
+ .long SYMBOL_NAME(sys_ni_syscall) /* for vm86 */
+ .long SYMBOL_NAME(sys_ni_syscall) /* sys_query_module */
+ .long SYMBOL_NAME(sys_poll)
+ .long SYMBOL_NAME(sys_nfsservctl)
+ .long SYMBOL_NAME(sys_setresgid16) /* 170 */
+ .long SYMBOL_NAME(sys_getresgid16)
+ .long SYMBOL_NAME(sys_prctl)
+ .long SYMBOL_NAME(sys_rt_sigreturn)
+ .long SYMBOL_NAME(sys_rt_sigaction)
+ .long SYMBOL_NAME(sys_rt_sigprocmask) /* 175 */
+ .long SYMBOL_NAME(sys_rt_sigpending)
+ .long SYMBOL_NAME(sys_rt_sigtimedwait)
+ .long SYMBOL_NAME(sys_rt_sigqueueinfo)
+ .long SYMBOL_NAME(sys_rt_sigsuspend)
+ .long SYMBOL_NAME(sys_pread64) /* 180 */
+ .long SYMBOL_NAME(sys_pwrite64)
+ .long SYMBOL_NAME(sys_lchown16)
+ .long SYMBOL_NAME(sys_getcwd)
+ .long SYMBOL_NAME(sys_capget)
+ .long SYMBOL_NAME(sys_capset) /* 185 */
+ .long SYMBOL_NAME(sys_sigaltstack)
+ .long SYMBOL_NAME(sys_sendfile)
+ .long SYMBOL_NAME(sys_ni_syscall) /* streams1 */
+ .long SYMBOL_NAME(sys_ni_syscall) /* streams2 */
+ .long SYMBOL_NAME(sys_vfork) /* 190 */
+ .long SYMBOL_NAME(sys_getrlimit)
+ .long SYMBOL_NAME(sys_mmap2)
+ .long SYMBOL_NAME(sys_truncate64)
+ .long SYMBOL_NAME(sys_ftruncate64)
+ .long SYMBOL_NAME(sys_stat64) /* 195 */
+ .long SYMBOL_NAME(sys_lstat64)
+ .long SYMBOL_NAME(sys_fstat64)
+ .long SYMBOL_NAME(sys_chown)
+ .long SYMBOL_NAME(sys_getuid)
+ .long SYMBOL_NAME(sys_getgid) /* 200 */
+ .long SYMBOL_NAME(sys_geteuid)
+ .long SYMBOL_NAME(sys_getegid)
+ .long SYMBOL_NAME(sys_setreuid)
+ .long SYMBOL_NAME(sys_setregid)
+ .long SYMBOL_NAME(sys_getgroups) /* 205 */
+ .long SYMBOL_NAME(sys_setgroups)
+ .long SYMBOL_NAME(sys_fchown)
+ .long SYMBOL_NAME(sys_setresuid)
+ .long SYMBOL_NAME(sys_getresuid)
+ .long SYMBOL_NAME(sys_setresgid) /* 210 */
+ .long SYMBOL_NAME(sys_getresgid)
+ .long SYMBOL_NAME(sys_lchown)
+ .long SYMBOL_NAME(sys_setuid)
+ .long SYMBOL_NAME(sys_setgid)
+ .long SYMBOL_NAME(sys_setfsuid) /* 215 */
+ .long SYMBOL_NAME(sys_setfsgid)
+ .long SYMBOL_NAME(sys_pivot_root)
+ .long SYMBOL_NAME(sys_ni_syscall)
+ .long SYMBOL_NAME(sys_ni_syscall)
+ .long SYMBOL_NAME(sys_getdents64) /* 220 */
+ .long SYMBOL_NAME(sys_fcntl64)
+ .long SYMBOL_NAME(sys_ni_syscall) /* reserved for TUX */
+ .long SYMBOL_NAME(sys_ni_syscall)
+ .long SYMBOL_NAME(sys_gettid)
+ .long SYMBOL_NAME(sys_ni_syscall) /* 225 */ /* sys_readahead */
+ .long SYMBOL_NAME(sys_setxattr)
+ .long SYMBOL_NAME(sys_lsetxattr)
+ .long SYMBOL_NAME(sys_fsetxattr)
+ .long SYMBOL_NAME(sys_getxattr)
+ .long SYMBOL_NAME(sys_lgetxattr) /* 230 */
+ .long SYMBOL_NAME(sys_fgetxattr)
+ .long SYMBOL_NAME(sys_listxattr)
+ .long SYMBOL_NAME(sys_llistxattr)
+ .long SYMBOL_NAME(sys_flistxattr)
+ .long SYMBOL_NAME(sys_removexattr) /* 235 */
+ .long SYMBOL_NAME(sys_lremovexattr)
+ .long SYMBOL_NAME(sys_fremovexattr)
+ .long SYMBOL_NAME(sys_tkill)
+ .long SYMBOL_NAME(sys_sendfile64)
+ .long SYMBOL_NAME(sys_futex) /* 240 */
+ .long SYMBOL_NAME(sys_sched_setaffinity)
+ .long SYMBOL_NAME(sys_sched_getaffinity)
+ .long SYMBOL_NAME(sys_ni_syscall) /* sys_set_thread_area */
+ .long SYMBOL_NAME(sys_ni_syscall) /* sys_get_thread_area */
+ .long SYMBOL_NAME(sys_io_setup) /* 245 */
+ .long SYMBOL_NAME(sys_io_destroy)
+ .long SYMBOL_NAME(sys_io_getevents)
+ .long SYMBOL_NAME(sys_io_submit)
+ .long SYMBOL_NAME(sys_io_cancel)
+ .long SYMBOL_NAME(sys_fadvise64) /* 250 */
+ .long SYMBOL_NAME(sys_ni_syscall)
+ .long SYMBOL_NAME(sys_exit_group)
+ .long SYMBOL_NAME(sys_lookup_dcookie)
+ .long SYMBOL_NAME(sys_epoll_create)
+ .long SYMBOL_NAME(sys_epoll_ctl) /* 255 */
+ .long SYMBOL_NAME(sys_epoll_wait)
+ .long SYMBOL_NAME(sys_ni_syscall) /* sys_remap_file_pages */
+ .long SYMBOL_NAME(sys_set_tid_address)
+ .long SYMBOL_NAME(sys_timer_create)
+ .long SYMBOL_NAME(sys_timer_settime) /* 260 */
+ .long SYMBOL_NAME(sys_timer_gettime)
+ .long SYMBOL_NAME(sys_timer_getoverrun)
+ .long SYMBOL_NAME(sys_timer_delete)
+ .long SYMBOL_NAME(sys_clock_settime)
+ .long SYMBOL_NAME(sys_clock_gettime) /* 265 */
+ .long SYMBOL_NAME(sys_clock_getres)
+ .long SYMBOL_NAME(sys_clock_nanosleep)
+ .long SYMBOL_NAME(sys_statfs64)
+ .long SYMBOL_NAME(sys_fstatfs64)
+ .long SYMBOL_NAME(sys_tgkill) /* 270 */
+ .long SYMBOL_NAME(sys_utimes)
+ .long SYMBOL_NAME(sys_fadvise64_64)
+ .long SYMBOL_NAME(sys_ni_syscall) /* sys_vserver */
+ .long SYMBOL_NAME(sys_mbind)
+ .long SYMBOL_NAME(sys_get_mempolicy) /* 275 */
+ .long SYMBOL_NAME(sys_set_mempolicy)
+ .long SYMBOL_NAME(sys_mq_open)
+ .long SYMBOL_NAME(sys_mq_unlink)
+ .long SYMBOL_NAME(sys_mq_timedsend)
+ .long SYMBOL_NAME(sys_mq_timedreceive) /* 280 */
+ .long SYMBOL_NAME(sys_mq_notify)
+ .long SYMBOL_NAME(sys_mq_getsetattr)
+ .long SYMBOL_NAME(sys_ni_syscall) /* reserved for kexec */
+ .long SYMBOL_NAME(sys_waitid)
+ .long SYMBOL_NAME(sys_ni_syscall) /* 285 */ /* available */
+ .long SYMBOL_NAME(sys_add_key)
+ .long SYMBOL_NAME(sys_request_key)
+ .long SYMBOL_NAME(sys_keyctl)
+
+ .rept NR_syscalls-(.-SYMBOL_NAME(sys_call_table))/4
+ .long SYMBOL_NAME(sys_ni_syscall)
+ .endr
+
+ .macro call_sp addr
+ mov.l #SYMBOL_NAME(\addr),er6
+ bra SYMBOL_NAME(syscall_trampoline):8
+ .endm
+
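+/* Each wrapper below loads the address of its C handler into er6 and
+ * branches to syscall_trampoline, which passes the saved register
+ * frame (the current stack pointer) to the handler in er0. */
+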
+SYMBOL_NAME_LABEL(sys_clone)
+ call_sp h8300_clone
+
+SYMBOL_NAME_LABEL(sys_sigsuspend)
+ call_sp do_sigsuspend
+
+SYMBOL_NAME_LABEL(sys_rt_sigsuspend)
+ call_sp do_rt_sigsuspend
+
+SYMBOL_NAME_LABEL(sys_sigreturn)
+ call_sp do_sigreturn
+
+SYMBOL_NAME_LABEL(sys_rt_sigreturn)
+ call_sp do_rt_sigreturn
+
+SYMBOL_NAME_LABEL(sys_fork)
+ call_sp h8300_fork
+
+SYMBOL_NAME_LABEL(sys_vfork)
+ call_sp h8300_vfork
+
+SYMBOL_NAME_LABEL(syscall_trampoline)
+ mov.l sp,er0
+ jmp @er6
diff --git a/arch/h8300/kernel/time.c b/arch/h8300/kernel/time.c
new file mode 100644
index 0000000..8a60021
--- /dev/null
+++ b/arch/h8300/kernel/time.c
@@ -0,0 +1,134 @@
+/*
+ * linux/arch/h8300/kernel/time.c
+ *
+ * Yoshinori Sato <ysato@users.sourceforge.jp>
+ *
+ * Copied/hacked from:
+ *
+ * linux/arch/m68k/kernel/time.c
+ *
+ * Copyright (C) 1991, 1992, 1995 Linus Torvalds
+ *
+ * This file contains the m68k-specific time handling details.
+ * Most of the stuff is located in the machine specific files.
+ *
+ * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
+ * "A Kernel Model for Precision Timekeeping" by Dave Mills
+ */
+
+#include <linux/config.h> /* CONFIG_HEARTBEAT */
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/timex.h>
+#include <linux/profile.h>
+
+#include <asm/io.h>
+#include <asm/target_time.h>
+
+#define TICK_SIZE (tick_nsec / 1000)
+
+u64 jiffies_64;
+
+EXPORT_SYMBOL(jiffies_64);
+
+/*
+ * timer_interrupt() needs to keep up the real-time clock,
+ * as well as call the "do_timer()" routine every clocktick
+ */
+static void timer_interrupt(int irq, void *dummy, struct pt_regs * regs)
+{
+ /* may need to kick the hardware timer */
+ platform_timer_eoi();
+
+ do_timer(regs);
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(regs));
+#endif
+ profile_tick(CPU_PROFILING, regs);
+}
+
+void time_init(void)
+{
+ unsigned int year, mon, day, hour, min, sec;
+
+ /* FIX by dqg : Set to zero for platforms that don't have tod */
+ /* without this time is undefined and can overflow time_t, causing */
+ /* very strange errors */
+ year = 1980;
+ mon = day = 1;
+ hour = min = sec = 0;
+ platform_gettod (&year, &mon, &day, &hour, &min, &sec);
+
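+ /* The year reported by platform_gettod is offset from 1900; values
+ * that land before 1970 are assumed to belong to the 2000s. */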
+ if ((year += 1900) < 1970)
+ year += 100;
+ xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
+ xtime.tv_nsec = 0;
+
+ platform_timer_setup(timer_interrupt);
+}
+
+/*
+ * This version of gettimeofday has near microsecond resolution.
+ */
+void do_gettimeofday(struct timeval *tv)
+{
+ unsigned long flags;
+ unsigned long seq;
+ unsigned long usec, sec;
+
+ do {
+ seq = read_seqbegin_irqsave(&xtime_lock, flags);
+ usec = 0;
+ sec = xtime.tv_sec;
+ usec += (xtime.tv_nsec / 1000);
+ } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
+
+ while (usec >= 1000000) {
+ usec -= 1000000;
+ sec++;
+ }
+
+ tv->tv_sec = sec;
+ tv->tv_usec = usec;
+}
+
+EXPORT_SYMBOL(do_gettimeofday);
+
+int do_settimeofday(struct timespec *tv)
+{
+ if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
+ return -EINVAL;
+
+ write_seqlock_irq(&xtime_lock);
+ /* This is revolting. We need to set xtime.tv_nsec
+ * correctly. However, the value in this location is
+ * the value at the last tick.
+ * Discover what correction gettimeofday
+ * would have done, and then undo it!
+ */
+ while (tv->tv_nsec < 0) {
+ tv->tv_nsec += NSEC_PER_SEC;
+ tv->tv_sec--;
+ }
+
+ xtime.tv_sec = tv->tv_sec;
+ xtime.tv_nsec = tv->tv_nsec;
+ time_adjust = 0; /* stop active adjtime() */
+ time_status |= STA_UNSYNC;
+ time_maxerror = NTP_PHASE_LIMIT;
+ time_esterror = NTP_PHASE_LIMIT;
+ write_sequnlock_irq(&xtime_lock);
+ clock_was_set();
+ return 0;
+}
+
+EXPORT_SYMBOL(do_settimeofday);
+
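+/* Coarse scheduler clock: jiffies scaled to nanoseconds, so its
+ * resolution is one timer tick. */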
+unsigned long long sched_clock(void)
+{
+ return (unsigned long long)jiffies * (1000000000 / HZ);
+
+}
diff --git a/arch/h8300/kernel/traps.c b/arch/h8300/kernel/traps.c
new file mode 100644
index 0000000..300e327
--- /dev/null
+++ b/arch/h8300/kernel/traps.c
@@ -0,0 +1,169 @@
+/*
+ * linux/arch/h8300/kernel/traps.c -- general exception handling code
+ * H8/300 support Yoshinori Sato <ysato@users.sourceforge.jp>
+ *
+ * Cloned from Linux/m68k.
+ *
+ * No original Copyright holder listed,
+ * Probably original (C) Roman Zippel (assigned DJD, 1999)
+ *
+ * Copyright 1999-2000 D. Jeff Dionne, <jeff@rt-control.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <asm/traps.h>
+#include <asm/page.h>
+#include <asm/gpio.h>
+
+/*
+ * this must be called very early as the kernel might
+ * use some instructions that are emulated on the 060
+ */
+
+void __init base_trap_init(void)
+{
+}
+
+void __init trap_init (void)
+{
+}
+
+asmlinkage void set_esp0 (unsigned long ssp)
+{
+ current->thread.esp0 = ssp;
+}
+
+/*
+ * Generic dumping code. Used for panic and debug.
+ */
+
+static void dump(struct pt_regs *fp)
+{
+ unsigned long *sp;
+ unsigned char *tp;
+ int i;
+
+ printk("\nCURRENT PROCESS:\n\n");
+ printk("COMM=%s PID=%d\n", current->comm, current->pid);
+ if (current->mm) {
+ printk("TEXT=%08x-%08x DATA=%08x-%08x BSS=%08x-%08x\n",
+ (int) current->mm->start_code,
+ (int) current->mm->end_code,
+ (int) current->mm->start_data,
+ (int) current->mm->end_data,
+ (int) current->mm->end_data,
+ (int) current->mm->brk);
+ printk("USER-STACK=%08x KERNEL-STACK=%08lx\n\n",
+ (int) current->mm->start_stack,
+ (int) PAGE_SIZE+(unsigned long)current);
+ }
+
+ show_regs(fp);
+ printk("\nCODE:");
+ tp = ((unsigned char *) fp->pc) - 0x20;
+ for (sp = (unsigned long *) tp, i = 0; (i < 0x40); i += 4) {
+ if ((i % 0x10) == 0)
+ printk("\n%08x: ", (int) (tp + i));
+ printk("%08x ", (int) *sp++);
+ }
+ printk("\n");
+
+ printk("\nKERNEL STACK:");
+ tp = ((unsigned char *) fp) - 0x40;
+ for (sp = (unsigned long *) tp, i = 0; (i < 0xc0); i += 4) {
+ if ((i % 0x10) == 0)
+ printk("\n%08x: ", (int) (tp + i));
+ printk("%08x ", (int) *sp++);
+ }
+ printk("\n");
+ if (STACK_MAGIC != *(unsigned long *)((unsigned long)current+PAGE_SIZE))
+ printk("(Possibly corrupted stack page??)\n");
+
+ printk("\n\n");
+}
+
+void die_if_kernel (char *str, struct pt_regs *fp, int nr)
+{
+ extern int console_loglevel;
+
+ if (!(fp->ccr & PS_S))
+ return;
+
+ console_loglevel = 15;
+ dump(fp);
+
+ do_exit(SIGSEGV);
+}
+
+extern char _start, _etext;
+#define check_kernel_text(addr) \
+ ((addr >= (unsigned long)(&_start)) && \
+ (addr < (unsigned long)(&_etext)))
+
+static int kstack_depth_to_print = 24;
+
+void show_stack(struct task_struct *task, unsigned long *esp)
+{
+ unsigned long *stack, addr;
+ int i;
+
+ if (esp == NULL)
+ esp = (unsigned long *) &esp;
+
+ stack = esp;
+
+ printk("Stack from %08lx:", (unsigned long)stack);
+ for (i = 0; i < kstack_depth_to_print; i++) {
+ if (((unsigned long)stack & (THREAD_SIZE - 1)) == 0)
+ break;
+ if (i % 8 == 0)
+ printk("\n ");
+ printk(" %08lx", *stack++);
+ }
+
+ printk("\nCall Trace:");
+ i = 0;
+ stack = esp;
+ while (((unsigned long)stack & (THREAD_SIZE - 1)) != 0) {
+ addr = *stack++;
+ /*
+ * If the address is either in the text segment of the
+ * kernel, or in the region which contains vmalloc'ed
+ * memory, it *may* be the address of a calling
+ * routine; if so, print it so that someone tracing
+ * down the cause of the crash will be able to figure
+ * out the call path that was taken.
+ */
+ if (check_kernel_text(addr)) {
+ if (i % 4 == 0)
+ printk("\n ");
+ printk(" [<%08lx>]", addr);
+ i++;
+ }
+ }
+ printk("\n");
+}
+
+void show_trace_task(struct task_struct *tsk)
+{
+ show_stack(tsk,(unsigned long *)tsk->thread.esp0);
+}
+
+void dump_stack(void)
+{
+ show_stack(NULL,NULL);
+}
+
+EXPORT_SYMBOL(dump_stack);
diff --git a/arch/h8300/kernel/vmlinux.lds.S b/arch/h8300/kernel/vmlinux.lds.S
new file mode 100644
index 0000000..17fa11d
--- /dev/null
+++ b/arch/h8300/kernel/vmlinux.lds.S
@@ -0,0 +1,172 @@
+#define VMLINUX_SYMBOL(_sym_) _##_sym_
+#include <asm-generic/vmlinux.lds.h>
+#include <linux/config.h>
+
+/* target memory map */
+#ifdef CONFIG_H8300H_GENERIC
+#define ROMTOP 0x000000
+#define ROMSIZE 0x400000
+#define RAMTOP 0x400000
+#define RAMSIZE 0x400000
+#endif
+
+#ifdef CONFIG_H8300H_AKI3068NET
+#define ROMTOP 0x000000
+#define ROMSIZE 0x080000
+#define RAMTOP 0x400000
+#define RAMSIZE 0x200000
+#endif
+
+#ifdef CONFIG_H8300H_H8MAX
+#define ROMTOP 0x000000
+#define ROMSIZE 0x080000
+#define RAMTOP 0x400000
+#define RAMSIZE 0x200000
+#endif
+
+#ifdef CONFIG_H8300H_SIM
+#define ROMTOP 0x000000
+#define ROMSIZE 0x400000
+#define RAMTOP 0x400000
+#define RAMSIZE 0x400000
+#endif
+
+#ifdef CONFIG_H8S_SIM
+#define ROMTOP 0x000000
+#define ROMSIZE 0x400000
+#define RAMTOP 0x400000
+#define RAMSIZE 0x800000
+#endif
+
+#ifdef CONFIG_H8S_EDOSK2674
+#define ROMTOP 0x000000
+#define ROMSIZE 0x400000
+#define RAMTOP 0x400000
+#define RAMSIZE 0x800000
+#endif
+
+#if defined(CONFIG_H8300H_SIM) || defined(CONFIG_H8S_SIM)
+INPUT(romfs.o)
+#endif
+
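+/* The H8/300 is big-endian, so the 32-bit jiffies counter lives in the
+ * low-order (second) word of the 64-bit jiffies_64. */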
+_jiffies = _jiffies_64 + 4;
+
+ENTRY(__start)
+
+SECTIONS
+{
+#if defined(CONFIG_ROMKERNEL)
+ . = ROMTOP;
+ .vectors :
+ {
+ __vector = . ;
+ *(.vectors*)
+ }
+#else
+ . = RAMTOP;
+ .bootvec :
+ {
+ *(.bootvec)
+ }
+#endif
+ .text :
+ {
+#if defined(CONFIG_ROMKERNEL)
+ *(.int_redirect)
+#endif
+ __stext = . ;
+ *(.text)
+ SCHED_TEXT
+ LOCK_TEXT
+ __etext = . ;
+ . = ALIGN(16); /* Exception table */
+ ___start___ex_table = .;
+ *(__ex_table)
+ ___stop___ex_table = .;
+ }
+
+ RODATA
+#if defined(CONFIG_ROMKERNEL)
+ SECURITY_INIT
+#endif
+ ROEND = .;
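+ /* For a ROM kernel, .data is linked to run at RAM addresses but is
+ * placed at ROEND in the ROM image (its load address is exported as
+ * __begin_data below); the startup code is expected to copy it to RAM. */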
+#if defined(CONFIG_ROMKERNEL)
+ . = RAMTOP;
+ .data : AT(ROEND)
+#else
+ .data :
+#endif
+ {
+ __sdata = . ;
+ ___data_start = . ;
+
+ . = ALIGN(0x2000) ;
+ *(.data.init_task)
+ . = ALIGN(0x4) ;
+ *(.data)
+ . = ALIGN(0x4) ;
+ *(.data.*)
+
+ . = ALIGN(0x4) ;
+ ___init_begin = .;
+ __sinittext = .;
+ *(.init.text)
+ __einittext = .;
+ *(.init.data)
+ . = ALIGN(0x4) ;
+ ___setup_start = .;
+ *(.init.setup)
+ . = ALIGN(0x4) ;
+ ___setup_end = .;
+ ___initcall_start = .;
+ *(.initcall1.init)
+ *(.initcall2.init)
+ *(.initcall3.init)
+ *(.initcall4.init)
+ *(.initcall5.init)
+ *(.initcall6.init)
+ *(.initcall7.init)
+ ___initcall_end = .;
+ ___con_initcall_start = .;
+ *(.con_initcall.init)
+ ___con_initcall_end = .;
+ *(.exit.text)
+ *(.exit.data)
+ . = ALIGN(4);
+ ___initramfs_start = .;
+ *(.init.ramfs)
+ ___initramfs_end = .;
+ . = ALIGN(0x4) ;
+ ___init_end = .;
+ __edata = . ;
+ }
+#if defined(CONFIG_RAMKERNEL)
+ SECURITY_INIT
+#endif
+ __begin_data = LOADADDR(.data);
+ .bss :
+ {
+ . = ALIGN(0x4) ;
+ __sbss = . ;
+ *(.bss*)
+ . = ALIGN(0x4) ;
+ *(COMMON)
+ . = ALIGN(0x4) ;
+ __ebss = . ;
+ __end = . ;
+ __ramstart = .;
+ }
+ /DISCARD/ : {
+ *(.exitcall.exit)
+ }
+ .romfs :
+ {
+ *(.romfs*)
+ }
+ . = RAMTOP+RAMSIZE;
+ .dummy :
+ {
+ COMMAND_START = . - 0x200 ;
+ __ramend = . ;
+ }
+}