[coreboot] [commit] r5382 - in trunk/src: cpu/amd/model_10xxx cpu/amd/model_fxx include/cpu/amd northbridge/amd/amdfam10 northbridge/amd/amdmct/mct

repository service svn at coreboot.org
Thu Apr 8 17:12:19 CEST 2010


Author: myles
Date: Thu Apr  8 17:12:18 2010
New Revision: 5382
URL: https://tracker.coreboot.org/trac/coreboot/changeset/5382

Log:
Cosmetically make init_cpus more similar for fam10 and K8.

Remove some fam10 warnings.

Signed-off-by: Myles Watson <mylesgw at gmail.com>
Acked-by: Myles Watson <mylesgw at gmail.com>
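
Most of the warning fixes in the diff below follow two simple patterns: functions used only within one file gain a static qualifier, which silences missing-prototype warnings, and variables that are only referenced under a particular Kconfig option are declared under the same preprocessor guard, which removes unused-variable warnings in builds that disable the option. A minimal standalone sketch of both patterns follows; CONFIG_FOO, helper() and the surrounding code are hypothetical stand-ins for illustration, not coreboot code.

	#include <stdio.h>

	#define CONFIG_FOO 0

	/* Pattern 1: a file-local function is declared static, so the
	 * compiler no longer warns about a missing prototype. */
	static int helper(int x)
	{
		return x * 2;
	}

	int main(void)
	{
	/* Pattern 2: a variable used only when CONFIG_FOO is enabled is
	 * declared under the same guard, so CONFIG_FOO=0 builds do not
	 * warn about an unused variable. */
	#if CONFIG_FOO
		int only_needed_with_foo = helper(21);
		printf("value: %d\n", only_needed_with_foo);
	#else
		printf("value: %d\n", helper(21));
	#endif
		return 0;
	}

The variable renames in northbridge.c (dev -> cdb_dev, link -> reg_link, j -> fn) serve a similar goal: they avoid reusing identifiers already in scope, without changing behavior.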

Modified:
   trunk/src/cpu/amd/model_10xxx/init_cpus.c
   trunk/src/cpu/amd/model_10xxx/model_10xxx_init.c
   trunk/src/cpu/amd/model_fxx/init_cpus.c
   trunk/src/include/cpu/amd/model_10xxx_msr.h
   trunk/src/include/cpu/amd/model_fxx_rev.h
   trunk/src/northbridge/amd/amdfam10/northbridge.c
   trunk/src/northbridge/amd/amdfam10/raminit_amdmct.c
   trunk/src/northbridge/amd/amdmct/mct/mct_d.h
   trunk/src/northbridge/amd/amdmct/mct/mctdqs_d.c
   trunk/src/northbridge/amd/amdmct/mct/mctpro_d.c
   trunk/src/northbridge/amd/amdmct/mct/mctsrc.c

Modified: trunk/src/cpu/amd/model_10xxx/init_cpus.c
==============================================================================
--- trunk/src/cpu/amd/model_10xxx/init_cpus.c	Thu Apr  8 17:09:53 2010	(r5381)
+++ trunk/src/cpu/amd/model_10xxx/init_cpus.c	Thu Apr  8 17:12:18 2010	(r5382)
@@ -26,33 +26,10 @@
 
 #ifndef SET_FIDVID_CORE0_ONLY
 	/* MSR FIDVID_CTL and FIDVID_STATUS are shared by cores,
-	   Need to do every AP to set common FID/VID*/
+	   Need to do every AP to set common FID/VID */
 	#define SET_FIDVID_CORE0_ONLY 0
 #endif
 
-static void print_initcpu8 (const char *strval, u8 val)
-{
-	printk(BIOS_DEBUG, "%s%02x\n", strval, val);
-}
-
-static void print_initcpu8_nocr (const char *strval, u8 val)
-{
-	printk(BIOS_DEBUG, "%s%02x", strval, val);
-}
-
-
-static void print_initcpu16 (const char *strval, u16 val)
-{
-	printk(BIOS_DEBUG, "%s%04x\n", strval, val);
-}
-
-
-static void print_initcpu(const char *strval, u32 val)
-{
-	printk(BIOS_DEBUG, "%s%08x\n", strval, val);
-}
-
-
 void update_microcode(u32 cpu_deviceid);
 static void prep_fid_change(void);
 static void init_fidvid_stage2(u32 apicid, u32 nodeid);
@@ -65,14 +42,13 @@
 	msr_t msr;
 	msr = rdmsr(NB_CFG_MSR);
 	// EnableCf8ExtCfg: We need that to access CONFIG_PCI_IO_CFG_EXT 4K range
-	msr.hi |= (1<<(46-32));
+	msr.hi |= (1 << (46 - 32));
 	wrmsr(NB_CFG_MSR, msr);
 }
 #else
 static void set_EnableCf8ExtCfg(void) { }
 #endif
 
-
 /*[39:8] */
 #define PCI_MMIO_BASE 0xfe000000
 /* because we will use gs to store hi, so need to make sure lo can start
@@ -85,34 +61,31 @@
 	msr = rdmsr(0xc0010058);
 	msr.lo &= ~(0xfff00000 | (0xf << 2));
 	// 256 bus per segment, MMIO reg will be 4G , enable MMIO Config space
-	msr.lo |= ((8+CONFIG_PCI_BUS_SEGN_BITS) << 2) | (1 << 0);
+	msr.lo |= ((8 + CONFIG_PCI_BUS_SEGN_BITS) << 2) | (1 << 0);
 	msr.hi &= ~(0x0000ffff);
-	msr.hi |= (PCI_MMIO_BASE >> (32-8));
-	wrmsr(0xc0010058, msr); // MMIO Config Base Address Reg
+	msr.hi |= (PCI_MMIO_BASE >> (32 - 8));
+	wrmsr(0xc0010058, msr);	// MMIO Config Base Address Reg
 
 	//mtrr for that range?
 	// set_var_mtrr_x(7, PCI_MMIO_BASE<<8, PCI_MMIO_BASE>>(32-8), 0x00000000, 0x01, MTRR_TYPE_UNCACHEABLE);
 
 	set_wrap32dis();
 
-	msr.hi = (PCI_MMIO_BASE >> (32-8));
+	msr.hi = (PCI_MMIO_BASE >> (32 - 8));
 	msr.lo = 0;
-	wrmsr(0xc0000101, msr); //GS_Base Reg
-
-
+	wrmsr(0xc0000101, msr);	//GS_Base Reg
 
 #endif
 }
 
-
-typedef void (*process_ap_t)(u32 apicid, void *gp);
+typedef void (*process_ap_t) (u32 apicid, void *gp);
 
 //core_range = 0 : all cores
 //core range = 1 : core 0 only
 //core range = 2 : cores other than core0
 
-static void for_each_ap(u32 bsp_apicid, u32 core_range,
-				process_ap_t process_ap, void *gp)
+static void for_each_ap(u32 bsp_apicid, u32 core_range, process_ap_t process_ap,
+			void *gp)
 {
 	// here assume the OS don't change our apicid
 	u32 ap_apicid;
@@ -122,7 +95,7 @@
 	u32 disable_siblings;
 	u32 cores_found;
 	u32 nb_cfg_54;
-	int i,j;
+	int i, j;
 	u32 ApicIdCoreIdSize;
 
 	/* get_nodes define in ht_wrapper.c */
@@ -130,8 +103,8 @@
 
 	disable_siblings = !CONFIG_LOGICAL_CPUS;
 
-#if CONFIG_LOGICAL_CPUS == 1
-	if(read_option(CMOS_VSTART_multi_core, CMOS_VLEN_multi_core, 0) != 0) { // 0 mean multi core
+#if CONFIG_LOGICAL_CPUS == 1 && CONFIG_HAVE_OPTION_TABLE == 1
+	if (read_option(CMOS_VSTART_multi_core, CMOS_VLEN_multi_core, 0) != 0) {	// 0 mean multi core
 		disable_siblings = 1;
 	}
 #endif
@@ -141,10 +114,10 @@
 	nb_cfg_54 = read_nb_cfg_54();
 
 	ApicIdCoreIdSize = (cpuid_ecx(0x80000008) >> 12 & 0xf);
-	if(ApicIdCoreIdSize) {
+	if (ApicIdCoreIdSize) {
 		siblings = ((1 << ApicIdCoreIdSize) - 1);
 	} else {
-		siblings = 3; //quad core
+		siblings = 3;	//quad core
 	}
 
 	for (i = 0; i < nodes; i++) {
@@ -158,24 +131,26 @@
 			jstart = 0;
 		}
 
-		if (disable_siblings || (core_range==1)) {
+		if (disable_siblings || (core_range == 1)) {
 			jend = 0;
 		} else {
 			jend = cores_found;
 		}
 
-
 		for (j = jstart; j <= jend; j++) {
-			ap_apicid = i * (nb_cfg_54 ? (siblings + 1):1) + j * (nb_cfg_54 ? 1:64);
+			ap_apicid =
+			    i * (nb_cfg_54 ? (siblings + 1) : 1) +
+			    j * (nb_cfg_54 ? 1 : 64);
 
-		#if (CONFIG_ENABLE_APIC_EXT_ID == 1) && (CONFIG_APIC_ID_OFFSET > 0)
-			#if CONFIG_LIFT_BSP_APIC_ID == 0
-			if( (i != 0) || (j != 0)) /* except bsp */
-			#endif
+#if (CONFIG_ENABLE_APIC_EXT_ID == 1) && (CONFIG_APIC_ID_OFFSET > 0)
+#if CONFIG_LIFT_BSP_APIC_ID == 0
+			if ((i != 0) || (j != 0))	/* except bsp */
+#endif
 				ap_apicid += CONFIG_APIC_ID_OFFSET;
-		#endif
+#endif
 
-			if(ap_apicid == bsp_apicid) continue;
+			if (ap_apicid == bsp_apicid)
+				continue;
 
 			process_ap(ap_apicid, gp);
 
@@ -183,8 +158,7 @@
 	}
 }
 
-/* FIXME: Duplicate of what is in lapic.h? */
-static int lapic_remote_read(int apicid, int reg, u32 *pvalue)
+static inline int lapic_remote_read(int apicid, int reg, u32 *pvalue)
 {
 	int timeout;
 	u32 status;
@@ -192,8 +166,9 @@
 	lapic_wait_icr_idle();
 	lapic_write(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(apicid));
 	lapic_write(LAPIC_ICR, LAPIC_DM_REMRD | (reg >> 4));
-	timeout = 0;
 
+/* Extra busy check compared to lapic.h */
+	timeout = 0;
 	do {
 		status = lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY;
 	} while (status == LAPIC_ICR_BUSY && timeout++ < 1000);
@@ -212,31 +187,33 @@
 	return result;
 }
 
-
 /* Use the LAPIC timer count register to hold each cores init status */
 #define LAPIC_MSG_REG 0x380
 
-
 #if SET_FIDVID == 1
 static void init_fidvid_ap(u32 bsp_apicid, u32 apicid, u32 nodeid, u32 coreid);
 #endif
 
-static inline __attribute__((always_inline)) void print_apicid_nodeid_coreid(u32 apicid, struct node_core_id id, const char *str)
+static inline __attribute__ ((always_inline))
+void print_apicid_nodeid_coreid(u32 apicid, struct node_core_id id,
+				const char *str)
 {
-		printk(BIOS_DEBUG, "%s --- {	 APICID = %02x NODEID = %02x COREID = %02x} ---\n", str, apicid, id.nodeid, id.coreid);
+	printk(BIOS_DEBUG,
+	       "%s --- { APICID = %02x NODEID = %02x COREID = %02x} ---\n", str,
+	       apicid, id.nodeid, id.coreid);
 }
 
-
-static unsigned wait_cpu_state(u32 apicid, u32 state)
+static u32 wait_cpu_state(u32 apicid, u32 state)
 {
 	u32 readback = 0;
 	u32 timeout = 1;
 	int loop = 4000000;
 	while (--loop > 0) {
-		if (lapic_remote_read(apicid, LAPIC_MSG_REG, &readback) != 0) continue;
+		if (lapic_remote_read(apicid, LAPIC_MSG_REG, &readback) != 0)
+			continue;
 		if ((readback & 0x3f) == state) {
 			timeout = 0;
-			break; //target cpu is in stage started
+			break;	//target cpu is in stage started
 		}
 	}
 	if (timeout) {
@@ -248,30 +225,26 @@
 	return timeout;
 }
 
-
-static void wait_ap_started(u32 ap_apicid, void *gp )
+static void wait_ap_started(u32 ap_apicid, void *gp)
 {
 	u32 timeout;
-	timeout = wait_cpu_state(ap_apicid, 0x13); // started
-	if(timeout) {
-		print_initcpu8_nocr("* AP ", ap_apicid);
-		print_initcpu(" didn't start timeout:", timeout);
-	}
-	else {
-		print_initcpu8_nocr("AP started: ", ap_apicid);
+	timeout = wait_cpu_state(ap_apicid, 0x13);	// started
+	printk(BIOS_DEBUG, "* AP %02x", ap_apicid);
+	if (timeout) {
+		printk(BIOS_DEBUG, " timed out:%08x\n", timeout);
+	} else {
+		printk(BIOS_DEBUG, "started\n");
 	}
 }
 
-
 static void wait_all_other_cores_started(u32 bsp_apicid)
 {
 	// all aps other than core0
-	print_debug("started ap apicid: ");
-	for_each_ap(bsp_apicid, 2 , wait_ap_started, (void *)0);
-	print_debug("\n");
+	printk(BIOS_DEBUG, "started ap apicid: ");
+	for_each_ap(bsp_apicid, 2, wait_ap_started, (void *)0);
+	printk(BIOS_DEBUG, "\n");
 }
 
-
 static void allow_all_aps_stop(u32 bsp_apicid)
 {
 	/* Called by the BSP to indicate AP can stop */
@@ -292,7 +265,6 @@
 	pci_write_config32(NODE_HT(node), 0x68, val);
 }
 
-
 static void STOP_CAR_AND_CPU(void)
 {
 	msr_t msr;
@@ -302,12 +274,13 @@
 	msr.lo &= ~(1 << ClLinesToNbDis);
 	wrmsr(BU_CFG2, msr);
 
-	disable_cache_as_ram(); // inline
+	disable_cache_as_ram();	// inline
+	/* stop all cores except node0/core0 the bsp .... */
 	stop_this_cpu();
 }
 
 #if RAMINIT_SYSINFO == 1
-static u32 init_cpus(u32 cpu_init_detectedx ,struct sys_info *sysinfo)
+static u32 init_cpus(u32 cpu_init_detectedx, struct sys_info *sysinfo)
 #else
 static u32 init_cpus(u32 cpu_init_detectedx)
 #endif
@@ -320,7 +293,7 @@
 	 * already set early mtrr in cache_as_ram.inc
 	 */
 
-	/* enable access pci conf via mmio*/
+	/* enable access pci conf via mmio */
 	set_pci_mmio_conf_reg();
 
 	/* that is from initial apicid, we need nodeid and coreid
@@ -328,36 +301,35 @@
 	id = get_node_core_id_x();
 
 	/* NB_CFG MSR is shared between cores, so we need make sure
-	  core0 is done at first --- use wait_all_core0_started  */
-	if(id.coreid == 0) {
-		set_apicid_cpuid_lo(); /* only set it on core0 */
-		set_EnableCf8ExtCfg(); /* only set it on core0 */
-		#if (CONFIG_ENABLE_APIC_EXT_ID == 1)
+	   core0 is done at first --- use wait_all_core0_started  */
+	if (id.coreid == 0) {
+		set_apicid_cpuid_lo();	/* only set it on core0 */
+		set_EnableCf8ExtCfg();	/* only set it on core0 */
+#if (CONFIG_ENABLE_APIC_EXT_ID == 1)
 		enable_apic_ext_id(id.nodeid);
-		#endif
+#endif
 	}
 
 	enable_lapic();
 
-
 #if (CONFIG_ENABLE_APIC_EXT_ID == 1) && (CONFIG_APIC_ID_OFFSET > 0)
 	u32 initial_apicid = get_initial_apicid();
 
-	#if CONFIG_LIFT_BSP_APIC_ID == 0
-	if( initial_apicid != 0 ) // other than bsp
-	#endif
+#if CONFIG_LIFT_BSP_APIC_ID == 0
+	if (initial_apicid != 0)	// other than bsp
+#endif
 	{
 		/* use initial apic id to lift it */
 		u32 dword = lapic_read(LAPIC_ID);
 		dword &= ~(0xff << 24);
-		dword |= (((initial_apicid + CONFIG_APIC_ID_OFFSET) & 0xff) << 24);
+		dword |=
+		    (((initial_apicid + CONFIG_APIC_ID_OFFSET) & 0xff) << 24);
 
 		lapic_write(LAPIC_ID, dword);
 	}
-
-	#if CONFIG_LIFT_BSP_APIC_ID == 1
+#if CONFIG_LIFT_BSP_APIC_ID == 1
 	bsp_apicid += CONFIG_APIC_ID_OFFSET;
-	#endif
+#endif
 
 #endif
 
@@ -365,31 +337,28 @@
 	apicid = lapicid();
 
 	// show our apicid, nodeid, and coreid
-	if( id.coreid==0 ) {
-		if (id.nodeid!=0) //all core0 except bsp
+	if (id.coreid == 0) {
+		if (id.nodeid != 0)	//all core0 except bsp
 			print_apicid_nodeid_coreid(apicid, id, " core0: ");
-	}
-	else { //all other cores
+	} else {		//all other cores
 		print_apicid_nodeid_coreid(apicid, id, " corex: ");
 	}
 
-
 	if (cpu_init_detectedx) {
-		print_apicid_nodeid_coreid(apicid, id, "\n\n\nINIT detected from ");
-		print_debug("\nIssuing SOFT_RESET...\n");
+		print_apicid_nodeid_coreid(apicid, id,
+					   "\n\n\nINIT detected from ");
+		printk(BIOS_DEBUG, "\nIssuing SOFT_RESET...\n");
 		soft_reset();
 	}
 
-	if(id.coreid == 0) {
-		if(!(warm_reset_detect(id.nodeid))) //FIXME: INIT is checked above but check for more resets?
-			distinguish_cpu_resets(id.nodeid); // Also indicates we are started
+	if (id.coreid == 0) {
+		if (!(warm_reset_detect(id.nodeid)))	//FIXME: INIT is checked above but check for more resets?
+			distinguish_cpu_resets(id.nodeid);	// Also indicates we are started
 	}
-
 	// Mark the core as started.
 	lapic_write(LAPIC_MSG_REG, (apicid << 24) | 0x13);
 
-
-	if(apicid != bsp_apicid) {
+	if (apicid != bsp_apicid) {
 		/* Setup each AP's cores MSRs.
 		 * This happens after HTinit.
 		 * The BSP runs this code in it's own path.
@@ -397,20 +366,24 @@
 		update_microcode(cpuid_eax(1));
 		cpuSetAMDMSR();
 
-
 #if SET_FIDVID == 1
-	#if (CONFIG_LOGICAL_CPUS == 1)  && (SET_FIDVID_CORE0_ONLY == 1)
+#if (CONFIG_LOGICAL_CPUS == 1) && (SET_FIDVID_CORE0_ONLY == 1)
 		// Run on all AP for proper FID/VID setup.
-		if(id.coreid == 0 ) // only need set fid for core0
-	#endif
+		if (id.coreid == 0)	// only need set fid for core0
+#endif
 		{
-		// check warm(bios) reset to call stage2 otherwise do stage1
+			// check warm(bios) reset to call stage2 otherwise do stage1
 			if (warm_reset_detect(id.nodeid)) {
-				printk(BIOS_DEBUG, "init_fidvid_stage2 apicid: %02x\n", apicid);
+				printk(BIOS_DEBUG,
+				       "init_fidvid_stage2 apicid: %02x\n",
+				       apicid);
 				init_fidvid_stage2(apicid, id.nodeid);
 			} else {
-				printk(BIOS_DEBUG, "init_fidvid_ap(stage1) apicid: %02x\n", apicid);
-				init_fidvid_ap(bsp_apicid, apicid, id.nodeid, id.coreid);
+				printk(BIOS_DEBUG,
+				       "init_fidvid_ap(stage1) apicid: %02x\n",
+				       apicid);
+				init_fidvid_ap(bsp_apicid, apicid, id.nodeid,
+					       id.coreid);
 			}
 		}
 #endif
@@ -424,13 +397,14 @@
 		set_var_mtrr(0, 0x00000000, CONFIG_RAMTOP, MTRR_TYPE_WRBACK);
 
 		STOP_CAR_AND_CPU();
-		printk(BIOS_DEBUG, "\nAP %02x should be halted but you are reading this....\n", apicid);
+		printk(BIOS_DEBUG,
+		       "\nAP %02x should be halted but you are reading this....\n",
+		       apicid);
 	}
 
 	return bsp_apicid;
 }
 
-
 static u32 is_core0_started(u32 nodeid)
 {
 	u32 htic;
@@ -441,21 +415,22 @@
 	return htic;
 }
 
-
 static void wait_all_core0_started(void)
 {
 	/* When core0 is started, it will distingush_cpu_resets
-	  . So wait for that to finish */
+	 * So wait for that to finish */
 	u32 i;
 	u32 nodes = get_nodes();
 
-	printk(BIOS_DEBUG, "Wait all core0s started \n");
-	for(i=1;i<nodes;i++) { // skip bsp, because it is running on bsp
-		while(!is_core0_started(i)) {}
-		print_initcpu8("  Core0 started on node: ", i);
+	printk(BIOS_DEBUG, "core0 started: ");
+	for (i = 1; i < nodes; i++) {	// skip bsp, because it is running on bsp
+		while (!is_core0_started(i)) {
+		}
+		printk(BIOS_DEBUG, " %02x", i);
 	}
-	printk(BIOS_DEBUG, "Wait all core0s started done\n");
+	printk(BIOS_DEBUG, "\n");
 }
+
 #if CONFIG_MAX_PHYSICAL_CPUS > 1
 /**
  * void start_node(u32 node)
@@ -480,14 +455,13 @@
 #endif
 
 	/* Allow APs to make requests (ROM fetch) */
-	val=pci_read_config32(NODE_HT(node), 0x6c);
+	val = pci_read_config32(NODE_HT(node), 0x6c);
 	val &= ~(1 << 1);
 	pci_write_config32(NODE_HT(node), 0x6c, val);
 
 	printk(BIOS_DEBUG, " done.\n");
 }
 
-
 /**
  * static void setup_remote_node(u32 node)
  *
@@ -517,7 +491,7 @@
 	printk(BIOS_DEBUG, "setup_remote_node: %02x", node);
 
 	/* copy the default resource map from node 0 */
-	for(i = 0; i < ARRAY_SIZE(pci_reg); i++) {
+	for (i = 0; i < ARRAY_SIZE(pci_reg); i++) {
 		u32 value;
 		u16 reg;
 		reg = pci_reg[i];
@@ -527,7 +501,7 @@
 	}
 	printk(BIOS_DEBUG, " done\n");
 }
-#endif	/* CONFIG_MAX_PHYSICAL_CPUS > 1 */
+#endif				/* CONFIG_MAX_PHYSICAL_CPUS > 1 */
 
 static void AMD_Errata281(u8 node, u32 revision, u32 platform)
 {
@@ -546,7 +520,8 @@
 		/* For each node we need to check for a "broken" node */
 		if (!(revision & (AMD_DR_B0 | AMD_DR_B1))) {
 			for (i = 0; i < nodes; i++) {
-				if (mctGetLogicalCPUID(i) & (AMD_DR_B0 | AMD_DR_B1)) {
+				if (mctGetLogicalCPUID(i) &
+				    (AMD_DR_B0 | AMD_DR_B1)) {
 					mixed = 1;
 					break;
 				}
@@ -590,7 +565,6 @@
 	}
 }
 
-
 static void AMD_Errata298(void)
 {
 	/* Workaround for L2 Eviction May Occur during operation to
@@ -612,36 +586,35 @@
 
 	if (affectedRev) {
 		msr = rdmsr(HWCR);
-		msr.lo |= 0x08;		/* Set TlbCacheDis bit[3] */
+		msr.lo |= 0x08;	/* Set TlbCacheDis bit[3] */
 		wrmsr(HWCR, msr);
 
 		msr = rdmsr(BU_CFG);
-		msr.lo |= 0x02;		/* Set TlbForceMemTypeUc bit[1] */
+		msr.lo |= 0x02;	/* Set TlbForceMemTypeUc bit[1] */
 		wrmsr(BU_CFG, msr);
 
 		msr = rdmsr(OSVW_ID_Length);
-		msr.lo |= 0x01;		/* OS Visible Workaround - MSR */
+		msr.lo |= 0x01;	/* OS Visible Workaround - MSR */
 		wrmsr(OSVW_ID_Length, msr);
 
 		msr = rdmsr(OSVW_Status);
-		msr.lo |= 0x01;		/* OS Visible Workaround - MSR */
+		msr.lo |= 0x01;	/* OS Visible Workaround - MSR */
 		wrmsr(OSVW_Status, msr);
 	}
 
 	if (!affectedRev && (mctGetLogicalCPUID(0xFF) & AMD_DR_B3)) {
 		msr = rdmsr(OSVW_ID_Length);
-		msr.lo |= 0x01;		/* OS Visible Workaround - MSR */
+		msr.lo |= 0x01;	/* OS Visible Workaround - MSR */
 		wrmsr(OSVW_ID_Length, msr);
 
 	}
 }
 
-
-u32 get_platform_type(void)
+static u32 get_platform_type(void)
 {
 	u32 ret = 0;
 
-	switch(SYSTEM_TYPE) {
+	switch (SYSTEM_TYPE) {
 	case 1:
 		ret |= AMD_PTYPE_DSK;
 		break;
@@ -663,8 +636,7 @@
 	return ret;
 }
 
-
-void AMD_SetupPSIVID_d (u32 platform_type, u8 node)
+static void AMD_SetupPSIVID_d(u32 platform_type, u8 node)
 {
 	u32 dword;
 	int i;
@@ -672,27 +644,26 @@
 
 	if (platform_type & (AMD_PTYPE_MOB | AMD_PTYPE_DSK)) {
 
-	/* The following code sets the PSIVID to the lowest support P state
-	 * assuming that the VID for the lowest power state is below
-	 * the VDD voltage regulator threshold. (This also assumes that there
-	 * is a Pstate lower than P0)
-	 */
+		/* The following code sets the PSIVID to the lowest support P state
+		 * assuming that the VID for the lowest power state is below
+		 * the VDD voltage regulator threshold. (This also assumes that there
+		 * is a Pstate lower than P0)
+		 */
 
-		for( i = 4; i >= 0; i--) {
+		for (i = 4; i >= 0; i--) {
 			msr = rdmsr(PS_REG_BASE + i);
 			/*  Pstate valid? */
 			if (msr.hi & PS_EN_MASK) {
-				dword = pci_read_config32(NODE_PCI(i,3), 0xA0);
+				dword = pci_read_config32(NODE_PCI(i, 3), 0xA0);
 				dword &= ~0x7F;
 				dword |= (msr.lo >> 9) & 0x7F;
-				pci_write_config32(NODE_PCI(i,3), 0xA0, dword);
+				pci_write_config32(NODE_PCI(i, 3), 0xA0, dword);
 				break;
 			}
 		}
 	}
 }
 
-
 /**
  * AMD_CpuFindCapability - Traverse PCI capability list to find host HT links.
  *  HT Phy operations are not valid on links that aren't present, so this
@@ -700,29 +671,29 @@
  *
  * Returns the offset of the link register.
  */
-BOOL AMD_CpuFindCapability (u8 node, u8 cap_count, u8 *offset)
+static BOOL AMD_CpuFindCapability(u8 node, u8 cap_count, u8 * offset)
 {
-       u32 reg;
+	u32 reg;
 	u32 val;
 
 	/* get start of CPU HT Host Capabilities */
 	val = pci_read_config32(NODE_PCI(node, 0), 0x34);
-       val &= 0xFF;  //reg offset of first link
+	val &= 0xFF;		//reg offset of first link
 
 	cap_count++;
 
 	/* Traverse through the capabilities. */
 	do {
-               reg = pci_read_config32(NODE_PCI(node, 0), val);
+		reg = pci_read_config32(NODE_PCI(node, 0), val);
 		/* Is the capability block a HyperTransport capability block? */
-               if ((reg & 0xFF) == 0x08) {
+		if ((reg & 0xFF) == 0x08) {
 			/* Is the HT capability block an HT Host Capability? */
-                       if ((reg & 0xE0000000) == (1 << 29))
+			if ((reg & 0xE0000000) == (1 << 29))
 				cap_count--;
 		}
 
-               if(cap_count)
-                   val = (reg >> 8)  & 0xFF; //update reg offset
+		if (cap_count)
+			val = (reg >> 8) & 0xFF;	//update reg offset
 	} while (cap_count && val);
 
 	*offset = (u8) val;
@@ -734,14 +705,13 @@
 		return FALSE;
 }
 
-
 /**
  * AMD_checkLinkType - Compare desired link characteristics using a logical
  *     link type mask.
  *
  * Returns the link characteristic mask.
  */
-u32 AMD_checkLinkType (u8 node, u8 link, u8 regoff)
+static u32 AMD_checkLinkType(u8 node, u8 link, u8 regoff)
 {
 	u32 val;
 	u32 linktype = 0;
@@ -765,11 +735,10 @@
 		else
 			linktype |= HTPHY_LINKTYPE_HT1;
 
-
 		/* Check ganged */
 		val = pci_read_config32(NODE_PCI(node, 0), (link << 2) + 0x170);
 
-		if ( val & 1)
+		if (val & 1)
 			linktype |= HTPHY_LINKTYPE_GANGED;
 		else
 			linktype |= HTPHY_LINKTYPE_UNGANGED;
@@ -777,12 +746,11 @@
 	return linktype;
 }
 
-
 /**
  * AMD_SetHtPhyRegister - Use the HT link's HT Phy portal registers to update
  *   a phy setting for that link.
  */
-void AMD_SetHtPhyRegister (u8 node, u8 link, u8 entry)
+static void AMD_SetHtPhyRegister(u8 node, u8 link, u8 entry)
 {
 	u32 phyReg;
 	u32 phyBase;
@@ -792,8 +760,7 @@
 	if (link > 3)
 		link -= 4;
 
-	phyBase = ((u32)link << 3) | 0x180;
-
+	phyBase = ((u32) link << 3) | 0x180;
 
 	/* Get the portal control register's initial value
 	 * and update it to access the desired phy register
@@ -834,7 +801,6 @@
 	} while (!(val & HTPHY_IS_COMPLETE_MASK));
 }
 
-
 void cpuSetAMDMSR(void)
 {
 	/* This routine loads the CPU with default settings in fam10_msr_default
@@ -852,7 +818,7 @@
 	revision = mctGetLogicalCPUID(0xFF);
 	platform = get_platform_type();
 
-	for(i = 0; i < ARRAY_SIZE(fam10_msr_default); i++) {
+	for (i = 0; i < ARRAY_SIZE(fam10_msr_default); i++) {
 		if ((fam10_msr_default[i].revision & revision) &&
 		    (fam10_msr_default[i].platform & platform)) {
 			msr = rdmsr(fam10_msr_default[i].msr);
@@ -868,8 +834,7 @@
 	printk(BIOS_DEBUG, " done\n");
 }
 
-
-void cpuSetAMDPCI(u8 node)
+static void cpuSetAMDPCI(u8 node)
 {
 	/* This routine loads the CPU with default settings in fam10_pci_default
 	 * table . It must be run after Cache-As-RAM has been enabled, and
@@ -883,39 +848,41 @@
 
 	printk(BIOS_DEBUG, "cpuSetAMDPCI %02d", node);
 
-
 	revision = mctGetLogicalCPUID(node);
 	platform = get_platform_type();
 
 	AMD_SetupPSIVID_d(platform, node);	/* Set PSIVID offset which is not table driven */
 
-	for(i = 0; i < ARRAY_SIZE(fam10_pci_default); i++) {
+	for (i = 0; i < ARRAY_SIZE(fam10_pci_default); i++) {
 		if ((fam10_pci_default[i].revision & revision) &&
 		    (fam10_pci_default[i].platform & platform)) {
 			val = pci_read_config32(NODE_PCI(node,
-				fam10_pci_default[i].function),
-				fam10_pci_default[i].offset);
+							 fam10_pci_default[i].
+							 function),
+						fam10_pci_default[i].offset);
 			val &= ~fam10_pci_default[i].mask;
 			val |= fam10_pci_default[i].data;
 			pci_write_config32(NODE_PCI(node,
-				fam10_pci_default[i].function),
-				fam10_pci_default[i].offset, val);
+						    fam10_pci_default[i].
+						    function),
+					   fam10_pci_default[i].offset, val);
 		}
 	}
 
-	for(i = 0; i < ARRAY_SIZE(fam10_htphy_default); i++) {
+	for (i = 0; i < ARRAY_SIZE(fam10_htphy_default); i++) {
 		if ((fam10_htphy_default[i].revision & revision) &&
 		    (fam10_htphy_default[i].platform & platform)) {
 			/* HT Phy settings either apply to both sublinks or have
 			 * separate registers for sublink zero and one, so there
 			 * will be two table entries. So, here we only loop
-			 cd t	* through the sublink zeros in function zero.
+			 cd t   * through the sublink zeros in function zero.
 			 */
 			for (j = 0; j < 4; j++) {
 				if (AMD_CpuFindCapability(node, j, &offset)) {
 					if (AMD_checkLinkType(node, j, offset)
 					    & fam10_htphy_default[i].linktype) {
-						AMD_SetHtPhyRegister(node, j, i);
+						AMD_SetHtPhyRegister(node, j,
+								     i);
 					}
 				} else {
 					/* No more capabilities,
@@ -932,14 +899,13 @@
 	AMD_Errata281(node, revision, platform);
 
 	/* FIXME: if the dct phy doesn't init correct it needs to reset.
-	if (revision & (AMD_DR_B2 | AMD_DR_B3))
-		dctPhyDiag(); */
+	   if (revision & (AMD_DR_B2 | AMD_DR_B3))
+	   dctPhyDiag(); */
 
 	printk(BIOS_DEBUG, " done\n");
 }
 
-
-void cpuInitializeMCA(void)
+static void cpuInitializeMCA(void)
 {
 	/* Clears Machine Check Architecture (MCA) registers, which power on
 	 * containing unknown data, on currently running processor.
@@ -951,30 +917,29 @@
 	u32 reg;
 	u8 i;
 
-	if (cpuid_edx(1) & 0x4080) { /* MCE and MCA (edx[7] and edx[14]) */
+	if (cpuid_edx(1) & 0x4080) {	/* MCE and MCA (edx[7] and edx[14]) */
 		msr = rdmsr(MCG_CAP);
-		if (msr.lo & MCG_CTL_P){	/* MCG_CTL_P bit is set? */
+		if (msr.lo & MCG_CTL_P) {	/* MCG_CTL_P bit is set? */
 			msr.lo &= 0xFF;
 			msr.lo--;
-			msr.lo <<= 2;		/* multiply the count by 4 */
+			msr.lo <<= 2;	/* multiply the count by 4 */
 			reg = MC0_STA + msr.lo;
 			msr.lo = msr.hi = 0;
-			for (i=0; i < 4; i++) {
-				wrmsr (reg, msr);
-				reg -=4;	/* Touch status regs for each bank */
+			for (i = 0; i < 4; i++) {
+				wrmsr(reg, msr);
+				reg -= 4;	/* Touch status regs for each bank */
 			}
 		}
 	}
 }
 
-
 /**
  * finalize_node_setup()
  *
  * Do any additional post HT init
  *
  */
-void finalize_node_setup(struct sys_info *sysinfo)
+static void finalize_node_setup(struct sys_info *sysinfo)
 {
 	u8 i;
 	u8 nodes = get_nodes();
@@ -983,13 +948,12 @@
 #if RAMINIT_SYSINFO == 1
 	/* read Node0 F0_0x64 bit [8:10] to find out SbLink # */
 	reg = pci_read_config32(NODE_HT(0), 0x64);
-	sysinfo->sblk = (reg>>8) & 7;
+	sysinfo->sblk = (reg >> 8) & 7;
 	sysinfo->sbbusn = 0;
 	sysinfo->nodes = nodes;
 	sysinfo->sbdn = get_sbdn(sysinfo->sbbusn);
 #endif
 
-
 	for (i = 0; i < nodes; i++) {
 		cpuSetAMDPCI(i);
 	}
@@ -1001,10 +965,9 @@
 
 #if CONFIG_MAX_PHYSICAL_CPUS > 1
 	/* Skip the BSP, start at node 1 */
-	for(i=1; i<nodes; i++) {
+	for (i = 1; i < nodes; i++) {
 		setup_remote_node(i);
 		start_node(i);
 	}
 #endif
 }
-

Modified: trunk/src/cpu/amd/model_10xxx/model_10xxx_init.c
==============================================================================
--- trunk/src/cpu/amd/model_10xxx/model_10xxx_init.c	Thu Apr  8 17:09:53 2010	(r5381)
+++ trunk/src/cpu/amd/model_10xxx/model_10xxx_init.c	Thu Apr  8 17:12:18 2010	(r5382)
@@ -38,13 +38,9 @@
 #include <cpu/amd/model_10xxx_msr.h>
 
 extern device_t get_node_pci(u32 nodeid, u32 fn);
-extern int init_processor_name(void);
-
-
 
 #define MCI_STATUS 0x401
 
-
 msr_t rdmsr_amd(u32 index)
 {
 	 msr_t result;
@@ -67,7 +63,7 @@
 }
 
 
-void model_10xxx_init(device_t dev)
+static void model_10xxx_init(device_t dev)
 {
 	u8 i;
 	msr_t msr;

Modified: trunk/src/cpu/amd/model_fxx/init_cpus.c
==============================================================================
--- trunk/src/cpu/amd/model_fxx/init_cpus.c	Thu Apr  8 17:09:53 2010	(r5381)
+++ trunk/src/cpu/amd/model_fxx/init_cpus.c	Thu Apr  8 17:12:18 2010	(r5382)
@@ -1,357 +1,344 @@
 //it takes the CONFIG_ENABLE_APIC_EXT_ID and CONFIG_APIC_ID_OFFSET and CONFIG_LIFT_BSP_APIC_ID
 #ifndef SET_FIDVID
-	#if CONFIG_K8_REV_F_SUPPORT == 0
-		#define SET_FIDVID 0
-	#else
+#if CONFIG_K8_REV_F_SUPPORT == 0
+	#define SET_FIDVID 0
+#else
 		// for rev F, need to set FID to max
-		#define SET_FIDVID 1
-	#endif
-	
+	#define SET_FIDVID 1
 #endif
 
-#ifndef SET_FIDVID_CORE0_ONLY
-	/* MSR FIDVID_CTL and FIDVID_STATUS are shared by cores, so may don't need to do twice*/
-       	#define SET_FIDVID_CORE0_ONLY 1
 #endif
 
-static inline void print_initcpu8 (const char *strval, unsigned val)
-{
-        printk(BIOS_DEBUG, "%s%02x\n", strval, val);
-}
-
-static inline void print_initcpu8_nocr (const char *strval, unsigned val)
-{
-        printk(BIOS_DEBUG, "%s%02x", strval, val);
-}
-
-
-static inline void print_initcpu16 (const char *strval, unsigned val)
-{
-        printk(BIOS_DEBUG, "%s%04x\n", strval, val);
-}
-
-static inline void print_initcpu(const char *strval, unsigned val)
-{
-        printk(BIOS_DEBUG, "%s%08x\n", strval, val);
-}
+#ifndef SET_FIDVID_CORE0_ONLY
+	/* MSR FIDVID_CTL and FIDVID_STATUS are shared by cores, so may don't need to do twice */
+	#define SET_FIDVID_CORE0_ONLY 1
+#endif
 
-typedef void (*process_ap_t)(unsigned apicid, void *gp);
+typedef void (*process_ap_t) (u32 apicid, void *gp);
 
 //core_range = 0 : all cores
 //core range = 1 : core 0 only
 //core range = 2 : cores other than core0
-static void for_each_ap(unsigned bsp_apicid, unsigned core_range, process_ap_t process_ap, void *gp)
+
+static void for_each_ap(u32 bsp_apicid, u32 core_range, process_ap_t process_ap,
+			void *gp)
 {
 	// here assume the OS don't change our apicid
-	unsigned ap_apicid;
+	u32 ap_apicid;
 
-	unsigned nodes;
-        unsigned siblings = 0;
-        unsigned disable_siblings;
-        unsigned e0_later_single_core;
-        unsigned nb_cfg_54;
-        int i,j;
+	u32 nodes;
+	u32 siblings = 0;
+	u32 disable_siblings;
+	u32 e0_later_single_core;
+	u32 nb_cfg_54;
+	int i, j;
 
 	/* get_nodes define in in_coherent_ht.c */
 	nodes = get_nodes();
 
-        disable_siblings = !CONFIG_LOGICAL_CPUS;
+	disable_siblings = !CONFIG_LOGICAL_CPUS;
+
 #if CONFIG_LOGICAL_CPUS == 1 && CONFIG_HAVE_OPTION_TABLE == 1
-        if(read_option(CMOS_VSTART_multi_core, CMOS_VLEN_multi_core, 0) != 0) { // 0 mean multi core
-                disable_siblings = 1;
-        }
+	if (read_option(CMOS_VSTART_multi_core, CMOS_VLEN_multi_core, 0) != 0) {	// 0 mean multi core
+		disable_siblings = 1;
+	}
 #endif
 
 	/* here I assume that all node are same stepping, otherwise we can use use nb_cfg_54 from bsp for all nodes */
-        nb_cfg_54 = read_nb_cfg_54();
+	nb_cfg_54 = read_nb_cfg_54();
 
-
-        for(i=0; i<nodes;i++) {
+	for (i = 0; i < nodes; i++) {
 		e0_later_single_core = 0;
-                j = ((pci_read_config32(PCI_DEV(0, 0x18+i, 3), 0xe8) >> 12) & 3);
-                if(nb_cfg_54) {
- 	               if(j == 0 ){ // if it is single core, we need to increase siblings for apic calculation 
-                       #if CONFIG_K8_REV_F_SUPPORT == 0
-        	               e0_later_single_core = is_e0_later_in_bsp(i);  // single core
-                       #else
-                               e0_later_single_core = is_cpu_f0_in_bsp(i);  // We can read cpuid(1) from Func3
-                       #endif
-                       } 
-                       if(e0_later_single_core) {
-                                j=1;
-                       }
-                }
-                siblings = j;
-
-                unsigned jstart, jend;
-
-                if(core_range == 2) {
-                        jstart = 1;
-                }
-		else {
+		j = ((pci_read_config32(PCI_DEV(0, 0x18 + i, 3), 0xe8) >> 12) &
+		     3);
+		if (nb_cfg_54) {
+			if (j == 0) {	// if it is single core, we need to increase siblings for apic calculation
+#if CONFIG_K8_REV_F_SUPPORT == 0
+				e0_later_single_core = is_e0_later_in_bsp(i);	// single core
+#else
+				e0_later_single_core = is_cpu_f0_in_bsp(i);	// We can read cpuid(1) from Func3
+#endif
+			}
+			if (e0_later_single_core) {
+				j = 1;
+			}
+		}
+		siblings = j;
+
+		u32 jstart, jend;
+
+		if (core_range == 2) {
+			jstart = 1;
+		} else {
 			jstart = 0;
 		}
 
-                if(e0_later_single_core || disable_siblings || (core_range==1)) {
-                        jend = 0;
-                } else {
-                        jend = siblings;
-		}	
-		
-	
-                for(j=jstart; j<=jend; j++) {
-
-                        ap_apicid = i * (nb_cfg_54?(siblings+1):1) + j * (nb_cfg_54?1:8);
-
-                #if (CONFIG_ENABLE_APIC_EXT_ID == 1)
-			#if CONFIG_LIFT_BSP_APIC_ID == 0
-			if( (i!=0) || (j!=0)) /* except bsp */
-			#endif
-                        	ap_apicid += CONFIG_APIC_ID_OFFSET;
-                #endif
-
-			if(ap_apicid == bsp_apicid) continue;
-
-			process_ap(ap_apicid, gp); 
-
-                }
-        }
-}
+		if (e0_later_single_core || disable_siblings
+		    || (core_range == 1)) {
+			jend = 0;
+		} else {
+			jend = siblings;
+		}
+
+		for (j = jstart; j <= jend; j++) {
+			ap_apicid =
+			    i * (nb_cfg_54 ? (siblings + 1) : 1) +
+			    j * (nb_cfg_54 ? 1 : 8);
+
+#if (CONFIG_ENABLE_APIC_EXT_ID == 1)
+#if CONFIG_LIFT_BSP_APIC_ID == 0
+			if ((i != 0) || (j != 0))	/* except bsp */
+#endif
+				ap_apicid += CONFIG_APIC_ID_OFFSET;
+#endif
+
+			if (ap_apicid == bsp_apicid)
+				continue;
 
+			process_ap(ap_apicid, gp);
 
-static inline int lapic_remote_read(int apicid, int reg, unsigned *pvalue)
-{
-        int timeout;
-        unsigned status;
-        int result;
-        lapic_wait_icr_idle();
-        lapic_write(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(apicid));
-        lapic_write(LAPIC_ICR, LAPIC_DM_REMRD | (reg >> 4));
-        timeout = 0;
-
-        do {
-                status = lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY;
-        } while (status == LAPIC_ICR_BUSY && timeout++ < 1000);
-
-        timeout = 0;
-        do {
-                status = lapic_read(LAPIC_ICR) & LAPIC_ICR_RR_MASK;
-        } while (status == LAPIC_ICR_RR_INPROG && timeout++ < 1000);
-
-        result = -1;
-        if (status == LAPIC_ICR_RR_VALID) {
-                *pvalue = lapic_read(LAPIC_RRR);
-                result = 0;
-        }
-        return result;
+		}
+	}
 }
 
-#define LAPIC_MSG_REG 0x380
+static inline int lapic_remote_read(int apicid, int reg, u32 *pvalue)
+{
+	int timeout;
+	u32 status;
+	int result;
+	lapic_wait_icr_idle();
+	lapic_write(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(apicid));
+	lapic_write(LAPIC_ICR, LAPIC_DM_REMRD | (reg >> 4));
+
+/* Extra busy check compared to lapic.h */
+	timeout = 0;
+	do {
+		status = lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY;
+	} while (status == LAPIC_ICR_BUSY && timeout++ < 1000);
+
+	timeout = 0;
+	do {
+		status = lapic_read(LAPIC_ICR) & LAPIC_ICR_RR_MASK;
+	} while (status == LAPIC_ICR_RR_INPROG && timeout++ < 1000);
+
+	result = -1;
+
+	if (status == LAPIC_ICR_RR_VALID) {
+		*pvalue = lapic_read(LAPIC_RRR);
+		result = 0;
+	}
+	return result;
+}
 
+#define LAPIC_MSG_REG 0x380
 
 #if SET_FIDVID == 1
-static void init_fidvid_ap(unsigned bsp_apicid, unsigned apicid);
+static void init_fidvid_ap(u32 bsp_apicid, u32 apicid);
 #endif
 
-static inline __attribute__((always_inline)) void print_apicid_nodeid_coreid(unsigned apicid, struct node_core_id id, const char *str)
+static inline __attribute__ ((always_inline))
+void print_apicid_nodeid_coreid(u32 apicid, struct node_core_id id,
+				const char *str)
 {
-                printk(BIOS_DEBUG, "%s --- {  APICID = %02x NODEID = %02x COREID = %02x} ---\n", str, apicid, id.nodeid, id.coreid);
+	printk(BIOS_DEBUG,
+	       "%s --- { APICID = %02x NODEID = %02x COREID = %02x} ---\n", str,
+	       apicid, id.nodeid, id.coreid);
 }
 
-
-static unsigned wait_cpu_state(unsigned apicid, unsigned state)
+static u32 wait_cpu_state(u32 apicid, u32 state)
 {
-        unsigned readback = 0;
-	unsigned timeout = 1;
+	u32 readback = 0;
+	u32 timeout = 1;
 	int loop = 2000000;
-        while(--loop>0) {
-                if(lapic_remote_read(apicid, LAPIC_MSG_REG, &readback)!=0) continue;
-                if((readback & 0xff) == state) {
+	while (--loop > 0) {
+		if (lapic_remote_read(apicid, LAPIC_MSG_REG, &readback) != 0)
+			continue;
+		if ((readback & 0xff) == state) {
 			timeout = 0;
-			break; //target cpu is in stage started
+			break;	//target cpu is in stage started
 		}
-        }
-	if(timeout) {
-		if(readback) {
+	}
+	if (timeout) {
+		if (readback) {
 			timeout = readback;
 		}
 	}
 
 	return timeout;
 }
-static void wait_ap_started(unsigned ap_apicid, void *gp )
+
+static void wait_ap_started(u32 ap_apicid, void *gp)
 {
-	unsigned timeout;
-        timeout = wait_cpu_state(ap_apicid, 0x33); // started
-	if(timeout) {
-	        print_initcpu8_nocr("*", ap_apicid);
-	        print_initcpu("*", timeout);
-	}
-	else {
-        	print_initcpu8_nocr(" ", ap_apicid);
+	u32 timeout;
+	timeout = wait_cpu_state(ap_apicid, 0x33);	// started
+	printk(BIOS_DEBUG, "* AP %02x", ap_apicid);
+	if (timeout) {
+		printk(BIOS_DEBUG, " timed out:%08x\n", timeout);
+	} else {
+		printk(BIOS_DEBUG, "started\n");
 	}
 }
 
-static void wait_all_aps_started(unsigned bsp_apicid)
+static void wait_all_aps_started(u32 bsp_apicid)
 {
-        for_each_ap(bsp_apicid, 0 , wait_ap_started, (void *)0);
+	for_each_ap(bsp_apicid, 0, wait_ap_started, (void *)0);
 }
 
-static void wait_all_other_cores_started(unsigned bsp_apicid) // all aps other than core0
+static void wait_all_other_cores_started(u32 bsp_apicid)
 {
-        print_debug("started ap apicid: ");
-        for_each_ap(bsp_apicid, 2 , wait_ap_started, (void *)0);
-        print_debug("\n");
+	// all aps other than core0
+	printk(BIOS_DEBUG, "started ap apicid: ");
+	for_each_ap(bsp_apicid, 2, wait_ap_started, (void *)0);
+	printk(BIOS_DEBUG, "\n");
 }
 
-static void allow_all_aps_stop(unsigned bsp_apicid)
+static void allow_all_aps_stop(u32 bsp_apicid)
 {
-        lapic_write(LAPIC_MSG_REG, (bsp_apicid<<24) | 0x44); // allow aps to stop
+	// allow aps to stop
+
+	lapic_write(LAPIC_MSG_REG, (bsp_apicid << 24) | 0x44);
 }
 
 static void STOP_CAR_AND_CPU(void)
 {
-	disable_cache_as_ram(); // inline
-	stop_this_cpu(); // inline, it will stop all cores except node0/core0 the bsp ....
+	disable_cache_as_ram();	// inline
+	/* stop all cores except node0/core0 the bsp .... */
+	stop_this_cpu();
 }
 
-#if CONFIG_MEM_TRAIN_SEQ == 1
-static inline void train_ram_on_node(unsigned nodeid, unsigned coreid, struct sys_info *sysinfo, unsigned retcall); 
-#endif
-
 #if RAMINIT_SYSINFO == 1
-static unsigned init_cpus(unsigned cpu_init_detectedx ,struct sys_info *sysinfo)
+static u32 init_cpus(u32 cpu_init_detectedx, struct sys_info *sysinfo)
 #else
-static unsigned init_cpus(unsigned cpu_init_detectedx)
+static u32 init_cpus(u32 cpu_init_detectedx)
 #endif
 {
-		unsigned bsp_apicid = 0;
-		unsigned apicid;
-                struct node_core_id id;
-
-		/* 
-                 * already set early mtrr in cache_as_ram.inc
-		 */
-
-		/* that is from initial apicid, we need nodeid and coreid later */
-		id = get_node_core_id_x(); 
-
-
-                /* NB_CFG MSR is shared between cores, so we need make sure core0 is done at first --- use wait_all_core0_started  */
-		if(id.coreid == 0) {
-                	set_apicid_cpuid_lo(); /* only set it on core0 */
-			#if CONFIG_ENABLE_APIC_EXT_ID == 1
-                        enable_apic_ext_id(id.nodeid);
-			#endif
-                }
-
-		enable_lapic();
-//              init_timer(); // We need TMICT to pass msg for FID/VID change
-
-        #if (CONFIG_ENABLE_APIC_EXT_ID == 1)
-		unsigned initial_apicid = get_initial_apicid();	
-                #if CONFIG_LIFT_BSP_APIC_ID == 0
-                if( initial_apicid != 0 ) // other than bsp
-                #endif
-                {
-                                /* use initial apic id to lift it */
-                                uint32_t dword = lapic_read(LAPIC_ID);
-                                dword &= ~(0xff<<24);
-                                dword |= (((initial_apicid + CONFIG_APIC_ID_OFFSET) & 0xff)<<24);
-
-                                lapic_write(LAPIC_ID, dword);
-                }
-
-                #if CONFIG_LIFT_BSP_APIC_ID == 1
-                bsp_apicid += CONFIG_APIC_ID_OFFSET;
-                #endif
-
-        #endif
-
-		/* get the apicid, it may be lifted already */
-		apicid = lapicid();
-
-#if 0 
-		// show our apicid, nodeid, and coreid
-		if( id.coreid==0 ) {
-			if (id.nodeid!=0) //all core0 except bsp
-				print_apicid_nodeid_coreid(apicid, id, " core0: ");
-		}
-	#if 0 
-                else { //all core1
-			print_apicid_nodeid_coreid(apicid, id, " core1: ");
-                }
-	#endif
+	u32 bsp_apicid = 0;
+	u32 apicid;
+	struct node_core_id id;
+
+	/*
+	 * already set early mtrr in cache_as_ram.inc
+	 */
+
+	/* that is from initial apicid, we need nodeid and coreid
+	   later */
+	id = get_node_core_id_x();
+
+	/* NB_CFG MSR is shared between cores, so we need make sure
+	   core0 is done at first --- use wait_all_core0_started  */
+	if (id.coreid == 0) {
+		set_apicid_cpuid_lo();	/* only set it on core0 */
+#if (CONFIG_ENABLE_APIC_EXT_ID == 1)
+		enable_apic_ext_id(id.nodeid);
+#endif
+	}
+
+	enable_lapic();
+	//      init_timer(); // We need TMICT to pass msg for FID/VID change
 
+#if (CONFIG_ENABLE_APIC_EXT_ID == 1)
+	u32 initial_apicid = get_initial_apicid();
+
+#if CONFIG_LIFT_BSP_APIC_ID == 0
+	if (initial_apicid != 0)	// other than bsp
 #endif
+	{
+		/* use initial apic id to lift it */
+		u32 dword = lapic_read(LAPIC_ID);
+		dword &= ~(0xff << 24);
+		dword |=
+		    (((initial_apicid + CONFIG_APIC_ID_OFFSET) & 0xff) << 24);
 
-                if (cpu_init_detectedx) {
-                        print_apicid_nodeid_coreid(apicid, id, "\n\n\nINIT detected from ");
-                        print_debug("\nIssuing SOFT_RESET...\n");
-                        soft_reset();
-                }
-
-		if(id.coreid==0) {
-	                distinguish_cpu_resets(id.nodeid);
-	//              start_other_core(id.nodeid); // start second core in first cpu, only allowed for nb_cfg_54 is not set
-		}
+		lapic_write(LAPIC_ID, dword);
+	}
+#if CONFIG_LIFT_BSP_APIC_ID == 1
+	bsp_apicid += CONFIG_APIC_ID_OFFSET;
+#endif
 
-		//here don't need to wait 
-		lapic_write(LAPIC_MSG_REG, (apicid<<24) | 0x33); // mark the cpu is started
+#endif
 
-		if(apicid != bsp_apicid) {
-			unsigned timeout=1;
-			unsigned loop = 100;
-	#if SET_FIDVID == 1
-		#if (CONFIG_LOGICAL_CPUS == 1) && (SET_FIDVID_CORE0_ONLY == 1)
-			if(id.coreid == 0 ) // only need set fid for core0
-		#endif 
-       		                init_fidvid_ap(bsp_apicid, apicid);
-	#endif
-
-                       // We need to stop the CACHE as RAM for this CPU, really?
-			while(timeout && (loop-->0)) {	
-	                        timeout = wait_cpu_state(bsp_apicid, 0x44);
-			}
-			if(timeout) {
-			        print_initcpu8("while waiting for BSP signal to STOP, timeout in ap ", apicid);
-			}
-                        lapic_write(LAPIC_MSG_REG, (apicid<<24) | 0x44); // bsp can not check it before stop_this_cpu
-			set_var_mtrr(0, 0x00000000, CONFIG_RAMTOP, MTRR_TYPE_WRBACK);
-	#if CONFIG_MEM_TRAIN_SEQ == 1
-			train_ram_on_node(id.nodeid, id.coreid, sysinfo,
-					  (unsigned) STOP_CAR_AND_CPU);
-	#endif
+	/* get the apicid, it may be lifted already */
+	apicid = lapicid();
 
-			STOP_CAR_AND_CPU();
-                } 
+#if 0
+	// show our apicid, nodeid, and coreid
+	if (id.coreid == 0) {
+		if (id.nodeid != 0)	//all core0 except bsp
+			print_apicid_nodeid_coreid(apicid, id, " core0: ");
+	} else {		//all other cores
+		print_apicid_nodeid_coreid(apicid, id, " corex: ");
+	}
+#endif
 
-		return bsp_apicid;
-}
+	if (cpu_init_detectedx) {
+		print_apicid_nodeid_coreid(apicid, id,
+					   "\n\n\nINIT detected from ");
+		printk(BIOS_DEBUG, "\nIssuing SOFT_RESET...\n");
+		soft_reset();
+	}
+
+	if (id.coreid == 0) {
+		distinguish_cpu_resets(id.nodeid);
+//            start_other_core(id.nodeid); // start second core in first cpu, only allowed for nb_cfg_54 is not set
+	}
+	//here don't need to wait
+	lapic_write(LAPIC_MSG_REG, (apicid << 24) | 0x33);	// mark the cpu is started
+
+	if (apicid != bsp_apicid) {
+		u32 timeout = 1;
+		u32 loop = 100;
+
+#if SET_FIDVID == 1
+#if (CONFIG_LOGICAL_CPUS == 1) && (SET_FIDVID_CORE0_ONLY == 1)
+		if (id.coreid == 0)	// only need set fid for core0
+#endif
+			init_fidvid_ap(bsp_apicid, apicid);
+#endif
+
+		// We need to stop the CACHE as RAM for this CPU, really?
+		while (timeout && (loop-- > 0)) {
+			timeout = wait_cpu_state(bsp_apicid, 0x44);
+		}
+		if (timeout) {
+			printk(BIOS_DEBUG,
+			       "while waiting for BSP signal to STOP, timeout in ap %02x\n",
+			       apicid);
+		}
+		lapic_write(LAPIC_MSG_REG, (apicid << 24) | 0x44);	// bsp can not check it before stop_this_cpu
+		set_var_mtrr(0, 0x00000000, CONFIG_RAMTOP, MTRR_TYPE_WRBACK);
+#if CONFIG_MEM_TRAIN_SEQ == 1
+		train_ram_on_node(id.nodeid, id.coreid, sysinfo,
+				  (unsigned)STOP_CAR_AND_CPU);
+#endif
+
+		STOP_CAR_AND_CPU();
+	}
 
+	return bsp_apicid;
+}
 
-static unsigned is_core0_started(unsigned nodeid)
+static u32 is_core0_started(u32 nodeid)
 {
-        uint32_t htic;
-        device_t device;
-        device = PCI_DEV(0, 0x18 + nodeid, 0);
-        htic = pci_read_config32(device, HT_INIT_CONTROL);
-        htic &= HTIC_INIT_Detect;
-        return htic;
+	u32 htic;
+	device_t device;
+	device = PCI_DEV(0, 0x18 + nodeid, 0);
+	htic = pci_read_config32(device, HT_INIT_CONTROL);
+	htic &= HTIC_INIT_Detect;
+	return htic;
 }
 
 static void wait_all_core0_started(void)
 {
-	//When core0 is started, it will distingush_cpu_resets. So wait for that
-	unsigned i;
-	unsigned nodes = get_nodes();
-
-        print_debug("core0 started: ");
-        for(i=1;i<nodes;i++) { // skip bsp, because it is running on bsp
-                while(!is_core0_started(i)) {}
-                print_initcpu8_nocr(" ", i);
-        }
-        print_debug("\n");
-
+	/* When core0 is started, it will distingush_cpu_resets
+	 * So wait for that to finish */
+	u32 i;
+	u32 nodes = get_nodes();
+
+	printk(BIOS_DEBUG, "core0 started: ");
+	for (i = 1; i < nodes; i++) {	// skip bsp, because it is running on bsp
+		while (!is_core0_started(i)) {
+		}
+		printk(BIOS_DEBUG, " %02x", i);
+	}
+	printk(BIOS_DEBUG, "\n");
 }
-

Modified: trunk/src/include/cpu/amd/model_10xxx_msr.h
==============================================================================
--- trunk/src/include/cpu/amd/model_10xxx_msr.h	Thu Apr  8 17:09:53 2010	(r5381)
+++ trunk/src/include/cpu/amd/model_10xxx_msr.h	Thu Apr  8 17:12:18 2010	(r5382)
@@ -33,4 +33,7 @@
 #define LOGICAL_CPUS_NUM_MSR		0xC001100d
 #define CPU_ID_EXT_FEATURES_MSR	0xC0011005
 
+msr_t rdmsr_amd(u32 index);
+void wrmsr_amd(u32 index, msr_t msr);
+
 #endif /* CPU_AMD_MODEL_10XXX_MSR_H */

Modified: trunk/src/include/cpu/amd/model_fxx_rev.h
==============================================================================
--- trunk/src/include/cpu/amd/model_fxx_rev.h	Thu Apr  8 17:09:53 2010	(r5381)
+++ trunk/src/include/cpu/amd/model_fxx_rev.h	Thu Apr  8 17:12:18 2010	(r5382)
@@ -98,7 +98,7 @@
 
 #ifdef __PRE_RAM__
 //AMD_F0_SUPPORT
-static int is_cpu_f0_in_bsp(int nodeid)
+static inline int is_cpu_f0_in_bsp(int nodeid)
 {
 	uint32_t dword;
 	device_t dev;
@@ -106,7 +106,7 @@
 	dword = pci_read_config32(dev, 0xfc);
         return (dword & 0xfff00) == 0x40f00;
 }
-static int is_cpu_pre_f2_in_bsp(int nodeid)
+static inline int is_cpu_pre_f2_in_bsp(int nodeid)
 {
         uint32_t dword;
 	device_t dev;

Modified: trunk/src/northbridge/amd/amdfam10/northbridge.c
==============================================================================
--- trunk/src/northbridge/amd/amdfam10/northbridge.c	Thu Apr  8 17:09:53 2010	(r5381)
+++ trunk/src/northbridge/amd/amdfam10/northbridge.c	Thu Apr  8 17:12:18 2010	(r5382)
@@ -153,7 +153,9 @@
 		device_t devx;
 		u32 busses;
 		u32 segn = max>>8;
+#if CONFIG_SB_HT_CHAIN_ON_BUS0 > 1
 		u32 busn = max&0xff;
+#endif
 		u32 max_devfn;
 
 #if CONFIG_HT3_SUPPORT==1
@@ -332,7 +334,7 @@
 			u32 goal_link)
 {
 	struct resource *res;
-	u32 nodeid, link;
+	u32 nodeid, link = 0;
 	int result;
 	res = 0;
 	for(nodeid = 0; !res && (nodeid < NODE_NUMS); nodeid++) {
@@ -646,9 +648,7 @@
 
 static void amdfam10_domain_read_resources(device_t dev)
 {
-	struct resource *resource;
 	unsigned reg;
-	unsigned link;
 
 	/* Find the already assigned resource pairs */
 	get_fx_devs();
@@ -658,19 +658,19 @@
 		limit = f1_read_config32(reg + 0x04);
 		/* Is this register allocated? */
 		if ((base & 3) != 0) {
-			unsigned nodeid, link;
+			unsigned nodeid, reg_link;
 			device_t reg_dev;
 			if(reg<0xc0) { // mmio
 				nodeid = (limit & 0xf) + (base&0x30);
 			} else { // io
 				nodeid =  (limit & 0xf) + ((base>>4)&0x30);
 			}
-			link   = (limit >> 4) & 7;
+			reg_link   = (limit >> 4) & 7;
 			reg_dev = __f0_dev[nodeid];
 			if (reg_dev) {
 				/* Reserve the resource  */
 				struct resource *reg_resource;
-				reg_resource = new_resource(reg_dev, IOINDEX(0x1000 + reg, link));
+				reg_resource = new_resource(reg_dev, IOINDEX(0x1000 + reg, reg_link));
 				if (reg_resource) {
 					reg_resource->flags = 1;
 				}
@@ -683,6 +683,8 @@
 #if CONFIG_PCI_64BIT_PREF_MEM == 0
 	pci_domain_read_resources(dev);
 #else
+	unsigned link;
+	struct resource *resource;
 	for(link=0; link<dev->links; link++) {
 		/* Initialize the system wide io space constraints */
 		resource = new_resource(dev, 0|(link<<2));
@@ -1215,7 +1217,9 @@
 {
 	struct bus *cpu_bus;
 	device_t dev_mc;
+#if CONFIG_CBB
 	device_t pci_domain;
+#endif
 	int i,j;
 	int nodes;
 	unsigned nb_cfg_54;
@@ -1309,7 +1313,7 @@
 	/* Find which cpus are present */
 	cpu_bus = &dev->link[0];
 	for(i = 0; i < nodes; i++) {
-		device_t dev, cpu;
+		device_t cdb_dev, cpu;
 		struct device_path cpu_path;
 		unsigned busn, devn;
 		struct bus *pbus;
@@ -1326,47 +1330,47 @@
 #endif
 
 		/* Find the cpu's pci device */
-		dev = dev_find_slot(busn, PCI_DEVFN(devn, 0));
-		if (!dev) {
+		cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 0));
+		if (!cdb_dev) {
 			/* If I am probing things in a weird order
 			 * ensure all of the cpu's pci devices are found.
 			 */
-			int j;
-			for(j = 0; j <= 5; j++) { //FBDIMM?
-				dev = pci_probe_dev(NULL, pbus,
-					PCI_DEVFN(devn, j));
+			int fn;
+			for(fn = 0; fn <= 5; fn++) { //FBDIMM?
+				cdb_dev = pci_probe_dev(NULL, pbus,
+					PCI_DEVFN(devn, fn));
 			}
-			dev = dev_find_slot(busn, PCI_DEVFN(devn,0));
+			cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn,0));
 		}
-		if(dev) {
+		if(cdb_dev) {
 			/* Ok, We need to set the links for that device.
 			 * otherwise the device under it will not be scanned
 			 */
-			int j;
+			int link;
 			int linknum;
 #if CONFIG_HT3_SUPPORT==1
 			linknum = 8;
 #else
 			linknum = 4;
 #endif
-			if(dev->links < linknum) {
-				for(j=dev->links; j<linknum; j++) {
-					 dev->link[j].link = j;
-					 dev->link[j].dev = dev;
+			if(cdb_dev->links < linknum) {
+				for(link=cdb_dev->links; link<linknum; link++) {
+					 cdb_dev->link[link].link = link;
+					 cdb_dev->link[link].dev = cdb_dev;
 				}
-				dev->links = linknum;
-				printk(BIOS_DEBUG, "%s links increase to %d\n", dev_path(dev), dev->links);
+				cdb_dev->links = linknum;
+				printk(BIOS_DEBUG, "%s links increase to %d\n", dev_path(cdb_dev), cdb_dev->links);
 			}
 		}
 
 		cores_found = 0; // one core
-		dev = dev_find_slot(busn, PCI_DEVFN(devn, 3));
-		if (dev && dev->enabled) {
-			j = pci_read_config32(dev, 0xe8);
+		cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 3));
+		if (cdb_dev && cdb_dev->enabled) {
+			j = pci_read_config32(cdb_dev, 0xe8);
 			cores_found = (j >> 12) & 3; // dev is func 3
 			if (siblings > 3)
 				cores_found |= (j >> 13) & 4;
-			printk(BIOS_DEBUG, "  %s siblings=%d\n", dev_path(dev), cores_found);
+			printk(BIOS_DEBUG, "  %s siblings=%d\n", dev_path(cdb_dev), cores_found);
 		}
 
 		u32 jj;
@@ -1387,7 +1391,7 @@
 			cpu = find_dev_path(cpu_bus, &cpu_path);
 
 			/* Enable the cpu if I have the processor */
-			if (dev && dev->enabled) {
+			if (cdb_dev && cdb_dev->enabled) {
 				if (!cpu) {
 					cpu = alloc_dev(cpu_bus, &cpu_path);
 				}
@@ -1397,7 +1401,7 @@
 			}
 
 			/* Disable the cpu if I don't have the processor */
-			if (cpu && (!dev || !dev->enabled)) {
+			if (cpu && (!cdb_dev || !cdb_dev->enabled)) {
 				cpu->enabled = 0;
 			}
 

Modified: trunk/src/northbridge/amd/amdfam10/raminit_amdmct.c
==============================================================================
--- trunk/src/northbridge/amd/amdfam10/raminit_amdmct.c	Thu Apr  8 17:09:53 2010	(r5381)
+++ trunk/src/northbridge/amd/amdfam10/raminit_amdmct.c	Thu Apr  8 17:12:18 2010	(r5382)
@@ -17,23 +17,17 @@
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
  */
 
-
-static  void print_raminit(const char *strval, u32 val)
-{
-	printk(BIOS_DEBUG, "%s%08x\n", strval, val);
-}
-
 static  void print_tx(const char *strval, u32 val)
 {
 #if CONFIG_DEBUG_RAM_SETUP
-	print_raminit(strval, val);
+	printk(BIOS_DEBUG, "%s%08x\n", strval, val);
 #endif
 }
 
 static  void print_t(const char *strval)
 {
 #if CONFIG_DEBUG_RAM_SETUP
-	print_debug(strval);
+	printk(BIOS_DEBUG, "%s", strval);
 #endif
 }
 #include "amdfam10.h"

Modified: trunk/src/northbridge/amd/amdmct/mct/mct_d.h
==============================================================================
--- trunk/src/northbridge/amd/amdmct/mct/mct_d.h	Thu Apr  8 17:09:53 2010	(r5381)
+++ trunk/src/northbridge/amd/amdmct/mct/mct_d.h	Thu Apr  8 17:12:18 2010	(r5382)
@@ -726,13 +726,16 @@
 void InterleaveNodes_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA);
 void InterleaveChannels_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA);
 void mct_BeforeDQSTrain_Samp_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat);
-static void StoreDQSDatStrucVal_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, u8 ChipSel);
+void StoreDQSDatStrucVal_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, u8 ChipSel);
 void phyAssistedMemFnceTraining(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA);
 u8 mct_SaveRcvEnDly_D_1Pass(struct DCTStatStruc *pDCTstat, u8 pass);
-static void mct_AdjustScrub_D(struct DCTStatStruc *pDCTstat, u16 *scrub_request);
-static u8 mct_InitReceiver_D(struct DCTStatStruc *pDCTstat, u8 dct);
-static void mct_Wait(u32 cycles);
+u32 CheckNBCOFAutoPrechg(struct DCTStatStruc *pDCTstat, u32 dct);
+u8 mct_AdjustDQSPosDelay_D(struct DCTStatStruc *pDCTstat, u8 dly);
+void mct_AdjustScrub_D(struct DCTStatStruc *pDCTstat, u16 *scrub_request);
+u8 mct_InitReceiver_D(struct DCTStatStruc *pDCTstat, u8 dct);
+void mct_Wait(u32 cycles);
 u8 mct_RcvrRankEnabled_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, u8 Channel, u8 ChipSel);
 u32 mct_GetRcvrSysAddr_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, u8 channel, u8 receiver, u8 *valid);
 void mct_Read1LTestPattern_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, u32 addr);
+void EarlySampleSupport_D(void);
 #endif

Modified: trunk/src/northbridge/amd/amdmct/mct/mctdqs_d.c
==============================================================================
--- trunk/src/northbridge/amd/amdmct/mct/mctdqs_d.c	Thu Apr  8 17:09:53 2010	(r5381)
+++ trunk/src/northbridge/amd/amdmct/mct/mctdqs_d.c	Thu Apr  8 17:12:18 2010	(r5382)
@@ -582,8 +582,8 @@
 }
 
 
-static void StoreDQSDatStrucVal_D(struct MCTStatStruc *pMCTstat,
-					struct DCTStatStruc *pDCTstat, u8 ChipSel)
+void StoreDQSDatStrucVal_D(struct MCTStatStruc *pMCTstat,
+			struct DCTStatStruc *pDCTstat, u8 ChipSel)
 {
 	/* Store the DQSDelay value, found during a training sweep, into the DCT
 	 * status structure for this node

Modified: trunk/src/northbridge/amd/amdmct/mct/mctpro_d.c
==============================================================================
--- trunk/src/northbridge/amd/amdmct/mct/mctpro_d.c	Thu Apr  8 17:09:53 2010	(r5381)
+++ trunk/src/northbridge/amd/amdmct/mct/mctpro_d.c	Thu Apr  8 17:12:18 2010	(r5382)
@@ -17,15 +17,10 @@
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
  */
 
-
-static u32 CheckNBCOFAutoPrechg(struct DCTStatStruc *pDCTstat, u32 dct);
-static u8 mct_AdjustDQSPosDelay_D(struct DCTStatStruc *pDCTstat, u8 dly);
-
 void EarlySampleSupport_D(void)
 {
 }
 
-
 u32 procOdtWorkaround(struct DCTStatStruc *pDCTstat, u32 dct, u32 val)
 {
 	u32 tmp;
@@ -251,7 +246,7 @@
 }
 
 
-static u32 CheckNBCOFAutoPrechg(struct DCTStatStruc *pDCTstat, u32 dct)
+u32 CheckNBCOFAutoPrechg(struct DCTStatStruc *pDCTstat, u32 dct)
 {
 	u32 ret = 0;
 	u32 lo, hi;
@@ -362,7 +357,7 @@
 }
 
 
-static u8 mct_AdjustDQSPosDelay_D(struct DCTStatStruc *pDCTstat, u8 dly)
+u8 mct_AdjustDQSPosDelay_D(struct DCTStatStruc *pDCTstat, u8 dly)
 {
 	u8 skip = 0;
 
@@ -393,8 +388,7 @@
 	return 0;
 }
 
-
-static void mct_AdjustScrub_D(struct DCTStatStruc *pDCTstat, u16 *scrub_request) {
+void mct_AdjustScrub_D(struct DCTStatStruc *pDCTstat, u16 *scrub_request) {
 
 	/* Erratum #202: disable DCache scrubber for Ax parts */
 
@@ -403,4 +397,3 @@
 		pDCTstat->ErrStatus |= 1 << SB_DCBKScrubDis;
 	}
 }
-

Modified: trunk/src/northbridge/amd/amdmct/mct/mctsrc.c
==============================================================================
--- trunk/src/northbridge/amd/amdmct/mct/mctsrc.c	Thu Apr  8 17:09:53 2010	(r5381)
+++ trunk/src/northbridge/amd/amdmct/mct/mctsrc.c	Thu Apr  8 17:12:18 2010	(r5382)
@@ -502,7 +502,7 @@
 }
 
 
-static u8 mct_InitReceiver_D(struct DCTStatStruc *pDCTstat, u8 dct)
+u8 mct_InitReceiver_D(struct DCTStatStruc *pDCTstat, u8 dct)
 {
 	if (pDCTstat->DIMMValidDCT[dct] == 0 ) {
 		return 8;
@@ -1080,7 +1080,7 @@
 }
 
 
-static void mct_Wait(u32 cycles)
+void mct_Wait(u32 cycles)
 {
 	u32 saved;
 	u32 hi, lo, msr;



