[coreboot-gerrit] Patch set updated for coreboot: northbridge/amd/amdmct: Improve code formatting

Martin Roth (martinroth@google.com) gerrit at coreboot.org
Wed Sep 21 15:58:09 CEST 2016


Martin Roth (martinroth at google.com) just uploaded a new patch set to gerrit, which you can find at https://review.coreboot.org/16643

-gerrit

commit f7f041532f585d46ee34aa526176dda82c361323
Author: Elyes HAOUAS <ehaouas at noos.fr>
Date:   Mon Sep 19 10:25:41 2016 -0600

    northbridge/amd/amdmct: Improve code formatting
    
    Change-Id: If87718b6c91d79212a9b045f5fda32d69ac4caee
    Signed-off-by: Elyes HAOUAS <ehaouas at noos.fr>
---
 src/northbridge/amd/amdmct/amddefs.h            |   4 +-
 src/northbridge/amd/amdmct/mct/mct.h            | 202 ++++++++++----------
 src/northbridge/amd/amdmct/mct/mct_d.c          | 234 ++++++++++++------------
 src/northbridge/amd/amdmct/mct/mct_d.h          | 220 +++++++++++-----------
 src/northbridge/amd/amdmct/mct/mct_d_gcc.h      |  12 +-
 src/northbridge/amd/amdmct/mct/mctardk3.c       |   2 +-
 src/northbridge/amd/amdmct/mct/mctardk4.c       |   2 +-
 src/northbridge/amd/amdmct/mct/mctchi_d.c       |   4 +-
 src/northbridge/amd/amdmct/mct/mctcsi_d.c       |  16 +-
 src/northbridge/amd/amdmct/mct/mctdqs_d.c       |  52 +++---
 src/northbridge/amd/amdmct/mct/mctecc_d.c       |  30 +--
 src/northbridge/amd/amdmct/mct/mctgr.c          |   6 +-
 src/northbridge/amd/amdmct/mct/mctmtr_d.c       |  28 +--
 src/northbridge/amd/amdmct/mct/mctndi_d.c       |  10 +-
 src/northbridge/amd/amdmct/mct/mctpro_d.c       |  26 +--
 src/northbridge/amd/amdmct/mct/mctsrc.c         |  52 +++---
 src/northbridge/amd/amdmct/mct/mctsrc1p.c       |   2 +-
 src/northbridge/amd/amdmct/mct/mctsrc2p.c       |   6 +-
 src/northbridge/amd/amdmct/mct/mcttmrl.c        |  24 +--
 src/northbridge/amd/amdmct/mct_ddr3/mct_d.c     | 128 ++++++-------
 src/northbridge/amd/amdmct/mct_ddr3/mct_d.h     | 220 +++++++++++-----------
 src/northbridge/amd/amdmct/mct_ddr3/mct_d_gcc.h |  12 +-
 src/northbridge/amd/amdmct/mct_ddr3/mctardk6.c  |   2 +-
 src/northbridge/amd/amdmct/mct_ddr3/mctchi_d.c  |   6 +-
 src/northbridge/amd/amdmct/mct_ddr3/mctcsi_d.c  |   2 +-
 src/northbridge/amd/amdmct/mct_ddr3/mctdqs_d.c  |  44 ++---
 src/northbridge/amd/amdmct/mct_ddr3/mctecc_d.c  |  28 +--
 src/northbridge/amd/amdmct/mct_ddr3/mctmtr_d.c  |  28 +--
 src/northbridge/amd/amdmct/mct_ddr3/mctndi_d.c  |  10 +-
 src/northbridge/amd/amdmct/mct_ddr3/mctrci.c    |   4 +-
 src/northbridge/amd/amdmct/mct_ddr3/mctsdi.c    |   6 +-
 src/northbridge/amd/amdmct/mct_ddr3/mctsrc.c    |  56 +++---
 src/northbridge/amd/amdmct/mct_ddr3/mctsrc1p.c  |   2 +-
 src/northbridge/amd/amdmct/mct_ddr3/mctsrc2p.c  |   6 +-
 src/northbridge/amd/amdmct/mct_ddr3/mcttmrl.c   |  20 +-
 src/northbridge/amd/amdmct/mct_ddr3/mctwl.c     |  10 +-
 src/northbridge/amd/amdmct/mct_ddr3/mhwlc_d.c   |  82 ++++-----
 src/northbridge/amd/amdmct/mct_ddr3/s3utils.c   | 110 +++++------
 src/northbridge/amd/amdmct/wrappers/mcti.h      |   2 +-
 src/northbridge/amd/amdmct/wrappers/mcti_d.c    |  22 +--
 40 files changed, 866 insertions(+), 866 deletions(-)

diff --git a/src/northbridge/amd/amdmct/amddefs.h b/src/northbridge/amd/amdmct/amddefs.h
index bbdd2ed..9d3e86a 100644
--- a/src/northbridge/amd/amdmct/amddefs.h
+++ b/src/northbridge/amd/amdmct/amddefs.h
@@ -74,8 +74,8 @@
 #define AMD_FAM10_REV_D		(AMD_HY_D0 | AMD_HY_D1)
 #define	AMD_DA_Cx       	(AMD_DA_C2 | AMD_DA_C3)
 #define	AMD_FAM10_C3		(AMD_RB_C3 | AMD_DA_C3)
-#define	AMD_DRBH_Cx		(AMD_DR_Cx | AMD_HY_D0 )
-#define	AMD_DRBA23_RBC2		(AMD_DR_BA | AMD_DR_B2 | AMD_DR_B3 | AMD_RB_C2 )
+#define	AMD_DRBH_Cx		(AMD_DR_Cx | AMD_HY_D0)
+#define	AMD_DRBA23_RBC2		(AMD_DR_BA | AMD_DR_B2 | AMD_DR_B3 | AMD_RB_C2)
 #define	AMD_DR_DAC2_OR_C3	(AMD_DA_C2 | AMD_DA_C3 | AMD_RB_C3)
 #define	AMD_FAM15_ALL		(AMD_OR_B2 | AMD_OR_C0)
 
diff --git a/src/northbridge/amd/amdmct/mct/mct.h b/src/northbridge/amd/amdmct/mct/mct.h
index 61b2b2b..3847688 100644
--- a/src/northbridge/amd/amdmct/mct/mct.h
+++ b/src/northbridge/amd/amdmct/mct/mct.h
@@ -23,16 +23,16 @@
 #define PT_M2		1
 #define PT_S1		2
 
-#define J_MIN		0		/* j loop constraint. 1=CL 2.0 T*/
-#define J_MAX		4		/* j loop constraint. 4=CL 6.0 T*/
-#define K_MIN		1		/* k loop constraint. 1=200 MHz*/
-#define K_MAX		4		/* k loop constraint. 9=400 MHz*/
-#define CL_DEF		2		/* Default value for failsafe operation. 2=CL 4.0 T*/
-#define T_DEF		1		/* Default value for failsafe operation. 1=5ns (cycle time)*/
-
-#define BSCRate	1		/* reg bit field=rate of dram scrubber for ecc*/
+#define J_MIN		0		/* j loop constraint. 1 = CL 2.0 T*/
+#define J_MAX		4		/* j loop constraint. 4 = CL 6.0 T*/
+#define K_MIN		1		/* k loop constraint. 1 = 200 MHz*/
+#define K_MAX		4		/* k loop constraint. 9 = 400 MHz*/
+#define CL_DEF		2		/* Default value for failsafe operation. 2 = CL 4.0 T*/
+#define T_DEF		1		/* Default value for failsafe operation. 1 = 5ns (cycle time)*/
+
+#define BSCRate	1		/* reg bit field = rate of dram scrubber for ecc*/
 					/* memory initialization (ecc and check-bits).*/
-					/* 1=40 ns/64 bytes.*/
+					/* 1 = 40 ns/64 bytes.*/
 #define FirstPass	1		/* First pass through RcvEn training*/
 #define SecondPass	2		/* Second pass through Rcven training*/
 
@@ -208,7 +208,7 @@ struct DCTStatStruc {		/* A per Node structure*/
 				/* SPD address of..MB2_CS_L[0,1]*/
 				/* SPD address of..MA3_CS_L[0,1]*/
 				/* SPD address of..MB3_CS_L[0,1]*/
-	u16 DIMMPresent;	/* For each bit n 0..7, 1=DIMM n is present.
+	u16 DIMMPresent;	/* For each bit n 0..7, 1 = DIMM n is present.
 				   DIMM#  Select Signal
 				   0	  MA0_CS_L[0,1]
 				   1	  MB0_CS_L[0,1]
@@ -218,14 +218,14 @@ struct DCTStatStruc {		/* A per Node structure*/
 				   5	  MB2_CS_L[0,1]
 				   6	  MA3_CS_L[0,1]
 				   7	  MB3_CS_L[0,1]*/
-	u16 DIMMValid;		/* For each bit n 0..7, 1=DIMM n is valid and is/will be configured*/
-	u16 DIMMSPDCSE;		/* For each bit n 0..7, 1=DIMM n SPD checksum error*/
-	u16 DimmECCPresent;	/* For each bit n 0..7, 1=DIMM n is ECC capable.*/
-	u16 DimmPARPresent;	/* For each bit n 0..7, 1=DIMM n is ADR/CMD Parity capable.*/
-	u16 Dimmx4Present;		/* For each bit n 0..7, 1=DIMM n contains x4 data devices.*/
-	u16 Dimmx8Present;		/* For each bit n 0..7, 1=DIMM n contains x8 data devices.*/
-	u16 Dimmx16Present;	/* For each bit n 0..7, 1=DIMM n contains x16 data devices.*/
-	u16 DIMM1Kpage;		/* For each bit n 0..7, 1=DIMM n contains 1K page devices.*/
+	u16 DIMMValid;		/* For each bit n 0..7, 1 = DIMM n is valid and is/will be configured*/
+	u16 DIMMSPDCSE;		/* For each bit n 0..7, 1 = DIMM n SPD checksum error*/
+	u16 DimmECCPresent;	/* For each bit n 0..7, 1 = DIMM n is ECC capable.*/
+	u16 DimmPARPresent;	/* For each bit n 0..7, 1 = DIMM n is ADR/CMD Parity capable.*/
+	u16 Dimmx4Present;		/* For each bit n 0..7, 1 = DIMM n contains x4 data devices.*/
+	u16 Dimmx8Present;		/* For each bit n 0..7, 1 = DIMM n contains x8 data devices.*/
+	u16 Dimmx16Present;	/* For each bit n 0..7, 1 = DIMM n contains x16 data devices.*/
+	u16 DIMM1Kpage;		/* For each bit n 0..7, 1 = DIMM n contains 1K page devices.*/
 	u8 MAload[2];		/* Number of devices loading MAA bus*/
 					/* Number of devices loading MAB bus*/
 	u8 MAdimms[2];		/* Number of DIMMs loading CH A*/
@@ -233,16 +233,16 @@ struct DCTStatStruc {		/* A per Node structure*/
 	u8 DATAload[2];		/* Number of ranks loading CH A DATA*/
 					/* Number of ranks loading CH B DATA*/
 	u8 DIMMAutoSpeed;		/* Max valid Mfg. Speed of DIMMs
-					   1=200MHz
-					   2=266MHz
-					   3=333MHz
-					   4=400MHz */
+					   1 = 200MHz
+					   2 = 266MHz
+					   3 = 333MHz
+					   4 = 400MHz */
 	u8 DIMMCASL;		/* Min valid Mfg. CL bitfield
-					   0=2.0
-					   1=3.0
-					   2=4.0
-					   3=5.0
-					   4=6.0 */
+					   0 = 2.0
+					   1 = 3.0
+					   2 = 4.0
+					   3 = 5.0
+					   4 = 6.0 */
 	u16 DIMMTrcd;	/* Minimax Trcd*40 (ns) of DIMMs*/
 	u16 DIMMTrp;	/* Minimax Trp*40 (ns) of DIMMs*/
 	u16 DIMMTrtp;	/* Minimax Trtp*40 (ns) of DIMMs*/
@@ -252,16 +252,16 @@ struct DCTStatStruc {		/* A per Node structure*/
 	u16 DIMMTrrd;	/* Minimax Trrd*40 (ns) of DIMMs*/
 	u16 DIMMTwtr;	/* Minimax Twtr*40 (ns) of DIMMs*/
 	u8 Speed;		/* Bus Speed (to set Controller)
-				   1=200MHz
-				   2=266MHz
-				   3=333MHz
-				   4=400MHz */
+				   1 = 200MHz
+				   2 = 266MHz
+				   3 = 333MHz
+				   4 = 400MHz */
 	u8 CASL;		/* CAS latency DCT setting
-				   0=2.0
-				   1=3.0
-				   2=4.0
-				   3=5.0
-				   4=6.0 */
+				   0 = 2.0
+				   1 = 3.0
+				   2 = 4.0
+				   3 = 5.0
+				   4 = 6.0 */
 	u8 Trcd;		/* DCT Trcd (busclocks) */
 	u8 Trp;		/* DCT Trp (busclocks) */
 	u8 Trtp;		/* DCT Trtp (busclocks) */
@@ -271,27 +271,27 @@ struct DCTStatStruc {		/* A per Node structure*/
 	u8 Trrd;		/* DCT Trrd (busclocks) */
 	u8 Twtr;		/* DCT Twtr (busclocks) */
 	u8 Trfc[4];	/* DCT Logical DIMM0 Trfc
-				   0=75ns (for 256Mb devs)
-				   1=105ns (for 512Mb devs)
-				   2=127.5ns (for 1Gb devs)
-				   3=195ns (for 2Gb devs)
-				   4=327.5ns (for 4Gb devs) */
+				   0 = 75ns (for 256Mb devs)
+				   1 = 105ns (for 512Mb devs)
+				   2 = 127.5ns (for 1Gb devs)
+				   3 = 195ns (for 2Gb devs)
+				   4 = 327.5ns (for 4Gb devs) */
 				/* DCT Logical DIMM1 Trfc (see Trfc0 for format) */
 				/* DCT Logical DIMM2 Trfc (see Trfc0 for format) */
 				/* DCT Logical DIMM3 Trfc (see Trfc0 for format) */
-	u16 CSPresent;	/* For each bit n 0..7, 1=Chip-select n is present */
-	u16 CSTestFail;	/* For each bit n 0..7, 1=Chip-select n is present but disabled */
+	u16 CSPresent;	/* For each bit n 0..7, 1 = Chip-select n is present */
+	u16 CSTestFail;	/* For each bit n 0..7, 1 = Chip-select n is present but disabled */
 	u32 DCTSysBase;	/* BASE[39:8] (system address) of this Node's DCTs. */
 	u32 DCTHoleBase;	/* If not zero, BASE[39:8] (system address) of dram hole for HW remapping.  Dram hole exists on this Node's DCTs. */
 	u32 DCTSysLimit;	/* LIMIT[39:8] (system address) of this Node's DCTs */
 	u16 PresetmaxFreq;	/* Maximum OEM defined DDR frequency
-				   200=200MHz (DDR400)
-				   266=266MHz (DDR533)
-				   333=333MHz (DDR667)
-				   400=400MHz (DDR800) */
+				   200 = 200MHz (DDR400)
+				   266 = 266MHz (DDR533)
+				   333 = 333MHz (DDR667)
+				   400 = 400MHz (DDR800) */
 	u8 _2Tmode;	/* 1T or 2T CMD mode (slow access mode)
-				   1=1T
-				   2=2T */
+				   1 = 1T
+				   2 = 2T */
 	u8 TrwtTO;		/* DCT TrwtTO (busclocks)*/
 	u8 Twrrd;		/* DCT Twrrd (busclocks)*/
 	u8 Twrwr;		/* DCT Twrwr (busclocks)*/
@@ -319,9 +319,9 @@ struct DCTStatStruc {		/* A per Node structure*/
 					/* CHB DIMM 0 - 3 Receiver Enable Delay*/
 	u32 PtrPatternBufA;	/* Ptr on stack to aligned DQS testing pattern*/
 	u32 PtrPatternBufB;	/*Ptr on stack to aligned DQS testing pattern*/
-	u8 Channel;	/* Current Channel (0= CH A, 1=CH B)*/
+	u8 Channel;	/* Current Channel (0= CH A, 1 = CH B)*/
 	u8 ByteLane;	/* Current Byte Lane (0..7)*/
-	u8 Direction;	/* Current DQS-DQ training write direction (0=read, 1=write)*/
+	u8 Direction;	/* Current DQS-DQ training write direction (0 = read, 1 = write)*/
 	u8 Pattern;	/* Current pattern*/
 	u8 DQSDelay;	/* Current DQS delay value*/
 	u32 TrainErrors;	/* Current Training Errors*/
@@ -399,72 +399,72 @@ struct DCTStatStruc {		/* A per Node structure*/
 ===============================================================================*/
 /* Platform Configuration */
 #define NV_PACK_TYPE		0	/* CPU Package Type (2-bits)
-					   0=NPT L1
-					   1=NPT M2
-					   2=NPT S1*/
+					   0 = NPT L1
+					   1 = NPT M2
+					   2 = NPT S1*/
 #define NV_MAX_NODES		1	/* Number of Nodes/Sockets (4-bits)*/
 #define NV_MAX_DIMMS		2	/* Number of DIMM slots for the specified Node ID (4-bits)*/
 #define NV_MAX_MEMCLK		3	/* Maximum platform demonstrated Memclock (10-bits)
-					   200=200MHz (DDR400)
-					   266=266MHz (DDR533)
-					   333=333MHz (DDR667)
-					   400=400MHz (DDR800)*/
+					   200 = 200MHz (DDR400)
+					   266 = 266MHz (DDR533)
+					   333 = 333MHz (DDR667)
+					   400 = 400MHz (DDR800)*/
 #define NV_ECC_CAP		4	/* Bus ECC capable (1-bits)
-					   0=Platform not capable
-					   1=Platform is capable*/
+					   0 = Platform not capable
+					   1 = Platform is capable*/
 #define NV_4RANKType		5	/* Quad Rank DIMM slot type (2-bits)
-					   0=Normal
-					   1=R4 (4-Rank Registered DIMMs in AMD server configuration)
-					   2=S4 (Unbuffered SO-DIMMs)*/
+					   0 = Normal
+					   1 = R4 (4-Rank Registered DIMMs in AMD server configuration)
+					   2 = S4 (Unbuffered SO-DIMMs)*/
 #define NV_BYPMAX		6	/* Value to set DcqBypassMax field (See Function 2, Offset 94h, [27:24] of BKDG for field definition).
-					   4=4 times bypass (normal for non-UMA systems)
-					   7=7 times bypass (normal for UMA systems)*/
+					   4 = 4 times bypass (normal for non-UMA systems)
+					   7 = 7 times bypass (normal for UMA systems)*/
 #define NV_RDWRQBYP		7	/* Value to set RdWrQByp field (See Function 2, Offset A0h, [3:2] of BKDG for field definition).
-					   2=8 times (normal for non-UMA systems)
-					   3=16 times (normal for UMA systems)*/
+					   2 = 8 times (normal for non-UMA systems)
+					   3 = 16 times (normal for UMA systems)*/
 
 
 /* Dram Timing */
 #define NV_MCTUSRTMGMODE	10	/* User Memclock Mode (2-bits)
-					   0=Auto, no user limit
-					   1=Auto, user limit provided in NV_MemCkVal
-					   2=Manual, user value provided in NV_MemCkVal*/
+					   0 = Auto, no user limit
+					   1 = Auto, user limit provided in NV_MemCkVal
+					   2 = Manual, user value provided in NV_MemCkVal*/
 #define NV_MemCkVal		11	/* Memory Clock Value (2-bits)
-					   0=200MHz
-					   1=266MHz
-					   2=333MHz
-					   3=400MHz*/
+					   0 = 200MHz
+					   1 = 266MHz
+					   2 = 333MHz
+					   3 = 400MHz*/
 
 /* Dram Configuration */
 #define NV_BankIntlv		20	/* Dram Bank (chip-select) Interleaving (1-bits)
-					   0=disable
-					   1=enable*/
+					   0 = disable
+					   1 = enable*/
 #define NV_AllMemClks		21	/* Turn on All DIMM clocks (1-bits)
-					   0=normal
-					   1=enable all memclocks*/
+					   0 = normal
+					   1 = enable all memclocks*/
 #define NV_SPDCHK_RESTRT	22	/* SPD Check control bitmap (1-bits)
-					   0=Exit current node init if any DIMM has SPD checksum error
-					   1=Ignore faulty SPD checksums (Note: DIMM cannot be enabled)*/
+					   0 = Exit current node init if any DIMM has SPD checksum error
+					   1 = Ignore faulty SPD checksums (Note: DIMM cannot be enabled)*/
 #define NV_DQSTrainCTL		23	/* DQS Signal Timing Training Control
-					   0=skip DQS training
-					   1=perform DQS training*/
+					   0 = skip DQS training
+					   1 = perform DQS training*/
 #define NV_NodeIntlv		24	/* Node Memory Interleaving (1-bits)
-					   0=disable
-					   1=enable*/
+					   0 = disable
+					   1 = enable*/
 #define NV_BurstLen32		25	/* burstLength32 for 64-bit mode (1-bits)
-					   0=disable (normal)
-					   1=enable (4 beat burst when width is 64-bits)*/
+					   0 = disable (normal)
+					   1 = enable (4 beat burst when width is 64-bits)*/
 
 /* Dram Power */
 #define NV_CKE_PDEN		30	/* CKE based power down mode (1-bits)
-					   0=disable
-					   1=enable*/
+					   0 = disable
+					   1 = enable*/
 #define NV_CKE_CTL		31	/* CKE based power down control (1-bits)
-					   0=per Channel control
-					   1=per Chip select control*/
+					   0 = per Channel control
+					   1 = per Chip select control*/
 #define NV_CLKHZAltVidC3	32	/* Memclock tri-stating during C3 and Alt VID (1-bits)
-					   0=disable
-					   1=enable*/
+					   0 = disable
+					   1 = enable*/
 
 /* Memory Map/Mgt.*/
 #define NV_BottomIO		40	/* Bottom of 32-bit IO space (8-bits)
@@ -472,8 +472,8 @@ struct DCTStatStruc {		/* A per Node structure*/
 #define NV_BottomUMA		41	/* Bottom of shared graphics dram (8-bits)
 					   NV_BottomUMA[7:0]=Addr[31:24]*/
 #define NV_MemHole		42	/* Memory Hole Remapping (1-bits)
-					   0=disable
-					   1=enable  */
+					   0 = disable
+					   1 = enable  */
 
 /* ECC */
 #define NV_ECC			50	/* Dram ECC enable*/
@@ -484,14 +484,14 @@ struct DCTStatStruc {		/* A per Node structure*/
 #define NV_L2BKScrub		56	/* L2 ECC Background Scrubber CTL*/
 #define NV_DCBKScrub		57	/* DCache ECC Background Scrubber CTL*/
 #define NV_CS_SpareCTL		58	/* Chip Select Spare Control bit 0:
-					      0=disable Spare
-					      1=enable Spare */
+					      0 = disable Spare
+					      1 = enable Spare */
 					 /*Chip Select Spare Control bit 1-4:
 					    Reserved, must be zero*/
 #define NV_Parity		60	/* Parity Enable*/
 #define NV_SyncOnUnEccEn	61	/* SyncOnUnEccEn control
-					   0=disable
-					   1=enable*/
+					   0 = disable
+					   1 = enable*/
 
 
 /* global function */
@@ -505,11 +505,11 @@ void K8FInterleaveBanks(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCT
 void mctInitWithWritetoCS(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat);
 
 void mctGet_PS_Cfg(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat);
-void Get_ChannelPS_Cfg0( unsigned MAAdimms, unsigned Speed, unsigned MAAload, unsigned DATAAload,
+void Get_ChannelPS_Cfg0(unsigned MAAdimms, unsigned Speed, unsigned MAAload, unsigned DATAAload,
 		unsigned *AddrTmgCTL, unsigned *ODC_CTL);
-void Get_ChannelPS_Cfg1( unsigned MAAdimms, unsigned Speed, unsigned MAAload,
+void Get_ChannelPS_Cfg1(unsigned MAAdimms, unsigned Speed, unsigned MAAload,
 		unsigned *AddrTmgCTL, unsigned *ODC_CTL, unsigned *val);
-void Get_ChannelPS_Cfg2( unsigned MAAdimms, unsigned Speed, unsigned MAAload,
+void Get_ChannelPS_Cfg2(unsigned MAAdimms, unsigned Speed, unsigned MAAload,
 		unsigned *AddrTmgCTL, unsigned *ODC_CTL, unsigned *val);
 
 u8 MCTDefRet(void);
diff --git a/src/northbridge/amd/amdmct/mct/mct_d.c b/src/northbridge/amd/amdmct/mct/mct_d.c
index 16e67df..87863b1 100644
--- a/src/northbridge/amd/amdmct/mct/mct_d.c
+++ b/src/northbridge/amd/amdmct/mct/mct_d.c
@@ -195,7 +195,7 @@ static void mctAutoInitMCT_D(struct MCTStatStruc *pMCTstat,
 	 * 1. Complete Hypertransport Bus Configuration
 	 * 2. SMBus Controller Initialized
 	 * 3. Checksummed or Valid NVRAM bits
-	 * 4. MCG_CTL=-1, MC4_CTL_EN=0 for all CPUs
+	 * 4. MCG_CTL=-1, MC4_CTL_EN = 0 for all CPUs
 	 * 5. MCi_STS from shutdown/warm reset recorded (if desired) prior to
 	 *     entry
 	 * 6. All var MTRRs reset to zero
@@ -308,7 +308,7 @@ restartinit:
 		MCTMemClr_D(pMCTstat,pDCTstatA);
 	}
 
-	mct_FinalMCT_D(pMCTstat, (pDCTstatA + 0) );	// Node 0
+	mct_FinalMCT_D(pMCTstat, (pDCTstatA + 0));	// Node 0
 	print_tx("mctAutoInitMCT_D Done: Global Status: ", pMCTstat->GStatus);
 	return;
 
@@ -434,7 +434,7 @@ static void LoadDQSSigTmgRegs_D(struct MCTStatStruc *pMCTstat,
 
 				}
 			}
-			for (Channel = 0; Channel<2; Channel++) {
+			for (Channel = 0; Channel < 2; Channel++) {
 				SetEccDQSRcvrEn_D(pDCTstat, Channel);
 			}
 
@@ -452,7 +452,7 @@ static void LoadDQSSigTmgRegs_D(struct MCTStatStruc *pMCTstat,
 				 *   + 0x100 to next dimm
 				*/
 				for (DIMM = 0; DIMM < 2; DIMM++) {
-					if (DIMM==0) {
+					if (DIMM == 0) {
 						index = 0;	/* CHA Write Data Timing Low */
 					} else {
 						if (pDCTstat->Speed >= 4) {
@@ -461,7 +461,7 @@ static void LoadDQSSigTmgRegs_D(struct MCTStatStruc *pMCTstat,
 							break;
 						}
 					}
-					for (Dir=0;Dir<2;Dir++) {//RD/WR
+					for (Dir = 0; Dir < 2; Dir++) {//RD/WR
 						p = pDCTstat->CH_D_DIR_B_DQS[Channel][DIMM][Dir];
 						val = stream_to_int(p); /* CHA Read Data Timing High */
 						Set_NB32_index_wait(dev, index_reg, index+1, val);
@@ -474,12 +474,12 @@ static void LoadDQSSigTmgRegs_D(struct MCTStatStruc *pMCTstat,
 				}
 			}
 
-			for (Channel = 0; Channel<2; Channel++) {
+			for (Channel = 0; Channel < 2; Channel++) {
 				reg = 0x78 + Channel * 0x100;
 				val = Get_NB32(dev, reg);
 				val &= ~(0x3ff<<22);
 				val |= ((u32) pDCTstat->CH_MaxRdLat[Channel] << 22);
-				val &= ~(1<<DqsRcvEnTrain);
+				val &= ~(1 << DqsRcvEnTrain);
 				Set_NB32(dev, reg, val);	/* program MaxRdLatency to correspond with current delay*/
 			}
 		}
@@ -497,7 +497,7 @@ static void ResetNBECCstat_D(struct MCTStatStruc *pMCTstat,
 	 * or normal ECC memory conditioning.
 	 */
 
-	//FIXME: this function depends on pDCTstat Array ( with Node id ) - Is this really a problem?
+	//FIXME: this function depends on pDCTstat Array (with Node id) - Is this really a problem?
 
 	u32 dev;
 	u8 Node;
@@ -592,9 +592,9 @@ static void HTMemMapInit_D(struct MCTStatStruc *pMCTstat,
 					limit = pDCTstat->DCTSysLimit;
 				} else if (base == BottomIO) {
 					/* SW Node Hoist */
-					pMCTstat->GStatus |= 1<<GSB_SpIntRemapHole;
-					pDCTstat->Status |= 1<<SB_SWNodeHole;
-					pMCTstat->GStatus |= 1<<GSB_SoftHole;
+					pMCTstat->GStatus |= 1 << GSB_SpIntRemapHole;
+					pDCTstat->Status |= 1 << SB_SWNodeHole;
+					pMCTstat->GStatus |= 1 << GSB_SoftHole;
 					pMCTstat->HoleBase = base;
 					limit -= base;
 					base = _4GB_RJ8;
@@ -650,7 +650,7 @@ static void HTMemMapInit_D(struct MCTStatStruc *pMCTstat,
 				val = Get_NB32(dev, reg);
 				Set_NB32(devx, reg, val);
 				reg += 4;
-			} while ( reg < 0x80);
+			} while (reg < 0x80);
 		} else {
 			break;			/* stop at first absent Node */
 		}
@@ -672,7 +672,7 @@ static void MCTMemClr_D(struct MCTStatStruc *pMCTstat,
 	u8 Node;
 	struct DCTStatStruc *pDCTstat;
 
-	if (!mctGet_NVbits(NV_DQSTrainCTL)){
+	if (!mctGet_NVbits(NV_DQSTrainCTL)) {
 		// FIXME: callback to wrapper: mctDoWarmResetMemClr_D
 	} else {	// NV_DQSTrainCTL == 1
 		for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
@@ -723,7 +723,7 @@ static void MCTMemClrSync_D(struct MCTStatStruc *pMCTstat,
 	u8 Node;
 	struct DCTStatStruc *pDCTstat;
 
-	if (!mctGet_NVbits(NV_DQSTrainCTL)){
+	if (!mctGet_NVbits(NV_DQSTrainCTL)) {
 		// callback to wrapper: mctDoWarmResetMemClr_D
 	} else {	// NV_DQSTrainCTL == 1
 		for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
@@ -745,7 +745,7 @@ static void DCTMemClr_Sync_D(struct MCTStatStruc *pMCTstat,
 	u32 reg;
 
 	/* Ensure that a memory clear operation has completed on one node */
-	if (pDCTstat->DCTSysLimit){
+	if (pDCTstat->DCTSysLimit) {
 		reg = 0x110;
 
 		do {
@@ -834,7 +834,7 @@ static void DCTInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTst
 		u32 reg_off = dct * 0x100;
 		val = 1<<DisDramInterface;
 		Set_NB32(pDCTstat->dev_dct, reg_off+0x94, val);
-		/*To maximize power savings when DisDramInterface=1b,
+		/*To maximize power savings when DisDramInterface = 1b,
 		  all of the MemClkDis bits should also be set.*/
 		val = 0xFF000000;
 		Set_NB32(pDCTstat->dev_dct, reg_off+0x88, val);
@@ -887,7 +887,7 @@ static void StartupDCT_D(struct MCTStatStruc *pMCTstat,
 
 	dev = pDCTstat->dev_dct;
 	val = Get_NB32(dev, 0x94 + reg_off);
-	if (val & (1<<MemClkFreqVal)) {
+	if (val & (1 << MemClkFreqVal)) {
 		print_t("\t\t\tStartupDCT_D: MemClkFreqVal\n");
 		byte = mctGet_NVbits(NV_DQSTrainCTL);
 		if (byte == 1) {
@@ -997,7 +997,7 @@ static u8 AutoCycTiming_D(struct MCTStatStruc *pMCTstat,
 		pDCTstat->CASL = pDCTstat->DIMMCASL;
 
 		/* if "manual" memclock mode */
-		if ( mctGet_NVbits(NV_MCTUSRTMGMODE) == 2)
+		if (mctGet_NVbits(NV_MCTUSRTMGMODE) == 2)
 			pDCTstat->Speed = mctGet_NVbits(NV_MemCkVal) + 1;
 
 	}
@@ -1013,10 +1013,10 @@ static u8 AutoCycTiming_D(struct MCTStatStruc *pMCTstat,
 	Trc = 0;
 	Twr = 0;
 	Twtr = 0;
-	for (i=0; i < 4; i++)
+	for (i = 0; i < 4; i++)
 		Trfc[i] = 0;
 
-	for ( i = 0; i< MAX_DIMMS_SUPPORTED; i++) {
+	for (i = 0; i< MAX_DIMMS_SUPPORTED; i++) {
 		LDIMM = i >> 1;
 		if (pDCTstat->DIMMValid & (1 << i)) {
 			smbaddr = Get_DIMMAddress_D(pDCTstat, dct + i);
@@ -1062,13 +1062,13 @@ static u8 AutoCycTiming_D(struct MCTStatStruc *pMCTstat,
 			if (Trc < val)
 				Trc = val;
 
-			/* dev density=rank size/#devs per rank */
+			/* dev density = rank size/#devs per rank */
 			byte = mctRead_SPD(smbaddr, SPD_BANKSZ);
 
 			val = ((byte >> 5) | (byte << 3)) & 0xFF;
 			val <<= 2;
 
-			byte = mctRead_SPD(smbaddr, SPD_DEVWIDTH) & 0xFE;     /* dev density=2^(rows+columns+banks) */
+			byte = mctRead_SPD(smbaddr, SPD_DEVWIDTH) & 0xFE;     /* dev density = 2^(rows+columns+banks) */
 			if (byte == 4) {
 				val >>= 4;
 			} else if (byte == 8) {
@@ -1094,7 +1094,7 @@ static u8 AutoCycTiming_D(struct MCTStatStruc *pMCTstat,
 	if (byte == 5)
 		DDR2_1066 = 1;
 	Tk40 = Get_40Tk_D(byte);
-	Tk10 = Tk40>>2;
+	Tk10 = Tk40 >> 2;
 
 	/* Notes:
 	 1. All secondary time values given in SPDs are in binary with units of ns.
@@ -1260,7 +1260,7 @@ static u8 AutoCycTiming_D(struct MCTStatStruc *pMCTstat,
 
 
 	/* Trfc0-Trfc3 */
-	for (i=0; i<4; i++)
+	for (i = 0; i < 4; i++)
 		pDCTstat->Trfc[i] = Trfc[i];
 
 	mctAdjustAutoCycTmg_D();
@@ -1277,7 +1277,7 @@ static u8 AutoCycTiming_D(struct MCTStatStruc *pMCTstat,
 	else
 		val -= Bias_TrcdT;
 
-	DramTimingLo |= val<<4;
+	DramTimingLo |= val << 4;
 
 	val = pDCTstat->Trp;
 	if (DDR2_1066)
@@ -1286,27 +1286,27 @@ static u8 AutoCycTiming_D(struct MCTStatStruc *pMCTstat,
 		val -= Bias_TrpT;
 		val <<= 1;
 	}
-	DramTimingLo |= val<<7;
+	DramTimingLo |= val << 7;
 
 	val = pDCTstat->Trtp;
 	val -= Bias_TrtpT;
-	DramTimingLo |= val<<11;
+	DramTimingLo |= val << 11;
 
 	val = pDCTstat->Tras;
 	if (DDR2_1066)
 		val -= Bias_TrasT_1066;
 	else
 		val -= Bias_TrasT;
-	DramTimingLo |= val<<12;
+	DramTimingLo |= val << 12;
 
 	val = pDCTstat->Trc;
 	val -= Bias_TrcT;
-	DramTimingLo |= val<<16;
+	DramTimingLo |= val << 16;
 
 	if (!DDR2_1066) {
 		val = pDCTstat->Twr;
 		val -= Bias_TwrT;
-		DramTimingLo |= val<<20;
+		DramTimingLo |= val << 20;
 	}
 
 	val = pDCTstat->Trrd;
@@ -1314,7 +1314,7 @@ static u8 AutoCycTiming_D(struct MCTStatStruc *pMCTstat,
 		val -= Bias_TrrdT_1066;
 	else
 		val -= Bias_TrrdT;
-	DramTimingLo |= val<<22;
+	DramTimingLo |= val << 22;
 
 
 	DramTimingHi = 0;	/* Dram Timing Low init */
@@ -1323,13 +1323,13 @@ static u8 AutoCycTiming_D(struct MCTStatStruc *pMCTstat,
 		val -= Bias_TwtrT_1066;
 	else
 		val -= Bias_TwtrT;
-	DramTimingHi |= val<<8;
+	DramTimingHi |= val << 8;
 
 	val = 2;
-	DramTimingHi |= val<<16;
+	DramTimingHi |= val << 16;
 
 	val = 0;
-	for (i=4;i>0;i--) {
+	for (i = 4; i > 0; i--) {
 		val <<= 3;
 		val |= Trfc[i-1];
 	}
@@ -1385,7 +1385,7 @@ static void GetPresetmaxF_D(struct MCTStatStruc *pMCTstat,
 	proposedFreq = 533;	 /* Rev F0 programmable max memclock is */
 
 	/*Get User defined limit if  "limit" mode */
-	if ( mctGet_NVbits(NV_MCTUSRTMGMODE) == 1) {
+	if (mctGet_NVbits(NV_MCTUSRTMGMODE) == 1) {
 		word = Get_Fk_D(mctGet_NVbits(NV_MemCkVal) + 1);
 		if (word < proposedFreq)
 			proposedFreq = word;
@@ -1426,9 +1426,9 @@ static void SPDGetTCL_D(struct MCTStatStruc *pMCTstat,
 
 	CL1min = 0xFF;
 	T1min = 0xFF;
-	for (k=K_MAX; k >= K_MIN; k--) {
+	for (k = K_MAX; k >= K_MIN; k--) {
 		for (j = J_MIN; j <= J_MAX; j++) {
-			if (Sys_Capability_D(pMCTstat, pDCTstat, j, k) ) {
+			if (Sys_Capability_D(pMCTstat, pDCTstat, j, k)) {
 				/* 1. check to see if DIMMi is populated.
 				   2. check if DIMMi supports CLj and Tjk */
 				for (i = 0; i < MAX_DIMMS_SUPPORTED; i++) {
@@ -1480,7 +1480,7 @@ static u8 PlatformSpec_D(struct MCTStatStruc *pMCTstat,
 		mctGet_PS_Cfg_D(pMCTstat, pDCTstat, 1);
 	}
 
-	if ( pDCTstat->_2Tmode == 2) {
+	if (pDCTstat->_2Tmode == 2) {
 		dev = pDCTstat->dev_dct;
 		reg = 0x94 + 0x100 * dct; /* Dram Configuration Hi */
 		val = Get_NB32(dev, reg);
@@ -1547,20 +1547,20 @@ static u8 AutoConfig_D(struct MCTStatStruc *pMCTstat,
 		if (pDCTstat->Speed == 3) {
 			if (pDCTstat->MAdimms[dct] == 4)
 				DramConfigLo |= 1 << 5;		/* 50 Ohms ODT */
-		} else if (pDCTstat->Speed == 4){
+		} else if (pDCTstat->Speed == 4) {
 			if (pDCTstat->MAdimms[dct] != 1)
 				DramConfigLo |= 1 << 5;		/* 50 Ohms ODT */
 		}
 	} else {
 		// FIXME: Skip for Ax versions
 		if (pDCTstat->MAdimms[dct] == 4) {
-			if ( pDCTstat->DimmQRPresent != 0) {
+			if (pDCTstat->DimmQRPresent != 0) {
 				if ((pDCTstat->Speed == 3) || (pDCTstat->Speed == 4)) {
 					DramConfigLo |= 1 << 5;	/* 50 Ohms ODT */
 				}
 			} else if (pDCTstat->MAdimms[dct] == 4) {
 				if (pDCTstat->Speed == 4) {
-					if ( pDCTstat->DimmQRPresent != 0) {
+					if (pDCTstat->DimmQRPresent != 0) {
 						DramConfigLo |= 1 << 5;	/* 50 Ohms ODT */
 					}
 				}
@@ -1604,7 +1604,7 @@ static u8 AutoConfig_D(struct MCTStatStruc *pMCTstat,
 
 	if (mctGet_NVbits(NV_ECC_CAP))
 		if (Status & (1 << SB_ECCDIMMs))
-			if ( mctGet_NVbits(NV_ECC))
+			if (mctGet_NVbits(NV_ECC))
 				DramConfigLo |= 1 << DimmEcEn;
 
 	DramConfigLo = mct_DisDllShutdownSR(pMCTstat, pDCTstat, DramConfigLo, dct);
@@ -1633,7 +1633,7 @@ static u8 AutoConfig_D(struct MCTStatStruc *pMCTstat,
 		DramConfigHi |= 1 << BankSwizzleMode; /* recommended setting (default) */
 
 	/* Check for Quadrank DIMM presence */
-	if ( pDCTstat->DimmQRPresent != 0) {
+	if (pDCTstat->DimmQRPresent != 0) {
 		byte = mctGet_NVbits(NV_4RANKType);
 		if (byte == 2)
 			DramConfigHi |= 1 << 17;	/* S4 (4-Rank SO-DIMMs) */
@@ -1641,7 +1641,7 @@ static u8 AutoConfig_D(struct MCTStatStruc *pMCTstat,
 			DramConfigHi |= 1 << 18;	/* R4 (4-Rank Registered DIMMs) */
 	}
 
-	if (0) /* call back not needed mctOverrideDcqBypMax_D ) */
+	if (0) /* call back not needed mctOverrideDcqBypMax_D) */
 		val = mctGet_NVbits(NV_BYPMAX);
 	else
 		val = 0x0f; // recommended setting (default)
@@ -1676,14 +1676,14 @@ static u8 AutoConfig_D(struct MCTStatStruc *pMCTstat,
 	 1. We will assume that MemClkDis field has been preset prior to this
 	    point.
 	 2. We will only set MemClkDis bits if a DIMM is NOT present AND if:
-	    NV_AllMemClks <>0 AND SB_DiagClks ==0 */
+	    NV_AllMemClks <>0 AND SB_DiagClks == 0 */
 
 
 	/* Dram Timing Low (owns Clock Enable bits) */
 	DramTimingLo = Get_NB32(dev, 0x88 + reg_off);
 	if (mctGet_NVbits(NV_AllMemClks) == 0) {
 		/* Special Jedec SPD diagnostic bit - "enable all clocks" */
-		if (!(pDCTstat->Status & (1<<SB_DiagClks))) {
+		if (!(pDCTstat->Status & (1 << SB_DiagClks))) {
 			const u8 *p;
 			byte = mctGet_NVbits(NV_PACK_TYPE);
 			if (byte == PT_L1)
@@ -1697,9 +1697,9 @@ static u8 AutoConfig_D(struct MCTStatStruc *pMCTstat,
 			while (dword < MAX_DIMMS_SUPPORTED) {
 				val = p[dword];
 				print_tx("DramTimingLo: val=", val);
-				if (!(pDCTstat->DIMMValid & (1<<val)))
+				if (!(pDCTstat->DIMMValid & (1 << val)))
 					/*disable memclk*/
-					DramTimingLo |= 1<<(dword+24);
+					DramTimingLo |= 1 << (dword+24);
 				dword++ ;
 			}
 		}
@@ -1763,7 +1763,7 @@ static void SPDSetBanks_D(struct MCTStatStruc *pMCTstat,
 		if ((pDCTstat->Status & (1 << SB_64MuxedMode)) && ChipSel >=4)
 			byte -= 3;
 
-		if (pDCTstat->DIMMValid & (1<<byte)) {
+		if (pDCTstat->DIMMValid & (1 << byte)) {
 			smbaddr = Get_DIMMAddress_D(pDCTstat, (ChipSel + dct));
 
 			byte = mctRead_SPD(smbaddr, SPD_ROWSZ);
@@ -1795,7 +1795,7 @@ static void SPDSetBanks_D(struct MCTStatStruc *pMCTstat,
 			/* 13 Rows is smallest dev size */
 			byte |= Rows - 13;	/* CCCBRR internal encode */
 
-			for (dword=0; dword < 12; dword++) {
+			for (dword = 0; dword < 12; dword++) {
 				if (byte == Tab_BankAddr[dword])
 					break;
 			}
@@ -1803,14 +1803,14 @@ static void SPDSetBanks_D(struct MCTStatStruc *pMCTstat,
 			if (dword < 12) {
 
 				/* bit no. of CS field in address mapping reg.*/
-				dword <<= (ChipSel<<1);
+				dword <<= (ChipSel << 1);
 				BankAddrReg |= dword;
 
 				/* Mask value=(2pow(rows+cols+banks+3)-1)>>8,
 				   or 2pow(rows+cols+banks-5)-1*/
 				csMask = 0;
 
-				byte = Rows + Cols;		/* cl=rows+cols*/
+				byte = Rows + Cols;		/* cl = rows+cols*/
 				if (Banks == 8)
 					byte -= 2;		/* 3 banks - 5 */
 				else
@@ -1824,19 +1824,19 @@ static void SPDSetBanks_D(struct MCTStatStruc *pMCTstat,
 				csMask--;
 
 				/*set ChipSelect population indicator even bits*/
-				pDCTstat->CSPresent |= (1<<ChipSel);
+				pDCTstat->CSPresent |= (1 << ChipSel);
 				if (Ranks >= 2)
 					/*set ChipSelect population indicator odd bits*/
 					pDCTstat->CSPresent |= 1 << (ChipSel + 1);
 
-				reg = 0x60+(ChipSel<<1) + reg_off;	/*Dram CS Mask Register */
+				reg = 0x60+(ChipSel << 1) + reg_off;	/*Dram CS Mask Register */
 				val = csMask;
 				val &= 0x1FF83FE0;	/* Mask out reserved bits.*/
 				Set_NB32(dev, reg, val);
 			}
 		} else {
-			if (pDCTstat->DIMMSPDCSE & (1<<ChipSel))
-				pDCTstat->CSTestFail |= (1<<ChipSel);
+			if (pDCTstat->DIMMSPDCSE & (1 << ChipSel))
+				pDCTstat->CSTestFail |= (1 << ChipSel);
 		}	/* if DIMMValid*/
 	}	/* while ChipSel*/
 
@@ -1881,7 +1881,7 @@ static void SPDCalcWidth_D(struct MCTStatStruc *pMCTstat,
 
 	/* Check Symmetry of Channel A and Channel B DIMMs
 	  (must be matched for 128-bit mode).*/
-	for (i=0; i < MAX_DIMMS_SUPPORTED; i += 2) {
+	for (i = 0; i < MAX_DIMMS_SUPPORTED; i += 2) {
 		if ((pDCTstat->DIMMValid & (1 << i)) && (pDCTstat->DIMMValid & (1<<(i+1)))) {
 			smbaddr = Get_DIMMAddress_D(pDCTstat, i);
 			smbaddr1 = Get_DIMMAddress_D(pDCTstat, i+1);
@@ -1889,35 +1889,35 @@ static void SPDCalcWidth_D(struct MCTStatStruc *pMCTstat,
 			byte = mctRead_SPD(smbaddr, SPD_ROWSZ) & 0x1f;
 			byte1 = mctRead_SPD(smbaddr1, SPD_ROWSZ) & 0x1f;
 			if (byte != byte1) {
-				pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
+				pDCTstat->ErrStatus |= (1 << SB_DimmMismatchO);
 				break;
 			}
 
 			byte =	 mctRead_SPD(smbaddr, SPD_COLSZ) & 0x1f;
 			byte1 =	 mctRead_SPD(smbaddr1, SPD_COLSZ) & 0x1f;
 			if (byte != byte1) {
-				pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
+				pDCTstat->ErrStatus |= (1 << SB_DimmMismatchO);
 				break;
 			}
 
 			byte = mctRead_SPD(smbaddr, SPD_BANKSZ);
 			byte1 = mctRead_SPD(smbaddr1, SPD_BANKSZ);
 			if (byte != byte1) {
-				pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
+				pDCTstat->ErrStatus |= (1 << SB_DimmMismatchO);
 				break;
 			}
 
 			byte = mctRead_SPD(smbaddr, SPD_DEVWIDTH) & 0x7f;
 			byte1 = mctRead_SPD(smbaddr1, SPD_DEVWIDTH) & 0x7f;
 			if (byte != byte1) {
-				pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
+				pDCTstat->ErrStatus |= (1 << SB_DimmMismatchO);
 				break;
 			}
 
 			byte = mctRead_SPD(smbaddr, SPD_DMBANKS) & 7;	 /* #ranks-1 */
 			byte1 = mctRead_SPD(smbaddr1, SPD_DMBANKS) & 7;	  /* #ranks-1 */
 			if (byte != byte1) {
-				pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
+				pDCTstat->ErrStatus |= (1 << SB_DimmMismatchO);
 				break;
 			}
 
@@ -1952,7 +1952,7 @@ static void StitchMemory_D(struct MCTStatStruc *pMCTstat,
 
 	_DSpareEn = 0;
 
-	/* CS Sparing 1=enabled, 0=disabled */
+	/* CS Sparing 1 = enabled, 0 = disabled */
 	if (mctGet_NVbits(NV_CS_SpareCTL) & 1) {
 		if (MCT_DIMM_SPARE_NO_WARM) {
 			/* Do no warm-reset DIMM spare */
@@ -1967,7 +1967,7 @@ static void StitchMemory_D(struct MCTStatStruc *pMCTstat,
 					pDCTstat->ErrStatus |= 1 << SB_SpareDis;
 			}
 		} else {
-			if (!mctGet_NVbits(NV_DQSTrainCTL)) { /*DQS Training 1=enabled, 0=disabled */
+			if (!mctGet_NVbits(NV_DQSTrainCTL)) { /*DQS Training 1 = enabled, 0 = disabled */
 				word = pDCTstat->CSPresent;
 				val = bsf(word);
 				word &= ~(1 << val);
@@ -1981,13 +1981,13 @@ static void StitchMemory_D(struct MCTStatStruc *pMCTstat,
 	}
 
 	nxtcsBase = 0;		/* Next available cs base ADDR[39:8] */
-	for (p=0; p < MAX_DIMMS_SUPPORTED; p++) {
+	for (p = 0; p < MAX_DIMMS_SUPPORTED; p++) {
 		BiggestBank = 0;
 		for (q = 0; q < MAX_CS_SUPPORTED; q++) { /* from DIMMS to CS */
 			if (pDCTstat->CSPresent & (1 << q)) {  /* bank present? */
 				reg  = 0x40 + (q << 2) + reg_off;  /* Base[q] reg.*/
 				val = Get_NB32(dev, reg);
-				if (!(val & 3)) {	/* (CSEnable|Spare==1)bank is enabled already? */
+				if (!(val & 3)) {	/* (CSEnable|Spare == 1)bank is enabled already? */
 					reg = 0x60 + ((q << 1) & 0xc) + reg_off; /*Mask[q] reg.*/
 					val = Get_NB32(dev, reg);
 					val >>= 19;
@@ -2003,7 +2003,7 @@ static void StitchMemory_D(struct MCTStatStruc *pMCTstat,
 			}	/*if bank present */
 		}	/* while q */
 		if (BiggestBank !=0) {
-			curcsBase = nxtcsBase;		/* curcsBase=nxtcsBase*/
+			curcsBase = nxtcsBase;		/* curcsBase = nxtcsBase*/
 			/* DRAM CS Base b Address Register offset */
 			reg = 0x40 + (b << 2) + reg_off;
 			if (_DSpareEn) {
@@ -2022,7 +2022,7 @@ static void StitchMemory_D(struct MCTStatStruc *pMCTstat,
 		}
 
 		/* bank present but disabled?*/
-		if ( pDCTstat->CSTestFail & (1 << p)) {
+		if (pDCTstat->CSTestFail & (1 << p)) {
 			/* DRAM CS Base b Address Register offset */
 			reg = (p << 2) + 0x40 + reg_off;
 			val = 1 << TestFail;
@@ -2102,8 +2102,8 @@ static u8 Dimm_Supports_D(struct DCTStatStruc *pDCTstat,
 		byte = mctRead_SPD(DIMMi, word);	/* DIMMi speed */
 		if (Tk < byte) {
 			ret = 1;
-		} else if (byte == 0){
-			pDCTstat->ErrStatus |= 1<<SB_NoCycTime;
+		} else if (byte == 0) {
+			pDCTstat->ErrStatus |= 1 << SB_NoCycTime;
 			ret = 1;
 		} else {
 			ret = 0;	/* DIMM is capable! */
@@ -2121,12 +2121,12 @@ static u8 DIMMPresence_D(struct MCTStatStruc *pMCTstat,
 	/* Check DIMMs present, verify checksum, flag SDRAM type,
 	 * build population indicator bitmaps, and preload bus loading
 	 * of DIMMs into DCTStatStruc.
-	 * MAAload=number of devices on the "A" bus.
-	 * MABload=number of devices on the "B" bus.
-	 * MAAdimms=number of DIMMs on the "A" bus slots.
-	 * MABdimms=number of DIMMs on the "B" bus slots.
-	 * DATAAload=number of ranks on the "A" bus slots.
-	 * DATABload=number of ranks on the "B" bus slots.
+	 * MAAload = number of devices on the "A" bus.
+	 * MABload = number of devices on the "B" bus.
+	 * MAAdimms = number of DIMMs on the "A" bus slots.
+	 * MABdimms = number of DIMMs on the "B" bus slots.
+	 * DATAAload = number of ranks on the "A" bus slots.
+	 * DATABload = number of ranks on the "B" bus slots.
 	 */
 
 	u16 i, j, k;
@@ -2159,7 +2159,7 @@ static u8 DIMMPresence_D(struct MCTStatStruc *pMCTstat,
 			print_tx("\t DIMMPresence: smbaddr=", smbaddr);
 			if (smbaddr) {
 				Checksum = 0;
-				for (Index=0; Index < 64; Index++){
+				for (Index = 0; Index < 64; Index++) {
 					int status;
 					status = mctRead_SPD(smbaddr, Index);
 					if (status < 0)
@@ -2184,7 +2184,7 @@ static u8 DIMMPresence_D(struct MCTStatStruc *pMCTstat,
 							pDCTstat->ErrCode = SC_StopError;
 						} else {
 							/*if NV_SPDCHK_RESTRT is set to 1, ignore faulty SPD checksum*/
-							pDCTstat->ErrStatus |= 1<<SB_DIMMChkSum;
+							pDCTstat->ErrStatus |= 1 << SB_DIMMChkSum;
 							byte = mctRead_SPD(smbaddr, SPD_TYPE);
 							if (byte == JED_DDR2SDRAM)
 								pDCTstat->DIMMValid |= 1 << i;
@@ -2250,17 +2250,17 @@ static u8 DIMMPresence_D(struct MCTStatStruc *pMCTstat,
 
 					/*Check if SPD diag bit 'analysis probe installed' is set */
 					byte = mctRead_SPD(smbaddr, SPD_ATTRIB);
-					if ( byte & JED_PROBEMSK )
-						pDCTstat->Status |= 1<<SB_DiagClks;
+					if (byte & JED_PROBEMSK)
+						pDCTstat->Status |= 1 << SB_DiagClks;
 
 					byte = mctRead_SPD(smbaddr, SPD_DMBANKS);
-					if (!(byte & (1<< SPDPLBit)))
+					if (!(byte & (1 << SPDPLBit)))
 						pDCTstat->DimmPlPresent |= 1 << i;
 					byte &= 7;
 					byte++;		 /* ranks */
 					if (byte > 2) {
 						/* if any DIMMs are QR, we have to make two passes through DIMMs*/
-						if ( pDCTstat->DimmQRPresent == 0) {
+						if (pDCTstat->DimmQRPresent == 0) {
 							MaxDimms <<= 1;
 						}
 						if (i < DimmSlots) {
@@ -2274,7 +2274,7 @@ static u8 DIMMPresence_D(struct MCTStatStruc *pMCTstat,
 					if (devwidth == 16)
 						bytex = 4;
 					else if (devwidth == 4)
-						bytex=16;
+						bytex = 16;
 
 					if (byte == 2)
 						bytex <<= 1;	/*double Addr bus load value for dual rank DIMMs*/
@@ -2322,28 +2322,28 @@ static u8 DIMMPresence_D(struct MCTStatStruc *pMCTstat,
 		if (RegDIMMPresent != 0) {
 			if ((RegDIMMPresent ^ pDCTstat->DIMMValid) !=0) {
 				/* module type DIMM mismatch (reg'ed, unbuffered) */
-				pDCTstat->ErrStatus |= 1<<SB_DimmMismatchM;
+				pDCTstat->ErrStatus |= 1 << SB_DimmMismatchM;
 				pDCTstat->ErrCode = SC_StopError;
 			} else{
 				/* all DIMMs are registered */
-				pDCTstat->Status |= 1<<SB_Registered;
+				pDCTstat->Status |= 1 << SB_Registered;
 			}
 		}
 		if (pDCTstat->DimmECCPresent != 0) {
-			if ((pDCTstat->DimmECCPresent ^ pDCTstat->DIMMValid )== 0) {
+			if ((pDCTstat->DimmECCPresent ^ pDCTstat->DIMMValid) == 0) {
 				/* all DIMMs are ECC capable */
-				pDCTstat->Status |= 1<<SB_ECCDIMMs;
+				pDCTstat->Status |= 1 << SB_ECCDIMMs;
 			}
 		}
 		if (pDCTstat->DimmPARPresent != 0) {
 			if ((pDCTstat->DimmPARPresent ^ pDCTstat->DIMMValid) == 0) {
 				/*all DIMMs are Parity capable */
-				pDCTstat->Status |= 1<<SB_PARDIMMs;
+				pDCTstat->Status |= 1 << SB_PARDIMMs;
 			}
 		}
 	} else {
 		/* no DIMMs present or no DIMMs that qualified. */
-		pDCTstat->ErrStatus |= 1<<SB_NoDimms;
+		pDCTstat->ErrStatus |= 1 << SB_NoDimms;
 		pDCTstat->ErrCode = SC_StopError;
 	}
 
@@ -2418,7 +2418,7 @@ static void mct_initDCT(struct MCTStatStruc *pMCTstat,
 	} else {
 		/* Configure DCT1 if unganged and enabled*/
 		if (!pDCTstat->GangedMode) {
-			if ( pDCTstat->DIMMValidDCT[1] > 0) {
+			if (pDCTstat->DIMMValidDCT[1] > 0) {
 				print_t("\tmct_initDCT: DCTInit_D 1\n");
 				err_code = pDCTstat->ErrCode;		/* save DCT0 errors */
 				pDCTstat->ErrCode = 0;
@@ -2497,7 +2497,7 @@ static u8 mct_setMode(struct MCTStatStruc *pMCTstat,
 	if (byte != bytex) {
 		pDCTstat->ErrStatus &= ~(1 << SB_DimmMismatchO);
 	} else {
-		if ( mctGet_NVbits(NV_Unganged) )
+		if (mctGet_NVbits(NV_Unganged))
 			pDCTstat->ErrStatus |= (1 << SB_DimmMismatchO);
 
 		if (!(pDCTstat->ErrStatus & (1 << SB_DimmMismatchO))) {
@@ -2595,7 +2595,7 @@ static u8 mct_PlatformSpec(struct MCTStatStruc *pMCTstat,
 		i_start = dct;
 		i_end = dct + 1;
 	}
-	for (i=i_start; i<i_end; i++) {
+	for (i = i_start; i < i_end; i++) {
 		index_reg = 0x98 + (i * 0x100);
 		Set_NB32_index_wait(dev, index_reg, 0x00, pDCTstat->CH_ODC_CTL[i]); /* Channel A Output Driver Compensation Control */
 		Set_NB32_index_wait(dev, index_reg, 0x04, pDCTstat->CH_ADDR_TMG[i]); /* Channel A Output Driver Compensation Control */
@@ -2615,7 +2615,7 @@ static void mct_SyncDCTsReady(struct DCTStatStruc *pDCTstat)
 		print_tx("mct_SyncDCTsReady: Node ", pDCTstat->Node_ID);
 		dev = pDCTstat->dev_dct;
 
-		if ((pDCTstat->DIMMValidDCT[0] ) || (pDCTstat->DIMMValidDCT[1])) {		/* This Node has dram */
+		if ((pDCTstat->DIMMValidDCT[0]) || (pDCTstat->DIMMValidDCT[1])) {		/* This Node has dram */
 			do {
 				val = Get_NB32(dev, 0x110);
 			} while (!(val & (1 << DramEnabled)));
@@ -2629,7 +2629,7 @@ static void mct_AfterGetCLT(struct MCTStatStruc *pMCTstat,
 				struct DCTStatStruc *pDCTstat, u8 dct)
 {
 	if (!pDCTstat->GangedMode) {
-		if (dct == 0 ) {
+		if (dct == 0) {
 			pDCTstat->DIMMValid = pDCTstat->DIMMValidDCT[dct];
 			if (pDCTstat->DIMMValidDCT[dct] == 0)
 				pDCTstat->ErrCode = SC_StopError;
@@ -2648,7 +2648,7 @@ static u8 mct_SPDCalcWidth(struct MCTStatStruc *pMCTstat,
 {
 	u8 ret;
 
-	if ( dct == 0) {
+	if (dct == 0) {
 		SPDCalcWidth_D(pMCTstat, pDCTstat);
 		ret = mct_setMode(pMCTstat, pDCTstat);
 	} else {
@@ -2752,7 +2752,7 @@ static u8 mct_DIMMPresence(struct MCTStatStruc *pMCTstat,
 {
 	u8 ret;
 
-	if ( dct == 0)
+	if (dct == 0)
 		ret = DIMMPresence_D(pMCTstat, pDCTstat);
 	else
 		ret = pDCTstat->ErrCode;
@@ -2777,7 +2777,7 @@ static void mct_OtherTiming(struct MCTStatStruc *pMCTstat,
 				pDCTstat->DIMMValid = pDCTstat->DIMMValidDCT[0];
 				Set_OtherTiming(pMCTstat, pDCTstat, 0);
 			}
-			if (pDCTstat->DIMMValidDCT[1] && !pDCTstat->GangedMode ) {
+			if (pDCTstat->DIMMValidDCT[1] && !pDCTstat->GangedMode) {
 				pDCTstat->DIMMValid = pDCTstat->DIMMValidDCT[1];
 				Set_OtherTiming(pMCTstat, pDCTstat, 1);
 			}
@@ -3022,8 +3022,8 @@ static u8 Check_DqsRcvEn_Diff(struct DCTStatStruc *pDCTstat,
 	if (index == 0x12)
 		ecc_reg = 1;
 
-	for (i=0; i < 8; i+=2) {
-		if ( pDCTstat->DIMMValid & (1 << i)) {
+	for (i = 0; i < 8; i+=2) {
+		if (pDCTstat->DIMMValid & (1 << i)) {
 			val = Get_NB32_index_wait(dev, index_reg, index);
 			byte = val & 0xFF;
 			if (byte < Smallest_0)
@@ -3153,8 +3153,8 @@ static u16 Get_DqsRcvEnGross_MaxMin(struct DCTStatStruc *pDCTstat,
 	if (index == 0x12)
 		ecc_reg = 1;
 
-	for (i=0; i < 8; i+=2) {
-		if ( pDCTstat->DIMMValid & (1 << i)) {
+	for (i = 0; i < 8; i+=2) {
+		if (pDCTstat->DIMMValid & (1 << i)) {
 			val = Get_NB32_index_wait(dev, index_reg, index);
 			val &= 0x00E000E0;
 			byte = (val >> 5) & 0xFF;
@@ -3192,11 +3192,11 @@ static u16 Get_WrDatGross_MaxMin(struct DCTStatStruc *pDCTstat,
 
 	Smallest = 3;
 	Largest = 0;
-	for (i=0; i < 2; i++) {
+	for (i = 0; i < 2; i++) {
 		val = Get_NB32_index_wait(dev, index_reg, index);
 		val &= 0x60606060;
 		val >>= 5;
-		for (j=0; j < 4; j++) {
+		for (j = 0; j < 4; j++) {
 			byte = val & 0xFF;
 			if (byte < Smallest)
 				Smallest = byte;
@@ -3308,14 +3308,14 @@ static void mct_HTMemMapExt(struct MCTStatStruc *pMCTstat,
 
 	/* Copy dram map from F1x40/44,F1x48/4c,
 	   to F1x120/124(Node0),F1x120/124(Node1),...*/
-	for (Node=0; Node < MAX_NODES_SUPPORTED; Node++) {
+	for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
 		pDCTstat = pDCTstatA + Node;
 		devx = pDCTstat->dev_map;
 
 		/* get base/limit from Node0 */
 		reg = 0x40 + (Node << 3);		/* Node0/Dram Base 0 */
 		val = Get_NB32(dev, reg);
-		Drambase = val >> ( 16 + 3);
+		Drambase = val >> (16 + 3);
 
 		reg = 0x44 + (Node << 3);		/* Node0/Dram Base 0 */
 		val = Get_NB32(dev, reg);
@@ -3335,7 +3335,7 @@ static void mct_HTMemMapExt(struct MCTStatStruc *pMCTstat,
 			val |= Dramlimit;
 			Set_NB32(devx, reg, val);
 
-			if ( pMCTstat->GStatus & ( 1 << GSB_HWHole)) {
+			if (pMCTstat->GStatus & (1 << GSB_HWHole)) {
 				reg = 0xF0;
 				val = Get_NB32(devx, reg);
 				val |= (1 << DramMemHoistValid);
@@ -3476,7 +3476,7 @@ static void InitPhyCompensation(struct MCTStatStruc *pMCTstat,
 
 	val = Get_NB32_index_wait(dev, index_reg, 0x00);
 	dword = 0;
-	for (i=0; i < 6; i++) {
+	for (i = 0; i < 6; i++) {
 		switch (i) {
 			case 0:
 			case 4:
@@ -3652,8 +3652,8 @@ static void mct_BeforeDramInit_Prod_D(struct MCTStatStruc *pMCTstat,
 	u32 dev = pDCTstat->dev_dct;
 
 	// FIXME: skip for Ax
-	if ((pDCTstat->Speed == 3) || ( pDCTstat->Speed == 2)) { // MemClkFreq = 667MHz or 533MHz
-		for (i=0; i < 2; i++) {
+	if ((pDCTstat->Speed == 3) || (pDCTstat->Speed == 2)) { // MemClkFreq = 667MHz or 533MHz
+		for (i = 0; i < 2; i++) {
 			reg_off = 0x100 * i;
 			Set_NB32(dev,  0x98 + reg_off, 0x0D000030);
 			Set_NB32(dev,  0x9C + reg_off, 0x00000806);
@@ -3667,7 +3667,7 @@ static void mct_AdjustDelayRange_D(struct MCTStatStruc *pMCTstat,
 			struct DCTStatStruc *pDCTstat, u8 *dqs_pos)
 {
 	// FIXME: Skip for Ax
-	if ((pDCTstat->Speed == 3) || ( pDCTstat->Speed == 2)) { // MemClkFreq = 667MHz or 533MHz
+	if ((pDCTstat->Speed == 3) || (pDCTstat->Speed == 2)) { // MemClkFreq = 667MHz or 533MHz
 		*dqs_pos = 32;
 	}
 }
@@ -3737,7 +3737,7 @@ void mct_ClrClToNB_D(struct MCTStatStruc *pMCTstat,
 	msr = BU_CFG2;
 	_RDMSR(msr, &lo, &hi);
 	if (!pDCTstat->ClToNB_flag)
-		lo &= ~(1<<ClLinesToNbDis);
+		lo &= ~(1 << ClLinesToNbDis);
 	_WRMSR(msr, lo, hi);
 
 }
@@ -3783,10 +3783,10 @@ void mct_SetDramConfigHi_D(struct DCTStatStruc *pDCTstat, u32 dct,
 	 * Solution: From the bug report:
 	 *  1. A software-initiated frequency change should be wrapped into the
 	 *     following sequence :
-	 * 	- a) Disable Compensation (F2[1, 0]9C_x08[30] )
+	 * 	- a) Disable Compensation (F2[1, 0]9C_x08[30])
 	 * 	b) Reset the Begin Compensation bit (D3CMP->COMP_CONFIG[0]) in all the compensation engines
 	 * 	c) Do frequency change
-	 * 	d) Enable Compensation (F2[1, 0]9C_x08[30] )
+	 * 	d) Enable Compensation (F2[1, 0]9C_x08[30])
 	 *  2. A software-initiated Disable Compensation should always be
 	 *     followed by step b) of the above steps.
 	 * Silicon Status: Fixed In Rev B0
@@ -3970,7 +3970,7 @@ static void AfterDramInit_D(struct DCTStatStruc *pDCTstat, u8 dct) {
 	if (pDCTstat->LogicalCPUID & (AMD_DR_B2 | AMD_DR_B3)) {
 		mct_Wait(10000);	/* Wait 50 us*/
 		val = Get_NB32(dev, 0x110);
-		if ( val & (1 << DramEnabled)) {
+		if (val & (1 << DramEnabled)) {
 			/* If 50 us expires while DramEnable =0 then do the following */
 			val = Get_NB32(dev, 0x90 + reg_off);
 			val &= ~(1 << Width128);		/* Program Width128 = 0 */
@@ -3990,9 +3990,9 @@ static void AfterDramInit_D(struct DCTStatStruc *pDCTstat, u8 dct) {
 
 /* ==========================================================
  *  6-bit Bank Addressing Table
- *  RR=rows-13 binary
- *  B=Banks-2 binary
- *  CCC=Columns-9 binary
+ *  RR = rows-13 binary
+ *  B = Banks-2 binary
+ *  CCC = Columns-9 binary
  * ==========================================================
  *  DCT	CCCBRR	Rows	Banks	Columns	64-bit CS Size
  *  Encoding
diff --git a/src/northbridge/amd/amdmct/mct/mct_d.h b/src/northbridge/amd/amdmct/mct/mct_d.h
index b580457..4e1a909 100644
--- a/src/northbridge/amd/amdmct/mct/mct_d.h
+++ b/src/northbridge/amd/amdmct/mct/mct_d.h
@@ -30,16 +30,16 @@
 #define PT_S1		2
 #define PT_GR		3
 
-#define J_MIN		0		/* j loop constraint. 1=CL 2.0 T*/
-#define J_MAX		5		/* j loop constraint. 5=CL 7.0 T*/
-#define K_MIN		1		/* k loop constraint. 1=200 MHz*/
-#define K_MAX		5		/* k loop constraint. 5=533 MHz*/
-#define CL_DEF		2		/* Default value for failsafe operation. 2=CL 4.0 T*/
-#define T_DEF		1		/* Default value for failsafe operation. 1=5ns (cycle time)*/
-
-#define BSCRate	1		/* reg bit field=rate of dram scrubber for ecc*/
+#define J_MIN		0		/* j loop constraint. 1 = CL 2.0 T*/
+#define J_MAX		5		/* j loop constraint. 5 = CL 7.0 T*/
+#define K_MIN		1		/* k loop constraint. 1 = 200 MHz*/
+#define K_MAX		5		/* k loop constraint. 5 = 533 MHz*/
+#define CL_DEF		2		/* Default value for failsafe operation. 2 = CL 4.0 T*/
+#define T_DEF		1		/* Default value for failsafe operation. 1 = 5ns (cycle time)*/
+
+#define BSCRate	1		/* reg bit field = rate of dram scrubber for ecc*/
 					/* memory initialization (ecc and check-bits).*/
-					/* 1=40 ns/64 bytes.*/
+					/* 1 = 40 ns/64 bytes.*/
 #define FirstPass	1		/* First pass through RcvEn training*/
 #define SecondPass	2		/* Second pass through Rcven training*/
 
@@ -289,7 +289,7 @@ struct DCTStatStruc {		/* A per Node structure*/
 /* DCTStatStruct_F -  start */
 	u8 Node_ID;			/* Node ID of current controller*/
 	uint8_t Internal_Node_ID;	/* Internal Node ID of the current controller */
-	uint8_t Dual_Node_Package;	/* 1=Dual node package (G34) */
+	uint8_t Dual_Node_Package;	/* 1 = Dual node package (G34) */
 	uint8_t stopDCT;		/* Set if the DCT will be stopped */
 	u8 ErrCode;			/* Current error condition of Node
 		0= no error
@@ -306,7 +306,7 @@ struct DCTStatStruc {		/* A per Node structure*/
 		/* SPD address of..MB2_CS_L[0,1]*/
 		/* SPD address of..MA3_CS_L[0,1]*/
 		/* SPD address of..MB3_CS_L[0,1]*/
-	u16 DIMMPresent;		/*For each bit n 0..7, 1=DIMM n is present.
+	u16 DIMMPresent;		/*For each bit n 0..7, 1 = DIMM n is present.
 		DIMM#  Select Signal
 		0  MA0_CS_L[0,1]
 		1  MB0_CS_L[0,1]
@@ -316,15 +316,15 @@ struct DCTStatStruc {		/* A per Node structure*/
 		5  MB2_CS_L[0,1]
 		6  MA3_CS_L[0,1]
 		7  MB3_CS_L[0,1]*/
-	u16 DIMMValid;		/* For each bit n 0..7, 1=DIMM n is valid and is/will be configured*/
-	u16 DIMMMismatch;	/* For each bit n 0..7, 1=DIMM n is mismatched, channel B is always considered the mismatch */
-	u16 DIMMSPDCSE;		/* For each bit n 0..7, 1=DIMM n SPD checksum error*/
-	u16 DimmECCPresent;	/* For each bit n 0..7, 1=DIMM n is ECC capable.*/
-	u16 DimmPARPresent;	/* For each bit n 0..7, 1=DIMM n is ADR/CMD Parity capable.*/
-	u16 Dimmx4Present;	/* For each bit n 0..7, 1=DIMM n contains x4 data devices.*/
-	u16 Dimmx8Present;	/* For each bit n 0..7, 1=DIMM n contains x8 data devices.*/
-	u16 Dimmx16Present;	/* For each bit n 0..7, 1=DIMM n contains x16 data devices.*/
-	u16 DIMM2Kpage;		/* For each bit n 0..7, 1=DIMM n contains 1K page devices.*/
+	u16 DIMMValid;		/* For each bit n 0..7, 1 = DIMM n is valid and is/will be configured*/
+	u16 DIMMMismatch;	/* For each bit n 0..7, 1 = DIMM n is mismatched, channel B is always considered the mismatch */
+	u16 DIMMSPDCSE;		/* For each bit n 0..7, 1 = DIMM n SPD checksum error*/
+	u16 DimmECCPresent;	/* For each bit n 0..7, 1 = DIMM n is ECC capable.*/
+	u16 DimmPARPresent;	/* For each bit n 0..7, 1 = DIMM n is ADR/CMD Parity capable.*/
+	u16 Dimmx4Present;	/* For each bit n 0..7, 1 = DIMM n contains x4 data devices.*/
+	u16 Dimmx8Present;	/* For each bit n 0..7, 1 = DIMM n contains x8 data devices.*/
+	u16 Dimmx16Present;	/* For each bit n 0..7, 1 = DIMM n contains x16 data devices.*/
+	u16 DIMM2Kpage;		/* For each bit n 0..7, 1 = DIMM n contains 1K page devices.*/
 	u8 MAload[2];		/* Number of devices loading MAA bus*/
 		/* Number of devices loading MAB bus*/
 	u8 MAdimms[2];		/*Number of DIMMs loading CH A*/
@@ -332,17 +332,17 @@ struct DCTStatStruc {		/* A per Node structure*/
 	u8 DATAload[2];		/*Number of ranks loading CH A DATA*/
 		/* Number of ranks loading CH B DATA*/
 	u8 DIMMAutoSpeed;	/*Max valid Mfg. Speed of DIMMs
-		1=200MHz
-		2=266MHz
-		3=333MHz
-		4=400MHz
-		5=533MHz*/
+		1 = 200MHz
+		2 = 266MHz
+		3 = 333MHz
+		4 = 400MHz
+		5 = 533MHz*/
 	u8 DIMMCASL;		/* Min valid Mfg. CL bitfield
-		0=2.0
-		1=3.0
-		2=4.0
-		3=5.0
-		4=6.0 */
+		0 = 2.0
+		1 = 3.0
+		2 = 4.0
+		3 = 5.0
+		4 = 6.0 */
 	u16 DIMMTrcd;		/* Minimax Trcd*40 (ns) of DIMMs*/
 	u16 DIMMTrp;		/* Minimax Trp*40 (ns) of DIMMs*/
 	u16 DIMMTrtp;		/* Minimax Trtp*40 (ns) of DIMMs*/
@@ -352,16 +352,16 @@ struct DCTStatStruc {		/* A per Node structure*/
 	u16 DIMMTrrd;		/* Minimax Trrd*40 (ns) of DIMMs*/
 	u16 DIMMTwtr;		/* Minimax Twtr*40 (ns) of DIMMs*/
 	u8 Speed;		/* Bus Speed (to set Controller)
-		1=200MHz
-		2=266MHz
-		3=333MHz
-		4=400MHz */
+		1 = 200MHz
+		2 = 266MHz
+		3 = 333MHz
+		4 = 400MHz */
 	u8 CASL;		/* CAS latency DCT setting
-		0=2.0
-		1=3.0
-		2=4.0
-		3=5.0
-		4=6.0 */
+		0 = 2.0
+		1 = 3.0
+		2 = 4.0
+		3 = 5.0
+		4 = 6.0 */
 	u8 Trcd;		/* DCT Trcd (busclocks) */
 	u8 Trp;			/* DCT Trp (busclocks) */
 	u8 Trtp;		/* DCT Trtp (busclocks) */
@@ -371,27 +371,27 @@ struct DCTStatStruc {		/* A per Node structure*/
 	u8 Trrd;		/* DCT Trrd (busclocks) */
 	u8 Twtr;		/* DCT Twtr (busclocks) */
 	u8 Trfc[4];		/* DCT Logical DIMM0 Trfc
-		0=75ns (for 256Mb devs)
-		1=105ns (for 512Mb devs)
-		2=127.5ns (for 1Gb devs)
-		3=195ns (for 2Gb devs)
-		4=327.5ns (for 4Gb devs) */
+		0 = 75ns (for 256Mb devs)
+		1 = 105ns (for 512Mb devs)
+		2 = 127.5ns (for 1Gb devs)
+		3 = 195ns (for 2Gb devs)
+		4 = 327.5ns (for 4Gb devs) */
 		/* DCT Logical DIMM1 Trfc (see Trfc0 for format) */
 		/* DCT Logical DIMM2 Trfc (see Trfc0 for format) */
 		/* DCT Logical DIMM3 Trfc (see Trfc0 for format) */
-	u16 CSPresent;		/* For each bit n 0..7, 1=Chip-select n is present */
-	u16 CSTestFail;		/* For each bit n 0..7, 1=Chip-select n is present but disabled */
+	u16 CSPresent;		/* For each bit n 0..7, 1 = Chip-select n is present */
+	u16 CSTestFail;		/* For each bit n 0..7, 1 = Chip-select n is present but disabled */
 	u32 DCTSysBase;		/* BASE[39:8] (system address) of this Node's DCTs. */
 	u32 DCTHoleBase;	/* If not zero, BASE[39:8] (system address) of dram hole for HW remapping.  Dram hole exists on this Node's DCTs. */
 	u32 DCTSysLimit;	/* LIMIT[39:8] (system address) of this Node's DCTs */
 	u16 PresetmaxFreq;	/* Maximum OEM defined DDR frequency
-		200=200MHz (DDR400)
-		266=266MHz (DDR533)
-		333=333MHz (DDR667)
-		400=400MHz (DDR800) */
+		200 = 200MHz (DDR400)
+		266 = 266MHz (DDR533)
+		333 = 333MHz (DDR667)
+		400 = 400MHz (DDR800) */
 	u8 _2Tmode;		/* 1T or 2T CMD mode (slow access mode)
-		1=1T
-		2=2T */
+		1 = 1T
+		2 = 2T */
 	u8 TrwtTO;		/* DCT TrwtTO (busclocks)*/
 	u8 Twrrd;		/* DCT Twrrd (busclocks)*/
 	u8 Twrwr;		/* DCT Twrwr (busclocks)*/
@@ -415,9 +415,9 @@ struct DCTStatStruc {		/* A per Node structure*/
 		/* CHB Byte 0-7 Read DQS Delay */
 	u32 PtrPatternBufA;	/* Ptr on stack to aligned DQS testing pattern*/
 	u32 PtrPatternBufB;	/* Ptr on stack to aligned DQS testing pattern*/
-	u8 Channel;		/* Current Channel (0= CH A, 1=CH B)*/
+	u8 Channel;		/* Current Channel (0= CH A, 1 = CH B)*/
 	u8 ByteLane;		/* Current Byte Lane (0..7)*/
-	u8 Direction;		/* Current DQS-DQ training write direction (0=read, 1=write)*/
+	u8 Direction;		/* Current DQS-DQ training write direction (0 = read, 1 = write)*/
 	u8 Pattern;		/* Current pattern*/
 	u8 DQSDelay;		/* Current DQS delay value*/
 	u32 TrainErrors;	/* Current Training Errors*/
@@ -483,15 +483,15 @@ struct DCTStatStruc {		/* A per Node structure*/
 	u8 WrDatGrossH;
 	u8 DqsRcvEnGrossL;
 	// NOTE: Not used - u8 NodeSpeed		/* Bus Speed (to set Controller)
-		/* 1=200MHz */
-		/* 2=266MHz */
-		/* 3=333MHz */
+		/* 1 = 200MHz */
+		/* 2 = 266MHz */
+		/* 3 = 333MHz */
 	// NOTE: Not used - u8 NodeCASL		/* CAS latency DCT setting
-		/* 0=2.0 */
-		/* 1=3.0 */
-		/* 2=4.0 */
-		/* 3=5.0 */
-		/* 4=6.0 */
+		/* 0 = 2.0 */
+		/* 1 = 3.0 */
+		/* 2 = 4.0 */
+		/* 3 = 5.0 */
+		/* 4 = 6.0 */
 	u8 TrwtWB;
 	u8 CurrRcvrCHADelay;	/* for keep current RcvrEnDly of chA*/
 	u16 T1000;		/* get the T1000 figure (cycle time (ns)*1K)*/
@@ -575,7 +575,7 @@ struct DCTStatStruc {		/* A per Node structure*/
 #define SB_SWNodeHole		7	/* Remapping of Node Base on this Node to create a gap.*/
 #define SB_HWHole		8	/* Memory Hole created on this Node using HW remapping.*/
 #define SB_Over400MHz		9	/* DCT freq >= 400MHz flag*/
-#define SB_DQSPos_Pass2	10	/* Using for TrainDQSPos DIMM0/1, when freq>=400MHz*/
+#define SB_DQSPos_Pass2	10	/* Using for TrainDQSPos DIMM0/1, when freq >= 400MHz*/
 #define SB_DQSRcvLimit		11	/* Using for DQSRcvEnTrain to know we have reached to upper bound.*/
 #define SB_ExtConfig		12	/* Indicator the default setting for extend PCI configuration support*/
 
@@ -587,73 +587,73 @@ struct DCTStatStruc {		/* A per Node structure*/
 ===============================================================================*/
 /*Platform Configuration*/
 #define NV_PACK_TYPE		0	/* CPU Package Type (2-bits)
-					    0=NPT L1
-					    1=NPT M2
-					    2=NPT S1*/
+					    0 = NPT L1
+					    1 = NPT M2
+					    2 = NPT S1*/
 #define NV_MAX_NODES		1	/* Number of Nodes/Sockets (4-bits)*/
 #define NV_MAX_DIMMS		2	/* Number of DIMM slots for the specified Node ID (4-bits)*/
 #define NV_MAX_MEMCLK		3	/* Maximum platform demonstrated Memclock (10-bits)
-					    200=200MHz (DDR400)
-					    266=266MHz (DDR533)
-					    333=333MHz (DDR667)
-					    400=400MHz (DDR800)*/
+					    200 = 200MHz (DDR400)
+					    266 = 266MHz (DDR533)
+					    333 = 333MHz (DDR667)
+					    400 = 400MHz (DDR800)*/
 #define NV_MIN_MEMCLK		4	/* Minimum platform demonstrated Memclock (10-bits) */
 #define NV_ECC_CAP		5	/* Bus ECC capable (1-bits)
-					    0=Platform not capable
-					    1=Platform is capable*/
+					    0 = Platform not capable
+					    1 = Platform is capable*/
 #define NV_4RANKType		6	/* Quad Rank DIMM slot type (2-bits)
-					    0=Normal
-					    1=R4 (4-Rank Registered DIMMs in AMD server configuration)
-					    2=S4 (Unbuffered SO-DIMMs)*/
+					    0 = Normal
+					    1 = R4 (4-Rank Registered DIMMs in AMD server configuration)
+					    2 = S4 (Unbuffered SO-DIMMs)*/
 #define NV_BYPMAX		7	/* Value to set DcqBypassMax field (See Function 2, Offset 94h, [27:24] of BKDG for field definition).
-					    4=4 times bypass (normal for non-UMA systems)
-					    7=7 times bypass (normal for UMA systems)*/
+					    4 = 4 times bypass (normal for non-UMA systems)
+					    7 = 7 times bypass (normal for UMA systems)*/
 #define NV_RDWRQBYP		8	/* Value to set RdWrQByp field (See Function 2, Offset A0h, [3:2] of BKDG for field definition).
-					    2=8 times (normal for non-UMA systems)
-					    3=16 times (normal for UMA systems)*/
+					    2 = 8 times (normal for non-UMA systems)
+					    3 = 16 times (normal for UMA systems)*/
 
 
 /*Dram Timing*/
 #define NV_MCTUSRTMGMODE	10	/* User Memclock Mode (2-bits)
-					    0=Auto, no user limit
-					    1=Auto, user limit provided in NV_MemCkVal
-					    2=Manual, user value provided in NV_MemCkVal*/
+					    0 = Auto, no user limit
+					    1 = Auto, user limit provided in NV_MemCkVal
+					    2 = Manual, user value provided in NV_MemCkVal*/
 #define NV_MemCkVal		11	/* Memory Clock Value (2-bits)
-					    0=200MHz
-					    1=266MHz
-					    2=333MHz
-					    3=400MHz*/
+					    0 = 200MHz
+					    1 = 266MHz
+					    2 = 333MHz
+					    3 = 400MHz*/
 
 /*Dram Configuration*/
 #define NV_BankIntlv		20	/* Dram Bank (chip-select) Interleaving (1-bits)
-					    0=disable
-					    1=enable*/
+					    0 = disable
+					    1 = enable*/
 #define NV_AllMemClks		21	/* Turn on All DIMM clocks (1-bits)
-					    0=normal
-					    1=enable all memclocks*/
+					    0 = normal
+					    1 = enable all memclocks*/
 #define NV_SPDCHK_RESTRT	22	/* SPD Check control bitmap (1-bits)
-					    0=Exit current node init if any DIMM has SPD checksum error
-					    1=Ignore faulty SPD checksums (Note: DIMM cannot be enabled)*/
+					    0 = Exit current node init if any DIMM has SPD checksum error
+					    1 = Ignore faulty SPD checksums (Note: DIMM cannot be enabled)*/
 #define NV_DQSTrainCTL		23	/* DQS Signal Timing Training Control
-					    0=skip DQS training
-					    1=perform DQS training*/
+					    0 = skip DQS training
+					    1 = perform DQS training*/
 #define NV_NodeIntlv		24	/* Node Memory Interleaving (1-bits)
-					    0=disable
-					    1=enable*/
+					    0 = disable
+					    1 = enable*/
 #define NV_BurstLen32		25	/* BurstLength32 for 64-bit mode (1-bits)
-					    0=disable (normal)
-					    1=enable (4 beat burst when width is 64-bits)*/
+					    0 = disable (normal)
+					    1 = enable (4 beat burst when width is 64-bits)*/
 
 /*Dram Power*/
 #define NV_CKE_PDEN		30	/* CKE based power down mode (1-bits)
-					    0=disable
-					    1=enable*/
+					    0 = disable
+					    1 = enable*/
 #define NV_CKE_CTL		31	/* CKE based power down control (1-bits)
-					    0=per Channel control
-					    1=per Chip select control*/
+					    0 = per Channel control
+					    1 = per Chip select control*/
 #define NV_CLKHZAltVidC3	32	/* Memclock tri-stating during C3 and Alt VID (1-bits)
-					    0=disable
-					    1=enable*/
+					    0 = disable
+					    1 = enable*/
 
 /*Memory Map/Mgt.*/
 #define NV_BottomIO		40	/* Bottom of 32-bit IO space (8-bits)
@@ -661,8 +661,8 @@ struct DCTStatStruc {		/* A per Node structure*/
 #define NV_BottomUMA		41	/* Bottom of shared graphics dram (8-bits)
 					    NV_BottomUMA[7:0]=Addr[31:24]*/
 #define NV_MemHole		42	/* Memory Hole Remapping (1-bits)
-					    0=disable
-					    1=enable  */
+					    0 = disable
+					    1 = enable  */
 
 /*ECC*/
 #define NV_ECC			50	/* Dram ECC enable*/
@@ -674,13 +674,13 @@ struct DCTStatStruc {		/* A per Node structure*/
 #define NV_L3BKScrub		57	/* L3 ECC Background Scrubber CTL*/
 #define NV_DCBKScrub		58	/* DCache ECC Background Scrubber CTL*/
 #define NV_CS_SpareCTL		59	/* Chip Select Spare Control bit 0:
-					       0=disable Spare
-					       1=enable Spare */
+					       0 = disable Spare
+					       1 = enable Spare */
 					/* Chip Select Spare Control bit 1-4:
 					     Reserved, must be zero*/
 #define NV_SyncOnUnEccEn	61	/* SyncOnUnEccEn control
-					   0=disable
-					   1=enable*/
+					   0 = disable
+					   1 = enable*/
 #define NV_Unganged		62
 
 #define NV_ChannelIntlv	63	/* Channel Interleaving (3-bits)
diff --git a/src/northbridge/amd/amdmct/mct/mct_d_gcc.h b/src/northbridge/amd/amdmct/mct/mct_d_gcc.h
index fd39b38..fbfe988 100644
--- a/src/northbridge/amd/amdmct/mct/mct_d_gcc.h
+++ b/src/northbridge/amd/amdmct/mct/mct_d_gcc.h
@@ -39,7 +39,7 @@ static inline void _RDTSC(u32 *lo, u32 *hi)
 	__asm__ volatile (
 		 "rdtsc"
 		 : "=a" (*lo), "=d"(*hi)
-		 );
+		);
 }
 
 
@@ -61,7 +61,7 @@ static u32 bsr(u32 x)
 	u8 i;
 	u32 ret = 0;
 
-	for (i=31; i>0; i--) {
+	for (i = 31; i > 0; i--) {
 		if (x & (1<<i)) {
 			ret = i;
 			break;
@@ -78,7 +78,7 @@ static u32 bsf(u32 x)
 	u8 i;
 	u32 ret = 32;
 
-	for (i=0; i<32; i++) {
+	for (i = 0; i < 32; i++) {
 		if (x & (1<<i)) {
 			ret = i;
 			break;
@@ -88,9 +88,9 @@ static u32 bsf(u32 x)
 	return ret;
 }
 
-#define _MFENCE asm volatile ( "mfence")
+#define _MFENCE asm volatile ("mfence")
 
-#define _SFENCE asm volatile ( "sfence" )
+#define _SFENCE asm volatile ("sfence")
 
 /* prevent speculative execution of following instructions */
 #define _EXECFENCE asm volatile ("outb %al, $0xed")
@@ -343,7 +343,7 @@ static u32 stream_to_int(u8 const *p)
 
 	val = 0;
 
-	for (i=3; i>=0; i--) {
+	for (i = 3; i >= 0; i--) {
 		val <<= 8;
 		valx = *(p+i);
 		val |= valx;
diff --git a/src/northbridge/amd/amdmct/mct/mctardk3.c b/src/northbridge/amd/amdmct/mct/mctardk3.c
index 327acbc..e290333 100644
--- a/src/northbridge/amd/amdmct/mct/mctardk3.c
+++ b/src/northbridge/amd/amdmct/mct/mctardk3.c
@@ -187,7 +187,7 @@ static void Get_ChannelPS_Cfg0_D(u8 MAAdimms, u8 Speed, u8 MAAload,
 	}
 
 	while (*p != 0xFF) {
-		if ((MAAdimms == *(p+10)) || (*(p+10 ) == 0xFE)) {
+		if ((MAAdimms == *(p+10)) || (*(p+10) == 0xFE)) {
 			if ((*p == Speed) || (*p == 0xFE)) {
 				if (MAAload <= *(p+1)) {
 					*AddrTmgCTL = stream_to_int((u8*)(p+2));
diff --git a/src/northbridge/amd/amdmct/mct/mctardk4.c b/src/northbridge/amd/amdmct/mct/mctardk4.c
index cac2342..7e1d91a 100644
--- a/src/northbridge/amd/amdmct/mct/mctardk4.c
+++ b/src/northbridge/amd/amdmct/mct/mctardk4.c
@@ -96,7 +96,7 @@ static const u8 Table_ATC_ODC_D_Ax[] = {
 };
 
 
-static void Get_ChannelPS_Cfg0_D( u8 MAAdimms, u8 Speed, u8 MAAload,
+static void Get_ChannelPS_Cfg0_D(u8 MAAdimms, u8 Speed, u8 MAAload,
 				u8 DATAAload, u32 *AddrTmgCTL, u32 *ODC_CTL,
 				u8 *CMDmode)
 {
diff --git a/src/northbridge/amd/amdmct/mct/mctchi_d.c b/src/northbridge/amd/amdmct/mct/mctchi_d.c
index d5956b1..705bd91 100644
--- a/src/northbridge/amd/amdmct/mct/mctchi_d.c
+++ b/src/northbridge/amd/amdmct/mct/mctchi_d.c
@@ -37,7 +37,7 @@ void InterleaveChannels_D(struct MCTStatStruc *pMCTstat,
 	/* call back to wrapper not needed ManualChannelInterleave_D(); */
 	/* call back - DctSelIntLvAddr = mctGet_NVbits(NV_ChannelIntlv);*/	/* override interleave */
 	// FIXME: Check for Cx
-	DctSelIntLvAddr = mctGet_NVbits(NV_ChannelIntlv); /* typ=5: Hash*: exclusive OR of address bits[20:16, 6]. */
+	DctSelIntLvAddr = mctGet_NVbits(NV_ChannelIntlv); /* typ = 5: Hash*: exclusive OR of address bits[20:16, 6]. */
 	beforeInterleaveChannels_D(pDCTstatA, &enabled);
 
 	if (DctSelIntLvAddr & 1) {
@@ -71,7 +71,7 @@ void InterleaveChannels_D(struct MCTStatStruc *pMCTstat,
 				if (dct1_size == dct0_size) {
 					dct1_size = 0;
 					DctSelHi = 0x04;	/* DctSelHiRngEn = 0 */
-				} else if (dct1_size > dct0_size ) {
+				} else if (dct1_size > dct0_size) {
 					dct1_size = dct0_size;
 					DctSelHi = 0x07;	/* DctSelHiRngEn = 1, DctSelHi = 1 */
 				}
diff --git a/src/northbridge/amd/amdmct/mct/mctcsi_d.c b/src/northbridge/amd/amdmct/mct/mctcsi_d.c
index e8d26da..4841b73 100644
--- a/src/northbridge/amd/amdmct/mct/mctcsi_d.c
+++ b/src/northbridge/amd/amdmct/mct/mctcsi_d.c
@@ -45,11 +45,11 @@ void InterleaveBanks_D(struct MCTStatStruc *pMCTstat,
 
 	ChipSel = 0;		/* Find out if current configuration is capable */
 	while (DoIntlv && (ChipSel < MAX_CS_SUPPORTED)) {
-		reg = 0x40+(ChipSel<<2) + reg_off;	/* Dram CS Base 0 */
+		reg = 0x40+(ChipSel << 2) + reg_off;	/* Dram CS Base 0 */
 		val = Get_NB32(dev, reg);
-		if ( val & (1<<CSEnable)) {
+		if (val & (1 << CSEnable)) {
 			EnChipSels++;
-			reg = 0x60+((ChipSel>>1)<<2)+reg_off; /*Dram CS Mask 0 */
+			reg = 0x60+((ChipSel >> 1) << 2)+reg_off; /*Dram CS Mask 0 */
 			val = Get_NB32(dev, reg);
 			val >>= 19;
 			val &= 0x3ff;
@@ -62,7 +62,7 @@ void InterleaveBanks_D(struct MCTStatStruc *pMCTstat,
 					break;
 			reg = 0x80 + reg_off;		/*Dram Bank Addressing */
 			val = Get_NB32(dev, reg);
-			val >>= (ChipSel>>1)<<2;
+			val >>= (ChipSel >> 1) << 2;
 			val &= 0x0f;
 			if (EnChipSels == 1)
 				BankEncd = val;
@@ -80,14 +80,14 @@ void InterleaveBanks_D(struct MCTStatStruc *pMCTstat,
 
 	if (DoIntlv) {
 		if (!_CsIntCap) {
-			pDCTstat->ErrStatus |= 1<<SB_BkIntDis;
+			pDCTstat->ErrStatus |= 1 << SB_BkIntDis;
 			DoIntlv = 0;
 		}
 	}
 
 	if (DoIntlv) {
 		val = Tab_int_D[BankEncd];
-		if (pDCTstat->Status & (1<<SB_128bitmode))
+		if (pDCTstat->Status & (1 << SB_128bitmode))
 			val++;
 
 		AddrLoMask = (EnChipSels - 1)  << val;
@@ -100,7 +100,7 @@ void InterleaveBanks_D(struct MCTStatStruc *pMCTstat,
 		BitDelta = bsf(AddrHiMask) - bsf(AddrLoMask);
 
 		for (ChipSel = 0; ChipSel < MAX_CS_SUPPORTED; ChipSel++) {
-			reg = 0x40+(ChipSel<<2) + reg_off;	/*Dram CS Base 0 */
+			reg = 0x40+(ChipSel << 2) + reg_off;	/*Dram CS Base 0 */
 			val = Get_NB32(dev, reg);
 			if (val & 3) {
 				val_lo = val & AddrLoMask;
@@ -116,7 +116,7 @@ void InterleaveBanks_D(struct MCTStatStruc *pMCTstat,
 				if (ChipSel & 1)
 					continue;
 
-				reg = 0x60 + ((ChipSel>>1)<<2) + reg_off; /*Dram CS Mask 0 */
+				reg = 0x60 + ((ChipSel >> 1) << 2) + reg_off; /*Dram CS Mask 0 */
 				val = Get_NB32(dev, reg);
 				val_lo = val & AddrLoMask;
 				val_hi = val & AddrHiMask;
diff --git a/src/northbridge/amd/amdmct/mct/mctdqs_d.c b/src/northbridge/amd/amdmct/mct/mctdqs_d.c
index 67ff823..e0c7761 100644
--- a/src/northbridge/amd/amdmct/mct/mctdqs_d.c
+++ b/src/northbridge/amd/amdmct/mct/mctdqs_d.c
@@ -221,12 +221,12 @@ static void SetEccDQSRdWrPos_D(struct MCTStatStruc *pMCTstat,
 	u8 channel;
 	u8 direction;
 
-	for (channel = 0; channel < 2; channel++){
+	for (channel = 0; channel < 2; channel++) {
 		for (direction = 0; direction < 2; direction++) {
 			pDCTstat->Channel = channel;	/* Channel A or B */
 			pDCTstat->Direction = direction; /* Read or write */
 			CalcEccDQSPos_D(pMCTstat, pDCTstat, pDCTstat->CH_EccDQSLike[channel], pDCTstat->CH_EccDQSScale[channel], ChipSel);
-			print_debug_dqs_pair("\t\tSetEccDQSRdWrPos: channel ", channel, direction==DQS_READDIR? " R dqs_delay":" W dqs_delay",	pDCTstat->DQSDelay, 2);
+			print_debug_dqs_pair("\t\tSetEccDQSRdWrPos: channel ", channel, direction == DQS_READDIR? " R dqs_delay":" W dqs_delay",	pDCTstat->DQSDelay, 2);
 			pDCTstat->ByteLane = 8;
 			StoreDQSDatStrucVal_D(pMCTstat, pDCTstat, ChipSel);
 			mct_SetDQSDelayCSR_D(pMCTstat, pDCTstat, ChipSel);
@@ -251,7 +251,7 @@ static void CalcEccDQSPos_D(struct MCTStatStruc *pMCTstat,
 	GetDQSDatStrucVal_D(pMCTstat, pDCTstat, ChipSel);
 	DQSDelay1 = pDCTstat->DQSDelay;
 
-	if (DQSDelay0>DQSDelay1) {
+	if (DQSDelay0 > DQSDelay1) {
 		DQSDelay = DQSDelay0 - DQSDelay1;
 	} else {
 		DQSDelay = DQSDelay1 - DQSDelay0;
@@ -263,7 +263,7 @@ static void CalcEccDQSPos_D(struct MCTStatStruc *pMCTstat,
 
 	DQSDelay >>= 8;		// /256
 
-	if (DQSDelay0>DQSDelay1) {
+	if (DQSDelay0 > DQSDelay1) {
 		DQSDelay = DQSDelay1 - DQSDelay;
 	} else {
 		DQSDelay += DQSDelay1;
@@ -320,7 +320,7 @@ static void TrainDQSRdWrPos_D(struct MCTStatStruc *pMCTstat,
 		if (pDCTstat->DIMMValidDCT[Channel] == 0)	/* mct_BeforeTrainDQSRdWrPos_D */
 			continue;
 
-		for ( DQSWrDelay = 0; DQSWrDelay < dqsWrDelay_end; DQSWrDelay++) {
+		for (DQSWrDelay = 0; DQSWrDelay < dqsWrDelay_end; DQSWrDelay++) {
 			pDCTstat->DQSDelay = DQSWrDelay;
 			pDCTstat->Direction = DQS_WRITEDIR;
 			mct_SetDQSDelayAllCSR_D(pMCTstat, pDCTstat, cs_start);
@@ -362,7 +362,7 @@ static void TrainDQSRdWrPos_D(struct MCTStatStruc *pMCTstat,
 				for (Receiver = cs_start; Receiver < (cs_start + 2); Receiver += 2) {
 					printk(BIOS_DEBUG, "\t\tReceiver: %02x: ", Receiver);
 					p = pDCTstat->CH_D_DIR_B_DQS[Channel][Receiver >> 1][Dir];
-					for (i=0;i<8; i++) {
+					for (i = 0; i < 8; i++) {
 						val  = p[i];
 						printk(BIOS_DEBUG, "%02x ", val);
 					}
@@ -383,7 +383,7 @@ static void TrainDQSRdWrPos_D(struct MCTStatStruc *pMCTstat,
 		lo &= ~(1<<17);		/* restore HWCR.wrap32dis */
 		_WRMSR(addr, lo, hi);
 	}
-	if (!_SSE2){
+	if (!_SSE2) {
 		cr4 = read_cr4();
 		cr4 &= ~(1<<9);		/* restore cr4.OSFXSR */
 		write_cr4(cr4);
@@ -411,11 +411,11 @@ static void SetupDqsPattern_D(struct MCTStatStruc *pMCTstat,
 	buf = (u32 *)(((u32)buffer + 0x10) & (0xfffffff0));
 	if (pDCTstat->Status & (1 << SB_128bitmode)) {
 		pDCTstat->Pattern = 1;	/* 18 cache lines, alternating qwords */
-		for (i=0; i<16*18; i++)
+		for (i = 0; i < 16*18; i++)
 			buf[i] = TestPatternJD1b_D[i];
 	} else {
 		pDCTstat->Pattern = 0;	/* 9 cache lines, sequential qwords */
-		for (i=0; i<16*9; i++)
+		for (i = 0; i < 16*9; i++)
 			buf[i] = TestPatternJD1a_D[i];
 	}
 	pDCTstat->PtrPatternBufA = (u32)buf;
@@ -458,10 +458,10 @@ static void TrainDQSPos_D(struct MCTStatStruc *pMCTstat,
 		dqsDelay_end = 32;
 	}
 
-	/* Bitmapped status per delay setting, 0xff=All positions
+	/* Bitmapped status per delay setting, 0xff = All positions
 	 * passing (1= PASS). Set the entire array.
 	 */
-	for (DQSDelay=0; DQSDelay<64; DQSDelay++) {
+	for (DQSDelay = 0; DQSDelay < 64; DQSDelay++) {
 		MutualCSPassW[DQSDelay] = 0xFF;
 	}
 
@@ -481,7 +481,7 @@ static void TrainDQSPos_D(struct MCTStatStruc *pMCTstat,
 		}
 
 		print_debug_dqs("\t\t\t\tTrainDQSPos: 12 TestAddr ", TestAddr, 4);
-		SetUpperFSbase(TestAddr);	/* fs:eax=far ptr to target */
+		SetUpperFSbase(TestAddr);	/* fs:eax = far ptr to target */
 
 		if (pDCTstat->Direction == DQS_READDIR) {
 			print_debug_dqs("\t\t\t\tTrainDQSPos: 13 for read ", 0, 4);
@@ -504,7 +504,7 @@ static void TrainDQSPos_D(struct MCTStatStruc *pMCTstat,
 			print_debug_dqs("\t\t\t\t\tTrainDQSPos: 144 Pattern ", pDCTstat->Pattern, 5);
 			ReadDQSTestPattern_D(pMCTstat, pDCTstat, TestAddr << 8);
 			/* print_debug_dqs("\t\t\t\t\tTrainDQSPos: 145 MutualCSPassW ", MutualCSPassW[DQSDelay], 5); */
-			tmp = CompareDQSTestPattern_D(pMCTstat, pDCTstat, TestAddr << 8); /* 0=fail, 1=pass */
+			tmp = CompareDQSTestPattern_D(pMCTstat, pDCTstat, TestAddr << 8); /* 0 = fail, 1 = pass */
 
 			if (mct_checkFenceHoleAdjust_D(pMCTstat, pDCTstat, DQSDelay, ChipSel, &tmp)) {
 				goto skipLocMiddle;
@@ -538,7 +538,7 @@ static void TrainDQSPos_D(struct MCTStatStruc *pMCTstat,
 					if (LastTest == DQS_FAIL) {
 						RnkDlySeqPassMin = DQSDelay; //start sequential run
 					}
-					if ((RnkDlySeqPassMax - RnkDlySeqPassMin)>(RnkDlyFilterMax-RnkDlyFilterMin)){
+					if ((RnkDlySeqPassMax - RnkDlySeqPassMin) > (RnkDlyFilterMax - RnkDlyFilterMin)) {
 						RnkDlyFilterMin = RnkDlySeqPassMin;
 						RnkDlyFilterMax = RnkDlySeqPassMax;
 					}
@@ -552,7 +552,7 @@ static void TrainDQSPos_D(struct MCTStatStruc *pMCTstat,
 				Errors |= 1 << SB_NODQSPOS; /* no passing window */
 			} else {
 				print_debug_dqs_pair("\t\t\t\tTrainDQSPos: 34 RnkDlyFilter: ", RnkDlyFilterMin, " ",  RnkDlyFilterMax, 4);
-				if (((RnkDlyFilterMax - RnkDlyFilterMin) < MIN_DQS_WNDW)){
+				if (((RnkDlyFilterMax - RnkDlyFilterMin) < MIN_DQS_WNDW)) {
 					Errors |= 1 << SB_SMALLDQS;
 				} else {
 					u8 middle_dqs;
@@ -631,7 +631,7 @@ static u8 MiddleDQS_D(u8 min, u8 max)
 	size = max-min;
 	if (size % 2)
 		size++;		// round up if the size isn't even.
-	return ( min + (size >> 1));
+	return (min + (size >> 1));
 }
 
 
@@ -679,10 +679,10 @@ static u8 ChipSelPresent_D(struct MCTStatStruc *pMCTstat,
 		reg_off = 0;
 	}
 
-	if (ChipSel < MAX_CS_SUPPORTED){
+	if (ChipSel < MAX_CS_SUPPORTED) {
 		reg = 0x40 + (ChipSel << 2) + reg_off;
 		val = Get_NB32(dev, reg);
-		if (val & ( 1 << 0))
+		if (val & (1 << 0))
 			ret = 1;
 	}
 
@@ -775,8 +775,8 @@ static u8 CompareDQSTestPattern_D(struct MCTStatStruc *pMCTstat, struct DCTStatS
 	}
 
 	bytelane = 0;  		/* bytelane counter */
-	bitmap = 0xFF;		/* bytelane test bitmap, 1=pass */
-	for (i=0; i < (9 * 64 / 4); i++) { /* sizeof testpattern. /4 due to next loop */
+	bitmap = 0xFF;		/* bytelane test bitmap, 1 = pass */
+	for (i = 0; i < (9 * 64 / 4); i++) { /* sizeof testpattern. /4 due to next loop */
 		value = read32_fs(addr_lo);
 		value_test = *test_buf;
 
@@ -797,7 +797,7 @@ static u8 CompareDQSTestPattern_D(struct MCTStatStruc *pMCTstat, struct DCTStatS
 		if (!bitmap)
 			break;
 
-		if (bytelane == 0){
+		if (bytelane == 0) {
 			if (pattern == 1) { //dual channel
 				addr_lo += 8; //skip over other channel's data
 				test_buf += 2;
@@ -815,7 +815,7 @@ static void FlushDQSTestPattern_D(struct DCTStatStruc *pDCTstat,
 					u32 addr_lo)
 {
 	/* Flush functions in mct_gcc.h */
-	if (pDCTstat->Pattern == 0){
+	if (pDCTstat->Pattern == 0) {
 		FlushDQSTestPattern_L9(addr_lo);
 	} else {
 		FlushDQSTestPattern_L18(addr_lo);
@@ -1036,7 +1036,7 @@ static void mct_SetDQSDelayAllCSR_D(struct MCTStatStruc *pMCTstat,
 
 
 	for (ChipSel = cs_start; ChipSel < (cs_start + 2); ChipSel++) {
-		if ( mct_RcvrRankEnabled_D(pMCTstat, pDCTstat, pDCTstat->Channel, ChipSel)) {
+		if (mct_RcvrRankEnabled_D(pMCTstat, pDCTstat, pDCTstat->Channel, ChipSel)) {
 			for (ByteLane = 0; ByteLane < 8; ByteLane++) {
 				pDCTstat->ByteLane = ByteLane;
 				mct_SetDQSDelayCSR_D(pMCTstat, pDCTstat, ChipSel);
@@ -1088,9 +1088,9 @@ u32 mct_GetMCTSysAddr_D(struct MCTStatStruc *pMCTstat,
 
 	val &= ~0x0F;
 
-	/* unganged mode DCT0+DCT1, sys addr of DCT1=node
+	/* unganged mode DCT0+DCT1, sys addr of DCT1 = node
 	 * base+DctSelBaseAddr+local ca base*/
-	if ((Channel) && (pDCTstat->GangedMode == 0) && ( pDCTstat->DIMMValidDCT[0] > 0)) {
+	if ((Channel) && (pDCTstat->GangedMode == 0) && (pDCTstat->DIMMValidDCT[0] > 0)) {
 		reg = 0x110;
 		dword = Get_NB32(dev, reg);
 		dword &= 0xfffff800;
@@ -1104,7 +1104,7 @@ u32 mct_GetMCTSysAddr_D(struct MCTStatStruc *pMCTstat,
 			val += dword;
 		}
 	} else {
-		/* sys addr=node base+local cs base */
+		/* sys addr = node base+local cs base */
 		val += pDCTstat->DCTSysBase;
 
 		/* New stuff */
diff --git a/src/northbridge/amd/amdmct/mct/mctecc_d.c b/src/northbridge/amd/amdmct/mct/mctecc_d.c
index 5c1dc3a..9b22c84 100644
--- a/src/northbridge/amd/amdmct/mct/mctecc_d.c
+++ b/src/northbridge/amd/amdmct/mct/mctecc_d.c
@@ -40,7 +40,7 @@ static u8 isDramECCEn_D(struct DCTStatStruc *pDCTstat);
  *
  * Conditions for setting background scrubber.
  *  1. node is present
- *  2. node has dram functioning (WE=RE=1)
+ *  2. node has dram functioning (WE = RE = 1)
  *  3. all eccdimms (or bit 17 of offset 90,fn 2)
  *  4. no chip-select gap exists
  *
@@ -121,10 +121,10 @@ u8 ECCInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA)
 			val = Get_NB32(dev, reg);
 
 			/* WE/RE is checked */
-			if ((val & 3)==3) {	/* Node has dram populated */
+			if ((val & 3) == 3) {	/* Node has dram populated */
 				/* Negate 'all nodes/dimms ECC' flag if non ecc
 				   memory populated */
-				if ( pDCTstat->Status & (1<<SB_ECCDIMMs)) {
+				if (pDCTstat->Status & (1 << SB_ECCDIMMs)) {
 					LDramECC = isDramECCEn_D(pDCTstat);
 					if (pDCTstat->ErrCode != SC_RunningOK) {
 						pDCTstat->Status &=  ~(1 << SB_ECCDIMMs);
@@ -161,9 +161,9 @@ u8 ECCInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA)
 	}
 
 	if (AllECC)
-		pMCTstat->GStatus |= 1<<GSB_ECCDIMMs;
+		pMCTstat->GStatus |= 1 << GSB_ECCDIMMs;
 	else
-		pMCTstat->GStatus &= ~(1<<GSB_ECCDIMMs);
+		pMCTstat->GStatus &= ~(1 << GSB_ECCDIMMs);
 
 	/* Program the Dram BKScrub CTL to the proper (user selected) value.*/
 	/* Reset MC4_STS. */
@@ -172,11 +172,11 @@ u8 ECCInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA)
 		pDCTstat = pDCTstatA + Node;
 		LDramECC = 0;
 		if (NodePresent_D(Node)) {	/* If Node is present */
-			reg = 0x40+(Node<<3);	/* Dram Base Node 0 + index */
+			reg = 0x40+(Node << 3);	/* Dram Base Node 0 + index */
 			val = Get_NB32(pDCTstat->dev_map, reg);
 			curBase = val & 0xffff0000;
 			/*WE/RE is checked because memory config may have been */
-			if ((val & 3)==3) {	/* Node has dram populated */
+			if ((val & 3) == 3) {	/* Node has dram populated */
 				if (isDramECCEn_D(pDCTstat)) {	/* if ECC is enabled on this dram */
 					dev = pDCTstat->dev_nbmisc;
 					val = curBase << 8;
@@ -184,7 +184,7 @@ u8 ECCInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA)
 						val |= (1<<0); /* enable redirection */
 					}
 					Set_NB32(dev, 0x5C, val); /* Dram Scrub Addr Low */
-					val = curBase>>24;
+					val = curBase >> 24;
 					Set_NB32(dev, 0x60, val); /* Dram Scrub Addr High */
 					Set_NB32(dev, 0x58, OF_ScrubCTL);	/*Scrub Control */
 
@@ -236,16 +236,16 @@ static void setSyncOnUnEccEn_D(struct MCTStatStruc *pMCTstat,
 		struct DCTStatStruc *pDCTstat;
 		pDCTstat = pDCTstatA + Node;
 		if (NodePresent_D(Node)) {	/* If Node is present*/
-			reg = 0x40+(Node<<3);	/* Dram Base Node 0 + index*/
+			reg = 0x40+(Node << 3);	/* Dram Base Node 0 + index*/
 			val = Get_NB32(pDCTstat->dev_map, reg);
 			/*WE/RE is checked because memory config may have been*/
-			if ((val & 3)==3) {	/* Node has dram populated*/
-				if ( isDramECCEn_D(pDCTstat)) {
+			if ((val & 3) == 3) {	/* Node has dram populated*/
+				if (isDramECCEn_D(pDCTstat)) {
 					/*if ECC is enabled on this dram*/
 					dev = pDCTstat->dev_nbmisc;
 					reg = 0x44;	/* MCA NB Configuration*/
 					val = Get_NB32(dev, reg);
-					val |= (1<<SyncOnUcEccEn);
+					val |= (1 << SyncOnUcEccEn);
 					Set_NB32(dev, reg, val);
 				}
 			}	/* Node has Dram*/
@@ -300,11 +300,11 @@ static u8 isDramECCEn_D(struct DCTStatStruc *pDCTstat)
 	} else {
 		ch_end = 2;
 	}
-	for (i=0; i<ch_end; i++) {
-		if (pDCTstat->DIMMValidDCT[i] > 0){
+	for (i = 0; i < ch_end; i++) {
+		if (pDCTstat->DIMMValidDCT[i] > 0) {
 			reg = 0x90 + i * 0x100;		/* Dram Config Low */
 			val = Get_NB32(dev, reg);
-			if (val & (1<<DimmEcEn)) {
+			if (val & (1 << DimmEcEn)) {
 				/* set local flag 'dram ecc capable' */
 				isDimmECCEn = 1;
 				break;
diff --git a/src/northbridge/amd/amdmct/mct/mctgr.c b/src/northbridge/amd/amdmct/mct/mctgr.c
index a13d4e2..41a479b 100644
--- a/src/northbridge/amd/amdmct/mct/mctgr.c
+++ b/src/northbridge/amd/amdmct/mct/mctgr.c
@@ -31,12 +31,12 @@ u32 mct_AdjustMemClkDis_GR(struct DCTStatStruc *pDCTstat, u32 dct,
 	DramTimingLo = val;
 	/* Dram Timing Low (owns Clock Enable bits) */
 	NewDramTimingLo = Get_NB32(dev, 0x88 + reg_off);
-	if (mctGet_NVbits(NV_AllMemClks)==0) {
+	if (mctGet_NVbits(NV_AllMemClks) == 0) {
 		/*Special Jedec SPD diagnostic bit - "enable all clocks"*/
 		if (!(pDCTstat->Status & (1<<SB_DiagClks))) {
-			for (i=0; i<MAX_DIMMS_SUPPORTED; i++) {
+			for (i = 0; i < MAX_DIMMS_SUPPORTED; i++) {
 				val = Tab_GRCLKDis[i];
-				if (val<8) {
+				if (val < 8) {
 					if (!(pDCTstat->DIMMValidDCT[dct] & (1<<val))) {
 						/* disable memclk */
 						NewDramTimingLo |= (1<<(i+1));
diff --git a/src/northbridge/amd/amdmct/mct/mctmtr_d.c b/src/northbridge/amd/amdmct/mct/mctmtr_d.c
index 5e91947..deb0f8a 100644
--- a/src/northbridge/amd/amdmct/mct/mctmtr_d.c
+++ b/src/northbridge/amd/amdmct/mct/mctmtr_d.c
@@ -35,11 +35,11 @@ void CPUMemTyping_D(struct MCTStatStruc *pMCTstat,
 
 	/* Set temporary top of memory from Node structure data.
 	 * Adjust temp top of memory down to accommodate 32-bit IO space.
-	 * Bottom40bIO=top of memory, right justified 8 bits
+	 * Bottom40bIO = top of memory, right justified 8 bits
 	 * 	(defines dram versus IO space type)
-	 * Bottom32bIO=sub 4GB top of memory, right justified 8 bits
+	 * Bottom32bIO = sub 4GB top of memory, right justified 8 bits
 	 * 	(defines dram versus IO space type)
-	 * Cache32bTOP=sub 4GB top of WB cacheable memory,
+	 * Cache32bTOP = sub 4GB top of WB cacheable memory,
 	 * 	right justified 8 bits
 	 */
 
@@ -83,8 +83,8 @@ void CPUMemTyping_D(struct MCTStatStruc *pMCTstat,
 		 */
 	addr = 0x204;	/* MTRR phys base 2*/
 			/* use TOP_MEM as limit*/
-			/* Limit=TOP_MEM|TOM2*/
-			/* Base=0*/
+			/* Limit = TOP_MEM|TOM2*/
+			/* Base = 0*/
 	print_tx("\t CPUMemTyping: Cache32bTOP:", Cache32bTOP);
 	SetMTRRrangeWB_D(0, &Cache32bTOP, &addr);
 				/* Base */
@@ -115,10 +115,10 @@ void CPUMemTyping_D(struct MCTStatStruc *pMCTstat,
 	addr = 0xC0010010;		/* SYS_CFG */
 	_RDMSR(addr, &lo, &hi);
 	if (Bottom40bIO) {
-		lo |= (1<<21);		/* MtrrTom2En=1 */
+		lo |= (1<<21);		/* MtrrTom2En = 1 */
 		lo |= (1<<22);		/* Tom2ForceMemTypeWB */
 	} else {
-		lo &= ~(1<<21);		/* MtrrTom2En=0 */
+		lo &= ~(1<<21);		/* MtrrTom2En = 0 */
 		lo &= ~(1<<22);		/* Tom2ForceMemTypeWB */
 	}
 	_WRMSR(addr, lo, hi);
@@ -151,7 +151,7 @@ static void SetMTRRrange_D(u32 Base, u32 *pLimit, u32 *pMtrrAddr, u16 MtrrType)
 	 * next set bit in a forward or backward sequence of bits (as a function
 	 * of the Limit). We start with the ascending path, to ensure that
 	 * regions are naturally aligned, then we switch to the descending path
-	 * to maximize MTRR usage efficiency. Base=0 is a special case where we
+	 * to maximize MTRR usage efficiency. Base = 0 is a special case where we
 	 * start with the descending path. Correct Mask for region is
 	 * 2comp(Size-1)-1, which is 2comp(Limit-Base-1)-1
 	 */
@@ -177,17 +177,17 @@ static void SetMTRRrange_D(u32 Base, u32 *pLimit, u32 *pMtrrAddr, u16 MtrrType)
 			curSize = valx;
 			valx += curBase;
 		}
-		curLimit = valx;		/*eax=curBase, edx=curLimit*/
+		curLimit = valx;		/*eax = curBase, edx = curLimit*/
 		valx = val>>24;
 		val <<= 8;
 
 		/* now program the MTRR */
 		val |= MtrrType;		/* set cache type (UC or WB)*/
 		_WRMSR(addr, val, valx);	/* prog. MTRR with current region Base*/
-		val = ((~(curSize - 1))+1) - 1;	/* Size-1*/ /*Mask=2comp(Size-1)-1*/
+		val = ((~(curSize - 1))+1) - 1;	/* Size-1*/ /*Mask = 2comp(Size-1)-1*/
 		valx = (val >> 24) | (0xff00);	/* GH have 48 bits addr */
 		val <<= 8;
-		val |= ( 1 << 11);			/* set MTRR valid*/
+		val |= (1 << 11);			/* set MTRR valid*/
 		addr++;
 		_WRMSR(addr, val, valx);	/* prog. MTRR with current region Mask*/
 		val = curLimit;
@@ -217,9 +217,9 @@ void UMAMemTyping_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat
 	/*======================================================================
 	 * Adjust temp top of memory down to accommodate UMA memory start
 	 *======================================================================*/
-	/* Bottom32bIO=sub 4GB top of memory, right justified 8 bits
+	/* Bottom32bIO = sub 4GB top of memory, right justified 8 bits
 	 * (defines dram versus IO space type)
-	 * Cache32bTOP=sub 4GB top of WB cacheable memory, right justified 8 bits */
+	 * Cache32bTOP = sub 4GB top of WB cacheable memory, right justified 8 bits */
 
 	Bottom32bIO = pMCTstat->Sub4GCacheTop >> 8;
 
@@ -238,7 +238,7 @@ void UMAMemTyping_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat
 		addr = 0x200;
 		lo = 0;
 		hi = lo;
-		while ( addr < 0x20C) {
+		while (addr < 0x20C) {
 			_WRMSR(addr, lo, hi);		/* prog. MTRR with current region Mask */
 			addr++;						/* next MTRR pair addr */
 		}
diff --git a/src/northbridge/amd/amdmct/mct/mctndi_d.c b/src/northbridge/amd/amdmct/mct/mctndi_d.c
index 32c3199..389d56b 100644
--- a/src/northbridge/amd/amdmct/mct/mctndi_d.c
+++ b/src/northbridge/amd/amdmct/mct/mctndi_d.c
@@ -72,7 +72,7 @@ void InterleaveNodes_D(struct MCTStatStruc *pMCTstat,
 				NodesWmem++;
 				Base &= 0xFFFF0000;	/* Base[39:8] */
 
-				if (pDCTstat->Status & (1 << SB_HWHole )) {
+				if (pDCTstat->Status & (1 << SB_HWHole)) {
 
 					/* to get true amount of dram,
 					 * subtract out memory hole if HW dram remapping */
@@ -87,7 +87,7 @@ void InterleaveNodes_D(struct MCTStatStruc *pMCTstat,
 				DctSelBase = Get_NB32(pDCTstat->dev_dct, 0x114);
 				if (DctSelBase) {
 					DctSelBase <<= 8;
-					if ( pDCTstat->Status & (1 << SB_HWHole)) {
+					if (pDCTstat->Status & (1 << SB_HWHole)) {
 						if (DctSelBase >= 0x1000000) {
 							DctSelBase -= HWHoleSz;
 						}
@@ -104,7 +104,7 @@ void InterleaveNodes_D(struct MCTStatStruc *pMCTstat,
 				MemSize &= 0xFFFF0000;
 				MemSize += 0x00010000;
 				MemSize -= Base;
-				if ( pDCTstat->Status & (1 << SB_HWHole)) {
+				if (pDCTstat->Status & (1 << SB_HWHole)) {
 					MemSize -= HWHoleSz;
 				}
 				if (Node == 0) {
@@ -144,7 +144,7 @@ void InterleaveNodes_D(struct MCTStatStruc *pMCTstat,
 	if (DoIntlv) {
 		MCTMemClr_D(pMCTstat,pDCTstatA);
 		/* Program Interleaving enabled on Node 0 map only.*/
-		MemSize0 <<= bsf(Nodes);	/* MemSize=MemSize*2 (or 4, or 8) */
+		MemSize0 <<= bsf(Nodes);	/* MemSize = MemSize*2 (or 4, or 8) */
 		Dct0MemSize <<= bsf(Nodes);
 		MemSize0 += HWHoleSz;
 		Base = ((Nodes - 1) << 8) | 3;
@@ -185,7 +185,7 @@ void InterleaveNodes_D(struct MCTStatStruc *pMCTstat,
 				HoleBase = pMCTstat->HoleBase;
 				if (Dct0MemSize >= HoleBase) {
 					val = HWHoleSz;
-					if ( Node == 0) {
+					if (Node == 0) {
 						val += Dct0MemSize;
 					}
 				} else {
diff --git a/src/northbridge/amd/amdmct/mct/mctpro_d.c b/src/northbridge/amd/amdmct/mct/mctpro_d.c
index 95afebf..6802a76 100644
--- a/src/northbridge/amd/amdmct/mct/mctpro_d.c
+++ b/src/northbridge/amd/amdmct/mct/mctpro_d.c
@@ -36,13 +36,13 @@ u32 OtherTiming_A_D(struct DCTStatStruc *pDCTstat, u32 val)
 {
 	/* Bug#10695:One MEMCLK Bubble Writes Don't Do X4 X8 Switching Correctly
 	 * Solution: BIOS should set DRAM Timing High[Twrwr] > 00b
-	 * ( F2x[1, 0]8C[1:0] > 00b).  Silicon Status: Fixed in Rev B
+	 * (F2x[1, 0]8C[1:0] > 00b).  Silicon Status: Fixed in Rev B
 	 * FIXME: check if this is still required.
 	 */
 	uint64_t tmp;
 	tmp = pDCTstat->LogicalCPUID;
 	if ((tmp == AMD_DR_A0A) || (tmp == AMD_DR_A1B) || (tmp == AMD_DR_A2)) {
-		if (!(val & (3<<12) ))
+		if (!(val & (3<<12)))
 			val |= 1<<12;
 	}
 	return val;
@@ -69,7 +69,7 @@ void mct_ForceAutoPrecharge_D(struct DCTStatStruc *pDCTstat, u32 dct)
 				val |= 1<<BurstLength32;
 			Set_NB32(dev, reg, val);
 
-			reg = 0x88 + reg_off;	/* cx=Dram Timing Lo */
+			reg = 0x88 + reg_off;	/* cx = Dram Timing Lo */
 			val = Get_NB32(dev, reg);
 			val |= 0x000F0000;	/* Trc = 0Fh */
 			Set_NB32(dev, reg, val);
@@ -89,7 +89,7 @@ void mct_EndDQSTraining_D(struct MCTStatStruc *pMCTstat,
 	 * NOTE -- this has been documented with a note at the end of this
 	 * section in the  BKDG (although, admittedly, the note does not really
 	 * stand out).
-	 * Silicon Status: Fixed in Rev B ( confirm)
+	 * Silicon Status: Fixed in Rev B (confirm)
 	 * FIXME: check this.
 	 */
 
@@ -149,14 +149,14 @@ void mct_BeforeDQSTrain_Samp_D(struct MCTStatStruc *pMCTstat,
 		dev = pDCTstat->dev_dct;
 		index = 0;
 
-		for (Channel = 0; Channel<2; Channel++) {
+		for (Channel = 0; Channel < 2; Channel++) {
 			index_reg = 0x98 + 0x100 * Channel;
 			val = Get_NB32_index_wait(dev, index_reg, 0x0d004007);
 			val |= 0x3ff;
 			Set_NB32_index_wait(dev, index_reg, 0x0d0f4f07, val);
 		}
 
-		for (Channel = 0; Channel<2; Channel++) {
+		for (Channel = 0; Channel < 2; Channel++) {
 			if (pDCTstat->GangedMode && Channel)
 				break;
 			reg_off = 0x100 * Channel;
@@ -167,11 +167,11 @@ void mct_BeforeDQSTrain_Samp_D(struct MCTStatStruc *pMCTstat,
 			Set_NB32(dev, reg, val);
 		}
 
-		for (Channel = 0; Channel<2; Channel++) {
+		for (Channel = 0; Channel < 2; Channel++) {
 			reg_off = 0x100 * Channel;
 			val = 0;
 			index_reg = 0x98 + reg_off;
-			for ( index = 0x30; index < (0x45 + 1); index++) {
+			for (index = 0x30; index < (0x45 + 1); index++) {
 				Set_NB32_index_wait(dev, index_reg, index, val);
 			}
 		}
@@ -211,7 +211,7 @@ u32 Modify_D3CMP(struct DCTStatStruc *pDCTstat, u32 dct, u32 value)
 		index = 0x0D004201;
 		val = Get_NB32_index_wait(dev, index_reg, index);
 		value &= ~(1<<27);
-		value |= ((val>>10) & 1) << 27;
+		value |= ((val >> 10) & 1) << 27;
 	}
 	return value;
 }
@@ -254,10 +254,10 @@ u32 CheckNBCOFAutoPrechg(struct DCTStatStruc *pDCTstat, u32 dct)
 	/* 3 * (Fn2xD4[NBFid]+4)/(2^NbDid)/(3+Fn2x94[MemClkFreq]) */
 	msr = 0xC0010071;
 	_RDMSR(msr, &lo, &hi);
-	NbDid = (lo>>22) & 1;
+	NbDid = (lo >> 22) & 1;
 
 	val = Get_NB32(pDCTstat->dev_dct, 0x94 + 0x100 * dct);
-	valx = ((val & 0x07) + 3)<<NbDid;
+	valx = ((val & 0x07) + 3) << NbDid;
 	print_tx("MemClk:", valx >> NbDid);
 
 	val = Get_NB32(pDCTstat->dev_nbmisc, 0xd4);
@@ -265,7 +265,7 @@ u32 CheckNBCOFAutoPrechg(struct DCTStatStruc *pDCTstat, u32 dct)
 	print_tx("NB COF:", valy >> NbDid);
 
 	val = valy/valx;
-	if ((val==3) && (valy%valx))  /* 3 < NClk/MemClk < 4 */
+	if ((val == 3) && (valy % valx))  /* 3 < NClk/MemClk < 4 */
 		ret = 1;
 
 	return ret;
@@ -296,7 +296,7 @@ void mct_BeforeDramInit_D(struct DCTStatStruc *pDCTstat, u32 dct)
 			}
 			dev = pDCTstat->dev_dct;
 			index = 0x0D00E001;
-			for (ch=ch_start; ch<ch_end; ch++) {
+			for (ch = ch_start; ch < ch_end; ch++) {
 				index_reg = 0x98 + 0x100 * ch;
 				val = Get_NB32_index(dev, index_reg, 0x0D00E001);
 				val &= ~(0xf0);
diff --git a/src/northbridge/amd/amdmct/mct/mctsrc.c b/src/northbridge/amd/amdmct/mct/mctsrc.c
index 510cf0d..a87cea8 100644
--- a/src/northbridge/amd/amdmct/mct/mctsrc.c
+++ b/src/northbridge/amd/amdmct/mct/mctsrc.c
@@ -86,7 +86,7 @@ static void SetupRcvrPattern(struct MCTStatStruc *pMCTstat,
 	p_A = (u32 *)SetupDqsPattern_1PassB(pass);
 	p_B = (u32 *)SetupDqsPattern_1PassA(pass);
 
-	for (i=0;i<16;i++) {
+	for (i = 0; i < 16; i++) {
 		buf_a[i] = p_A[i];
 		buf_b[i] = p_B[i];
 	}
@@ -161,7 +161,7 @@ static void dqsTrainRcvrEn_SW(struct MCTStatStruc *pMCTstat,
 	print_t("TrainRcvrEn: 1\n");
 
 	cr4 = read_cr4();
-	if (cr4 & ( 1 << 9)) {	/* save the old value */
+	if (cr4 & (1 << 9)) {	/* save the old value */
 		_SSE2 = 1;
 	}
 	cr4 |= (1 << 9);	/* OSFXSR enable SSE2 */
@@ -261,7 +261,7 @@ static void dqsTrainRcvrEn_SW(struct MCTStatStruc *pMCTstat,
 			if (Pass == FirstPass) {
 				pDCTstat->DqsRcvEn_Pass = 0;
 			} else {
-				pDCTstat->DqsRcvEn_Pass=0xFF;
+				pDCTstat->DqsRcvEn_Pass = 0xFF;
 			}
 			pDCTstat->DqsRcvEn_Saved = 0;
 
@@ -446,7 +446,7 @@ static void dqsTrainRcvrEn_SW(struct MCTStatStruc *pMCTstat,
 		lo &= ~(1<<17);		/* restore HWCR.wrap32dis */
 		_WRMSR(msr, lo, hi);
 	}
-	if (!_SSE2){
+	if (!_SSE2) {
 		cr4 = read_cr4();
 		cr4 &= ~(1<<9); 	/* restore cr4.OSFXSR */
 		write_cr4(cr4);
@@ -456,7 +456,7 @@ static void dqsTrainRcvrEn_SW(struct MCTStatStruc *pMCTstat,
 	{
 		u8 Channel;
 		printk(BIOS_DEBUG, "TrainRcvrEn: CH_MaxRdLat:\n");
-		for (Channel = 0; Channel<2; Channel++) {
+		for (Channel = 0; Channel < 2; Channel++) {
 			printk(BIOS_DEBUG, "Channel: %02x: %02x\n", Channel, pDCTstat->CH_MaxRdLat[Channel]);
 		}
 	}
@@ -472,10 +472,10 @@ static void dqsTrainRcvrEn_SW(struct MCTStatStruc *pMCTstat,
 		printk(BIOS_DEBUG, "TrainRcvrEn: CH_D_B_RCVRDLY:\n");
 		for (Channel = 0; Channel < 2; Channel++) {
 			printk(BIOS_DEBUG, "Channel: %02x\n", Channel);
-			for (Receiver = 0; Receiver<8; Receiver+=2) {
+			for (Receiver = 0; Receiver < 8; Receiver += 2) {
 				printk(BIOS_DEBUG, "\t\tReceiver: %02x: ", Receiver);
 				p = pDCTstat->CH_D_B_RCVRDLY[Channel][Receiver>>1];
-				for (i=0;i<8; i++) {
+				for (i = 0; i < 8; i++) {
 					val  = p[i];
 					printk(BIOS_DEBUG, "%02x ", val);
 				}
@@ -494,7 +494,7 @@ static void dqsTrainRcvrEn_SW(struct MCTStatStruc *pMCTstat,
 
 u8 mct_InitReceiver_D(struct DCTStatStruc *pDCTstat, u8 dct)
 {
-	if (pDCTstat->DIMMValidDCT[dct] == 0 ) {
+	if (pDCTstat->DIMMValidDCT[dct] == 0) {
 		return 8;
 	} else {
 		return 0;
@@ -526,7 +526,7 @@ static void mct_DisableDQSRcvEn_D(struct DCTStatStruc *pDCTstat)
 		ch_end = 2;
 	}
 
-	for (ch=0; ch<ch_end; ch++) {
+	for (ch = 0; ch < ch_end; ch++) {
 		reg = 0x78 + 0x100 * ch;
 		val = Get_NB32(dev, reg);
 		val &= ~(1 << DqsRcvEnTrain);
@@ -562,14 +562,14 @@ void mct_SetRcvrEnDly_D(struct DCTStatStruc *pDCTstat, u8 RcvrEnDly,
 	/* DimmOffset not needed for CH_D_B_RCVRDLY array */
 
 
-	for (i=0; i < 8; i++) {
+	for (i = 0; i < 8; i++) {
 		if (FinalValue) {
 			/*calculate dimm offset */
 			p = pDCTstat->CH_D_B_RCVRDLY[Channel][Receiver >> 1];
 			RcvrEnDly = p[i];
 		}
 
-		/* if flag=0, set DqsRcvEn value to reg. */
+		/* if flag = 0, set DqsRcvEn value to reg. */
 		/* get the register index from table */
 		index = Table_DQSRcvEn_Offset[i >> 1];
 		index += Addl_Index;	/* DIMMx DqsRcvEn byte0 */
@@ -661,7 +661,7 @@ static void mct_SetMaxLatency_D(struct DCTStatStruc *pDCTstat, u8 Channel, u8 DQ
 	valx = (val + 3) << 2;
 
 	val = Get_NB32(pDCTstat->dev_nbmisc, 0xD4);
-	SubTotal *= ((val & 0x1f) + 4 ) * 3;
+	SubTotal *= ((val & 0x1f) + 4) * 3;
 
 	SubTotal /= valx;
 	if (SubTotal % valx) {	/* round up */
@@ -723,7 +723,7 @@ static u8 mct_SavePassRcvEnDly_D(struct DCTStatStruc *pDCTstat,
 			mask_Saved &= mask_Pass;
 			p = pDCTstat->CH_D_B_RCVRDLY[Channel][receiver>>1];
 		}
-		for (i=0; i < 8; i++) {
+		for (i = 0; i < 8; i++) {
 			/* cmp per byte lane */
 			if (mask_Pass & (1 << i)) {
 				if (!(mask_Saved & (1 << i))) {
@@ -757,7 +757,7 @@ static u8 mct_CompareTestPatternQW0_D(struct MCTStatStruc *pMCTstat,
 
 
 	if (Pass == FirstPass) {
-		if (pattern==1) {
+		if (pattern == 1) {
 			test_buf = (u8 *)TestPattern1_D;
 		} else {
 			test_buf = (u8 *)TestPattern0_D;
@@ -769,13 +769,13 @@ static u8 mct_CompareTestPatternQW0_D(struct MCTStatStruc *pMCTstat,
 	SetUpperFSbase(addr);
 	addr <<= 8;
 
-	if ((pDCTstat->Status & (1<<SB_128bitmode)) && channel ) {
+	if ((pDCTstat->Status & (1<<SB_128bitmode)) && channel) {
 		addr += 8;	/* second channel */
 		test_buf += 8;
 	}
 
 	print_debug_dqs_pair("\t\t\t\t\t\t  test_buf = ", (u32)test_buf, "  |  addr_lo = ", addr,  4);
-	for (i=0; i<8; i++) {
+	for (i = 0; i < 8; i++) {
 		value = read32_fs(addr);
 		print_debug_dqs_pair("\t\t\t\t\t\t\t\t ", test_buf[i], "  |  ", value, 4);
 
@@ -790,7 +790,7 @@ static u8 mct_CompareTestPatternQW0_D(struct MCTStatStruc *pMCTstat,
 
 	if (Pass == FirstPass) {
 		/* if first pass, at least one byte lane pass
-		 * ,then DQS_PASS=1 and will set to related reg.
+		 * ,then DQS_PASS = 1 and will set to related reg.
 		 */
 		if (pDCTstat->DqsRcvEn_Pass != 0) {
 			result = DQS_PASS;
@@ -800,7 +800,7 @@ static u8 mct_CompareTestPatternQW0_D(struct MCTStatStruc *pMCTstat,
 
 	} else {
 		/* if second pass, at least one byte lane fail
-		 * ,then DQS_FAIL=1 and will set to related reg.
+		 * ,then DQS_FAIL = 1 and will set to related reg.
 		 */
 		if (pDCTstat->DqsRcvEn_Pass != 0xFF) {
 			result = DQS_FAIL;
@@ -843,7 +843,7 @@ static void mct_InitDQSPos4RcvrEn_D(struct MCTStatStruc *pMCTstat,
 	 * Read Position is 1/2 Memclock Delay
 	 */
 	u8 i;
-	for (i=0;i<2; i++){
+	for (i = 0; i < 2; i++) {
 		InitDQSPos4RcvrEn_D(pMCTstat, pDCTstat, i);
 	}
 }
@@ -867,8 +867,8 @@ static void InitDQSPos4RcvrEn_D(struct MCTStatStruc *pMCTstat,
 
 	// FIXME: add Cx support
 	dword = 0x00000000;
-	for (i=1; i<=3; i++) {
-		for (j=0; j<dn; j++)
+	for (i = 1; i <= 3; i++) {
+		for (j = 0; j < dn; j++)
 			/* DIMM0 Write Data Timing Low */
 			/* DIMM0 Write ECC Timing */
 			Set_NB32_index_wait(dev, index_reg, i + 0x100 * j, dword);
@@ -876,14 +876,14 @@ static void InitDQSPos4RcvrEn_D(struct MCTStatStruc *pMCTstat,
 
 	/* errata #180 */
 	dword = 0x2f2f2f2f;
-	for (i=5; i<=6; i++) {
-		for (j=0; j<dn; j++)
+	for (i = 5; i <= 6; i++) {
+		for (j = 0; j < dn; j++)
 			/* DIMM0 Read DQS Timing Control Low */
 			Set_NB32_index_wait(dev, index_reg, i + 0x100 * j, dword);
 	}
 
 	dword = 0x0000002f;
-	for (j=0; j<dn; j++)
+	for (j = 0; j < dn; j++)
 		/* DIMM0 Read DQS ECC Timing Control */
 		Set_NB32_index_wait(dev, index_reg, 7 + 0x100 * j, dword);
 }
@@ -969,7 +969,7 @@ void mctSetEccDQSRcvrEn_D(struct MCTStatStruc *pMCTstat,
 		if (!pDCTstat->NodePresent)
 			break;
 		if (pDCTstat->DCTSysLimit) {
-			for (i=0; i<2; i++)
+			for (i = 0; i < 2; i++)
 				CalcEccDQSRcvrEn_D(pMCTstat, pDCTstat, i);
 		}
 	}
@@ -1081,5 +1081,5 @@ void mct_Wait(u32 cycles)
 	saved = lo;
 	do {
 		_RDMSR(msr, &lo, &hi);
-	} while (lo - saved < cycles );
+	} while (lo - saved < cycles);
 }
diff --git a/src/northbridge/amd/amdmct/mct/mctsrc1p.c b/src/northbridge/amd/amdmct/mct/mctsrc1p.c
index e059e1e..c1b1133 100644
--- a/src/northbridge/amd/amdmct/mct/mctsrc1p.c
+++ b/src/northbridge/amd/amdmct/mct/mctsrc1p.c
@@ -50,7 +50,7 @@ static u8 mct_Average_RcvrEnDly_1Pass(struct DCTStatStruc *pDCTstat, u8 Channel,
 	MaxValue = 0;
 	p = pDCTstat->CH_D_B_RCVRDLY[Channel][Receiver >> 1];
 
-	for (i=0; i < 8; i++) {
+	for (i = 0; i < 8; i++) {
 		/* get left value from DCTStatStruc.CHA_D0_B0_RCVRDLY*/
 		val = p[i];
 		/* get right value from DCTStatStruc.CHA_D0_B0_RCVRDLY_1*/
diff --git a/src/northbridge/amd/amdmct/mct/mctsrc2p.c b/src/northbridge/amd/amdmct/mct/mctsrc2p.c
index bd3c503..c7c92ac 100644
--- a/src/northbridge/amd/amdmct/mct/mctsrc2p.c
+++ b/src/northbridge/amd/amdmct/mct/mctsrc2p.c
@@ -62,7 +62,7 @@ u8 mct_Get_Start_RcvrEnDly_Pass(struct DCTStatStruc *pDCTstat,
 		bn = 8;
 //		print_tx("mct_Get_Start_RcvrEnDly_Pass: Channel:", Channel);
 //		print_tx("mct_Get_Start_RcvrEnDly_Pass: Receiver:", Receiver);
-		for ( i=0;i<bn; i++) {
+		for (i = 0; i < bn; i++) {
 			val  = p[i];
 //			print_tx("mct_Get_Start_RcvrEnDly_Pass: i:", i);
 //			print_tx("mct_Get_Start_RcvrEnDly_Pass: val:", val);
@@ -100,7 +100,7 @@ u8 mct_Average_RcvrEnDly_Pass(struct DCTStatStruc *pDCTstat,
 		//FIXME: which byte?
 		p_1 = pDCTstat->B_RCVRDLY_1;
 //		p_1 = pDCTstat->CH_D_B_RCVRDLY_1[Channel][Receiver>>1];
-		for (i=0; i<bn; i++) {
+		for (i = 0; i < bn; i++) {
 			val = p[i];
 			/* left edge */
 			if (val != (RcvrEnDlyLimit - 1)) {
@@ -120,7 +120,7 @@ u8 mct_Average_RcvrEnDly_Pass(struct DCTStatStruc *pDCTstat,
 			pDCTstat->DimmTrainFail &= ~(1<<(Receiver + Channel));
 		}
 	} else {
-		for (i=0; i < bn; i++) {
+		for (i = 0; i < bn; i++) {
 			val = p[i];
 			/* Add 1/2 Memlock delay */
 			//val += Pass1MemClkDly;
diff --git a/src/northbridge/amd/amdmct/mct/mcttmrl.c b/src/northbridge/amd/amdmct/mct/mcttmrl.c
index 0eb3c61..1095259 100644
--- a/src/northbridge/amd/amdmct/mct/mcttmrl.c
+++ b/src/northbridge/amd/amdmct/mct/mcttmrl.c
@@ -149,7 +149,7 @@ static void maxRdLatencyTrain_D(struct MCTStatStruc *pMCTstat,
 		print_debug_dqs("\tMaxRdLatencyTrain51: Channel ",Channel, 1);
 		pDCTstat->Channel = Channel;
 
-		if ( (pDCTstat->Status & (1 << SB_128bitmode)) && Channel)
+		if ((pDCTstat->Status & (1 << SB_128bitmode)) && Channel)
 			break;		/*if ganged mode, skip DCT 1 */
 
 		TestAddr0 = GetMaxRdLatTestAddr_D(pMCTstat, pDCTstat, Channel, &RcvrEnDly,	 &valid);
@@ -164,7 +164,7 @@ static void maxRdLatencyTrain_D(struct MCTStatStruc *pMCTstat,
 		while (MaxRdLatDly < MAX_RD_LAT) {	/* sweep Delay value here */
 			mct_setMaxRdLatTrnVal_D(pDCTstat, Channel, MaxRdLatDly);
 			ReadMaxRdLat1CLTestPattern_D(TestAddr0);
-			if ( CompareMaxRdLatTestPattern_D(pattern_buf, TestAddr0) == DQS_PASS)
+			if (CompareMaxRdLatTestPattern_D(pattern_buf, TestAddr0) == DQS_PASS)
 				break;
 			SetTargetWTIO_D(TestAddr0);
 			FlushMaxRdLatTestPattern_D(TestAddr0);
@@ -185,7 +185,7 @@ static void maxRdLatencyTrain_D(struct MCTStatStruc *pMCTstat,
 		lo &= ~(1<<17);	/* restore HWCR.wrap32dis */
 		_WRMSR(addr, lo, hi);
 	}
-	if (!_SSE2){
+	if (!_SSE2) {
 		cr4 = read_cr4();
 		cr4 &= ~(1<<9);	/* restore cr4.OSFXSR */
 		write_cr4(cr4);
@@ -195,7 +195,7 @@ static void maxRdLatencyTrain_D(struct MCTStatStruc *pMCTstat,
 	{
 		u8 Channel;
 		printk(BIOS_DEBUG, "maxRdLatencyTrain: CH_MaxRdLat:\n");
-		for (Channel = 0; Channel<2; Channel++) {
+		for (Channel = 0; Channel < 2; Channel++) {
 			printk(BIOS_DEBUG, "Channel: %02x: %02x\n", Channel, pDCTstat->CH_MaxRdLat[Channel]);
 		}
 	}
@@ -213,7 +213,7 @@ static void mct_setMaxRdLatTrnVal_D(struct DCTStatStruc *pDCTstat,
 
 	if (pDCTstat->GangedMode) {
 		Channel = 0; // for safe
-		for (i=0; i<2; i++)
+		for (i = 0; i < 2; i++)
 			pDCTstat->CH_MaxRdLat[i] = MaxRdLatVal;
 	} else {
 		pDCTstat->CH_MaxRdLat[Channel] = MaxRdLatVal;
@@ -223,7 +223,7 @@ static void mct_setMaxRdLatTrnVal_D(struct DCTStatStruc *pDCTstat,
 	reg = 0x78 + Channel * 0x100;
 	val = Get_NB32(dev, reg);
 	val &= ~(0x3ff<<22);
-	val |= MaxRdLatVal<<22;
+	val |= MaxRdLatVal << 22;
 	/* program MaxRdLatency to correspond with current delay */
 	Set_NB32(dev, reg, val);
 
@@ -244,10 +244,10 @@ static u8 CompareMaxRdLatTestPattern_D(u32 pattern_buf, u32 addr)
 	u8 ret = DQS_PASS;
 
 	SetUpperFSbase(addr);
-	addr_lo = addr<<8;
+	addr_lo = addr << 8;
 
 	_EXECFENCE;
-	for (i=0; i<(16*3); i++) {
+	for (i = 0; i < (16*3); i++) {
 		val = read32_fs(addr_lo);
 		val_test = test_buf[i];
 
@@ -292,11 +292,11 @@ static u32 GetMaxRdLatTestAddr_D(struct MCTStatStruc *pMCTstat,
 	*valid = 0;
 
 	for (ch = ch_start; ch < ch_end; ch++) {
-		for (d=0; d<4; d++) {
-			for (Byte = 0; Byte<bn; Byte++) {
+		for (d = 0; d < 4; d++) {
+			for (Byte = 0; Byte < bn; Byte++) {
 				u8 tmp;
 				tmp = pDCTstat->CH_D_B_RCVRDLY[ch][d][Byte];
-				if (tmp>Max) {
+				if (tmp > Max) {
 					Max = tmp;
 					Channel_Max = Channel;
 					d_Max = d;
@@ -382,7 +382,7 @@ u8 mct_GetStartMaxRdLat_D(struct MCTStatStruc *pMCTstat,
 
 
 	val = Get_NB32(pDCTstat->dev_nbmisc, 0xD4);
-	val = ((val & 0x1f) + 4 ) * 3;
+	val = ((val & 0x1f) + 4) * 3;
 
 	/* Calculate 1 MemClk + 1 NCLK delay in NCLKs for margin */
 	valxx = val << 2;
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mct_d.c b/src/northbridge/amd/amdmct/mct_ddr3/mct_d.c
index 08d8d43..da2a4fe 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/mct_d.c
+++ b/src/northbridge/amd/amdmct/mct_ddr3/mct_d.c
@@ -2625,7 +2625,7 @@ static void mctAutoInitMCT_D(struct MCTStatStruc *pMCTstat,
 	 * 1. BSP in Big Real Mode
 	 * 2. Stack at SS:SP, located somewhere between A000:0000 and F000:FFFF
 	 * 3. Checksummed or Valid NVRAM bits
-	 * 4. MCG_CTL=-1, MC4_CTL_EN=0 for all CPUs
+	 * 4. MCG_CTL = -1, MC4_CTL_EN = 0 for all CPUs
 	 * 5. MCi_STS from shutdown/warm reset recorded (if desired) prior to entry
 	 * 6. All var MTRRs reset to zero
 	 * 7. State of NB_CFG.DisDatMsk set properly on all CPUs
@@ -3819,7 +3819,7 @@ static void LoadDQSSigTmgRegs_D(struct MCTStatStruc *pMCTstat,
 					}
 				}
 			}
-			for (Channel = 0; Channel<2; Channel++) {
+			for (Channel = 0; Channel < 2; Channel++) {
 				SetEccDQSRcvrEn_D(pDCTstat, Channel);
 			}
 
@@ -3859,7 +3859,7 @@ static void LoadDQSSigTmgRegs_D(struct MCTStatStruc *pMCTstat,
 				}
 			}
 
-			for (Channel = 0; Channel<2; Channel++) {
+			for (Channel = 0; Channel < 2; Channel++) {
 				reg = 0x78;
 				val = Get_NB32_DCT(dev, Channel, reg);
 				val &= ~(0x3ff<<22);
@@ -3993,7 +3993,7 @@ static void HTMemMapInit_D(struct MCTStatStruc *pMCTstat,
 				val = Get_NB32(dev, reg);
 				Set_NB32(devx, reg, val);
 				reg += 4;
-			} while ( reg < 0x80);
+			} while (reg < 0x80);
 		} else {
 			break;			/* stop at first absent Node */
 		}
@@ -4015,7 +4015,7 @@ static void MCTMemClr_D(struct MCTStatStruc *pMCTstat,
 	uint32_t dword;
 	struct DCTStatStruc *pDCTstat;
 
-	if (!mctGet_NVbits(NV_DQSTrainCTL)){
+	if (!mctGet_NVbits(NV_DQSTrainCTL)) {
 		/* FIXME: callback to wrapper: mctDoWarmResetMemClr_D */
 	} else {	/* NV_DQSTrainCTL == 1 */
 		for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
@@ -4080,7 +4080,7 @@ static void DCTMemClr_Sync_D(struct MCTStatStruc *pMCTstat,
 	printk(BIOS_DEBUG, "%s: Start\n", __func__);
 
 	/* Ensure that a memory clear operation has completed on one node */
-	if (pDCTstat->DCTSysLimit){
+	if (pDCTstat->DCTSysLimit) {
 		printk(BIOS_DEBUG, "%s: Waiting for memory clear to complete", __func__);
 		do {
 			dword = Get_NB32(dev, 0x110);
@@ -4223,7 +4223,7 @@ static void DCTFinalInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *p
 		dword &= ~(1 << ParEn);
 		Set_NB32_DCT(pDCTstat->dev_dct, dct, 0x90, dword);
 
-		/* To maximize power savings when DisDramInterface=1b,
+		/* To maximize power savings when DisDramInterface = 1b,
 		 * all of the MemClkDis bits should also be set.
 		 */
 		Set_NB32_DCT(pDCTstat->dev_dct, dct, 0x88, 0xff000000);
@@ -4369,16 +4369,16 @@ static void SPD2ndTiming(struct MCTStatStruc *pMCTstat,
 	Trc = 0;
 	Twr = 0;
 	Twtr = 0;
-	for (i=0; i < 2; i++)
+	for (i = 0; i < 2; i++)
 		Etr[i] = 0;
-	for (i=0; i < 4; i++)
+	for (i = 0; i < 4; i++)
 		Trfc[i] = 0;
 	Tfaw = 0;
 
-	for ( i = 0; i< MAX_DIMMS_SUPPORTED; i++) {
+	for (i = 0; i < MAX_DIMMS_SUPPORTED; i++) {
 		LDIMM = i >> 1;
 		if (pDCTstat->DIMMValid & (1 << i)) {
-			val = pDCTstat->spd_data.spd_bytes[dct + i][SPD_MTBDivisor];	/* MTB=Dividend/Divisor */
+			val = pDCTstat->spd_data.spd_bytes[dct + i][SPD_MTBDivisor];	/* MTB = Dividend/Divisor */
 			MTB16x = ((pDCTstat->spd_data.spd_bytes[dct + i][SPD_MTBDividend] & 0xff) << 4);
 			MTB16x /= val; /* transfer to MTB*16 */
 
@@ -4574,7 +4574,7 @@ static void SPD2ndTiming(struct MCTStatStruc *pMCTstat,
 	pDCTstat->Twtr = val;
 
 	/* Trfc0-Trfc3 */
-	for (i=0; i<4; i++)
+	for (i = 0; i < 4; i++)
 		pDCTstat->Trfc[i] = Trfc[i];
 
 	/* Tfaw */
@@ -4647,7 +4647,7 @@ static void SPD2ndTiming(struct MCTStatStruc *pMCTstat,
 		Set_NB32_DCT(dev, dct, 0x204, dword);				/* DRAM Timing 1 */
 
 		/* Trfc0-Trfc3 */
-		for (i=0; i<4; i++)
+		for (i = 0; i < 4; i++)
 			if (pDCTstat->Trfc[i] == 0x0)
 				pDCTstat->Trfc[i] = 0x1;
 		dword = Get_NB32_DCT(dev, dct, 0x208);				/* DRAM Timing 2 */
@@ -4714,7 +4714,7 @@ static void SPD2ndTiming(struct MCTStatStruc *pMCTstat,
 		DramTimingHi |= val<<16;
 
 		val = 0;
-		for (i=4;i>0;i--) {
+		for (i = 4; i > 0; i--) {
 			val <<= 3;
 			val |= Trfc[i-1];
 		}
@@ -4850,7 +4850,7 @@ static void GetPresetmaxF_D(struct MCTStatStruc *pMCTstat,
 		proposedFreq = 800;	 /* Rev F0 programmable max memclock is */
 
 	/*Get User defined limit if  "limit" mode */
-	if ( mctGet_NVbits(NV_MCTUSRTMGMODE) == 1) {
+	if (mctGet_NVbits(NV_MCTUSRTMGMODE) == 1) {
 		word = Get_Fk_D(mctGet_NVbits(NV_MemCkVal) + 1);
 		if (word < proposedFreq)
 			proposedFreq = word;
@@ -4984,7 +4984,7 @@ static void SPDGetTCL_D(struct MCTStatStruc *pMCTstat,
 		   determine the desired CAS Latency. If tCKproposed is not a standard JEDEC
 		   value (2.5, 1.875, 1.5, or 1.25 ns) then tCKproposed must be adjusted to the
 		   next lower standard tCK value for calculating CLdesired.
-		   CLdesired = ceiling ( tAAmin(all) / tCKproposed )
+		   CLdesired = ceiling (tAAmin(all) / tCKproposed)
 		   where tAAmin is defined in Byte 16. The ceiling function requires that the
 		   quotient be rounded up always. */
 		CLdesired = tAAmin16x / tCKproposed16x;
@@ -5163,7 +5163,7 @@ static u8 AutoConfig_D(struct MCTStatStruc *pMCTstat,
 
 	if (mctGet_NVbits(NV_ECC_CAP))
 		if (Status & (1 << SB_ECCDIMMs))
-			if ( mctGet_NVbits(NV_ECC))
+			if (mctGet_NVbits(NV_ECC))
 				DramConfigLo |= 1 << DimmEcEn;
 
 	DramConfigLo = mct_DisDllShutdownSR(pMCTstat, pDCTstat, DramConfigLo, dct);
@@ -5210,7 +5210,7 @@ static u8 AutoConfig_D(struct MCTStatStruc *pMCTstat,
 			DramConfigHi |= 1 << 18;	/* R4 (4-Rank Registered DIMMs) */
 	}
 
-	if (0) /* call back not needed mctOverrideDcqBypMax_D ) */
+	if (0) /* call back not needed mctOverrideDcqBypMax_D) */
 		val = mctGet_NVbits(NV_BYPMAX);
 	else
 		val = 0x0f; /* recommended setting (default) */
@@ -5224,7 +5224,7 @@ static u8 AutoConfig_D(struct MCTStatStruc *pMCTstat,
 	 1. We will assume that MemClkDis field has been preset prior to this
 	    point.
 	 2. We will only set MemClkDis bits if a DIMM is NOT present AND if:
-	    NV_AllMemClks <>0 AND SB_DiagClks ==0 */
+	    NV_AllMemClks <>0 AND SB_DiagClks == 0 */
 
 	/* Dram Timing Low (owns Clock Enable bits) */
 	DramTimingLo = Get_NB32_DCT(dev, dct, 0x88);
@@ -5253,7 +5253,7 @@ static u8 AutoConfig_D(struct MCTStatStruc *pMCTstat,
 			dword = 0;
 			byte = 0xFF;
 			while (dword < MAX_CS_SUPPORTED) {
-				if (pDCTstat->CSPresent & (1<<dword)){
+				if (pDCTstat->CSPresent & (1<<dword)) {
 					/* re-enable clocks for the enabled CS */
 					val = p[dword];
 					byte &= ~val;
@@ -5330,11 +5330,11 @@ static void SPDSetBanks_D(struct MCTStatStruc *pMCTstat,
 
 		if (pDCTstat->DIMMValid & (1<<byte)) {
 			byte = pDCTstat->spd_data.spd_bytes[ChipSel + dct][SPD_Addressing];
-			Rows = (byte >> 3) & 0x7; /* Rows:0b=12-bit,... */
-			Cols = byte & 0x7; /* Cols:0b=9-bit,... */
+			Rows = (byte >> 3) & 0x7; /* Rows:0b = 12-bit,... */
+			Cols = byte & 0x7; /* Cols:0b = 9-bit,... */
 
 			byte = pDCTstat->spd_data.spd_bytes[ChipSel + dct][SPD_Density];
-			Banks = (byte >> 4) & 7; /* Banks:0b=3-bit,... */
+			Banks = (byte >> 4) & 7; /* Banks:0b = 3-bit,... */
 
 			byte = pDCTstat->spd_data.spd_bytes[ChipSel + dct][SPD_Organization];
 			Ranks = ((byte >> 3) & 7) + 1;
@@ -5351,7 +5351,7 @@ static void SPDSetBanks_D(struct MCTStatStruc *pMCTstat,
 
 			byte |= Rows << 3;	/* RRRBCC internal encode */
 
-			for (dword=0; dword < 13; dword++) {
+			for (dword = 0; dword < 13; dword++) {
 				if (byte == Tab_BankAddr[dword])
 					break;
 			}
@@ -5367,7 +5367,7 @@ static void SPDSetBanks_D(struct MCTStatStruc *pMCTstat,
 			   or 2pow(rows+cols+banks-5)-1*/
 			csMask = 0;
 
-			byte = Rows + Cols;		/* cl=rows+cols*/
+			byte = Rows + Cols;		/* cl = rows+cols*/
 			byte += 21;			/* row:12+col:9 */
 			byte -= 2;			/* 3 banks - 5 */
 
@@ -5435,7 +5435,7 @@ static void SPDCalcWidth_D(struct MCTStatStruc *pMCTstat,
 
 	/* Check Symmetry of Channel A and Channel B DIMMs
 	  (must be matched for 128-bit mode).*/
-	for (i=0; i < MAX_DIMMS_SUPPORTED; i += 2) {
+	for (i = 0; i < MAX_DIMMS_SUPPORTED; i += 2) {
 		if ((pDCTstat->DIMMValid & (1 << i)) && (pDCTstat->DIMMValid & (1<<(i+1)))) {
 			byte = pDCTstat->spd_data.spd_bytes[i][SPD_Addressing] & 0x7;
 			byte1 = pDCTstat->spd_data.spd_bytes[i + 1][SPD_Addressing] & 0x7;
@@ -5498,7 +5498,7 @@ static void StitchMemory_D(struct MCTStatStruc *pMCTstat,
 
 	_DSpareEn = 0;
 
-	/* CS Sparing 1=enabled, 0=disabled */
+	/* CS Sparing 1 = enabled, 0 = disabled */
 	if (mctGet_NVbits(NV_CS_SpareCTL) & 1) {
 		if (MCT_DIMM_SPARE_NO_WARM) {
 			/* Do no warm-reset DIMM spare */
@@ -5513,7 +5513,7 @@ static void StitchMemory_D(struct MCTStatStruc *pMCTstat,
 					pDCTstat->ErrStatus |= 1 << SB_SpareDis;
 			}
 		} else {
-			if (!mctGet_NVbits(NV_DQSTrainCTL)) { /*DQS Training 1=enabled, 0=disabled */
+			if (!mctGet_NVbits(NV_DQSTrainCTL)) { /*DQS Training 1 = enabled, 0 = disabled */
 				word = pDCTstat->CSPresent;
 				val = bsf(word);
 				word &= ~(1 << val);
@@ -5527,13 +5527,13 @@ static void StitchMemory_D(struct MCTStatStruc *pMCTstat,
 	}
 
 	nxtcsBase = 0;		/* Next available cs base ADDR[39:8] */
-	for (p=0; p < MAX_DIMMS_SUPPORTED; p++) {
+	for (p = 0; p < MAX_DIMMS_SUPPORTED; p++) {
 		BiggestBank = 0;
 		for (q = 0; q < MAX_CS_SUPPORTED; q++) { /* from DIMMS to CS */
 			if (pDCTstat->CSPresent & (1 << q)) {  /* bank present? */
 				reg  = 0x40 + (q << 2);  /* Base[q] reg.*/
 				val = Get_NB32_DCT(dev, dct, reg);
-				if (!(val & 3)) {	/* (CSEnable|Spare==1)bank is enabled already? */
+				if (!(val & 3)) {	/* (CSEnable|Spare == 1)bank is enabled already? */
 					reg = 0x60 + (q << 1); /*Mask[q] reg.*/
 					val = Get_NB32_DCT(dev, dct, reg);
 					val >>= 19;
@@ -5549,7 +5549,7 @@ static void StitchMemory_D(struct MCTStatStruc *pMCTstat,
 			}	/*if bank present */
 		}	/* while q */
 		if (BiggestBank !=0) {
-			curcsBase = nxtcsBase;		/* curcsBase=nxtcsBase*/
+			curcsBase = nxtcsBase;		/* curcsBase = nxtcsBase*/
 			/* DRAM CS Base b Address Register offset */
 			reg = 0x40 + (b << 2);
 			if (_DSpareEn) {
@@ -5579,7 +5579,7 @@ static void StitchMemory_D(struct MCTStatStruc *pMCTstat,
 		}
 
 		/* bank present but disabled?*/
-		if ( pDCTstat->CSTestFail & (1 << p)) {
+		if (pDCTstat->CSTestFail & (1 << p)) {
 			/* DRAM CS Base b Address Register offset */
 			reg = (p << 2) + 0x40;
 			val = 1 << TestFail;
@@ -5611,12 +5611,12 @@ static u8 DIMMPresence_D(struct MCTStatStruc *pMCTstat,
 	/* Check DIMMs present, verify checksum, flag SDRAM type,
 	 * build population indicator bitmaps, and preload bus loading
 	 * of DIMMs into DCTStatStruc.
-	 * MAAload=number of devices on the "A" bus.
-	 * MABload=number of devices on the "B" bus.
-	 * MAAdimms=number of DIMMs on the "A" bus slots.
-	 * MABdimms=number of DIMMs on the "B" bus slots.
-	 * DATAAload=number of ranks on the "A" bus slots.
-	 * DATABload=number of ranks on the "B" bus slots.
+	 * MAAload = number of devices on the "A" bus.
+	 * MABload = number of devices on the "B" bus.
+	 * MAAdimms = number of DIMMs on the "A" bus slots.
+	 * MABdimms = number of DIMMs on the "B" bus slots.
+	 * DATAAload = number of ranks on the "A" bus slots.
+	 * DATABload = number of ranks on the "B" bus slots.
 	 */
 	u16 i, j, k;
 	u8 smbaddr;
@@ -5747,7 +5747,7 @@ static u8 DIMMPresence_D(struct MCTStatStruc *pMCTstat,
 				byte &= 7;
 				if (byte == 3) { /* 4ranks */
 					/* if any DIMMs are QR, we have to make two passes through DIMMs*/
-					if ( pDCTstat->DimmQRPresent == 0) {
+					if (pDCTstat->DimmQRPresent == 0) {
 						MaxDimms <<= 1;
 					}
 					if (i < DimmSlots) {
@@ -5767,7 +5767,7 @@ static u8 DIMMPresence_D(struct MCTStatStruc *pMCTstat,
 				else if (devwidth == 2)
 					bytex = 4;
 
-				byte++;		/* al+1=rank# */
+				byte++;		/* al+1 = rank# */
 				if (byte == 2)
 					bytex <<= 1;	/*double Addr bus load value for dual rank DIMMs*/
 
@@ -5847,7 +5847,7 @@ static u8 DIMMPresence_D(struct MCTStatStruc *pMCTstat,
 			}
 		}
 		if (pDCTstat->DimmECCPresent != 0) {
-			if ((pDCTstat->DimmECCPresent ^ pDCTstat->DIMMValid )== 0) {
+			if ((pDCTstat->DimmECCPresent ^ pDCTstat->DIMMValid) == 0) {
 				/* all DIMMs are ECC capable */
 				pDCTstat->Status |= 1<<SB_ECCDIMMs;
 			}
@@ -5961,7 +5961,7 @@ static void mct_initDCT(struct MCTStatStruc *pMCTstat,
 				val &= ~(1 << ParEn);
 				Set_NB32_DCT(pDCTstat->dev_dct, 1, 0x90, val);
 
-				/* To maximize power savings when DisDramInterface=1b,
+				/* To maximize power savings when DisDramInterface = 1b,
 				 * all of the MemClkDis bits should also be set.
 				 */
 				Set_NB32_DCT(pDCTstat->dev_dct, 1, 0x88, 0xff000000);
@@ -6119,7 +6119,7 @@ static u8 mct_PlatformSpec(struct MCTStatStruc *pMCTstat,
 		i_start = dct;
 		i_end = dct + 1;
 	}
-	for (i=i_start; i<i_end; i++) {
+	for (i = i_start; i < i_end; i++) {
 		index_reg = 0x98;
 		Set_NB32_index_wait_DCT(dev, i, index_reg, 0x00, pDCTstat->CH_ODC_CTL[i]);	/* Channel A/B Output Driver Compensation Control */
 		Set_NB32_index_wait_DCT(dev, i, index_reg, 0x04, pDCTstat->CH_ADDR_TMG[i]);	/* Channel A/B Output Driver Compensation Control */
@@ -6152,7 +6152,7 @@ static void mct_AfterGetCLT(struct MCTStatStruc *pMCTstat,
 				struct DCTStatStruc *pDCTstat, u8 dct)
 {
 	if (!pDCTstat->GangedMode) {
-		if (dct == 0 ) {
+		if (dct == 0) {
 			pDCTstat->DIMMValid = pDCTstat->DIMMValidDCT[dct];
 			if (pDCTstat->DIMMValidDCT[dct] == 0)
 				pDCTstat->ErrCode = SC_StopError;
@@ -6172,7 +6172,7 @@ static u8 mct_SPDCalcWidth(struct MCTStatStruc *pMCTstat,
 	u8 ret;
 	u32 val;
 
-	if ( dct == 0) {
+	if (dct == 0) {
 		SPDCalcWidth_D(pMCTstat, pDCTstat);
 		ret = mct_setMode(pMCTstat, pDCTstat);
 	} else {
@@ -6313,7 +6313,7 @@ static void mct_OtherTiming(struct MCTStatStruc *pMCTstat,
 				pDCTstat->DIMMValid = pDCTstat->DIMMValidDCT[0];
 				Set_OtherTiming(pMCTstat, pDCTstat, 0);
 			}
-			if (pDCTstat->DIMMValidDCT[1] && !pDCTstat->GangedMode ) {
+			if (pDCTstat->DIMMValidDCT[1] && !pDCTstat->GangedMode) {
 				pDCTstat->DIMMValid = pDCTstat->DIMMValidDCT[1];
 				Set_OtherTiming(pMCTstat, pDCTstat, 1);
 			}
@@ -6568,8 +6568,8 @@ static u16 Get_DqsRcvEnGross_MaxMin(struct DCTStatStruc *pDCTstat,
 	if (index == 0x12)
 		ecc_reg = 1;
 
-	for (i=0; i < 8; i+=2) {
-		if ( pDCTstat->DIMMValid & (1 << i)) {
+	for (i = 0; i < 8; i+=2) {
+		if (pDCTstat->DIMMValid & (1 << i)) {
 			val = Get_NB32_index_wait_DCT(dev, dct, index_reg, index);
 			val &= 0x00E000E0;
 			byte = (val >> 5) & 0xFF;
@@ -6607,11 +6607,11 @@ static u16 Get_WrDatGross_MaxMin(struct DCTStatStruc *pDCTstat,
 
 	Smallest = 3;
 	Largest = 0;
-	for (i=0; i < 2; i++) {
+	for (i = 0; i < 2; i++) {
 		val = Get_NB32_index_wait_DCT(dev, dct, index_reg, index);
 		val &= 0x60606060;
 		val >>= 5;
-		for (j=0; j < 4; j++) {
+		for (j = 0; j < 4; j++) {
 			byte = val & 0xFF;
 			if (byte < Smallest)
 				Smallest = byte;
@@ -6774,7 +6774,7 @@ static void mct_FinalMCT_D(struct MCTStatStruc *pMCTstat,
 	/* ClrClToNB_D postponed until we're done executing from ROM */
 	mct_ClrWbEnhWsbDis_D(pMCTstat, pDCTstat);
 
-	/* set F3x8C[DisFastTprWr] on all DR, if L3Size=0 */
+	/* set F3x8C[DisFastTprWr] on all DR, if L3Size = 0 */
 	if (pDCTstat->LogicalCPUID & AMD_DR_ALL) {
 		if (!(cpuid_edx(0x80000006) & 0xFFFC0000)) {
 			val = Get_NB32(pDCTstat->dev_nbmisc, 0x8C);
@@ -6948,7 +6948,7 @@ static void mct_HTMemMapExt(struct MCTStatStruc *pMCTstat,
 
 	/* Copy dram map from F1x40/44,F1x48/4c,
 	  to F1x120/124(Node0),F1x120/124(Node1),...*/
-	for (Node=0; Node < MAX_NODES_SUPPORTED; Node++) {
+	for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
 		pDCTstat = pDCTstatA + Node;
 		devx = pDCTstat->dev_map;
 
@@ -6975,7 +6975,7 @@ static void mct_HTMemMapExt(struct MCTStatStruc *pMCTstat,
 			val |= Dramlimit;
 			Set_NB32(devx, reg, val);
 
-			if ( pMCTstat->GStatus & ( 1 << GSB_HWHole)) {
+			if (pMCTstat->GStatus & (1 << GSB_HWHole)) {
 				reg = 0xF0;
 				val = Get_NB32(devx, reg);
 				val |= (1 << DramMemHoistValid);
@@ -7328,7 +7328,7 @@ static void InitPhyCompensation(struct MCTStatStruc *pMCTstat,
 	} else {
 		dword = Get_NB32_index_wait_DCT(dev, dct, index_reg, 0x00);
 		dword = 0;
-		for (i=0; i < 6; i++) {
+		for (i = 0; i < 6; i++) {
 			switch (i) {
 				case 0:
 				case 4:
@@ -7668,7 +7668,7 @@ static void mct_ProgramODT_D(struct MCTStatStruc *pMCTstat,
 			dword = 0x00000800;
 		else
 			dword = 0x00000000;
-		for (i=0; i < 2; i++) {
+		for (i = 0; i < 2; i++) {
 			Set_NB32_DCT(dev, i, 0x98, 0x0D000030);
 			Set_NB32_DCT(dev, i, 0x9C, dword);
 			Set_NB32_DCT(dev, i, 0x98, 0x4D040F30);
@@ -7958,11 +7958,11 @@ void ProgDramMRSReg_D(struct MCTStatStruc *pMCTstat,
 			DramMRS |= mct_DramTermDyn_RDimm(pMCTstat, pDCTstat, byte);
 		}
 
-		/* Qoff=0, output buffers enabled */
+		/* Qoff = 0, output buffers enabled */
 		/* Tcwl */
 		DramMRS |= (pDCTstat->Speed - 4) << 20;
-		/* ASR=1, auto self refresh */
-		/* SRT=0 */
+		/* ASR = 1, auto self refresh */
+		/* SRT = 0 */
 		DramMRS |= 1 << 18;
 	}
 
@@ -7989,10 +7989,10 @@ void mct_SetDramConfigHi_D(struct MCTStatStruc *pMCTstat,
 	 * Solution: From the bug report:
 	 *  1. A software-initiated frequency change should be wrapped into the
 	 *     following sequence :
-	 * 	- a) Disable Compensation (F2[1, 0]9C_x08[30] )
+	 * 	- a) Disable Compensation (F2[1, 0]9C_x08[30])
 	 * 	b) Reset the Begin Compensation bit (D3CMP->COMP_CONFIG[0]) in all the compensation engines
 	 * 	c) Do frequency change
-	 * 	d) Enable Compensation (F2[1, 0]9C_x08[30] )
+	 * 	d) Enable Compensation (F2[1, 0]9C_x08[30])
 	 *  2. A software-initiated Disable Compensation should always be
 	 *     followed by step b) of the above steps.
 	 * Silicon Status: Fixed In Rev B0
@@ -8275,9 +8275,9 @@ static void AfterDramInit_D(struct DCTStatStruc *pDCTstat, u8 dct) {
 
 /* ==========================================================
  *  6-bit Bank Addressing Table
- *  RR=rows-13 binary
- *  B=Banks-2 binary
- *  CCC=Columns-9 binary
+ *  RR = rows-13 binary
+ *  B = Banks-2 binary
+ *  CCC = Columns-9 binary
  * ==========================================================
  *  DCT	CCCBRR	Rows	Banks	Columns	64-bit CS Size
  *  Encoding
@@ -8311,7 +8311,7 @@ uint8_t crcCheck(struct DCTStatStruc *pDCTstat, uint8_t dimm)
 	for (Index = 0; Index < byte_use; Index ++) {
 		byte = pDCTstat->spd_data.spd_bytes[dimm][Index];
 		CRC ^= byte << 8;
-		for (i=0; i<8; i++) {
+		for (i = 0; i < 8; i++) {
 			if (CRC & 0x8000) {
 				CRC <<= 1;
 				CRC ^= 0x1021;
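
For reference, the loop reformatted above is the JEDEC DDR3 SPD checksum: a CRC-16 with polynomial 0x1021 and initial value 0, run over the first 117 or 126 SPD bytes (selected by bit 7 of SPD byte 0) and compared against SPD bytes 126/127. A minimal standalone sketch with illustrative names (not the tree's own):

	#include <stdint.h>
	#include <stddef.h>

	static uint16_t spd_crc16(const uint8_t *spd, size_t len)
	{
		uint16_t crc = 0;
		size_t i;
		int bit;

		for (i = 0; i < len; i++) {
			crc ^= (uint16_t)spd[i] << 8;
			for (bit = 0; bit < 8; bit++) {
				if (crc & 0x8000)
					crc = (crc << 1) ^ 0x1021;
				else
					crc <<= 1;
			}
		}
		return crc;	/* low byte lives in SPD byte 126, high byte in 127 */
	}
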
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mct_d.h b/src/northbridge/amd/amdmct/mct_ddr3/mct_d.h
index e1d9da5..c42e452 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/mct_d.h
+++ b/src/northbridge/amd/amdmct/mct_ddr3/mct_d.h
@@ -33,16 +33,16 @@
 #define PT_C3		5
 #define PT_FM2		6
 
-#define J_MIN		0		/* j loop constraint. 1=CL 2.0 T*/
-#define J_MAX		5		/* j loop constraint. 5=CL 7.0 T*/
-#define K_MIN		1		/* k loop constraint. 1=200 MHz*/
-#define K_MAX		5		/* k loop constraint. 5=533 MHz*/
-#define CL_DEF		2		/* Default value for failsafe operation. 2=CL 4.0 T*/
-#define T_DEF		1		/* Default value for failsafe operation. 1=5ns (cycle time)*/
-
-#define BSCRate	1		/* reg bit field=rate of dram scrubber for ecc*/
+#define J_MIN		0		/* j loop constraint. 1 = CL 2.0 T*/
+#define J_MAX		5		/* j loop constraint. 5 = CL 7.0 T*/
+#define K_MIN		1		/* k loop constraint. 1 = 200 MHz*/
+#define K_MAX		5		/* k loop constraint. 5 = 533 MHz*/
+#define CL_DEF		2		/* Default value for failsafe operation. 2 = CL 4.0 T*/
+#define T_DEF		1		/* Default value for failsafe operation. 1 = 5ns (cycle time)*/
+
+#define BSCRate	1		/* reg bit field = rate of dram scrubber for ecc*/
 					/* memory initialization (ecc and check-bits).*/
-					/* 1=40 ns/64 bytes.*/
+					/* 1 = 40 ns/64 bytes.*/
 #define FirstPass	1		/* First pass through RcvEn training*/
 #define SecondPass	2		/* Second pass through Rcven training*/
 
@@ -336,7 +336,7 @@ struct DCTStatStruc {		/* A per Node structure*/
 /* DCTStatStruct_F -  start */
 	u8 Node_ID;			/* Node ID of current controller */
 	uint8_t Internal_Node_ID;	/* Internal Node ID of the current controller */
-	uint8_t Dual_Node_Package;	/* 1=Dual node package (G34) */
+	uint8_t Dual_Node_Package;	/* 1 = Dual node package (G34) */
 	uint8_t stopDCT[2];		/* Set if the DCT will be stopped */
 	u8 ErrCode;			/* Current error condition of Node
 		0= no error
@@ -353,7 +353,7 @@ struct DCTStatStruc {		/* A per Node structure*/
 		/* SPD address of..MB2_CS_L[0,1]*/
 		/* SPD address of..MA3_CS_L[0,1]*/
 		/* SPD address of..MB3_CS_L[0,1]*/
-	u16 DIMMPresent;		/*For each bit n 0..7, 1=DIMM n is present.
+	u16 DIMMPresent;		/*For each bit n 0..7, 1 = DIMM n is present.
 		DIMM#  Select Signal
 		0  MA0_CS_L[0,1]
 		1  MB0_CS_L[0,1]
@@ -363,15 +363,15 @@ struct DCTStatStruc {		/* A per Node structure*/
 		5  MB2_CS_L[0,1]
 		6  MA3_CS_L[0,1]
 		7  MB3_CS_L[0,1]*/
-	u16 DIMMValid;		/* For each bit n 0..7, 1=DIMM n is valid and is/will be configured*/
-	u16 DIMMMismatch;	/* For each bit n 0..7, 1=DIMM n is mismatched, channel B is always considered the mismatch */
-	u16 DIMMSPDCSE;		/* For each bit n 0..7, 1=DIMM n SPD checksum error*/
-	u16 DimmECCPresent;	/* For each bit n 0..7, 1=DIMM n is ECC capable.*/
-	u16 DimmPARPresent;	/* For each bit n 0..7, 1=DIMM n is ADR/CMD Parity capable.*/
-	u16 Dimmx4Present;	/* For each bit n 0..7, 1=DIMM n contains x4 data devices.*/
-	u16 Dimmx8Present;	/* For each bit n 0..7, 1=DIMM n contains x8 data devices.*/
-	u16 Dimmx16Present;	/* For each bit n 0..7, 1=DIMM n contains x16 data devices.*/
-	u16 DIMM2Kpage;		/* For each bit n 0..7, 1=DIMM n contains 1K page devices.*/
+	u16 DIMMValid;		/* For each bit n 0..7, 1 = DIMM n is valid and is/will be configured*/
+	u16 DIMMMismatch;	/* For each bit n 0..7, 1 = DIMM n is mismatched, channel B is always considered the mismatch */
+	u16 DIMMSPDCSE;		/* For each bit n 0..7, 1 = DIMM n SPD checksum error*/
+	u16 DimmECCPresent;	/* For each bit n 0..7, 1 = DIMM n is ECC capable.*/
+	u16 DimmPARPresent;	/* For each bit n 0..7, 1 = DIMM n is ADR/CMD Parity capable.*/
+	u16 Dimmx4Present;	/* For each bit n 0..7, 1 = DIMM n contains x4 data devices.*/
+	u16 Dimmx8Present;	/* For each bit n 0..7, 1 = DIMM n contains x8 data devices.*/
+	u16 Dimmx16Present;	/* For each bit n 0..7, 1 = DIMM n contains x16 data devices.*/
+	u16 DIMM2Kpage;		/* For each bit n 0..7, 1 = DIMM n contains 1K page devices.*/
 	u8 MAload[2];		/* Number of devices loading MAA bus*/
 		/* Number of devices loading MAB bus*/
 	u8 MAdimms[2];		/*Number of DIMMs loading CH A*/
@@ -379,17 +379,17 @@ struct DCTStatStruc {		/* A per Node structure*/
 	u8 DATAload[2];		/*Number of ranks loading CH A DATA*/
 		/* Number of ranks loading CH B DATA*/
 	u8 DIMMAutoSpeed;	/*Max valid Mfg. Speed of DIMMs
-		1=200MHz
-		2=266MHz
-		3=333MHz
-		4=400MHz
-		5=533MHz*/
+		1 = 200MHz
+		2 = 266MHz
+		3 = 333MHz
+		4 = 400MHz
+		5 = 533MHz*/
 	u8 DIMMCASL;		/* Min valid Mfg. CL bitfield
-		0=2.0
-		1=3.0
-		2=4.0
-		3=5.0
-		4=6.0 */
+		0 = 2.0
+		1 = 3.0
+		2 = 4.0
+		3 = 5.0
+		4 = 6.0 */
 	u16 DIMMTrcd;		/* Minimax Trcd*40 (ns) of DIMMs*/
 	u16 DIMMTrp;		/* Minimax Trp*40 (ns) of DIMMs*/
 	u16 DIMMTrtp;		/* Minimax Trtp*40 (ns) of DIMMs*/
@@ -399,16 +399,16 @@ struct DCTStatStruc {		/* A per Node structure*/
 	u16 DIMMTrrd;		/* Minimax Trrd*40 (ns) of DIMMs*/
 	u16 DIMMTwtr;		/* Minimax Twtr*40 (ns) of DIMMs*/
 	u8 Speed;		/* Bus Speed (to set Controller)
-		1=200MHz
-		2=266MHz
-		3=333MHz
-		4=400MHz */
+		1 = 200MHz
+		2 = 266MHz
+		3 = 333MHz
+		4 = 400MHz */
 	u8 CASL;		/* CAS latency DCT setting
-		0=2.0
-		1=3.0
-		2=4.0
-		3=5.0
-		4=6.0 */
+		0 = 2.0
+		1 = 3.0
+		2 = 4.0
+		3 = 5.0
+		4 = 6.0 */
 	u8 Trcd;		/* DCT Trcd (busclocks) */
 	u8 Trp;			/* DCT Trp (busclocks) */
 	u8 Trtp;		/* DCT Trtp (busclocks) */
@@ -418,27 +418,27 @@ struct DCTStatStruc {		/* A per Node structure*/
 	u8 Trrd;		/* DCT Trrd (busclocks) */
 	u8 Twtr;		/* DCT Twtr (busclocks) */
 	u8 Trfc[4];		/* DCT Logical DIMM0 Trfc
-		0=75ns (for 256Mb devs)
-		1=105ns (for 512Mb devs)
-		2=127.5ns (for 1Gb devs)
-		3=195ns (for 2Gb devs)
-		4=327.5ns (for 4Gb devs) */
+		0 = 75ns (for 256Mb devs)
+		1 = 105ns (for 512Mb devs)
+		2 = 127.5ns (for 1Gb devs)
+		3 = 195ns (for 2Gb devs)
+		4 = 327.5ns (for 4Gb devs) */
 		/* DCT Logical DIMM1 Trfc (see Trfc0 for format) */
 		/* DCT Logical DIMM2 Trfc (see Trfc0 for format) */
 		/* DCT Logical DIMM3 Trfc (see Trfc0 for format) */
-	u16 CSPresent;		/* For each bit n 0..7, 1=Chip-select n is present */
-	u16 CSTestFail;		/* For each bit n 0..7, 1=Chip-select n is present but disabled */
+	u16 CSPresent;		/* For each bit n 0..7, 1 = Chip-select n is present */
+	u16 CSTestFail;		/* For each bit n 0..7, 1 = Chip-select n is present but disabled */
 	u32 DCTSysBase;		/* BASE[39:8] (system address) of this Node's DCTs. */
 	u32 DCTHoleBase;	/* If not zero, BASE[39:8] (system address) of dram hole for HW remapping.  Dram hole exists on this Node's DCTs. */
 	u32 DCTSysLimit;	/* LIMIT[39:8] (system address) of this Node's DCTs */
 	u16 PresetmaxFreq;	/* Maximum OEM defined DDR frequency
-		200=200MHz (DDR400)
-		266=266MHz (DDR533)
-		333=333MHz (DDR667)
-		400=400MHz (DDR800) */
+		200 = 200MHz (DDR400)
+		266 = 266MHz (DDR533)
+		333 = 333MHz (DDR667)
+		400 = 400MHz (DDR800) */
 	u8 _2Tmode;		/* 1T or 2T CMD mode (slow access mode)
-		1=1T
-		2=2T */
+		1 = 1T
+		2 = 2T */
 	u8 TrwtTO;		/* DCT TrwtTO (busclocks)*/
 	u8 Twrrd;		/* DCT Twrrd (busclocks)*/
 	u8 Twrwr;		/* DCT Twrwr (busclocks)*/
@@ -462,9 +462,9 @@ struct DCTStatStruc {		/* A per Node structure*/
 		/* CHB Byte 0-7 Read DQS Delay */
 	u32 PtrPatternBufA;	/* Ptr on stack to aligned DQS testing pattern*/
 	u32 PtrPatternBufB;	/* Ptr on stack to aligned DQS testing pattern*/
-	u8 Channel;		/* Current Channel (0= CH A, 1=CH B)*/
+	u8 Channel;		/* Current Channel (0= CH A, 1 = CH B)*/
 	u8 ByteLane;		/* Current Byte Lane (0..7)*/
-	u8 Direction;		/* Current DQS-DQ training write direction (0=read, 1=write)*/
+	u8 Direction;		/* Current DQS-DQ training write direction (0 = read, 1 = write)*/
 	u8 Pattern;		/* Current pattern*/
 	u8 DQSDelay;		/* Current DQS delay value*/
 	u32 TrainErrors;	/* Current Training Errors*/
@@ -545,15 +545,15 @@ struct DCTStatStruc {		/* A per Node structure*/
 	u8 WrDatGrossH;
 	u8 DqsRcvEnGrossL;
 	/* NOTE: Not used - u8 NodeSpeed */		/* Bus Speed (to set Controller) */
-		/* 1=200MHz */
-		/* 2=266MHz */
-		/* 3=333MHz */
+		/* 1 = 200MHz */
+		/* 2 = 266MHz */
+		/* 3 = 333MHz */
 	/* NOTE: Not used - u8 NodeCASL	*/	/* CAS latency DCT setting */
-		/* 0=2.0 */
-		/* 1=3.0 */
-		/* 2=4.0 */
-		/* 3=5.0 */
-		/* 4=6.0 */
+		/* 0 = 2.0 */
+		/* 1 = 3.0 */
+		/* 2 = 4.0 */
+		/* 3 = 5.0 */
+		/* 4 = 6.0 */
 	u8 TrwtWB;
 	u8 CurrRcvrCHADelay;	/* for keep current RcvrEnDly of chA*/
 	u16 T1000;		/* get the T1000 figure (cycle time (ns)*1K)*/
@@ -852,7 +852,7 @@ struct amd_s3_persistent_data {
 #define SB_SWNodeHole		8	/* Remapping of Node Base on this Node to create a gap.*/
 #define SB_HWHole		9	/* Memory Hole created on this Node using HW remapping.*/
 #define SB_Over400MHz		10	/* DCT freq >= 400MHz flag*/
-#define SB_DQSPos_Pass2		11	/* Using for TrainDQSPos DIMM0/1, when freq>=400MHz*/
+#define SB_DQSPos_Pass2		11	/* Using for TrainDQSPos DIMM0/1, when freq >= 400MHz*/
 #define SB_DQSRcvLimit		12	/* Using for DQSRcvEnTrain to know we have reached to upper bound.*/
 #define SB_ExtConfig		13	/* Indicator the default setting for extend PCI configuration support*/
 
@@ -862,73 +862,73 @@ struct amd_s3_persistent_data {
 ===============================================================================*/
 /*Platform Configuration*/
 #define NV_PACK_TYPE		0	/* CPU Package Type (2-bits)
-					    0=NPT L1
-					    1=NPT M2
-					    2=NPT S1*/
+					    0 = NPT L1
+					    1 = NPT M2
+					    2 = NPT S1*/
 #define NV_MAX_NODES		1	/* Number of Nodes/Sockets (4-bits)*/
 #define NV_MAX_DIMMS		2	/* Number of DIMM slots for the specified Node ID (4-bits)*/
 #define NV_MAX_MEMCLK		3	/* Maximum platform demonstrated Memclock (10-bits)
-					    200=200MHz (DDR400)
-					    266=266MHz (DDR533)
-					    333=333MHz (DDR667)
-					    400=400MHz (DDR800)*/
+					    200 = 200MHz (DDR400)
+					    266 = 266MHz (DDR533)
+					    333 = 333MHz (DDR667)
+					    400 = 400MHz (DDR800)*/
 #define NV_MIN_MEMCLK		4	/* Minimum platform demonstrated Memclock (10-bits) */
 #define NV_ECC_CAP		5	/* Bus ECC capable (1-bits)
-					    0=Platform not capable
-					    1=Platform is capable*/
+					    0 = Platform not capable
+					    1 = Platform is capable*/
 #define NV_4RANKType		6	/* Quad Rank DIMM slot type (2-bits)
-					    0=Normal
-					    1=R4 (4-Rank Registered DIMMs in AMD server configuration)
-					    2=S4 (Unbuffered SO-DIMMs)*/
+					    0 = Normal
+					    1 = R4 (4-Rank Registered DIMMs in AMD server configuration)
+					    2 = S4 (Unbuffered SO-DIMMs)*/
 #define NV_BYPMAX		7	/* Value to set DcqBypassMax field (See Function 2, Offset 94h, [27:24] of BKDG for field definition).
-					    4=4 times bypass (normal for non-UMA systems)
-					    7=7 times bypass (normal for UMA systems)*/
+					    4 = 4 times bypass (normal for non-UMA systems)
+					    7 = 7 times bypass (normal for UMA systems)*/
 #define NV_RDWRQBYP		8	/* Value to set RdWrQByp field (See Function 2, Offset A0h, [3:2] of BKDG for field definition).
-					    2=8 times (normal for non-UMA systems)
-					    3=16 times (normal for UMA systems)*/
+					    2 = 8 times (normal for non-UMA systems)
+					    3 = 16 times (normal for UMA systems)*/
 
 
 /*Dram Timing*/
 #define NV_MCTUSRTMGMODE	10	/* User Memclock Mode (2-bits)
-					    0=Auto, no user limit
-					    1=Auto, user limit provided in NV_MemCkVal
-					    2=Manual, user value provided in NV_MemCkVal*/
+					    0 = Auto, no user limit
+					    1 = Auto, user limit provided in NV_MemCkVal
+					    2 = Manual, user value provided in NV_MemCkVal*/
 #define NV_MemCkVal		11	/* Memory Clock Value (2-bits)
-					    0=200MHz
-					    1=266MHz
-					    2=333MHz
-					    3=400MHz*/
+					    0 = 200MHz
+					    1 = 266MHz
+					    2 = 333MHz
+					    3 = 400MHz*/
 
 /*Dram Configuration*/
 #define NV_BankIntlv		20	/* Dram Bank (chip-select) Interleaving (1-bits)
-					    0=disable
-					    1=enable*/
+					    0 = disable
+					    1 = enable*/
 #define NV_AllMemClks		21	/* Turn on All DIMM clocks (1-bits)
-					    0=normal
-					    1=enable all memclocks*/
+					    0 = normal
+					    1 = enable all memclocks*/
 #define NV_SPDCHK_RESTRT	22	/* SPD Check control bitmap (1-bits)
-					    0=Exit current node init if any DIMM has SPD checksum error
-					    1=Ignore faulty SPD checksums (Note: DIMM cannot be enabled)*/
+					    0 = Exit current node init if any DIMM has SPD checksum error
+					    1 = Ignore faulty SPD checksums (Note: DIMM cannot be enabled)*/
 #define NV_DQSTrainCTL		23	/* DQS Signal Timing Training Control
-					    0=skip DQS training
-					    1=perform DQS training*/
+					    0 = skip DQS training
+					    1 = perform DQS training*/
 #define NV_NodeIntlv		24	/* Node Memory Interleaving (1-bits)
-					    0=disable
-					    1=enable*/
+					    0 = disable
+					    1 = enable*/
 #define NV_BurstLen32		25	/* BurstLength32 for 64-bit mode (1-bits)
-					    0=disable (normal)
-					    1=enable (4 beat burst when width is 64-bits)*/
+					    0 = disable (normal)
+					    1 = enable (4 beat burst when width is 64-bits)*/
 
 /*Dram Power*/
 #define NV_CKE_PDEN		30	/* CKE based power down mode (1-bits)
-					    0=disable
-					    1=enable*/
+					    0 = disable
+					    1 = enable*/
 #define NV_CKE_CTL		31	/* CKE based power down control (1-bits)
-					    0=per Channel control
-					    1=per Chip select control*/
+					    0 = per Channel control
+					    1 = per Chip select control*/
 #define NV_CLKHZAltVidC3	32	/* Memclock tri-stating during C3 and Alt VID (1-bits)
-					    0=disable
-					    1=enable*/
+					    0 = disable
+					    1 = enable*/
 
 /*Memory Map/Mgt.*/
 #define NV_BottomIO		40	/* Bottom of 32-bit IO space (8-bits)
@@ -936,8 +936,8 @@ struct amd_s3_persistent_data {
 #define NV_BottomUMA		41	/* Bottom of shared graphics dram (8-bits)
 					    NV_BottomUMA[7:0]=Addr[31:24]*/
 #define NV_MemHole		42	/* Memory Hole Remapping (1-bits)
-					    0=disable
-					    1=enable  */
+					    0 = disable
+					    1 = enable  */
 
 /*ECC*/
 #define NV_ECC			50	/* Dram ECC enable*/
@@ -949,13 +949,13 @@ struct amd_s3_persistent_data {
 #define NV_L3BKScrub		57	/* L3 ECC Background Scrubber CTL*/
 #define NV_DCBKScrub		58	/* DCache ECC Background Scrubber CTL*/
 #define NV_CS_SpareCTL		59	/* Chip Select Spare Control bit 0:
-					       0=disable Spare
-					       1=enable Spare */
+					       0 = disable Spare
+					       1 = enable Spare */
 					/* Chip Select Spare Control bit 1-4:
 					     Reserved, must be zero*/
 #define NV_SyncOnUnEccEn	61	/* SyncOnUnEccEn control
-					   0=disable
-					   1=enable*/
+					   0 = disable
+					   1 = enable*/
 #define NV_Unganged		62
 
 #define NV_ChannelIntlv	63	/* Channel Interleaving (3-bits)
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mct_d_gcc.h b/src/northbridge/amd/amdmct/mct_ddr3/mct_d_gcc.h
index a7fac8f..74fadde 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/mct_d_gcc.h
+++ b/src/northbridge/amd/amdmct/mct_ddr3/mct_d_gcc.h
@@ -37,7 +37,7 @@ static inline void _RDTSC(u32 *lo, u32 *hi)
 	__asm__ volatile (
 		 "rdtsc"
 		 : "=a" (*lo), "=d"(*hi)
-		 );
+		);
 }
 
 static inline void _cpu_id(u32 addr, u32 *val)
@@ -57,7 +57,7 @@ static u32 bsr(u32 x)
 	u8 i;
 	u32 ret = 0;
 
-	for (i=31; i>0; i--) {
+	for (i = 31; i > 0; i--) {
 		if (x & (1<<i)) {
 			ret = i;
 			break;
@@ -73,7 +73,7 @@ static u32 bsf(u32 x)
 	u8 i;
 	u32 ret = 32;
 
-	for (i=0; i<32; i++) {
+	for (i = 0; i < 32; i++) {
 		if (x & (1<<i)) {
 			ret = i;
 			break;
@@ -83,9 +83,9 @@ static u32 bsf(u32 x)
 	return ret;
 }
 
-#define _MFENCE asm volatile ( "mfence")
+#define _MFENCE asm volatile ("mfence")
 
-#define _SFENCE asm volatile ( "sfence" )
+#define _SFENCE asm volatile ("sfence")
 
 /* prevent speculative execution of following instructions */
 #define _EXECFENCE asm volatile ("outb %al, $0xed")
@@ -301,7 +301,7 @@ static u32 stream_to_int(u8 *p)
 
 	val = 0;
 
-	for (i=3; i>=0; i--) {
+	for (i = 3; i >= 0; i--) {
 		val <<= 8;
 		valx = *(p+i);
 		val |= valx;
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mctardk6.c b/src/northbridge/amd/amdmct/mct_ddr3/mctardk6.c
index 3f01308..d6480ab 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/mctardk6.c
+++ b/src/northbridge/amd/amdmct/mct_ddr3/mctardk6.c
@@ -50,7 +50,7 @@ void mctGet_PS_Cfg_D(struct MCTStatStruc *pMCTstat,
  *    : ODC_CTL    - Output Driver Compensation Control Register Value
  *    : CMDmode    - CMD mode
  */
-static void Get_ChannelPS_Cfg0_D( u8 MAAdimms, u8 Speed, u8 MAAload,
+static void Get_ChannelPS_Cfg0_D(u8 MAAdimms, u8 Speed, u8 MAAload,
 				u8 DATAAload, u32 *AddrTmgCTL, u32 *ODC_CTL,
 				u8 *CMDmode)
 {
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mctchi_d.c b/src/northbridge/amd/amdmct/mct_ddr3/mctchi_d.c
index 6c25f2c..30cf10e 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/mctchi_d.c
+++ b/src/northbridge/amd/amdmct/mct_ddr3/mctchi_d.c
@@ -33,8 +33,8 @@ void InterleaveChannels_D(struct MCTStatStruc *pMCTstat,
 
 	/* call back to wrapper not needed ManualChannelInterleave_D(); */
 	/* call back - DctSelIntLvAddr = mctGet_NVbits(NV_ChannelIntlv);*/	/* override interleave */
-	/* Manually set: typ=5, otherwise typ=7. */
-	DctSelIntLvAddr = mctGet_NVbits(NV_ChannelIntlv); /* typ=5: Hash*: exclusive OR of address bits[20:16, 6]. */
+	/* Manually set: typ = 5, otherwise typ = 7. */
+	DctSelIntLvAddr = mctGet_NVbits(NV_ChannelIntlv); /* typ = 5: Hash*: exclusive OR of address bits[20:16, 6]. */
 
 	if (DctSelIntLvAddr & 1) {
 		DctSelIntLvAddr >>= 1;
@@ -67,7 +67,7 @@ void InterleaveChannels_D(struct MCTStatStruc *pMCTstat,
 				if (dct1_size == dct0_size) {
 					dct1_size = 0;
 					DctSelHi = 0x04;	/* DctSelHiRngEn = 0 */
-				} else if (dct1_size > dct0_size ) {
+				} else if (dct1_size > dct0_size) {
 					dct1_size = dct0_size;
 					DctSelHi = 0x07;	/* DctSelHiRngEn = 1, DctSelHi = 1 */
 				}
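
The "Hash*" mode named in the comment above (NV_ChannelIntlv value 5) picks the DCT for each access by XORing address bits 20:16 with bit 6. A sketch of that select function as the comment describes it; this is an illustration, not code from the patch:

	#include <stdint.h>

	static unsigned int dct_select_hash(uint64_t addr)
	{
		unsigned int sel = (addr >> 6) & 1;
		unsigned int bit;

		for (bit = 16; bit <= 20; bit++)
			sel ^= (addr >> bit) & 1;

		return sel;	/* 0 = DCT0, 1 = DCT1 */
	}
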
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mctcsi_d.c b/src/northbridge/amd/amdmct/mct_ddr3/mctcsi_d.c
index 3f56765..e42a127 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/mctcsi_d.c
+++ b/src/northbridge/amd/amdmct/mct_ddr3/mctcsi_d.c
@@ -45,7 +45,7 @@ void InterleaveBanks_D(struct MCTStatStruc *pMCTstat,
 	while (DoIntlv && (ChipSel < MAX_CS_SUPPORTED)) {
 		reg = 0x40+(ChipSel<<2);	/* Dram CS Base 0 */
 		val = Get_NB32_DCT(dev, dct, reg);
-		if ( val & (1<<CSEnable)) {
+		if (val & (1<<CSEnable)) {
 			EnChipSels++;
 			reg = 0x60+((ChipSel>>1)<<2); /*Dram CS Mask 0 */
 			val = Get_NB32_DCT(dev, dct, reg);
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mctdqs_d.c b/src/northbridge/amd/amdmct/mct_ddr3/mctdqs_d.c
index 06a70e6..71a4b79 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/mctdqs_d.c
+++ b/src/northbridge/amd/amdmct/mct_ddr3/mctdqs_d.c
@@ -247,12 +247,12 @@ static void SetEccDQSRdWrPos_D_Fam10(struct MCTStatStruc *pMCTstat,
 	u8 channel;
 	u8 direction;
 
-	for (channel = 0; channel < 2; channel++){
+	for (channel = 0; channel < 2; channel++) {
 		for (direction = 0; direction < 2; direction++) {
 			pDCTstat->Channel = channel;	/* Channel A or B */
 			pDCTstat->Direction = direction; /* Read or write */
 			CalcEccDQSPos_D(pMCTstat, pDCTstat, pDCTstat->CH_EccDQSLike[channel], pDCTstat->CH_EccDQSScale[channel], ChipSel);
-			print_debug_dqs_pair("\t\tSetEccDQSRdWrPos: channel ", channel, direction==DQS_READDIR? " R dqs_delay":" W dqs_delay",	pDCTstat->DQSDelay, 2);
+			print_debug_dqs_pair("\t\tSetEccDQSRdWrPos: channel ", channel, direction == DQS_READDIR? " R dqs_delay":" W dqs_delay",	pDCTstat->DQSDelay, 2);
 			pDCTstat->ByteLane = 8;
 			StoreDQSDatStrucVal_D(pMCTstat, pDCTstat, ChipSel);
 			mct_SetDQSDelayCSR_D(pMCTstat, pDCTstat, ChipSel);
@@ -294,7 +294,7 @@ static void CalcEccDQSPos_D(struct MCTStatStruc *pMCTstat,
 		GetDQSDatStrucVal_D(pMCTstat, pDCTstat, ChipSel);
 		DQSDelay1 = pDCTstat->DQSDelay;
 
-		if (DQSDelay0>DQSDelay1) {
+		if (DQSDelay0 > DQSDelay1) {
 			DQSDelay = DQSDelay0 - DQSDelay1;
 		} else {
 			DQSDelay = DQSDelay1 - DQSDelay0;
@@ -306,7 +306,7 @@ static void CalcEccDQSPos_D(struct MCTStatStruc *pMCTstat,
 
 		DQSDelay >>= 8;		/* 256 */
 
-		if (DQSDelay0>DQSDelay1) {
+		if (DQSDelay0 > DQSDelay1) {
 			DQSDelay = DQSDelay1 - DQSDelay;
 		} else {
 			DQSDelay += DQSDelay1;
@@ -493,7 +493,7 @@ static void TrainDQSRdWrPos_D_Fam10(struct MCTStatStruc *pMCTstat,
 			}
 
 			print_debug_dqs("\t\t\t\tTrainDQSRdWrPos: 14 TestAddr ", TestAddr, 4);
-			SetUpperFSbase(TestAddr);	/* fs:eax=far ptr to target */
+			SetUpperFSbase(TestAddr);	/* fs:eax = far ptr to target */
 
 			print_debug_dqs("\t\t\t\tTrainDQSRdWrPos: 12 Receiver ", Receiver, 2);
 
@@ -556,7 +556,7 @@ static void TrainDQSRdWrPos_D_Fam10(struct MCTStatStruc *pMCTstat,
 						ResetTargetWTIO_D();
 
 						/* Read and compare pattern */
-						bytelane_test_results &= (CompareDQSTestPattern_D(pMCTstat, pDCTstat, TestAddr << 8) & 0xff); /* [Lane 7 :: Lane 0] 0=fail, 1=pass */
+						bytelane_test_results &= (CompareDQSTestPattern_D(pMCTstat, pDCTstat, TestAddr << 8) & 0xff); /* [Lane 7 :: Lane 0] 0 = fail, 1 = pass */
 
 						/* If all lanes have already failed testing bypass remaining re-read attempt(s) */
 						if (bytelane_test_results == 0x0)
@@ -650,7 +650,7 @@ static void TrainDQSRdWrPos_D_Fam10(struct MCTStatStruc *pMCTstat,
 				ResetTargetWTIO_D();
 
 				/* Read and compare pattern from the base test address */
-				bytelane_test_results = (CompareDQSTestPattern_D(pMCTstat, pDCTstat, TestAddr << 8) & 0xff); /* [Lane 7 :: Lane 0] 0=fail, 1=pass */
+				bytelane_test_results = (CompareDQSTestPattern_D(pMCTstat, pDCTstat, TestAddr << 8) & 0xff); /* [Lane 7 :: Lane 0] 0 = fail, 1 = pass */
 
 				/* Store any lanes that passed testing for later use */
 				for (lane = 0; lane < 8; lane++)
@@ -814,7 +814,7 @@ static void TrainDQSRdWrPos_D_Fam10(struct MCTStatStruc *pMCTstat,
 				for (ReceiverDTD = 0; ReceiverDTD < MAX_CS_SUPPORTED; ReceiverDTD += 2) {
 					printk(BIOS_DEBUG, "\t\tReceiver: %02x:", ReceiverDTD);
 					p = pDCTstat->CH_D_DIR_B_DQS[ChannelDTD][ReceiverDTD >> 1][Dir];
-					for (i=0;i<8; i++) {
+					for (i = 0; i < 8; i++) {
 						val  = p[i];
 						printk(BIOS_DEBUG, " %02x", val);
 					}
@@ -834,7 +834,7 @@ static void TrainDQSRdWrPos_D_Fam10(struct MCTStatStruc *pMCTstat,
 		lo &= ~(1<<17);		/* restore HWCR.wrap32dis */
 		_WRMSR(addr, lo, hi);
 	}
-	if (!_SSE2){
+	if (!_SSE2) {
 		cr4 = read_cr4();
 		cr4 &= ~(1<<9);		/* restore cr4.OSFXSR */
 		write_cr4(cr4);
@@ -1583,7 +1583,7 @@ static uint8_t TrainDQSRdWrPos_D_Fam15(struct MCTStatStruc *pMCTstat,
 					for (ReceiverDTD = 0; ReceiverDTD < MAX_CS_SUPPORTED; ReceiverDTD += 2) {
 						printk(BIOS_DEBUG, "\t\tReceiver: %02x:", ReceiverDTD);
 						p = pDCTstat->CH_D_DIR_B_DQS[ChannelDTD][ReceiverDTD >> 1][Dir];
-						for (i=0;i<8; i++) {
+						for (i = 0; i < 8; i++) {
 							val  = p[i];
 							printk(BIOS_DEBUG, " %02x", val);
 						}
@@ -1843,7 +1843,7 @@ static void TrainDQSReceiverEnCyc_D_Fam15(struct MCTStatStruc *pMCTstat,
 				for (ReceiverDTD = 0; ReceiverDTD < MAX_CS_SUPPORTED; ReceiverDTD += 2) {
 					printk(BIOS_DEBUG, "\t\tReceiver: %02x:", ReceiverDTD);
 					p = pDCTstat->CH_D_DIR_B_DQS[ChannelDTD][ReceiverDTD >> 1][Dir];
-					for (i=0;i<8; i++) {
+					for (i = 0; i < 8; i++) {
 						val  = p[i];
 						printk(BIOS_DEBUG, " %02x", val);
 					}
@@ -1863,7 +1863,7 @@ static void TrainDQSReceiverEnCyc_D_Fam15(struct MCTStatStruc *pMCTstat,
 		lo &= ~(1<<17);		/* restore HWCR.wrap32dis */
 		_WRMSR(addr, lo, hi);
 	}
-	if (!_SSE2){
+	if (!_SSE2) {
 		cr4 = read_cr4();
 		cr4 &= ~(1<<9);		/* restore cr4.OSFXSR */
 		write_cr4(cr4);
@@ -1890,11 +1890,11 @@ static void SetupDqsPattern_D(struct MCTStatStruc *pMCTstat,
 	buf = (u32 *)(((u32)buffer + 0x10) & (0xfffffff0));
 	if (pDCTstat->Status & (1<<SB_128bitmode)) {
 		pDCTstat->Pattern = 1;	/* 18 cache lines, alternating qwords */
-		for (i=0; i<16*18; i++)
+		for (i = 0; i < 16*18; i++)
 			buf[i] = TestPatternJD1b_D[i];
 	} else {
 		pDCTstat->Pattern = 0;	/* 9 cache lines, sequential qwords */
-		for (i=0; i<16*9; i++)
+		for (i = 0; i < 16*9; i++)
 			buf[i] = TestPatternJD1a_D[i];
 	}
 	pDCTstat->PtrPatternBufA = (u32)buf;
@@ -1966,10 +1966,10 @@ static u8 ChipSelPresent_D(struct MCTStatStruc *pMCTstat,
 	else
 		dct = 0;
 
-	if (ChipSel < MAX_CS_SUPPORTED){
+	if (ChipSel < MAX_CS_SUPPORTED) {
 		reg = 0x40 + (ChipSel << 2);
 		val = Get_NB32_DCT(dev, dct, reg);
-		if (val & ( 1 << 0))
+		if (val & (1 << 0))
 			ret = 1;
 	}
 
@@ -2058,7 +2058,7 @@ static u16 CompareDQSTestPattern_D(struct MCTStatStruc *pMCTstat, struct DCTStat
 	}
 
 	bytelane = 0;		/* bytelane counter */
-	bitmap = 0xFFFF;	/* bytelane test bitmap, 1=pass */
+	bitmap = 0xFFFF;	/* bytelane test bitmap, 1 = pass */
 	MEn1Results = 0xFFFF;
 	BeatCnt = 0;
 	for (i = 0; i < (9 * 64 / 4); i++) { /* sizeof testpattern. /4 due to next loop */
@@ -2102,7 +2102,7 @@ static u16 CompareDQSTestPattern_D(struct MCTStatStruc *pMCTstat, struct DCTStat
 		if (!bitmap)
 			break;
 
-		if (bytelane == 0){
+		if (bytelane == 0) {
 			BeatCnt += 4;
 			if (!(pDCTstat->Status & (1 << SB_128bitmode))) {
 				if (BeatCnt == 8) BeatCnt = 0; /* 8 beat burst */
@@ -2132,7 +2132,7 @@ static void FlushDQSTestPattern_D(struct DCTStatStruc *pDCTstat,
 					u32 addr_lo)
 {
 	/* Flush functions in mct_gcc.h */
-	if (pDCTstat->Pattern == 0){
+	if (pDCTstat->Pattern == 0) {
 		FlushDQSTestPattern_L9(addr_lo);
 	} else {
 		FlushDQSTestPattern_L18(addr_lo);
@@ -2349,9 +2349,9 @@ u32 mct_GetMCTSysAddr_D(struct MCTStatStruc *pMCTstat,
 
 	val &= ~0xe007c01f;
 
-	/* unganged mode DCT0+DCT1, sys addr of DCT1=node
+	/* unganged mode DCT0+DCT1, sys addr of DCT1 = node
 	 * base+DctSelBaseAddr+local ca base*/
-	if ((Channel) && (pDCTstat->GangedMode == 0) && ( pDCTstat->DIMMValidDCT[0] > 0)) {
+	if ((Channel) && (pDCTstat->GangedMode == 0) && (pDCTstat->DIMMValidDCT[0] > 0)) {
 		reg = 0x110;
 		dword = Get_NB32(dev, reg);
 		dword &= 0xfffff800;
@@ -2365,7 +2365,7 @@ u32 mct_GetMCTSysAddr_D(struct MCTStatStruc *pMCTstat,
 			val += dword;
 		}
 	} else {
-		/* sys addr=node base+local cs base */
+		/* sys addr = node base+local cs base */
 		val += pDCTstat->DCTSysBase;
 
 		/* New stuff */
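
CompareDQSTestPattern_D above builds a per-byte-lane pass/fail bitmap (1 = pass) as it walks the readback against the test pattern. A simplified sketch of that idea for the 64-bit (8 byte-lane) case, with hypothetical names and none of the 128-bit or MEn1Results handling:

	#include <stdint.h>
	#include <stddef.h>

	static uint8_t compare_lanes(const uint8_t *readback,
				     const uint8_t *pattern, size_t len)
	{
		uint8_t bitmap = 0xff;	/* assume all 8 lanes pass */
		size_t i;

		for (i = 0; i < len; i++)
			if (readback[i] != pattern[i])
				bitmap &= ~(1 << (i % 8));	/* lane = offset mod 8 on a 64-bit bus */

		return bitmap;
	}
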
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mctecc_d.c b/src/northbridge/amd/amdmct/mct_ddr3/mctecc_d.c
index 5d31849..ca36789 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/mctecc_d.c
+++ b/src/northbridge/amd/amdmct/mct_ddr3/mctecc_d.c
@@ -36,7 +36,7 @@ static u8 isDramECCEn_D(struct DCTStatStruc *pDCTstat);
  *
  * Conditions for setting background scrubber.
  *  1. node is present
- *  2. node has dram functioning (WE=RE=1)
+ *  2. node has dram functioning (WE = RE = 1)
  *  3. all eccdimms (or bit 17 of offset 90,fn 2)
  *  4. no chip-select gap exists
  *
@@ -152,10 +152,10 @@ u8 ECCInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA)
 			val = Get_NB32(dev, reg);
 
 			/* WE/RE is checked */
-			if ((val & 3)==3) {	/* Node has dram populated */
+			if ((val & 3) == 3) {	/* Node has dram populated */
 				/* Negate 'all nodes/dimms ECC' flag if non ecc
 				   memory populated */
-				if ( pDCTstat->Status & (1<<SB_ECCDIMMs)) {
+				if (pDCTstat->Status & (1 << SB_ECCDIMMs)) {
 					LDramECC = isDramECCEn_D(pDCTstat);
 					if (pDCTstat->ErrCode != SC_RunningOK) {
 						pDCTstat->Status &=  ~(1 << SB_ECCDIMMs);
@@ -195,9 +195,9 @@ u8 ECCInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA)
 	}
 
 	if (AllECC)
-		pMCTstat->GStatus |= 1<<GSB_ECCDIMMs;
+		pMCTstat->GStatus |= 1 << GSB_ECCDIMMs;
 	else
-		pMCTstat->GStatus &= ~(1<<GSB_ECCDIMMs);
+		pMCTstat->GStatus &= ~(1 << GSB_ECCDIMMs);
 
 	/* Program the Dram BKScrub CTL to the proper (user selected) value.*/
 	/* Reset MC4_STS. */
@@ -206,11 +206,11 @@ u8 ECCInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA)
 		pDCTstat = pDCTstatA + Node;
 		LDramECC = 0;
 		if (NodePresent_D(Node)) {	/* If Node is present */
-			reg = 0x40+(Node<<3);	/* Dram Base Node 0 + index */
+			reg = 0x40+(Node << 3);	/* Dram Base Node 0 + index */
 			val = Get_NB32(pDCTstat->dev_map, reg);
 			curBase = val & 0xffff0000;
 			/*WE/RE is checked because memory config may have been */
-			if ((val & 3)==3) {	/* Node has dram populated */
+			if ((val & 3) == 3) {	/* Node has dram populated */
 				if (isDramECCEn_D(pDCTstat)) {	/* if ECC is enabled on this dram */
 					dev = pDCTstat->dev_nbmisc;
 					val = curBase << 8;
@@ -322,16 +322,16 @@ static void setSyncOnUnEccEn_D(struct MCTStatStruc *pMCTstat,
 		struct DCTStatStruc *pDCTstat;
 		pDCTstat = pDCTstatA + Node;
 		if (NodePresent_D(Node)) {	/* If Node is present*/
-			reg = 0x40+(Node<<3);	/* Dram Base Node 0 + index*/
+			reg = 0x40+(Node << 3);	/* Dram Base Node 0 + index*/
 			val = Get_NB32(pDCTstat->dev_map, reg);
 			/*WE/RE is checked because memory config may have been*/
-			if ((val & 3)==3) {	/* Node has dram populated*/
-				if ( isDramECCEn_D(pDCTstat)) {
+			if ((val & 3) == 3) {	/* Node has dram populated*/
+				if (isDramECCEn_D(pDCTstat)) {
 					/*if ECC is enabled on this dram*/
 					dev = pDCTstat->dev_nbmisc;
 					reg = 0x44;	/* MCA NB Configuration*/
 					val = Get_NB32(dev, reg);
-					val |= (1<<SyncOnUcEccEn);
+					val |= (1 << SyncOnUcEccEn);
 					Set_NB32(dev, reg, val);
 				}
 			}	/* Node has Dram*/
@@ -353,11 +353,11 @@ static u8 isDramECCEn_D(struct DCTStatStruc *pDCTstat)
 	} else {
 		ch_end = 2;
 	}
-	for (i=0; i<ch_end; i++) {
-		if (pDCTstat->DIMMValidDCT[i] > 0){
+	for (i = 0; i < ch_end; i++) {
+		if (pDCTstat->DIMMValidDCT[i] > 0) {
 			reg = 0x90;		/* Dram Config Low */
 			val = Get_NB32_DCT(dev, i, reg);
-			if (val & (1<<DimmEcEn)) {
+			if (val & (1 << DimmEcEn)) {
 				/* set local flag 'dram ecc capable' */
 				isDimmECCEn = 1;
 				break;
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mctmtr_d.c b/src/northbridge/amd/amdmct/mct_ddr3/mctmtr_d.c
index 8ed2bef..558b3e3 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/mctmtr_d.c
+++ b/src/northbridge/amd/amdmct/mct_ddr3/mctmtr_d.c
@@ -34,11 +34,11 @@ void CPUMemTyping_D(struct MCTStatStruc *pMCTstat,
 
 	/* Set temporary top of memory from Node structure data.
 	 * Adjust temp top of memory down to accommodate 32-bit IO space.
-	 * Bottom40bIO=top of memory, right justified 8 bits
+	 * Bottom40bIO = top of memory, right justified 8 bits
 	 * 	(defines dram versus IO space type)
-	 * Bottom32bIO=sub 4GB top of memory, right justified 8 bits
+	 * Bottom32bIO = sub 4GB top of memory, right justified 8 bits
 	 * 	(defines dram versus IO space type)
-	 * Cache32bTOP=sub 4GB top of WB cacheable memory,
+	 * Cache32bTOP = sub 4GB top of WB cacheable memory,
 	 * 	right justified 8 bits
 	 */
 
@@ -82,8 +82,8 @@ void CPUMemTyping_D(struct MCTStatStruc *pMCTstat,
 		 */
 	addr = 0x204;	/* MTRR phys base 2*/
 			/* use TOP_MEM as limit*/
-			/* Limit=TOP_MEM|TOM2*/
-			/* Base=0*/
+			/* Limit = TOP_MEM|TOM2*/
+			/* Base = 0*/
 	printk(BIOS_DEBUG, "\t CPUMemTyping: Cache32bTOP:%x\n", Cache32bTOP);
 	SetMTRRrangeWB_D(0, &Cache32bTOP, &addr);
 				/* Base */
@@ -112,10 +112,10 @@ void CPUMemTyping_D(struct MCTStatStruc *pMCTstat,
 	addr = 0xC0010010;		/* SYS_CFG */
 	_RDMSR(addr, &lo, &hi);
 	if (Bottom40bIO) {
-		lo |= (1<<21);		/* MtrrTom2En=1 */
+		lo |= (1<<21);		/* MtrrTom2En = 1 */
 		lo |= (1<<22);		/* Tom2ForceMemTypeWB */
 	} else {
-		lo &= ~(1<<21);		/* MtrrTom2En=0 */
+		lo &= ~(1<<21);		/* MtrrTom2En = 0 */
 		lo &= ~(1<<22);		/* Tom2ForceMemTypeWB */
 	}
 	_WRMSR(addr, lo, hi);
@@ -146,7 +146,7 @@ static void SetMTRRrange_D(u32 Base, u32 *pLimit, u32 *pMtrrAddr, u16 MtrrType)
 	 * next set bit in a forward or backward sequence of bits (as a function
 	 * of the Limit). We start with the ascending path, to ensure that
 	 * regions are naturally aligned, then we switch to the descending path
-	 * to maximize MTRR usage efficiency. Base=0 is a special case where we
+	 * to maximize MTRR usage efficiency. Base = 0 is a special case where we
 	 * start with the descending path. Correct Mask for region is
 	 * 2comp(Size-1)-1, which is 2comp(Limit-Base-1)-1
 	 */
@@ -172,17 +172,17 @@ static void SetMTRRrange_D(u32 Base, u32 *pLimit, u32 *pMtrrAddr, u16 MtrrType)
 			curSize = valx;
 			valx += curBase;
 		}
-		curLimit = valx;		/*eax=curBase, edx=curLimit*/
+		curLimit = valx;		/*eax = curBase, edx = curLimit*/
 		valx = val>>24;
 		val <<= 8;
 
 		/* now program the MTRR */
 		val |= MtrrType;		/* set cache type (UC or WB)*/
 		_WRMSR(addr, val, valx);	/* prog. MTRR with current region Base*/
-		val = ((~(curSize - 1))+1) - 1;	/* Size-1*/ /*Mask=2comp(Size-1)-1*/
+		val = ((~(curSize - 1))+1) - 1;	/* Size-1*/ /*Mask = 2comp(Size-1)-1*/
 		valx = (val >> 24) | (0xff00);	/* GH have 48 bits addr */
 		val <<= 8;
-		val |= ( 1 << 11);			/* set MTRR valid*/
+		val |= (1 << 11);			/* set MTRR valid*/
 		addr++;
 		_WRMSR(addr, val, valx);	/* prog. MTRR with current region Mask*/
 		val = curLimit;
@@ -213,9 +213,9 @@ void UMAMemTyping_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat
 	/*======================================================================
 	 * Adjust temp top of memory down to accommodate UMA memory start
 	 *======================================================================*/
-	/* Bottom32bIO=sub 4GB top of memory, right justified 8 bits
+	/* Bottom32bIO = sub 4GB top of memory, right justified 8 bits
 	 * (defines dram versus IO space type)
-	 * Cache32bTOP=sub 4GB top of WB cacheable memory, right justified 8 bits */
+	 * Cache32bTOP = sub 4GB top of WB cacheable memory, right justified 8 bits */
 
 	Bottom32bIO = pMCTstat->Sub4GCacheTop >> 8;
 
@@ -234,7 +234,7 @@ void UMAMemTyping_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat
 		addr = 0x200;
 		lo = 0;
 		hi = lo;
-		while ( addr < 0x20C) {
+		while (addr < 0x20C) {
 			_WRMSR(addr, lo, hi);		/* prog. MTRR with current region Mask */
 			addr++;						/* next MTRR pair addr */
 		}
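
As background for the SetMTRRrange_D comment fixed above: each variable-range MTRR is a PhysBase/PhysMask MSR pair (MSRs 0x200 + 2n and 0x201 + 2n), where the mask is the complement of (size - 1) within the supported physical address width plus a valid bit (bit 11). A generic sketch of the value computation, using a hypothetical helper rather than the coreboot code (which works on addresses right-justified by 8 bits):

	#include <stdint.h>

	#define MTRR_TYPE_WRBACK	6
	#define MTRR_PHYS_MASK_VALID	(1ull << 11)

	/* base and size must be naturally aligned powers of two; addr_mask is
	 * (1 << physical_address_bits) - 1, e.g. 48 address bits on these CPUs. */
	static void var_mtrr_values(uint64_t base, uint64_t size, uint8_t type,
				    uint64_t addr_mask,
				    uint64_t *physbase, uint64_t *physmask)
	{
		*physbase = base | type;				/* IA32_MTRR_PHYSBASEn */
		*physmask = (~(size - 1) & addr_mask)
			    | MTRR_PHYS_MASK_VALID;			/* IA32_MTRR_PHYSMASKn */
	}
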
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mctndi_d.c b/src/northbridge/amd/amdmct/mct_ddr3/mctndi_d.c
index 9a769ad..bf84171 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/mctndi_d.c
+++ b/src/northbridge/amd/amdmct/mct_ddr3/mctndi_d.c
@@ -68,7 +68,7 @@ void InterleaveNodes_D(struct MCTStatStruc *pMCTstat,
 				NodesWmem++;
 				Base &= 0xFFFF0000;	/* Base[39:8] */
 
-				if (pDCTstat->Status & (1 << SB_HWHole )) {
+				if (pDCTstat->Status & (1 << SB_HWHole)) {
 
 					/* to get true amount of dram,
 					 * subtract out memory hole if HW dram remapping */
@@ -83,7 +83,7 @@ void InterleaveNodes_D(struct MCTStatStruc *pMCTstat,
 				DctSelBase = Get_NB32(pDCTstat->dev_dct, 0x114);
 				if (DctSelBase) {
 					DctSelBase <<= 8;
-					if ( pDCTstat->Status & (1 << SB_HWHole)) {
+					if (pDCTstat->Status & (1 << SB_HWHole)) {
 						if (DctSelBase >= 0x1000000) {
 							DctSelBase -= HWHoleSz;
 						}
@@ -100,7 +100,7 @@ void InterleaveNodes_D(struct MCTStatStruc *pMCTstat,
 				MemSize &= 0xFFFF0000;
 				MemSize += 0x00010000;
 				MemSize -= Base;
-				if ( pDCTstat->Status & (1 << SB_HWHole)) {
+				if (pDCTstat->Status & (1 << SB_HWHole)) {
 					MemSize -= HWHoleSz;
 				}
 				if (Node == 0) {
@@ -139,7 +139,7 @@ void InterleaveNodes_D(struct MCTStatStruc *pMCTstat,
 	if (DoIntlv) {
 		MCTMemClr_D(pMCTstat, pDCTstatA);
 		/* Program Interleaving enabled on Node 0 map only.*/
-		MemSize0 <<= bsf(Nodes);	/* MemSize=MemSize*2 (or 4, or 8) */
+		MemSize0 <<= bsf(Nodes);	/* MemSize = MemSize*2 (or 4, or 8) */
 		Dct0MemSize <<= bsf(Nodes);
 		MemSize0 += HWHoleSz;
 		Base = ((Nodes - 1) << 8) | 3;
@@ -180,7 +180,7 @@ void InterleaveNodes_D(struct MCTStatStruc *pMCTstat,
 				HoleBase = pMCTstat->HoleBase;
 				if (Dct0MemSize >= HoleBase) {
 					val = HWHoleSz;
-					if ( Node == 0) {
+					if (Node == 0) {
 						val += Dct0MemSize;
 					}
 				} else {
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mctrci.c b/src/northbridge/amd/amdmct/mct_ddr3/mctrci.c
index 951a712..ac24c6d 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/mctrci.c
+++ b/src/northbridge/amd/amdmct/mct_ddr3/mctrci.c
@@ -336,14 +336,14 @@ void mct_DramControlReg_Init_D(struct MCTStatStruc *pMCTstat,
 			printk(BIOS_SPEW, "%s: F2xA8: %08x\n", __func__, val);
 
 			if (is_fam15h()) {
-				for (cw=0; cw <=15; cw ++) {
+				for (cw = 0; cw <=15; cw ++) {
 					val = mct_ControlRC(pMCTstat, pDCTstat, dct, MrsChipSel << rc_word_chip_select_lower_bit(), cw);
 					mct_SendCtrlWrd(pMCTstat, pDCTstat, dct, val);
 					if ((cw == 2) || (cw == 8) || (cw == 10))
 						precise_ndelay_fam15(pMCTstat, 6000);
 				}
 			} else {
-				for (cw=0; cw <=15; cw ++) {
+				for (cw = 0; cw <=15; cw ++) {
 					mct_Wait(1600);
 					val = mct_ControlRC(pMCTstat, pDCTstat, dct, MrsChipSel << rc_word_chip_select_lower_bit(), cw);
 					mct_SendCtrlWrd(pMCTstat, pDCTstat, dct, val);
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mctsdi.c b/src/northbridge/amd/amdmct/mct_ddr3/mctsdi.c
index 670d640..18af172 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/mctsdi.c
+++ b/src/northbridge/amd/amdmct/mct_ddr3/mctsdi.c
@@ -912,7 +912,7 @@ static u32 mct_MR1(struct MCTStatStruc *pMCTstat,
 		/* program MrsAddress[11]=TDQS: based on F2x[1,0]94[RDqsEn] */
 		if (Get_NB32_DCT(dev, dct, 0x94) & (1 << RDqsEn)) {
 			u8 bit;
-			/* Set TDQS=1b for x8 DIMM, TDQS=0b for x4 DIMM, when mixed x8 & x4 */
+			/* Set TDQS = 1b for x8 DIMM, TDQS = 0b for x4 DIMM, when mixed x8 & x4 */
 			bit = (ret >> 21) << 1;
 			if ((dct & 1) != 0)
 				bit ++;
@@ -1063,7 +1063,7 @@ static void mct_SendZQCmd(struct DCTStatStruc *pDCTstat, u8 dct)
 	printk(BIOS_DEBUG, "%s: Start\n", __func__);
 
 	/*1.Program MrsAddress[10]=1
-	  2.Set SendZQCmd=1
+	  2.Set SendZQCmd = 1
 	 */
 	dword = Get_NB32_DCT(dev, dct, 0x7C);
 	dword &= ~0xFFFFFF;
@@ -1071,7 +1071,7 @@ static void mct_SendZQCmd(struct DCTStatStruc *pDCTstat, u8 dct)
 	dword |= 1 << SendZQCmd;
 	Set_NB32_DCT(dev, dct, 0x7C, dword);
 
-	/* Wait for SendZQCmd=0 */
+	/* Wait for SendZQCmd = 0 */
 	do {
 		dword = Get_NB32_DCT(dev, dct, 0x7C);
 	} while (dword & (1 << SendZQCmd));
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mctsrc.c b/src/northbridge/amd/amdmct/mct_ddr3/mctsrc.c
index 8024179..eac2013 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/mctsrc.c
+++ b/src/northbridge/amd/amdmct/mct_ddr3/mctsrc.c
@@ -76,7 +76,7 @@ static void SetupRcvrPattern(struct MCTStatStruc *pMCTstat,
 	p_A = (u32 *)SetupDqsPattern_1PassB(pass);
 	p_B = (u32 *)SetupDqsPattern_1PassA(pass);
 
-	for (i=0;i<16;i++) {
+	for (i = 0; i < 16; i++) {
 		buf_a[i] = p_A[i];
 		buf_b[i] = p_B[i];
 	}
@@ -560,7 +560,7 @@ static uint32_t convert_testaddr_and_channel_to_address(struct DCTStatStruc *pDC
 	SetUpperFSbase(testaddr);
 	testaddr <<= 8;
 
-	if ((pDCTstat->Status & (1<<SB_128bitmode)) && channel ) {
+	if ((pDCTstat->Status & (1<<SB_128bitmode)) && channel) {
 		testaddr += 8;	/* second channel */
 	}
 
@@ -636,7 +636,7 @@ static void dqsTrainRcvrEn_SW_Fam10(struct MCTStatStruc *pMCTstat,
 	}
 
 	cr4 = read_cr4();
-	if (cr4 & ( 1 << 9)) {	/* save the old value */
+	if (cr4 & (1 << 9)) {	/* save the old value */
 		_SSE2 = 1;
 	}
 	cr4 |= (1 << 9);	/* OSFXSR enable SSE2 */
@@ -986,7 +986,7 @@ static void dqsTrainRcvrEn_SW_Fam10(struct MCTStatStruc *pMCTstat,
 		msr.lo &= ~(1<<17);	/* restore HWCR.wrap32dis */
 		wrmsr(HWCR, msr);
 	}
-	if (!_SSE2){
+	if (!_SSE2) {
 		cr4 = read_cr4();
 		cr4 &= ~(1<<9); 	/* restore cr4.OSFXSR */
 		write_cr4(cr4);
@@ -996,7 +996,7 @@ static void dqsTrainRcvrEn_SW_Fam10(struct MCTStatStruc *pMCTstat,
 	{
 		u8 ChannelDTD;
 		printk(BIOS_DEBUG, "TrainRcvrEn: CH_MaxRdLat:\n");
-		for (ChannelDTD = 0; ChannelDTD<2; ChannelDTD++) {
+		for (ChannelDTD = 0; ChannelDTD < 2; ChannelDTD++) {
 			printk(BIOS_DEBUG, "Channel:%x: %x\n",
 			       ChannelDTD, pDCTstat->CH_MaxRdLat[ChannelDTD][0]);
 		}
@@ -1013,10 +1013,10 @@ static void dqsTrainRcvrEn_SW_Fam10(struct MCTStatStruc *pMCTstat,
 		printk(BIOS_DEBUG, "TrainRcvrEn: CH_D_B_RCVRDLY:\n");
 		for (ChannelDTD = 0; ChannelDTD < 2; ChannelDTD++) {
 			printk(BIOS_DEBUG, "Channel:%x\n", ChannelDTD);
-			for (ReceiverDTD = 0; ReceiverDTD<8; ReceiverDTD+=2) {
+			for (ReceiverDTD = 0; ReceiverDTD < 8; ReceiverDTD+=2) {
 				printk(BIOS_DEBUG, "\t\tReceiver:%x:", ReceiverDTD);
 				p = pDCTstat->CH_D_B_RCVRDLY[ChannelDTD][ReceiverDTD>>1];
-				for (i=0;i<8; i++) {
+				for (i = 0; i < 8; i++) {
 					valDTD = p[i];
 					printk(BIOS_DEBUG, " %03x", valDTD);
 				}
@@ -1246,7 +1246,7 @@ static void dqsTrainRcvrEn_SW_Fam15(struct MCTStatStruc *pMCTstat,
 	}
 
 	cr4 = read_cr4();
-	if (cr4 & ( 1 << 9)) {	/* save the old value */
+	if (cr4 & (1 << 9)) {	/* save the old value */
 		_SSE2 = 1;
 	}
 	cr4 |= (1 << 9);	/* OSFXSR enable SSE2 */
@@ -1500,7 +1500,7 @@ static void dqsTrainRcvrEn_SW_Fam15(struct MCTStatStruc *pMCTstat,
 		lo &= ~(1<<17);		/* restore HWCR.wrap32dis */
 		_WRMSR(msr, lo, hi);
 	}
-	if (!_SSE2){
+	if (!_SSE2) {
 		cr4 = read_cr4();
 		cr4 &= ~(1<<9); 	/* restore cr4.OSFXSR */
 		write_cr4(cr4);
@@ -1510,7 +1510,7 @@ static void dqsTrainRcvrEn_SW_Fam15(struct MCTStatStruc *pMCTstat,
 	{
 		u8 ChannelDTD;
 		printk(BIOS_DEBUG, "TrainRcvrEn: CH_MaxRdLat:\n");
-		for (ChannelDTD = 0; ChannelDTD<2; ChannelDTD++) {
+		for (ChannelDTD = 0; ChannelDTD < 2; ChannelDTD++) {
 			printk(BIOS_DEBUG, "Channel:%x: %x\n",
 			       ChannelDTD, pDCTstat->CH_MaxRdLat[ChannelDTD][0]);
 		}
@@ -1527,10 +1527,10 @@ static void dqsTrainRcvrEn_SW_Fam15(struct MCTStatStruc *pMCTstat,
 		printk(BIOS_DEBUG, "TrainRcvrEn: CH_D_B_RCVRDLY:\n");
 		for (ChannelDTD = 0; ChannelDTD < 2; ChannelDTD++) {
 			printk(BIOS_DEBUG, "Channel:%x\n", ChannelDTD);
-			for (ReceiverDTD = 0; ReceiverDTD<8; ReceiverDTD+=2) {
+			for (ReceiverDTD = 0; ReceiverDTD < 8; ReceiverDTD+=2) {
 				printk(BIOS_DEBUG, "\t\tReceiver:%x:", ReceiverDTD);
 				p = pDCTstat->CH_D_B_RCVRDLY[ChannelDTD][ReceiverDTD>>1];
-				for (i=0;i<8; i++) {
+				for (i = 0; i < 8; i++) {
 					valDTD = p[i];
 					printk(BIOS_DEBUG, " %03x", valDTD);
 				}
@@ -1604,7 +1604,7 @@ static void dqsTrainMaxRdLatency_SW_Fam15(struct MCTStatStruc *pMCTstat,
 	ch_end = 2;
 
 	cr4 = read_cr4();
-	if (cr4 & ( 1 << 9)) {	/* save the old value */
+	if (cr4 & (1 << 9)) {	/* save the old value */
 		_SSE2 = 1;
 	}
 	cr4 |= (1 << 9);	/* OSFXSR enable SSE2 */
@@ -1720,7 +1720,7 @@ static void dqsTrainMaxRdLatency_SW_Fam15(struct MCTStatStruc *pMCTstat,
 		lo &= ~(1<<17);		/* restore HWCR.wrap32dis */
 		_WRMSR(msr, lo, hi);
 	}
-	if (!_SSE2){
+	if (!_SSE2) {
 		cr4 = read_cr4();
 		cr4 &= ~(1<<9); 	/* restore cr4.OSFXSR */
 		write_cr4(cr4);
@@ -1730,7 +1730,7 @@ static void dqsTrainMaxRdLatency_SW_Fam15(struct MCTStatStruc *pMCTstat,
 	{
 		u8 ChannelDTD;
 		printk(BIOS_DEBUG, "TrainMaxRdLatency: CH_MaxRdLat:\n");
-		for (ChannelDTD = 0; ChannelDTD<2; ChannelDTD++) {
+		for (ChannelDTD = 0; ChannelDTD < 2; ChannelDTD++) {
 			printk(BIOS_DEBUG, "Channel:%x: %x\n",
 			       ChannelDTD, pDCTstat->CH_MaxRdLat[ChannelDTD][0]);
 		}
@@ -1745,7 +1745,7 @@ static void dqsTrainMaxRdLatency_SW_Fam15(struct MCTStatStruc *pMCTstat,
 
 u8 mct_InitReceiver_D(struct DCTStatStruc *pDCTstat, u8 dct)
 {
-	if (pDCTstat->DIMMValidDCT[dct] == 0 ) {
+	if (pDCTstat->DIMMValidDCT[dct] == 0) {
 		return 8;
 	} else {
 		return 0;
@@ -1766,7 +1766,7 @@ static void mct_DisableDQSRcvEn_D(struct DCTStatStruc *pDCTstat)
 		ch_end = 2;
 	}
 
-	for (ch=0; ch<ch_end; ch++) {
+	for (ch = 0; ch < ch_end; ch++) {
 		reg = 0x78;
 		val = Get_NB32_DCT(dev, ch, reg);
 		val &= ~(1 << DqsRcvEnTrain);
@@ -1800,14 +1800,14 @@ void mct_SetRcvrEnDly_D(struct DCTStatStruc *pDCTstat, u16 RcvrEnDly,
 	}
 
 	/* DimmOffset not needed for CH_D_B_RCVRDLY array */
-	for (i=0; i < 8; i++) {
+	for (i = 0; i < 8; i++) {
 		if (FinalValue) {
 			/*calculate dimm offset */
 			p = pDCTstat->CH_D_B_RCVRDLY[Channel][Receiver >> 1];
 			RcvrEnDly = p[i];
 		}
 
-		/* if flag=0, set DqsRcvEn value to reg. */
+		/* if flag = 0, set DqsRcvEn value to reg. */
 		/* get the register index from table */
 		index = Table_DQSRcvEn_Offset[i >> 1];
 		index += Addl_Index;	/* DIMMx DqsRcvEn byte0 */
@@ -1852,7 +1852,7 @@ static void mct_SetMaxLatency_D(struct DCTStatStruc *pDCTstat, u8 Channel, u16 D
 		uint8_t package_type = mctGet_NVbits(NV_PACK_TYPE);
 		if ((package_type == PT_L1)		/* Socket F (1207) */
 			|| (package_type == PT_M2)	/* Socket AM3 */
-			|| (package_type == PT_S1)) {	/* Socket S1g<x> */
+			|| (package_type == PT_S1)) {	/* Socket S1g <x> */
 			cpu_val_n = 10;
 			cpu_val_p = 11;
 		} else {
@@ -1950,7 +1950,7 @@ static void mct_InitDQSPos4RcvrEn_D(struct MCTStatStruc *pMCTstat,
 	 * Read Position is 1/2 Memclock Delay
 	 */
 	u8 i;
-	for (i=0;i<2; i++){
+	for (i = 0; i < 2; i++) {
 		InitDQSPos4RcvrEn_D(pMCTstat, pDCTstat, i);
 	}
 }
@@ -1972,8 +1972,8 @@ static void InitDQSPos4RcvrEn_D(struct MCTStatStruc *pMCTstat,
 
 	/* FIXME: add Cx support */
 	dword = 0x00000000;
-	for (i=1; i<=3; i++) {
-		for (j=0; j<dn; j++)
+	for (i = 1; i <= 3; i++) {
+		for (j = 0; j < dn; j++)
 			/* DIMM0 Write Data Timing Low */
 			/* DIMM0 Write ECC Timing */
 			Set_NB32_index_wait_DCT(dev, Channel, index_reg, i + 0x100 * j, dword);
@@ -1981,14 +1981,14 @@ static void InitDQSPos4RcvrEn_D(struct MCTStatStruc *pMCTstat,
 
 	/* errata #180 */
 	dword = 0x2f2f2f2f;
-	for (i=5; i<=6; i++) {
-		for (j=0; j<dn; j++)
+	for (i = 5; i <= 6; i++) {
+		for (j = 0; j < dn; j++)
 			/* DIMM0 Read DQS Timing Control Low */
 			Set_NB32_index_wait_DCT(dev, Channel, index_reg, i + 0x100 * j, dword);
 	}
 
 	dword = 0x0000002f;
-	for (j=0; j<dn; j++)
+	for (j = 0; j < dn; j++)
 		/* DIMM0 Read DQS ECC Timing Control */
 		Set_NB32_index_wait_DCT(dev, Channel, index_reg, 7 + 0x100 * j, dword);
 }
@@ -2087,7 +2087,7 @@ void mctSetEccDQSRcvrEn_D(struct MCTStatStruc *pMCTstat,
 		if (!pDCTstat->NodePresent)
 			break;
 		if (pDCTstat->DCTSysLimit) {
-			for (i=0; i<2; i++)
+			for (i = 0; i < 2; i++)
 				CalcEccDQSRcvrEn_D(pMCTstat, pDCTstat, i);
 		}
 	}
@@ -2427,5 +2427,5 @@ void mct_Wait(u32 cycles)
 	saved = lo;
 	do {
 		_RDMSR(msr, &lo, &hi);
-	} while (lo - saved < cycles );
+	} while (lo - saved < cycles);
 }
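
A side note on the loop fixed above in mct_Wait(): because lo and saved are unsigned 32-bit values, (lo - saved) yields the elapsed tick count modulo 2^32, so the comparison stays correct even if the counter wraps once between reads. Minimal illustration, with a hypothetical read_counter() standing in for the MSR-based counter read:

	#include <stdint.h>

	extern uint32_t read_counter(void);	/* hypothetical stand-in for _RDMSR */

	static void wait_cycles(uint32_t cycles)
	{
		uint32_t saved = read_counter();

		/* Unsigned subtraction is wrap-safe: elapsed ticks mod 2^32. */
		while ((uint32_t)(read_counter() - saved) < cycles)
			;
	}
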
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mctsrc1p.c b/src/northbridge/amd/amdmct/mct_ddr3/mctsrc1p.c
index d535735..30cf19b 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/mctsrc1p.c
+++ b/src/northbridge/amd/amdmct/mct_ddr3/mctsrc1p.c
@@ -49,7 +49,7 @@ static u16 mct_Average_RcvrEnDly_1Pass(struct DCTStatStruc *pDCTstat, u8 Channel
 	MaxValue = 0;
 	p = pDCTstat->CH_D_B_RCVRDLY[Channel][Receiver >> 1];
 
-	for (i=0; i < 8; i++) {
+	for (i = 0; i < 8; i++) {
 		/* get left value from DCTStatStruc.CHA_D0_B0_RCVRDLY*/
 		val = p[i];
 		/* get right value from DCTStatStruc.CHA_D0_B0_RCVRDLY_1*/
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mctsrc2p.c b/src/northbridge/amd/amdmct/mct_ddr3/mctsrc2p.c
index 2f4d4da..7f67824 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/mctsrc2p.c
+++ b/src/northbridge/amd/amdmct/mct_ddr3/mctsrc2p.c
@@ -58,7 +58,7 @@ u8 mct_Get_Start_RcvrEnDly_Pass(struct DCTStatStruc *pDCTstat,
 		u8 bn;
 		bn = 8;
 
-		for ( i=0;i<bn; i++) {
+		for (i = 0; i < bn; i++) {
 			val  = p[i];
 
 			if (val > max) {
@@ -91,7 +91,7 @@ u16 mct_Average_RcvrEnDly_Pass(struct DCTStatStruc *pDCTstat,
 		/* FIXME: which byte? */
 		p_1 = pDCTstat->B_RCVRDLY_1;
 		/* p_1 = pDCTstat->CH_D_B_RCVRDLY_1[Channel][Receiver>>1]; */
-		for (i=0; i<bn; i++) {
+		for (i = 0; i < bn; i++) {
 			val = p[i];
 			/* left edge */
 			if (val != (RcvrEnDlyLimit - 1)) {
@@ -111,7 +111,7 @@ u16 mct_Average_RcvrEnDly_Pass(struct DCTStatStruc *pDCTstat,
 			pDCTstat->DimmTrainFail &= ~(1<<(Receiver + Channel));
 		}
 	} else {
-		for (i=0; i < bn; i++) {
+		for (i = 0; i < bn; i++) {
 			val = p[i];
 			/* Add 1/2 Memlock delay */
 			/* val += Pass1MemClkDly; */
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mcttmrl.c b/src/northbridge/amd/amdmct/mct_ddr3/mcttmrl.c
index 15eb67e..78db68c 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/mcttmrl.c
+++ b/src/northbridge/amd/amdmct/mct_ddr3/mcttmrl.c
@@ -144,7 +144,7 @@ static void maxRdLatencyTrain_D(struct MCTStatStruc *pMCTstat,
 		print_debug_dqs("\tMaxRdLatencyTrain51: Channel ",Channel, 1);
 		pDCTstat->Channel = Channel;
 
-		if ( (pDCTstat->Status & (1 << SB_128bitmode)) && Channel)
+		if ((pDCTstat->Status & (1 << SB_128bitmode)) && Channel)
 			break;		/*if ganged mode, skip DCT 1 */
 
 		TestAddr0 = GetMaxRdLatTestAddr_D(pMCTstat, pDCTstat, Channel, &RcvrEnDly,	 &valid);
@@ -159,7 +159,7 @@ static void maxRdLatencyTrain_D(struct MCTStatStruc *pMCTstat,
 		while (MaxRdLatDly < MAX_RD_LAT) {	/* sweep Delay value here */
 			mct_setMaxRdLatTrnVal_D(pDCTstat, Channel, MaxRdLatDly);
 			ReadMaxRdLat1CLTestPattern_D(TestAddr0);
-			if ( CompareMaxRdLatTestPattern_D(pattern_buf, TestAddr0) == DQS_PASS)
+			if (CompareMaxRdLatTestPattern_D(pattern_buf, TestAddr0) == DQS_PASS)
 				break;
 			SetTargetWTIO_D(TestAddr0);
 			FlushMaxRdLatTestPattern_D(TestAddr0);
@@ -180,7 +180,7 @@ static void maxRdLatencyTrain_D(struct MCTStatStruc *pMCTstat,
 		lo &= ~(1<<17);	/* restore HWCR.wrap32dis */
 		_WRMSR(addr, lo, hi);
 	}
-	if (!_SSE2){
+	if (!_SSE2) {
 		cr4 = read_cr4();
 		cr4 &= ~(1<<9);	/* restore cr4.OSFXSR */
 		write_cr4(cr4);
@@ -190,7 +190,7 @@ static void maxRdLatencyTrain_D(struct MCTStatStruc *pMCTstat,
 	{
 		u8 ChannelDTD;
 		printk(BIOS_DEBUG, "maxRdLatencyTrain: CH_MaxRdLat:\n");
-		for (ChannelDTD = 0; ChannelDTD<2; ChannelDTD++) {
+		for (ChannelDTD = 0; ChannelDTD < 2; ChannelDTD++) {
 			printk(BIOS_DEBUG, "Channel: %02x: %02x\n", ChannelDTD, pDCTstat->CH_MaxRdLat[ChannelDTD][0]);
 		}
 	}
@@ -207,7 +207,7 @@ static void mct_setMaxRdLatTrnVal_D(struct DCTStatStruc *pDCTstat,
 
 	if (pDCTstat->GangedMode) {
 		Channel = 0; /* for safe */
-		for (i=0; i<2; i++)
+		for (i = 0; i < 2; i++)
 			pDCTstat->CH_MaxRdLat[i][0] = MaxRdLatVal;
 	} else {
 		pDCTstat->CH_MaxRdLat[Channel][0] = MaxRdLatVal;
@@ -239,7 +239,7 @@ static u8 CompareMaxRdLatTestPattern_D(u32 pattern_buf, u32 addr)
 	addr_lo = addr<<8;
 
 	_EXECFENCE;
-	for (i=0; i<(16*3); i++) {
+	for (i = 0; i < 16*3; i++) {
 		val = read32_fs(addr_lo);
 		val_test = test_buf[i];
 
@@ -284,11 +284,11 @@ static u32 GetMaxRdLatTestAddr_D(struct MCTStatStruc *pMCTstat,
 	*valid = 0;
 
 	for (ch = ch_start; ch < ch_end; ch++) {
-		for (d=0; d<4; d++) {
-			for (Byte = 0; Byte<bn; Byte++) {
+		for (d = 0; d < 4; d++) {
+			for (Byte = 0; Byte < bn; Byte++) {
 				u8 tmp;
 				tmp = pDCTstat->CH_D_B_RCVRDLY[ch][d][Byte];
-				if (tmp>Max) {
+				if (tmp > Max) {
 					Max = tmp;
 					Channel_Max = Channel;
 					d_Max = d;
@@ -371,7 +371,7 @@ u8 mct_GetStartMaxRdLat_D(struct MCTStatStruc *pMCTstat,
 	valx = (val) << 2;	/* SubTotal div 4 to scale 1/4 MemClk back to MemClk */
 
 	val = Get_NB32(pDCTstat->dev_nbmisc, 0xD4);
-	val = ((val & 0x1f) + 4 ) * 3;
+	val = ((val & 0x1f) + 4) * 3;
 
 	/* Calculate 1 MemClk + 1 NCLK delay in NCLKs for margin */
 	valxx = val << 2;
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mctwl.c b/src/northbridge/amd/amdmct/mct_ddr3/mctwl.c
index 44ea6e8..47c5004 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/mctwl.c
+++ b/src/northbridge/amd/amdmct/mct_ddr3/mctwl.c
@@ -426,7 +426,7 @@ void SetTargetFreq(struct MCTStatStruc *pMCTstat,
 		}
 	}
 
-	/* wait for 500 MCLKs after ExitSelfRef, 500*2.5ns=1250ns */
+	/* wait for 500 MCLKs after ExitSelfRef, 500*2.5ns = 1250ns */
 	mct_Wait(250);
 
 	if (pDCTstat->Status & (1 << SB_Registered)) {
@@ -474,9 +474,9 @@ void Restore_OnDimmMirror(struct MCTStatStruc *pMCTstat,
 {
 	if (pDCTstat->LogicalCPUID & (AMD_DR_Bx /* | AMD_RB_C0 */)) { /* We dont support RB-C0 now */
 		if (pDCTstat->MirrPresU_NumRegR & 0x55)
-			Modify_OnDimmMirror(pDCTstat, 0, 1); /* dct=0, set */
+			Modify_OnDimmMirror(pDCTstat, 0, 1); /* dct = 0, set */
 		if (pDCTstat->MirrPresU_NumRegR & 0xAA)
-			Modify_OnDimmMirror(pDCTstat, 1, 1); /* dct=1, set */
+			Modify_OnDimmMirror(pDCTstat, 1, 1); /* dct = 1, set */
 	}
 }
 void Clear_OnDimmMirror(struct MCTStatStruc *pMCTstat,
@@ -484,8 +484,8 @@ void Clear_OnDimmMirror(struct MCTStatStruc *pMCTstat,
 {
 	if (pDCTstat->LogicalCPUID & (AMD_DR_Bx /* | AMD_RB_C0 */)) { /* We dont support RB-C0 now */
 		if (pDCTstat->MirrPresU_NumRegR & 0x55)
-			Modify_OnDimmMirror(pDCTstat, 0, 0); /* dct=0, clear */
+			Modify_OnDimmMirror(pDCTstat, 0, 0); /* dct = 0, clear */
 		if (pDCTstat->MirrPresU_NumRegR & 0xAA)
-			Modify_OnDimmMirror(pDCTstat, 1, 0); /* dct=1, clear */
+			Modify_OnDimmMirror(pDCTstat, 1, 0); /* dct = 1, clear */
 	}
 }
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mhwlc_d.c b/src/northbridge/amd/amdmct/mct_ddr3/mhwlc_d.c
index 5c30bc5..dce6212 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/mhwlc_d.c
+++ b/src/northbridge/amd/amdmct/mct_ddr3/mhwlc_d.c
@@ -140,15 +140,15 @@ uint8_t AgesaHwWlPhase1(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCT
 			 */
 			if (dct)
 			{
-				Addl_Data_Offset=0x198;
-				Addl_Data_Port=0x19C;
+				Addl_Data_Offset = 0x198;
+				Addl_Data_Port = 0x19C;
 			}
 			else
 			{
-				Addl_Data_Offset=0x98;
-				Addl_Data_Port=0x9C;
+				Addl_Data_Offset = 0x98;
+				Addl_Data_Port = 0x9C;
 			}
-			Addr=0x0D00000C;
+			Addr = 0x0D00000C;
 			AmdMemPCIWriteBits(MAKE_SBDFO(0,0,24+(pDCTData->NodeId),FUN_DCT,Addl_Data_Offset), 31, 0, &Addr);
 			while ((get_Bits(pDCTData,FUN_DCT,pDCTData->NodeId, FUN_DCT, Addl_Data_Offset,
 					DctAccessDone, DctAccessDone)) == 0);
@@ -157,7 +157,7 @@ uint8_t AgesaHwWlPhase1(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCT
 			Value = bitTestReset(Value, 4); /* for x8 only */
 			Value = bitTestReset(Value, 5); /* for hardware WL training */
 			AmdMemPCIWriteBits(MAKE_SBDFO(0,0,24+(pDCTData->NodeId),FUN_DCT,Addl_Data_Port), 31, 0, &Value);
-			Addr=0x4D030F0C;
+			Addr = 0x4D030F0C;
 			AmdMemPCIWriteBits(MAKE_SBDFO(0,0,24+(pDCTData->NodeId),FUN_DCT,Addl_Data_Offset), 31, 0, &Addr);
 			while ((get_Bits(pDCTData,FUN_DCT,pDCTData->NodeId, FUN_DCT, Addl_Data_Offset,
 					DctAccessDone, DctAccessDone)) == 0);
@@ -397,8 +397,8 @@ u32 swapAddrBits_wl(struct DCTStatStruc *pDCTstat, uint8_t dct, uint32_t MRSValu
 			tempW &= 0x0A8;
 			tempW1 &= 0x0150;
 			MRSValue &= 0xFE07;
-			MRSValue |= (tempW<<1);
-			MRSValue |= (tempW1>>1);
+			MRSValue |= (tempW << 1);
+			MRSValue |= (tempW1 >> 1);
 		}
 	}
 	return MRSValue;
@@ -438,8 +438,8 @@ u32 swapBankBits(struct DCTStatStruc *pDCTstat, uint8_t dct, u32 MRSValue)
 			tempW &= 0x01;
 			tempW1 &= 0x02;
 			MRSValue = 0;
-			MRSValue |= (tempW<<1);
-			MRSValue |= (tempW1>>1);
+			MRSValue |= (tempW << 1);
+			MRSValue |= (tempW1 >> 1);
 		}
 	}
 	return MRSValue;
@@ -453,22 +453,22 @@ static uint16_t unbuffered_dimm_nominal_termination_emrs(uint8_t number_of_dimms
 
 	if (number_of_dimms == 1) {
 		if (MaxDimmsInstallable < 3) {
-			term = 0x04;	/* Rtt_Nom=RZQ/4=60 Ohm */
+			term = 0x04;	/* Rtt_Nom = RZQ/4 = 60 Ohm */
 		} else {
 			if (rank_count == 1) {
-				term = 0x04;	/* Rtt_Nom=RZQ/4=60 Ohm */
+				term = 0x04;	/* Rtt_Nom = RZQ/4 = 60 Ohm */
 			} else {
 				if (rank == 0)
-					term = 0x04;	/* Rtt_Nom=RZQ/4=60 Ohm */
+					term = 0x04;	/* Rtt_Nom = RZQ/4 = 60 Ohm */
 				else
-					term = 0x00;	/* Rtt_Nom=OFF */
+					term = 0x00;	/* Rtt_Nom = OFF */
 			}
 		}
 	} else {
 		if (frequency_index < 5)
-			term = 0x0044;	/* Rtt_Nom=RZQ/6=40 Ohm */
+			term = 0x0044;	/* Rtt_Nom = RZQ/6 = 40 Ohm */
 		else
-			term = 0x0204;	/* Rtt_Nom=RZQ/8=30 Ohm */
+			term = 0x0204;	/* Rtt_Nom = RZQ/8 = 30 Ohm */
 	}
 
 	return term;
@@ -482,15 +482,15 @@ static uint16_t unbuffered_dimm_dynamic_termination_emrs(uint8_t number_of_dimms
 
 	if (number_of_dimms == 1) {
 		if (MaxDimmsInstallable < 3) {
-			term = 0x00;	/* Rtt_WR=off */
+			term = 0x00;	/* Rtt_WR = off */
 		} else {
 			if (rank_count == 1)
-				term = 0x00;	/* Rtt_WR=off */
+				term = 0x00;	/* Rtt_WR = off */
 			else
-				term = 0x200;	/* Rtt_WR=RZQ/4=60 Ohm */
+				term = 0x200;	/* Rtt_WR = RZQ/4 = 60 Ohm */
 		}
 	} else {
-		term = 0x400;	/* Rtt_WR=RZQ/2=120 Ohm */
+		term = 0x400;	/* Rtt_WR = RZQ/2 = 120 Ohm */
 	}
 
 	return term;
@@ -558,7 +558,7 @@ void prepareDimms(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat,
 			tempW = mct_MR1(pMCTstat, pDCTstat, dct, dimm*2+rank) & 0xffff;
 			tempW &= ~(0x0244);
 		} else {
-			/* Set TDQS=1b for x8 DIMM, TDQS=0b for x4 DIMM, when mixed x8 & x4 */
+			/* Set TDQS = 1b for x8 DIMM, TDQS = 0b for x4 DIMM, when mixed x8 & x4 */
 			tempW2 = get_Bits(pDCTData, dct, pDCTData->NodeId,
 					FUN_DCT, DRAM_CONFIG_HIGH, RDqsEn, RDqsEn);
 			if (tempW2)
@@ -618,7 +618,7 @@ void prepareDimms(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat,
 		}
 
 		/* Apply Rtt_Nom to the MRS control word */
-		tempW=tempW|tempW1;
+		tempW = tempW|tempW1;
 
 		/* All ranks of the target DIMM are set to write levelization mode. */
 		if (wl)
@@ -702,8 +702,8 @@ void prepareDimms(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat,
 			{tempW = bitTestSet(tempW, 7);}
 			if (bitTest(tempW1,18))
 			{tempW = bitTestSet(tempW, 6);}
-			/* tempW=tempW|(((tempW1>>20)&0x7)<<3); */
-			tempW=tempW|((tempW1&0x00700000)>>17);
+			/* tempW = tempW|(((tempW1 >> 20) & 0x7 )<< 3); */
+			tempW = tempW|((tempW1&0x00700000) >> 17);
 			/* workaround for DR-B0 */
 			if ((pDCTData->LogicalCPUID & AMD_DR_Bx) && (pDCTData->Status[DCT_STATUS_REGISTERED]))
 				tempW+=0x8;
@@ -720,7 +720,7 @@ void prepareDimms(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat,
 		}
 
 		/* Apply Rtt_WR to the MRS control word */
-		tempW=tempW|tempW1;
+		tempW = tempW|tempW1;
 		tempW = swapAddrBits_wl(pDCTstat, dct, tempW);
 		if (is_fam15h())
 			set_Bits(pDCTData, dct, pDCTData->NodeId, FUN_DCT,
@@ -779,14 +779,14 @@ void prepareDimms(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat,
 					/* Program F2x[1, 0]7C[MrsAddress[15:0]] to the required
 					 * DDR3-defined function for write levelization.
 					 */
-					tempW = 0;/* DLL_DIS = 0, DIC = 0, AL = 0, TDQS = 0, Level=0, Qoff=0 */
+					tempW = 0;/* DLL_DIS = 0, DIC = 0, AL = 0, TDQS = 0, Level = 0, Qoff = 0 */
 
 					/* Retrieve normal settings of the MRS control word and clear Rtt_Nom */
 					if (is_fam15h()) {
 						tempW = mct_MR1(pMCTstat, pDCTstat, dct, currDimm*2+rank) & 0xffff;
 						tempW &= ~(0x0244);
 					} else {
-						/* Set TDQS=1b for x8 DIMM, TDQS=0b for x4 DIMM, when mixed x8 & x4 */
+						/* Set TDQS = 1b for x8 DIMM, TDQS = 0b for x4 DIMM, when mixed x8 & x4 */
 						tempW2 = get_Bits(pDCTData, dct, pDCTData->NodeId,
 								FUN_DCT, DRAM_CONFIG_HIGH, RDqsEn, RDqsEn);
 						if (tempW2)
@@ -811,7 +811,7 @@ void prepareDimms(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat,
 					}
 
 					/* Apply Rtt_Nom to the MRS control word */
-					tempW=tempW|tempW1;
+					tempW = tempW|tempW1;
 
 					/* Program MrsAddress[5,1]=output driver impedance control (DIC) */
 					if (is_fam15h()) {
@@ -877,8 +877,8 @@ void prepareDimms(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat,
 						{tempW = bitTestSet(tempW, 7);}
 						if (bitTest(tempW1,18))
 						{tempW = bitTestSet(tempW, 6);}
-						/* tempW=tempW|(((tempW1>>20)&0x7)<<3); */
-						tempW=tempW|((tempW1&0x00700000)>>17);
+						/* tempW = tempW|(((tempW1 >> 20) & 0x7) << 3); */
+						tempW = tempW|((tempW1&0x00700000) >> 17);
 						/* workaround for DR-B0 */
 						if ((pDCTData->LogicalCPUID & AMD_DR_Bx) && (pDCTData->Status[DCT_STATUS_REGISTERED]))
 							tempW+=0x8;
@@ -895,7 +895,7 @@ void prepareDimms(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat,
 					}
 
 					/* Apply Rtt_WR to the MRS control word */
-					tempW=tempW|tempW1;
+					tempW = tempW|tempW1;
 					tempW = swapAddrBits_wl(pDCTstat, dct, tempW);
 					if (is_fam15h())
 						set_Bits(pDCTData, dct, pDCTData->NodeId, FUN_DCT,
@@ -939,7 +939,7 @@ void programODT(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, ui
 	sMCTStruct *pMCTData = pDCTstat->C_MCTPtr;
 	sDCTStruct *pDCTData = pDCTstat->C_DCTPtr[dct];
 
-	u8 WrLvOdt1=0;
+	u8 WrLvOdt1 = 0;
 
 	if (is_fam15h()) {
 		/* On Family15h processors, the value for the specific CS being targetted
@@ -954,7 +954,7 @@ void programODT(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, ui
 		cs = (dimm * 2) + rank;
 
 		/* Fetch preprogammed ODT pattern from configuration registers */
-		dword = Get_NB32_DCT(pDCTstat->dev_dct, dct, ((cs>3)?0x23c:0x238));
+		dword = Get_NB32_DCT(pDCTstat->dev_dct, dct, ((cs > 3)?0x23c:0x238));
 		if ((cs == 7) || (cs == 3))
 			WrLvOdt1 = ((dword >> 24) & 0xf);
 		else if ((cs == 6) || (cs == 2))
@@ -1045,25 +1045,25 @@ void procConfig(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, ui
 	}
 	else
 	{
-		/* Program WrLvOdtEn=1 through set bit 12 of D3CSODT reg offset 0 for Rev.B */
+		/* Program WrLvOdtEn = 1 through set bit 12 of D3CSODT reg offset 0 for Rev.B */
 		if (dct)
 		{
-			Addl_Data_Offset=0x198;
-			Addl_Data_Port=0x19C;
+			Addl_Data_Offset = 0x198;
+			Addl_Data_Port = 0x19C;
 		}
 		else
 		{
-			Addl_Data_Offset=0x98;
-			Addl_Data_Port=0x9C;
+			Addl_Data_Offset = 0x98;
+			Addl_Data_Port = 0x9C;
 		}
-		Addr=0x0D008000;
+		Addr = 0x0D008000;
 		AmdMemPCIWriteBits(MAKE_SBDFO(0,0,24+(pDCTData->NodeId),FUN_DCT,Addl_Data_Offset), 31, 0, &Addr);
 		while ((get_Bits(pDCTData,FUN_DCT,pDCTData->NodeId, FUN_DCT, Addl_Data_Offset,
 				DctAccessDone, DctAccessDone)) == 0);
 		AmdMemPCIReadBits(MAKE_SBDFO(0,0,24+(pDCTData->NodeId),FUN_DCT,Addl_Data_Port), 31, 0, &Value);
 		Value = bitTestSet(Value, 12);
 		AmdMemPCIWriteBits(MAKE_SBDFO(0,0,24+(pDCTData->NodeId),FUN_DCT,Addl_Data_Port), 31, 0, &Value);
-		Addr=0x4D088F00;
+		Addr = 0x4D088F00;
 		AmdMemPCIWriteBits(MAKE_SBDFO(0,0,24+(pDCTData->NodeId),FUN_DCT,Addl_Data_Offset), 31, 0, &Addr);
 		while ((get_Bits(pDCTData,FUN_DCT,pDCTData->NodeId, FUN_DCT, Addl_Data_Offset,
 				DctAccessDone, DctAccessDone)) == 0);
@@ -1371,7 +1371,7 @@ void setWLByteDelay(struct DCTStatStruc *pDCTstat, uint8_t dct, u8 ByteLane, u8
 		while (ByteLane < lane_count)
 		{
 			/* This subtract 0xC workaround might be temporary. */
-			if ((pDCTData->WLPass==2) && (pDCTData->RegMan1Present & (1<<(dimm*2+dct)))) {
+			if ((pDCTData->WLPass == 2) && (pDCTData->RegMan1Present & (1 << (dimm*2+dct)))) {
 				tempW = (pDCTData->WLGrossDelay[index+ByteLane] << 5) | pDCTData->WLFineDelay[index+ByteLane];
 				tempW -= 0xC;
 				pDCTData->WLGrossDelay[index+ByteLane] = (u8)(tempW >> 5);
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/s3utils.c b/src/northbridge/amd/amdmct/mct_ddr3/s3utils.c
index 6589a39..97cadcb 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/s3utils.c
+++ b/src/northbridge/amd/amdmct/mct_ddr3/s3utils.c
@@ -351,12 +351,12 @@ void copy_mct_data_to_save_variable(struct amd_s3_persistent_data* persistent_da
 			data->f2x11c = pci_read_config32(dev_fn2, 0x11c);
 			data->f2x1b0 = pci_read_config32(dev_fn2, 0x1b0);
 			data->f3x44 = pci_read_config32(dev_fn3, 0x44);
-			for (i=0; i<16; i++) {
+			for (i = 0; i < 16; i++) {
 				data->msr0000020[i] = rdmsr_uint64_t(0x00000200 | i);
 			}
 			data->msr00000250 = rdmsr_uint64_t(0x00000250);
 			data->msr00000258 = rdmsr_uint64_t(0x00000258);
-			for (i=0; i<8; i++)
+			for (i = 0; i < 8; i++)
 				data->msr0000026[i] = rdmsr_uint64_t(0x00000260 | (i + 8));
 			data->msr000002ff = rdmsr_uint64_t(0x000002ff);
 			data->msrc0010010 = rdmsr_uint64_t(0xc0010010);
@@ -393,7 +393,7 @@ void copy_mct_data_to_save_variable(struct amd_s3_persistent_data* persistent_da
 				data->f2x204 = read_config32_dct(dev_fn2, node, channel, 0x204);
 				data->f2x208 = read_config32_dct(dev_fn2, node, channel, 0x208);
 				data->f2x20c = read_config32_dct(dev_fn2, node, channel, 0x20c);
-				for (i=0; i<4; i++)
+				for (i = 0; i < 4; i++)
 					data->f2x210[i] = read_config32_dct_nbpstate(dev_fn2, node, channel, i, 0x210);
 				data->f2x214 = read_config32_dct(dev_fn2, node, channel, 0x214);
 				data->f2x218 = read_config32_dct(dev_fn2, node, channel, 0x218);
@@ -407,7 +407,7 @@ void copy_mct_data_to_save_variable(struct amd_s3_persistent_data* persistent_da
 
 				data->f2x9cx0d0fe003 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0fe003);
 				data->f2x9cx0d0fe013 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0fe013);
-				for (i=0; i<9; i++)
+				for (i = 0; i < 9; i++)
 					data->f2x9cx0d0f0_8_0_1f[i] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f001f | (i << 8));
 				data->f2x9cx0d0f201f = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f201f);
 				data->f2x9cx0d0f211f = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f211f);
@@ -419,11 +419,11 @@ void copy_mct_data_to_save_variable(struct amd_s3_persistent_data* persistent_da
 				data->f2x9cx0d0fc11f = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0fc11f);
 				data->f2x9cx0d0fc21f = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0fc21f);
 				data->f2x9cx0d0f4009 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f4009);
-				for (i=0; i<9; i++)
+				for (i = 0; i < 9; i++)
 					data->f2x9cx0d0f0_8_0_02[i] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f0002 | (i << 8));
-				for (i=0; i<9; i++)
+				for (i = 0; i < 9; i++)
 					data->f2x9cx0d0f0_8_0_06[i] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f0006 | (i << 8));
-				for (i=0; i<9; i++)
+				for (i = 0; i < 9; i++)
 					data->f2x9cx0d0f0_8_0_0a[i] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f000a | (i << 8));
 
 				data->f2x9cx0d0f2002 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f2002);
@@ -450,7 +450,7 @@ void copy_mct_data_to_save_variable(struct amd_s3_persistent_data* persistent_da
 				data->f2x9cx0d0fc031 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0fc031);
 				data->f2x9cx0d0fc131 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0fc131);
 				data->f2x9cx0d0fc231 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0fc231);
-				for (i=0; i<9; i++)
+				for (i = 0; i < 9; i++)
 					data->f2x9cx0d0f0_0_f_31[i] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f0031 | (i << 8));
 
 				data->f2x9cx0d0f8021 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f8021);
@@ -463,8 +463,8 @@ void copy_mct_data_to_save_variable(struct amd_s3_persistent_data* persistent_da
 			data->f2x94 = read_config32_dct(dev_fn2, node, channel, 0x94);
 
 			/* Stage 6 */
-			for (i=0; i<9; i++)
-				for (j=0; j<3; j++)
+			for (i = 0; i < 9; i++)
+				for (j = 0; j < 3; j++)
 					data->f2x9cx0d0f0_f_8_0_0_8_4_0[i][j] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f0000 | (i << 8) | (j * 4));
 			data->f2x9cx00 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x00);
 			data->f2x9cx0a = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0a);
@@ -478,33 +478,33 @@ void copy_mct_data_to_save_variable(struct amd_s3_persistent_data* persistent_da
 			data->f2x9cx0d0fe007 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0fe007);
 
 			/* Stage 10 */
-			for (i=0; i<12; i++)
+			for (i = 0; i < 12; i++)
 				data->f2x9cx10[i] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x10 + i);
-			for (i=0; i<12; i++)
+			for (i = 0; i < 12; i++)
 				data->f2x9cx20[i] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x20 + i);
-			for (i=0; i<4; i++)
-				for (j=0; j<3; j++)
+			for (i = 0; i < 4; i++)
+				for (j = 0; j < 3; j++)
 					data->f2x9cx3_0_0_3_1[i][j] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, (0x01 + i) + (0x100 * j));
-			for (i=0; i<4; i++)
-				for (j=0; j<3; j++)
+			for (i = 0; i < 4; i++)
+				for (j = 0; j < 3; j++)
 					data->f2x9cx3_0_0_7_5[i][j] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, (0x05 + i) + (0x100 * j));
 			data->f2x9cx0d = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d);
-			for (i=0; i<9; i++)
+			for (i = 0; i < 9; i++)
 				data->f2x9cx0d0f0_f_0_13[i] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f0013 | (i << 8));
-			for (i=0; i<9; i++)
+			for (i = 0; i < 9; i++)
 				data->f2x9cx0d0f0_f_0_30[i] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f0030 | (i << 8));
-			for (i=0; i<4; i++)
+			for (i = 0; i < 4; i++)
 				data->f2x9cx0d0f2_f_0_30[i] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f2030 | (i << 8));
-			for (i=0; i<2; i++)
-				for (j=0; j<3; j++)
+			for (i = 0; i < 2; i++)
+				for (j = 0; j < 3; j++)
 					data->f2x9cx0d0f8_8_4_0[i][j] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f0000 | (i << 8) | (j * 4));
 			data->f2x9cx0d0f812f = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f812f);
 
 			/* Stage 11 */
 			if (IS_ENABLED(CONFIG_DIMM_DDR3)) {
-				for (i=0; i<12; i++)
+				for (i = 0; i < 12; i++)
 					data->f2x9cx30[i] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x30 + i);
-				for (i=0; i<12; i++)
+				for (i = 0; i < 12; i++)
 					data->f2x9cx40[i] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x40 + i);
 			}
 
@@ -599,28 +599,28 @@ void restore_mct_data_from_save_variable(struct amd_s3_persistent_data* persiste
 					continue;
 
 				/* Restore training parameters */
-				for (i=0; i<4; i++)
-					for (j=0; j<3; j++)
+				for (i = 0; i < 4; i++)
+					for (j = 0; j < 3; j++)
 						write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, (0x01 + i) + (0x100 * j), data->f2x9cx3_0_0_3_1[i][j]);
-				for (i=0; i<4; i++)
-					for (j=0; j<3; j++)
+				for (i = 0; i < 4; i++)
+					for (j = 0; j < 3; j++)
 						write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, (0x05 + i) + (0x100 * j), data->f2x9cx3_0_0_7_5[i][j]);
 
-				for (i=0; i<12; i++)
+				for (i = 0; i < 12; i++)
 					write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x10 + i, data->f2x9cx10[i]);
-				for (i=0; i<12; i++)
+				for (i = 0; i < 12; i++)
 					write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x20 + i, data->f2x9cx20[i]);
 
 				if (IS_ENABLED(CONFIG_DIMM_DDR3)) {
-					for (i=0; i<12; i++)
+					for (i = 0; i < 12; i++)
 						write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x30 + i, data->f2x9cx30[i]);
-					for (i=0; i<12; i++)
+					for (i = 0; i < 12; i++)
 						write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x40 + i, data->f2x9cx40[i]);
 				}
 
 				/* Restore MaxRdLatency */
 				if (is_fam15h()) {
-					for (i=0; i<4; i++)
+					for (i = 0; i < 4; i++)
 						write_config32_dct_nbpstate(PCI_DEV(0, 0x18 + node, 2), node, channel, i, 0x210, data->f2x210[i]);
 				} else {
 					write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x78, data->f2x78);
@@ -682,7 +682,7 @@ void restore_mct_data_from_save_variable(struct amd_s3_persistent_data* persiste
 			write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x11c, data->f2x11c);
 			write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x1b0, data->f2x1b0);
 			write_config32_dct(PCI_DEV(0, 0x18 + node, 3), node, channel, 0x44, data->f3x44);
-			for (i=0; i<16; i++) {
+			for (i = 0; i < 16; i++) {
 				wrmsr_uint64_t(0x00000200 | i, data->msr0000020[i]);
 			}
 			wrmsr_uint64_t(0x00000250, data->msr00000250);
@@ -692,7 +692,7 @@ void restore_mct_data_from_save_variable(struct amd_s3_persistent_data* persiste
 			 * destroying CAR while still executing from CAR!
 			 * For now, skip restoration...
 			 */
-			// for (i=0; i<8; i++)
+			// for (i = 0; i < 8; i++)
 			// 	wrmsr_uint64_t(0x00000260 | (i + 8), data->msr0000026[i]);
 			wrmsr_uint64_t(0x000002ff, data->msr000002ff);
 			wrmsr_uint64_t(0xc0010010, data->msrc0010010);
@@ -760,7 +760,7 @@ void restore_mct_data_from_save_variable(struct amd_s3_persistent_data* persiste
 				write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x204, data->f2x204);
 				write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x208, data->f2x208);
 				write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x20c, data->f2x20c);
-				for (i=0; i<4; i++)
+				for (i = 0; i < 4; i++)
 					write_config32_dct_nbpstate(PCI_DEV(0, 0x18 + node, 2), node, channel, i, 0x210, data->f2x210[i]);
 				write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x214, data->f2x214);
 				write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x218, data->f2x218);
@@ -773,7 +773,7 @@ void restore_mct_data_from_save_variable(struct amd_s3_persistent_data* persiste
 				write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x240, data->f2x240);
 
 				write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0fe013, data->f2x9cx0d0fe013);
-				for (i=0; i<9; i++)
+				for (i = 0; i < 9; i++)
 					write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f001f | (i << 8), data->f2x9cx0d0f0_8_0_1f[i]);
 				write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f201f, data->f2x9cx0d0f201f);
 				write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f211f, data->f2x9cx0d0f211f);
@@ -795,7 +795,7 @@ void restore_mct_data_from_save_variable(struct amd_s3_persistent_data* persiste
 				write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0fc031, data->f2x9cx0d0fc031);
 				write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0fc131, data->f2x9cx0d0fc131);
 				write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0fc231, data->f2x9cx0d0fc231);
-				for (i=0; i<9; i++)
+				for (i = 0; i < 9; i++)
 					write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f0031 | (i << 8), data->f2x9cx0d0f0_0_f_31[i]);
 
 				write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f8021, data->f2x9cx0d0f8021);
@@ -899,8 +899,8 @@ void restore_mct_data_from_save_variable(struct amd_s3_persistent_data* persiste
 			if (!persistent_data->node[node].node_present)
 				continue;
 
-			for (i=0; i<9; i++)
-				for (j=0; j<3; j++)
+			for (i = 0; i < 9; i++)
+				for (j = 0; j < 3; j++)
 					write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f0000 | (i << 8) | (j * 4), data->f2x9cx0d0f0_f_8_0_0_8_4_0[i][j]);
 			write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x00, data->f2x9cx00);
 			write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0a, data->f2x9cx0a);
@@ -920,11 +920,11 @@ void restore_mct_data_from_save_variable(struct amd_s3_persistent_data* persiste
 				dword |= (0x3 << 13);			/* DisAutoComp, DisablePredriverCal = 1 */
 				write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0fe003, dword);
 
-				for (i=0; i<9; i++)
+				for (i = 0; i < 9; i++)
 					write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f0006 | (i << 8), data->f2x9cx0d0f0_8_0_06[i]);
-				for (i=0; i<9; i++)
+				for (i = 0; i < 9; i++)
 					write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f000a | (i << 8), data->f2x9cx0d0f0_8_0_0a[i]);
-				for (i=0; i<9; i++)
+				for (i = 0; i < 9; i++)
 					write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f0002 | (i << 8), (0x8000 | data->f2x9cx0d0f0_8_0_02[i]));
 
 				write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f8006, data->f2x9cx0d0f8006);
@@ -1024,25 +1024,25 @@ void restore_mct_data_from_save_variable(struct amd_s3_persistent_data* persiste
 			if (!persistent_data->node[node].node_present)
 				continue;
 
-			for (i=0; i<12; i++)
+			for (i = 0; i < 12; i++)
 				write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x10 + i, data->f2x9cx10[i]);
-			for (i=0; i<12; i++)
+			for (i = 0; i < 12; i++)
 				write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x20 + i, data->f2x9cx20[i]);
-			for (i=0; i<4; i++)
-				for (j=0; j<3; j++)
+			for (i = 0; i < 4; i++)
+				for (j = 0; j < 3; j++)
 					write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, (0x01 + i) + (0x100 * j), data->f2x9cx3_0_0_3_1[i][j]);
-			for (i=0; i<4; i++)
-				for (j=0; j<3; j++)
+			for (i = 0; i < 4; i++)
+				for (j = 0; j < 3; j++)
 					write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, (0x05 + i) + (0x100 * j), data->f2x9cx3_0_0_7_5[i][j]);
 			write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d, data->f2x9cx0d);
-			for (i=0; i<9; i++)
+			for (i = 0; i < 9; i++)
 				write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f0013 | (i << 8), data->f2x9cx0d0f0_f_0_13[i]);
-			for (i=0; i<9; i++)
+			for (i = 0; i < 9; i++)
 				write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f0030 | (i << 8), data->f2x9cx0d0f0_f_0_30[i]);
-			for (i=0; i<4; i++)
+			for (i = 0; i < 4; i++)
 				write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f2030 | (i << 8), data->f2x9cx0d0f2_f_0_30[i]);
-			for (i=0; i<2; i++)
-				for (j=0; j<3; j++)
+			for (i = 0; i < 2; i++)
+				for (j = 0; j < 3; j++)
 					write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f0000 | (i << 8) | (j * 4), data->f2x9cx0d0f8_8_4_0[i][j]);
 			write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f812f, data->f2x9cx0d0f812f);
 		}
@@ -1056,9 +1056,9 @@ void restore_mct_data_from_save_variable(struct amd_s3_persistent_data* persiste
 				if (!persistent_data->node[node].node_present)
 					continue;
 
-				for (i=0; i<12; i++)
+				for (i = 0; i < 12; i++)
 					write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x30 + i, data->f2x9cx30[i]);
-				for (i=0; i<12; i++)
+				for (i = 0; i < 12; i++)
 					write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x40 + i, data->f2x9cx40[i]);
 			}
 		}
diff --git a/src/northbridge/amd/amdmct/wrappers/mcti.h b/src/northbridge/amd/amdmct/wrappers/mcti.h
index 92979ab..5eaff2c 100644
--- a/src/northbridge/amd/amdmct/wrappers/mcti.h
+++ b/src/northbridge/amd/amdmct/wrappers/mcti.h
@@ -63,7 +63,7 @@ UPDATE AS NEEDED
 #endif
 
 #ifndef MEM_MAX_LOAD_FREQ
-#if (CONFIG_DIMM_SUPPORT & 0x000F)==0x0005 	/* AMD_FAM10_DDR3 */
+#if (CONFIG_DIMM_SUPPORT & 0x000F) == 0x0005 	/* AMD_FAM10_DDR3 */
  #define MEM_MAX_LOAD_FREQ			933
  #define MEM_MIN_PLATFORM_FREQ_FAM10		400
  #define MEM_MIN_PLATFORM_FREQ_FAM15		333
diff --git a/src/northbridge/amd/amdmct/wrappers/mcti_d.c b/src/northbridge/amd/amdmct/wrappers/mcti_d.c
index 143468a..9cb981a 100644
--- a/src/northbridge/amd/amdmct/wrappers/mcti_d.c
+++ b/src/northbridge/amd/amdmct/wrappers/mcti_d.c
@@ -347,7 +347,7 @@ static void mctGet_MaxLoadFreq(struct DCTStatStruc *pDCTstat)
 		printk(BIOS_DEBUG, "mctGet_MaxLoadFreq: Channel 2: %d DIMM(s) detected\n", ch2_count);
 	}
 
-#if (CONFIG_DIMM_SUPPORT & 0x000F)==0x0005 /* AMD_FAM10_DDR3 */
+#if (CONFIG_DIMM_SUPPORT & 0x000F) == 0x0005 /* AMD_FAM10_DDR3 */
 	uint8_t dimm;
 
 	for (i = 0; i < MAX_DIMMS_SUPPORTED; i = i + 2) {
@@ -473,7 +473,7 @@ static void mctHookAfterDramInit(void)
 {
 }
 
-#if (CONFIG_DIMM_SUPPORT & 0x000F)==0x0005 /* AMD_FAM10_DDR3 */
+#if (CONFIG_DIMM_SUPPORT & 0x000F) == 0x0005 /* AMD_FAM10_DDR3 */
 static void vErratum372(struct DCTStatStruc *pDCTstat)
 {
 	msr_t msr = rdmsr(NB_CFG_MSR);
@@ -481,8 +481,8 @@ static void vErratum372(struct DCTStatStruc *pDCTstat)
 	int nbPstate1supported = !(msr.hi & (1 << (NB_GfxNbPstateDis -32)));
 
 	// is this the right way to check for NB pstate 1 or DDR3-1333 ?
-	if (((pDCTstat->PresetmaxFreq==1333)||(nbPstate1supported))
-	    &&(!pDCTstat->GangedMode)) {
+	if (((pDCTstat->PresetmaxFreq == 1333)||(nbPstate1supported))
+	    && (!pDCTstat->GangedMode)) {
 		/* DisableCf8ExtCfg */
 		msr.hi &= ~(3 << (51 - 32));
 		wrmsr(NB_CFG_MSR, msr);
@@ -491,15 +491,15 @@ static void vErratum372(struct DCTStatStruc *pDCTstat)
 
 static void vErratum414(struct DCTStatStruc *pDCTstat)
 {
-	int dct=0;
+	int dct = 0;
 	for (; dct < 2 ; dct++) {
 		int dRAMConfigHi = Get_NB32(pDCTstat->dev_dct,0x94 + (0x100 * dct));
-		int powerDown =  dRAMConfigHi & (1 << PowerDownEn );
-		int ddr3 = dRAMConfigHi & (1 << Ddr3Mode );
+		int powerDown =  dRAMConfigHi & (1 << PowerDownEn);
+		int ddr3 = dRAMConfigHi & (1 << Ddr3Mode);
 		int dRAMMRS = Get_NB32(pDCTstat->dev_dct,0x84 + (0x100 * dct));
 		int pchgPDModeSel = dRAMMRS & (1 << PchgPDModeSel);
-		if (powerDown && ddr3 && pchgPDModeSel )
-			Set_NB32(pDCTstat->dev_dct,0x84 + (0x100 * dct), dRAMMRS & ~(1 << PchgPDModeSel) );
+		if (powerDown && ddr3 && pchgPDModeSel)
+			Set_NB32(pDCTstat->dev_dct,0x84 + (0x100 * dct), dRAMMRS & ~(1 << PchgPDModeSel));
 	}
 }
 #endif
@@ -507,7 +507,7 @@ static void vErratum414(struct DCTStatStruc *pDCTstat)
 
 static void mctHookBeforeAnyTraining(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA)
 {
-#if (CONFIG_DIMM_SUPPORT & 0x000F)==0x0005 /* AMD_FAM10_DDR3 */
+#if (CONFIG_DIMM_SUPPORT & 0x000F) == 0x0005 /* AMD_FAM10_DDR3 */
   /* FIXME :  as of 25.6.2010 errata 350 and 372 should apply to  ((RB|BL|DA)-C[23])|(HY-D[01])|(PH-E0) but I don't find constants for all of them */
 	if (pDCTstatA->LogicalCPUID & (AMD_DRBH_Cx | AMD_DR_Dx)) {
 		vErratum372(pDCTstatA);
@@ -516,7 +516,7 @@ static void mctHookBeforeAnyTraining(struct MCTStatStruc *pMCTstat, struct DCTSt
 #endif
 }
 
-#if (CONFIG_DIMM_SUPPORT & 0x000F)==0x0005 /* AMD_FAM10_DDR3 */
+#if (CONFIG_DIMM_SUPPORT & 0x000F) == 0x0005 /* AMD_FAM10_DDR3 */
 static u32 mct_AdjustSPDTimings(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA, u32 val)
 {
 	if (pDCTstatA->LogicalCPUID & AMD_DR_Bx) {


