diff --git a/src/main/java/net/floodlightcontroller/core/OFConnection.java b/src/main/java/net/floodlightcontroller/core/OFConnection.java
index 009b31bb9310b0d4110634fabbbf2b271b5b0b3c..c72c618c6d2073cd1738baa0d35c7a3c95a3b5e6 100644
--- a/src/main/java/net/floodlightcontroller/core/OFConnection.java
+++ b/src/main/java/net/floodlightcontroller/core/OFConnection.java
@@ -36,7 +36,6 @@ import org.jboss.netty.util.TimerTask;
 
 import java.util.Date;
 
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
 import net.floodlightcontroller.core.internal.Controller;
 import net.floodlightcontroller.core.internal.IOFConnectionListener;
 import net.floodlightcontroller.debugcounter.IDebugCounterService;
@@ -152,12 +151,6 @@ public class OFConnection implements IOFConnection, IOFConnectionBackend{
     }
 
     @Override
-    @LogMessageDoc(level = "WARN",
-                   message = "Sending OF message that modifies switch "
-                           + "state while in the slave role: {switch}",
-                   explanation = "An application has sent a message to a switch "
-                           + "that is not valid when the switch is in a slave role",
-                   recommendation = LogMessageDoc.REPORT_CONTROLLER_BUG)
     public void write(Iterable<OFMessage> msglist) {
         if (!isConnected()) {
             if (logger.isDebugEnabled())
@@ -434,4 +427,4 @@ public class OFConnection implements IOFConnection, IOFConnectionBackend{
 			
 		}
     }
-}
+}
\ No newline at end of file
diff --git a/src/main/java/net/floodlightcontroller/core/OFSwitch.java b/src/main/java/net/floodlightcontroller/core/OFSwitch.java
index 16684795ec488d7b0cdc71ba32218d462a8f1ad3..1b72fef170fd0a8b3b5827dd1ce39bc747dad533 100644
--- a/src/main/java/net/floodlightcontroller/core/OFSwitch.java
+++ b/src/main/java/net/floodlightcontroller/core/OFSwitch.java
@@ -37,7 +37,6 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import javax.annotation.Nonnull;
 
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
 import net.floodlightcontroller.core.internal.IOFSwitchManager;
 import net.floodlightcontroller.core.internal.TableFeatures;
 import net.floodlightcontroller.core.util.AppCookie;
@@ -249,8 +248,12 @@ public class OFSwitch implements IOFSwitchBackend {
 				newPortsByName.put(p.getName().toLowerCase(), p);
 				if (!p.getState().contains(OFPortState.LINK_DOWN) 
 						&& !p.getConfig().contains(OFPortConfig.PORT_DOWN)) {
-					newEnabledPortList.add(p);
-					newEnabledPortNumbers.add(p.getPortNo());
+					if (!newEnabledPortList.contains(p)) {
+						newEnabledPortList.add(p);
+					}
+					if (!newEnabledPortNumbers.contains(p.getPortNo())) {
+						newEnabledPortNumbers.add(p.getPortNo());
+					}
 				}
 			}
 			portsByName = Collections.unmodifiableMap(newPortsByName);
@@ -575,8 +578,12 @@ public class OFSwitch implements IOFSwitchBackend {
 					// Enabled = not down admin (config) or phys (state)
 					if (!p.getConfig().contains(OFPortConfig.PORT_DOWN)
 							&& !p.getState().contains(OFPortState.LINK_DOWN)) {
-						newEnabledPortList.add(p);
-						newEnabledPortNumbers.add(p.getPortNo());
+						if (!newEnabledPortList.contains(p)) {
+							newEnabledPortList.add(p);
+						}
+						if (!newEnabledPortNumbers.contains(p.getPortNo())) {
+							newEnabledPortNumbers.add(p.getPortNo());
+						}
 					}
 
 					// get changes
@@ -785,12 +792,6 @@ public class OFSwitch implements IOFSwitchBackend {
 	}
 
 	@Override
-	@LogMessageDoc(level="WARN",
-	message="Sending OF message that modifies switch " +
-			"state while in the slave role: {switch}",
-			explanation="An application has sent a message to a switch " +
-					"that is not valid when the switch is in a slave role",
-					recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG)
 	public void write(Iterable<OFMessage> msglist) {
 		if (isActive()) {
 			connections.get(OFAuxId.MAIN).write(msglist);
@@ -1110,12 +1111,6 @@ public class OFSwitch implements IOFSwitchBackend {
 	}
 
 	@Override
-	@LogMessageDoc(level="WARN",
-	message="Switch {switch} flow table is full",
-	explanation="The controller received flow table full " +
-			"message from the switch, could be caused by increased " +
-			"traffic pattern",
-			recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG)
 	public void setTableFull(boolean isFull) {
 		if (isFull && !flowTableFull) {
 			switchManager.addSwitchEvent(this.datapathId,
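
Context for the two OFSwitch.java hunks above: each add() to the enabled-port list and the enabled-port-number list is now guarded by a contains() check, so duplicates in the incoming port collection are not recorded twice. On an ArrayList that check is a linear scan per port. The sketch below (not part of the patch; class and method names are illustrative) shows an equivalent, order-preserving de-duplication using a LinkedHashSet with the same enabled-port test.

import java.util.ArrayList;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

import org.projectfloodlight.openflow.protocol.OFPortConfig;
import org.projectfloodlight.openflow.protocol.OFPortDesc;
import org.projectfloodlight.openflow.protocol.OFPortState;

// Illustrative sketch only; not part of the patch.
class EnabledPorts {
    // Enabled = link is up and the port is not administratively down,
    // mirroring the checks in OFSwitch above.
    static List<OFPortDesc> enabledPorts(Iterable<OFPortDesc> ports) {
        Set<OFPortDesc> enabled = new LinkedHashSet<OFPortDesc>();
        for (OFPortDesc p : ports) {
            if (!p.getState().contains(OFPortState.LINK_DOWN)
                    && !p.getConfig().contains(OFPortConfig.PORT_DOWN)) {
                enabled.add(p); // set semantics make the explicit contains() check unnecessary
            }
        }
        return new ArrayList<OFPortDesc>(enabled);
    }
}

Either approach yields the same list contents; the set-based version simply avoids the repeated scans when a switch reports many ports.
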
diff --git a/src/main/java/net/floodlightcontroller/core/annotations/LogMessageCategory.java b/src/main/java/net/floodlightcontroller/core/annotations/LogMessageCategory.java
deleted file mode 100644
index e9abf02a2e96d952ce136574b422d4c376f1e634..0000000000000000000000000000000000000000
--- a/src/main/java/net/floodlightcontroller/core/annotations/LogMessageCategory.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
-*    Copyright 2012, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.core.annotations;
-
-import java.lang.annotation.ElementType;
-import java.lang.annotation.Target;
-
-/**
- * Annotation used to set the category for log messages for a class
- * @author readams
- */
-@Target({ElementType.TYPE, ElementType.METHOD})
-public @interface LogMessageCategory {
-    /**
-     * The category for the log messages for this class
-     * @return
-     */
-    String value() default "Core";
-}
diff --git a/src/main/java/net/floodlightcontroller/core/annotations/LogMessageDoc.java b/src/main/java/net/floodlightcontroller/core/annotations/LogMessageDoc.java
deleted file mode 100644
index 08b48dc849009d62de8f04a6d948eac8f36dc39f..0000000000000000000000000000000000000000
--- a/src/main/java/net/floodlightcontroller/core/annotations/LogMessageDoc.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/**
-*    Copyright 2012, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.core.annotations;
-
-import java.lang.annotation.ElementType;
-import java.lang.annotation.Target;
-
-/**
- * Annotation used to document log messages.  This can be used to generate
- * documentation on syslog output.
- * @author readams
- */
-@Target({ElementType.TYPE, ElementType.METHOD})
-public @interface LogMessageDoc {
-    public static final String NO_ACTION = "No action is required.";
-    public static final String UNKNOWN_ERROR = "An unknown error occured";
-    public static final String GENERIC_ACTION = 
-            "Examine the returned error or exception and take " +
-            "appropriate action.";
-    public static final String CHECK_SWITCH = 
-            "Check the health of the indicated switch.  " + 
-            "Test and troubleshoot IP connectivity.";
-    public static final String HA_CHECK_SWITCH = 
-            "Check the health of the indicated switch.  If the problem " +
-            "persists or occurs repeatedly, it likely indicates a defect " +
-            "in the switch HA implementation.";
-    public static final String CHECK_CONTROLLER = 
-            "Verify controller system health, CPU usage, and memory.  " + 
-            "Rebooting the controller node may help if the controller " +
-            "node is in a distressed state.";
-    public static final String REPORT_CONTROLLER_BUG =
-            "This is likely a defect in the controller.  Please report this " +
-            "issue.  Restarting the controller or switch may help to " +
-            "alleviate.";
-    public static final String REPORT_SWITCH_BUG =
-            "This is likely a defect in the switch.  Please report this " +
-            "issue.  Restarting the controller or switch may help to " +
-            "alleviate.";
-    public static final String TRANSIENT_CONDITION =
-            "This is normally a transient condition that does not necessarily " +
-            "represent an error.  If, however, the condition persists or " +
-            "happens frequently you should report this as a controller defect.";
-
-    /**
-     * The log level for the log message
-     * @return the log level as a string
-     */
-    String level() default "INFO";
-    /**
-     * The message that will be printed
-     * @return the message
-     */
-    String message() default UNKNOWN_ERROR;
-    /**
-     * An explanation of the meaning of the log message
-     * @return the explanation
-     */
-    String explanation() default UNKNOWN_ERROR;
-    /**
-     * The recommended action associated with the log message
-     * @return the recommendation
-     */
-    String recommendation() default NO_ACTION;
-}
diff --git a/src/main/java/net/floodlightcontroller/core/annotations/LogMessageDocs.java b/src/main/java/net/floodlightcontroller/core/annotations/LogMessageDocs.java
deleted file mode 100644
index 663baf0bc738e26f30884351e0585c92446c8369..0000000000000000000000000000000000000000
--- a/src/main/java/net/floodlightcontroller/core/annotations/LogMessageDocs.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
-*    Copyright 2012, Big Switch Networks, Inc. 
-*    Originally created by David Erickson, Stanford University
-* 
-*    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.core.annotations;
-
-import java.lang.annotation.ElementType;
-import java.lang.annotation.Target;
-
-/**
- * Annotation used to document log messages.  This can be used to generate
- * documentation on syslog output.  This version allows multiple log messages
- * to be documentated on an interface.
- * @author readams
- */
-@Target({ElementType.TYPE, ElementType.METHOD})
-public @interface LogMessageDocs {
-    /**
-     * A list of {@link LogMessageDoc} elements
-     * @return the list of log message doc
-     */
-    LogMessageDoc[] value();
-}
diff --git a/src/main/java/net/floodlightcontroller/core/internal/Controller.java b/src/main/java/net/floodlightcontroller/core/internal/Controller.java
index 86299625e9b84e8273b58577286d5ac223208156..51c0dbc3232cff0944cf67771aad68fe1f6cfd1b 100644
--- a/src/main/java/net/floodlightcontroller/core/internal/Controller.java
+++ b/src/main/java/net/floodlightcontroller/core/internal/Controller.java
@@ -52,8 +52,6 @@ import net.floodlightcontroller.core.IOFSwitchListener;
 import net.floodlightcontroller.core.LogicalOFMessageCategory;
 import net.floodlightcontroller.core.PortChangeType;
 import net.floodlightcontroller.core.RoleInfo;
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
-import net.floodlightcontroller.core.annotations.LogMessageDocs;
 import net.floodlightcontroller.core.module.FloodlightModuleException;
 import net.floodlightcontroller.core.module.FloodlightModuleLoader;
 import net.floodlightcontroller.core.util.ListenerDispatcher;
@@ -91,8 +89,6 @@ import org.slf4j.LoggerFactory;
 import com.google.common.base.Optional;
 import com.google.common.base.Strings;
 
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-
 /**
  * The main controller class.  Handles all setup and network listeners
  */
@@ -420,20 +416,6 @@ public class Controller implements IFloodlightProviderService, IStorageSourceLis
      * FIXME: this method and the ChannelHandler disagree on which messages
      * should be dispatched and which shouldn't
      */
-    @LogMessageDocs({
-        @LogMessageDoc(level="ERROR",
-                message="Ignoring PacketIn (Xid = {xid}) because the data" +
-                        " field is empty.",
-                explanation="The switch sent an improperly-formatted PacketIn" +
-                        " message",
-                recommendation=LogMessageDoc.CHECK_SWITCH),
-        @LogMessageDoc(level="WARN",
-                message="Unhandled OF Message: {} from {}",
-                explanation="The switch sent a message not handled by " +
-                        "the controller")
-    })
-    @SuppressFBWarnings(value="SF_SWITCH_NO_DEFAULT",
-                        justification="False positive -- has default")
     @Override
     public void handleMessage(IOFSwitch sw, OFMessage m,
                                  FloodlightContext bContext) {
@@ -646,15 +628,6 @@ public class Controller implements IFloodlightProviderService, IStorageSourceLis
      * @return A valid role if role information is specified in the
      *         config params, otherwise null
      */
-    @LogMessageDocs({
-        @LogMessageDoc(message="Controller role set to {role}",
-                explanation="Setting the initial HA role to "),
-        @LogMessageDoc(level="ERROR",
-                message="Invalid current role value: {role}",
-                explanation="An invalid HA role value was read from the " +
-                            "properties file",
-                recommendation=LogMessageDoc.CHECK_CONTROLLER)
-    })
     protected HARole getInitialRole(Map<String, String> configParams) {
         HARole role = HARole.STANDBY;
         String roleString = configParams.get("role");
@@ -676,19 +649,6 @@ public class Controller implements IFloodlightProviderService, IStorageSourceLis
      * @throws IOException
      */
     @Override
-    @LogMessageDocs({
-        @LogMessageDoc(message="Listening for switch connections on {address}",
-                explanation="The controller is ready and listening for new" +
-                        " switch connections"),
-        @LogMessageDoc(message="Storage exception in controller " +
-                        "updates loop; terminating process",
-                explanation=ERROR_DATABASE,
-                recommendation=LogMessageDoc.CHECK_CONTROLLER),
-        @LogMessageDoc(level="ERROR",
-                message="Exception in controller updates loop",
-                explanation="Failed to dispatch controller event",
-                recommendation=LogMessageDoc.GENERIC_ACTION)
-    })
     public void run() {
         this.moduleLoaderState = ModuleLoaderState.COMPLETE;
 
@@ -772,11 +732,6 @@ public class Controller implements IFloodlightProviderService, IStorageSourceLis
      * Startup all of the controller's components
      * @param floodlightModuleLoader
      */
-    @LogMessageDoc(message="Waiting for storage source",
-                explanation="The system database is not yet ready",
-                recommendation="If this message persists, this indicates " +
-                        "that the system database has failed to start. " +
-                        LogMessageDoc.CHECK_CONTROLLER)
     public void startupComponents(FloodlightModuleLoader floodlightModuleLoader) throws FloodlightModuleException {
 
         this.moduleLoaderState = ModuleLoaderState.STARTUP;
@@ -810,10 +765,6 @@ public class Controller implements IFloodlightProviderService, IStorageSourceLis
         addInfoProvider("summary", this);
     }
     
-    @LogMessageDoc(level="ERROR",
-            message="failed to access storage: {reason}",
-            explanation="Could not retrieve forwarding configuration",
-            recommendation=LogMessageDoc.CHECK_CONTROLLER)
     private void readFlowPriorityConfigurationFromStorage() {
         try {
             Map<String, Object> row;
@@ -961,13 +912,6 @@ public class Controller implements IFloodlightProviderService, IStorageSourceLis
             "Flow priority configuration has changed after " +
             "controller startup. Restart controller for new " +
             "configuration to take effect.";
-    @LogMessageDoc(level="WARN",
-            message=FLOW_PRIORITY_CHANGED_AFTER_STARTUP,
-            explanation="A user has changed the priority with which access " +
-                    "and core flows are installed after controller startup. " +
-                    "Changing this setting will only take affect after a " +
-                    "controller restart",
-            recommendation="Restart controller")
     @Override
     public void rowsModified(String tableName, Set<Object> rowKeys) {
         if (tableName.equals(CONTROLLER_INTERFACE_TABLE_NAME)) {
@@ -1007,11 +951,6 @@ public class Controller implements IFloodlightProviderService, IStorageSourceLis
         return rb.getUptime();
     }
 
-    @LogMessageDoc(level="WARN",
-            message="Failure adding update {} to queue",
-            explanation="The controller tried to add an internal notification" +
-                        " to its message queue but the add failed.",
-            recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG)
     @Override
     public void addUpdateToQueue(IUpdate update) {
         try {
diff --git a/src/main/java/net/floodlightcontroller/core/internal/OFChannelHandler.java b/src/main/java/net/floodlightcontroller/core/internal/OFChannelHandler.java
index 8f3cbedb6d168729a0582c41e0630d95fb4c599b..7d45c1ee862f08dea0111bed7a63c37d94caf15e 100644
--- a/src/main/java/net/floodlightcontroller/core/internal/OFChannelHandler.java
+++ b/src/main/java/net/floodlightcontroller/core/internal/OFChannelHandler.java
@@ -25,8 +25,6 @@ import org.jboss.netty.util.Timer;
 
 import net.floodlightcontroller.core.IOFConnectionBackend;
 import net.floodlightcontroller.core.OFConnection;
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
-import net.floodlightcontroller.core.annotations.LogMessageDocs;
 import net.floodlightcontroller.core.internal.OpenflowPipelineFactory.PipelineHandler;
 import net.floodlightcontroller.core.internal.OpenflowPipelineFactory.PipelineHandshakeTimeout;
 import net.floodlightcontroller.core.internal.OpenflowPipelineFactory.PipelineIdleReadTimeout;
@@ -87,8 +85,8 @@ class OFChannelHandler extends IdleStateAwareChannelHandler {
 	private long handshakeTransactionIds = 0x00FFFFFFFFL;
 	
     private volatile long echoSendTime;
-
-
+    private volatile long featuresLatency;
+    
 	/**
 	 * Default implementation for message handlers in any OFChannelState.
 	 *
@@ -209,15 +207,6 @@ class OFChannelHandler extends IdleStateAwareChannelHandler {
 		 * @param sw The switch that sent the error
 		 * @param error The error message
 		 */
-		@LogMessageDoc(level="ERROR",
-				message="Error {error type} {error code} from {switch} " +
-						"in state {state}",
-						explanation="The switch responded with an unexpected error" +
-								"to an OpenFlow message from the controller",
-								recommendation="This could indicate improper network operation. " +
-										"If the problem persists restarting the switch and " +
-										"controller may help."
-				)
 		protected void logError(OFErrorMsg error) {
 			log.error("{} from switch {} in state {}",
 					new Object[] {
@@ -385,8 +374,7 @@ class OFChannelHandler extends IdleStateAwareChannelHandler {
 	 * we send capture the features reply.
 	 * Next state is CompleteState
 	 */
-	class WaitFeaturesReplyState extends OFChannelState{
-
+	class WaitFeaturesReplyState extends OFChannelState {
 		WaitFeaturesReplyState() {
 			super(false);
 		}
@@ -394,6 +382,8 @@ class OFChannelHandler extends IdleStateAwareChannelHandler {
 		void processOFFeaturesReply(OFFeaturesReply  m)
 				throws IOException {
 			featuresReply = m;
+			
+			featuresLatency = (System.currentTimeMillis() - featuresLatency) / 2;
 
 			// Mark handshake as completed
 			setState(new CompleteState());
@@ -433,6 +423,7 @@ class OFChannelHandler extends IdleStateAwareChannelHandler {
 		@Override
 		void enterState() throws IOException {
 			sendFeaturesRequest();
+			featuresLatency = System.currentTimeMillis();
 		}
 
 		@Override
@@ -474,9 +465,12 @@ class OFChannelHandler extends IdleStateAwareChannelHandler {
 					setAuxChannelIdle();
 				}
 			}
+			
+			connection.updateLatency(U64.of(featuresLatency));
+			echoSendTime = 0;
+			
 			// Notify the connection broker
 			notifyConnectionOpened(connection);
-
 		}
 	};
 
@@ -594,9 +588,6 @@ class OFChannelHandler extends IdleStateAwareChannelHandler {
 	}
 
 	@Override
-	@LogMessageDoc(message="New switch connection from {ip address}",
-	explanation="A new switch has connected from the " +
-			"specified IP address")
 	public void channelConnected(ChannelHandlerContext ctx,
 			ChannelStateEvent e) throws Exception {
 		log.debug("channelConnected on OFChannelHandler {}", String.format("%08x", System.identityHashCode(this)));
@@ -608,8 +599,6 @@ class OFChannelHandler extends IdleStateAwareChannelHandler {
 	}
 
 	@Override
-	@LogMessageDoc(message="Disconnected switch {switch information}",
-	explanation="The specified switch has disconnected.")
 	public void channelDisconnected(ChannelHandlerContext ctx,
 			ChannelStateEvent e) throws Exception {
 		// Only handle cleanup connection is even known
@@ -623,47 +612,6 @@ class OFChannelHandler extends IdleStateAwareChannelHandler {
 	}
 
 	@Override
-	@LogMessageDocs({
-		@LogMessageDoc(level="ERROR",
-				message="Disconnecting switch {switch} due to read timeout",
-				explanation="The connected switch has failed to send any " +
-						"messages or respond to echo requests",
-						recommendation=LogMessageDoc.CHECK_SWITCH),
-						@LogMessageDoc(level="ERROR",
-						message="Disconnecting switch {switch}: failed to " +
-								"complete handshake",
-								explanation="The switch did not respond correctly " +
-										"to handshake messages",
-										recommendation=LogMessageDoc.CHECK_SWITCH),
-										@LogMessageDoc(level="ERROR",
-										message="Disconnecting switch {switch} due to IO Error: {}",
-										explanation="There was an error communicating with the switch",
-										recommendation=LogMessageDoc.CHECK_SWITCH),
-										@LogMessageDoc(level="ERROR",
-										message="Disconnecting switch {switch} due to switch " +
-												"state error: {error}",
-												explanation="The switch sent an unexpected message",
-												recommendation=LogMessageDoc.CHECK_SWITCH),
-												@LogMessageDoc(level="ERROR",
-												message="Disconnecting switch {switch} due to " +
-														"message parse failure",
-														explanation="Could not parse a message from the switch",
-														recommendation=LogMessageDoc.CHECK_SWITCH),
-														@LogMessageDoc(level="ERROR",
-														message="Terminating controller due to storage exception",
-														explanation=Controller.ERROR_DATABASE,
-														recommendation=LogMessageDoc.CHECK_CONTROLLER),
-														@LogMessageDoc(level="ERROR",
-														message="Could not process message: queue full",
-														explanation="OpenFlow messages are arriving faster than " +
-																" the controller can process them.",
-																recommendation=LogMessageDoc.CHECK_CONTROLLER),
-																@LogMessageDoc(level="ERROR",
-																message="Error while processing message " +
-																		"from switch {switch} {cause}",
-																		explanation="An error occurred processing the switch message",
-																		recommendation=LogMessageDoc.GENERIC_ACTION)
-	})
 	public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e)
 			throws Exception {
 		if (e.getCause() instanceof ReadTimeoutException) {
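
The OFChannelHandler.java changes above add a first latency estimate to the OpenFlow handshake: featuresLatency is set to System.currentTimeMillis() when the features request is sent, the features-reply handler replaces it with half of the measured round trip, and CompleteState hands the result to the connection through updateLatency(U64.of(featuresLatency)) while resetting echoSendTime. A minimal sketch of the same RTT/2 estimate in isolation (the class below is illustrative, not the handler's actual structure):

import org.projectfloodlight.openflow.types.U64;

// Illustrative sketch of the features request/reply latency estimate added above.
class HandshakeLatencyEstimator {
    private long sentAtMs;

    void onFeaturesRequestSent() {
        sentAtMs = System.currentTimeMillis(); // corresponds to enterState() above
    }

    U64 onFeaturesReplyReceived() {
        long oneWayMs = (System.currentTimeMillis() - sentAtMs) / 2; // half the round trip
        return U64.of(oneWayMs); // value handed to connection.updateLatency(...) in CompleteState
    }
}

The estimate assumes a roughly symmetric path and an idle switch, so it is only a rough initial value for the connection's latency.
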
diff --git a/src/main/java/net/floodlightcontroller/core/internal/OFSwitchHandshakeHandler.java b/src/main/java/net/floodlightcontroller/core/internal/OFSwitchHandshakeHandler.java
index 6ce132a0a362b8dc455102fd26414b96837597a7..9621b995b44892ab6d1ca1ba4307b8f15ab71650 100644
--- a/src/main/java/net/floodlightcontroller/core/internal/OFSwitchHandshakeHandler.java
+++ b/src/main/java/net/floodlightcontroller/core/internal/OFSwitchHandshakeHandler.java
@@ -22,8 +22,6 @@ import net.floodlightcontroller.core.IOFSwitch.SwitchStatus;
 import net.floodlightcontroller.core.IOFSwitchBackend;
 import net.floodlightcontroller.core.PortChangeEvent;
 import net.floodlightcontroller.core.SwitchDescription;
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
-import net.floodlightcontroller.core.annotations.LogMessageDocs;
 import net.floodlightcontroller.core.internal.OFSwitchAppHandshakePlugin.PluginResultType;
 import net.floodlightcontroller.util.OFDPAUtils;
 
@@ -229,13 +227,6 @@ public class OFSwitchHandshakeHandler implements IOFConnectionListener {
 		 * @param role The role to send to the switch.
 		 * @throws IOException
 		 */
-		@LogMessageDoc(level="WARN",
-				message="Reasserting master role on switch {SWITCH}, " +
-						"likely a configruation error with multiple masters",
-						explanation="The controller keeps getting permission error " +
-								"from switch, likely due to switch connected to another " +
-								"controller also in master mode",
-								recommendation=LogMessageDoc.CHECK_SWITCH)
 		synchronized void sendRoleRequestIfNotPending(OFControllerRole role, long xid)
 				throws IOException {
 			long now = System.nanoTime();
@@ -883,15 +874,6 @@ public class OFSwitchHandshakeHandler implements IOFConnectionListener {
 		 * Log an OpenFlow error message from a switch
 		 * @param error The error message
 		 */
-		@LogMessageDoc(level="ERROR",
-				message="Error {error type} {error code} from {switch} " +
-						"in state {state}",
-						explanation="The switch responded with an unexpected error" +
-								"to an OpenFlow message from the controller",
-								recommendation="This could indicate improper network operation. " +
-										"If the problem persists restarting the switch and " +
-										"controller may help."
-				)
 		protected void logError(OFErrorMsg error) {
 			log.error("{} from switch {} in state {}",
 					new Object[] {
@@ -1098,23 +1080,12 @@ public class OFSwitchHandshakeHandler implements IOFConnectionListener {
 	 * we send a DescriptionStatsRequest to the switch.
 	 * Next state: WAIT_DESCRIPTION_STAT_REPLY
 	 */
-	public class WaitConfigReplyState extends OFSwitchHandshakeState {
-
+	public class WaitConfigReplyState extends OFSwitchHandshakeState {
 		WaitConfigReplyState() {
 			super(false);
 		}
 
 		@Override
-		@LogMessageDocs({
-			@LogMessageDoc(level="WARN",
-					message="Config Reply from {switch} has " +
-							"miss length set to {length}",
-							explanation="The controller requires that the switch " +
-									"use a miss length of 0xffff for correct " +
-									"function",
-									recommendation="Use a different switch to ensure " +
-					"correct function")
-		})
 		void processOFGetConfigReply(OFGetConfigReply m) {
 			if (m.getMissSendLen() == 0xffff) {
 				log.trace("Config Reply from switch {} confirms "
@@ -1179,15 +1150,12 @@ public class OFSwitchHandshakeHandler implements IOFConnectionListener {
 	 */
 	public class WaitDescriptionStatReplyState extends OFSwitchHandshakeState{
 
+		long timestamp;
+		
 		WaitDescriptionStatReplyState() {
 			super(false);
 		}
 
-		@LogMessageDoc(message="Switch {switch info} bound to class " +
-				"{switch driver}, description {switch description}",
-				explanation="The specified switch has been bound to " +
-						"a switch driver based on the switch description" +
-				"received from the switch")
 		@Override
 		void processOFStatsReply(OFStatsReply m) {
 			// Read description, if it has been updated
@@ -1207,12 +1175,12 @@ public class OFSwitchHandshakeHandler implements IOFConnectionListener {
 			if (portDescStats != null) {
 				sw.setPortDescStats(portDescStats);
 			}
+			
 			/*
 			 * Need to add after setting the features.
 			 */
 			switchManager.switchAdded(sw);
 
-
 			// Handle pending messages now that we have a sw object
 			handlePendingPortStatusMessages(description);
 
@@ -1542,15 +1510,6 @@ public class OFSwitchHandshakeHandler implements IOFConnectionListener {
 			setSwitchStatus(SwitchStatus.MASTER);
 		}
 
-		@LogMessageDoc(level="WARN",
-				message="Received permission error from switch {} while" +
-						"being master. Reasserting master role.",
-						explanation="The switch has denied an operation likely " +
-								"indicating inconsistent controller roles",
-								recommendation="This situation can occurs transiently during role" +
-										" changes. If, however, the condition persists or happens" +
-										" frequently this indicates a role inconsistency. " +
-										LogMessageDoc.CHECK_CONTROLLER )
 		@Override
 		void processOFError(OFErrorMsg m) {
 			// role changer will ignore the error if it isn't for it
@@ -1747,15 +1706,6 @@ public class OFSwitchHandshakeHandler implements IOFConnectionListener {
 		}
 
 		@Override
-		@LogMessageDoc(level="WARN",
-		message="Received PacketIn from switch {} while" +
-				"being slave. Reasserting slave role.",
-				explanation="The switch has receive a PacketIn despite being " +
-						"in slave role indicating inconsistent controller roles",
-						recommendation="This situation can occurs transiently during role" +
-								" changes. If, however, the condition persists or happens" +
-								" frequently this indicates a role inconsistency. " +
-								LogMessageDoc.CHECK_CONTROLLER )
 		void processOFPacketIn(OFPacketIn m) {
 			// we don't expect packetIn while slave, reassert we are slave
 			switchManagerCounters.packetInWhileSwitchIsSlave.increment();
diff --git a/src/main/java/net/floodlightcontroller/core/internal/OFSwitchManager.java b/src/main/java/net/floodlightcontroller/core/internal/OFSwitchManager.java
index bace34438cd4a7c8080e3d36dcbc5dbe48c3fc58..dbf9cc2449c9cfe77cf562296071662f5f478c61 100644
--- a/src/main/java/net/floodlightcontroller/core/internal/OFSwitchManager.java
+++ b/src/main/java/net/floodlightcontroller/core/internal/OFSwitchManager.java
@@ -39,8 +39,6 @@ import net.floodlightcontroller.core.LogicalOFMessageCategory;
 import net.floodlightcontroller.core.PortChangeType;
 import net.floodlightcontroller.core.SwitchDescription;
 import net.floodlightcontroller.core.SwitchSyncRepresentation;
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
-import net.floodlightcontroller.core.annotations.LogMessageDocs;
 import net.floodlightcontroller.core.internal.Controller.IUpdate;
 import net.floodlightcontroller.core.internal.Controller.ModuleLoaderState;
 import net.floodlightcontroller.core.module.FloodlightModuleContext;
@@ -118,7 +116,6 @@ public class OFSwitchManager implements IOFSwitchManager, INewOFConnectionListen
 	private ConcurrentHashMap<DatapathId, OFSwitchHandshakeHandler> switchHandlers;
 	private ConcurrentHashMap<DatapathId, IOFSwitchBackend> switches;
 	private ConcurrentHashMap<DatapathId, IOFSwitch> syncedSwitches;
-	private Set<DatapathId> pastSwitches;
 
 	private ISwitchDriverRegistry driverRegistry;
 
@@ -197,27 +194,6 @@ public class OFSwitchManager implements IOFSwitchManager, INewOFConnectionListen
 		}
 	}
 
-	@LogMessageDocs({
-		@LogMessageDoc(level="ERROR",
-				message="Switch {switch} activated but was already active",
-				explanation="A switch that was already activated was " +
-						"activated again. This should not happen.",
-						recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG
-				),
-				@LogMessageDoc(level="WARN",
-				message="New switch added {switch} for already-added switch {switch}",
-				explanation="A switch with the same DPID as another switch " +
-						"connected to the controller.  This can be caused by " +
-						"multiple switches configured with the same DPID, or " +
-						"by a switch reconnected very quickly after " +
-						"disconnecting.",
-						recommendation="If this happens repeatedly, it is likely there " +
-								"are switches with duplicate DPIDs on the network.  " +
-								"Reconfigure the appropriate switches.  If it happens " +
-								"very rarely, then it is likely this is a transient " +
-								"network problem that can be ignored."
-						)
-	})
 	@Override
 	public synchronized void switchStatusChanged(IOFSwitchBackend sw, SwitchStatus oldStatus, SwitchStatus newStatus) {
 		DatapathId dpid = sw.getId();
@@ -678,8 +654,6 @@ public class OFSwitchManager implements IOFSwitchManager, INewOFConnectionListen
 
 		this.switchListeners = new CopyOnWriteArraySet<IOFSwitchListener>();
 		
-		this.pastSwitches = new HashSet<DatapathId>();
-
 		/* TODO @Ryan
 		try {
 			this.storeClient = this.syncService.getStoreClient(
@@ -922,7 +896,7 @@ public class OFSwitchManager implements IOFSwitchManager, INewOFConnectionListen
 
 		try {
 			try {
-				jp = f.createJsonParser(json);
+				jp = f.createParser(json);
 			} catch (JsonParseException e) {
 				throw new IOException(e);
 			}
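
The last OFSwitchManager.java hunk replaces JsonFactory.createJsonParser with createParser, the current Jackson 2.x name for the same operation. A standalone sketch (class and method names are illustrative):

import java.io.IOException;

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonParser;

// Illustrative only: the Jackson 2.x parser creation adopted above.
class JsonParsingSketch {
    static JsonParser openParser(String json) throws IOException {
        JsonFactory f = new JsonFactory();
        return f.createParser(json); // replaces the deprecated createJsonParser(json)
    }
}
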
diff --git a/src/main/java/net/floodlightcontroller/core/module/FloodlightModuleLoader.java b/src/main/java/net/floodlightcontroller/core/module/FloodlightModuleLoader.java
index ded7b6d34b0a952de765d4713a83cc13ddd81ff2..9a906671de3d7b30e91322ddb4709c1fe2a9f3f8 100644
--- a/src/main/java/net/floodlightcontroller/core/module/FloodlightModuleLoader.java
+++ b/src/main/java/net/floodlightcontroller/core/module/FloodlightModuleLoader.java
@@ -40,8 +40,6 @@ import java.util.ServiceConfigurationError;
 import java.util.ServiceLoader;
 import java.util.Set;
 
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
-import net.floodlightcontroller.core.annotations.LogMessageDocs;
 import net.floodlightcontroller.core.module.FloodlightModulePriority.Priority;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -203,28 +201,6 @@ public class FloodlightModuleLoader {
      * @return An IFloodlightModuleContext with all the modules to be started
      * @throws FloodlightModuleException
      */
-    @LogMessageDocs({
-        @LogMessageDoc(level="INFO",
-                message="Loading modules from {file name}",
-                explanation="The controller is initializing its module " +
-                        "configuration from the specified properties " +
-                        "file or directory"),
-        @LogMessageDoc(level="INFO",
-                message="Loading default modules",
-                explanation="The controller is initializing its module " +
-                        "configuration to the default configuration"),
-        @LogMessageDoc(level="ERROR",
-                message="Could not load module configuration file",
-                explanation="The controller failed to read the " +
-                        "module configuration file",
-                recommendation="Verify that the module configuration is " +
-                        "present. " + LogMessageDoc.CHECK_CONTROLLER),
-        @LogMessageDoc(level="ERROR",
-                message="Could not load default modules",
-                explanation="The controller failed to read the default " +
-                        "module configuration",
-                recommendation=LogMessageDoc.CHECK_CONTROLLER)
-    })
     public IFloodlightModuleContext loadModulesFromConfig(String fName)
             throws FloodlightModuleException {
         Properties prop = new Properties();
@@ -558,11 +534,6 @@ public class FloodlightModuleLoader {
      * Parses configuration parameters for each module
      * @param prop The properties file to use
      */
-    @LogMessageDoc(level="WARN",
-                   message="Module {module} not found or loaded. " +
-                           "Not adding configuration option {key} = {value}",
-                   explanation="Ignoring a configuration parameter for a " +
-                        "module that is not loaded.")
     protected void parseConfigParameters(Properties prop) {
         if (prop == null) return;
 
diff --git a/src/main/java/net/floodlightcontroller/core/util/ListenerDispatcher.java b/src/main/java/net/floodlightcontroller/core/util/ListenerDispatcher.java
index 4a6b978c55e6f5c7a2eec59ccd984ab010f462d9..82a34a238b44a726bdcd43a095c67a1a848c76c4 100644
--- a/src/main/java/net/floodlightcontroller/core/util/ListenerDispatcher.java
+++ b/src/main/java/net/floodlightcontroller/core/util/ListenerDispatcher.java
@@ -22,7 +22,6 @@ import java.util.HashSet;
 import java.util.List;
 
 import net.floodlightcontroller.core.IListener;
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -60,14 +59,6 @@ public class ListenerDispatcher<U, T extends IListener<U>> {
      * Add a listener to the list of listeners
      * @param listener
      */
-    @LogMessageDoc(level="ERROR",
-                   message="No listener dependency solution: " +
-                           "No listeners without incoming dependencies",
-                   explanation="The set of listeners installed " +
-                           "have dependencies with no solution",
-                   recommendation="Install a different set of listeners " +
-                           "or install all dependencies.  This is a defect in " +
-                           "the controller installation.")
     public void addListener(U type, T listener) {
         List<T> newlisteners = new ArrayList<T>();
         if (listeners != null)
diff --git a/src/main/java/net/floodlightcontroller/core/util/SingletonTask.java b/src/main/java/net/floodlightcontroller/core/util/SingletonTask.java
index 7a145f82f1f668b0415d353c8d04cc4ecc8a97a8..447c8b109463c90640dfd80781346cf1026f7524 100644
--- a/src/main/java/net/floodlightcontroller/core/util/SingletonTask.java
+++ b/src/main/java/net/floodlightcontroller/core/util/SingletonTask.java
@@ -20,7 +20,6 @@ package net.floodlightcontroller.core.util;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
 
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -36,8 +35,7 @@ import org.slf4j.LoggerFactory;
  * * If the task has begun, set a bit to restart it after the current task finishes
  */
 public class SingletonTask {
-    protected static final Logger logger = 
-            LoggerFactory.getLogger(SingletonTask.class);
+    protected static final Logger logger = LoggerFactory.getLogger(SingletonTask.class);
             
     protected static class SingletonTaskContext  {
         protected boolean taskShouldRun = false;
@@ -57,13 +55,11 @@ public class SingletonTask {
         }
 
         @Override
-        @LogMessageDoc(level="ERROR",
-                       message="Exception while executing task",
-                       recommendation=LogMessageDoc.GENERIC_ACTION)
         public void run() {
             synchronized (parent.context) {
-                if (canceled || !parent.context.taskShouldRun)
+                if (canceled || !parent.context.taskShouldRun) {
                     return;
+                }
 
                 parent.context.taskRunning = true;
                 parent.context.taskShouldRun = false;
@@ -135,10 +131,9 @@ public class SingletonTask {
                     // schedule to restart at the right time
                     if (delay > 0) {
                         long now = System.nanoTime();
-                        long then = 
-                            now + TimeUnit.NANOSECONDS.convert(delay, unit);
+                        long then = now + TimeUnit.NANOSECONDS.convert(delay, unit);
                         context.waitingTask.nextschedule = then;
-//                        logger.debug("rescheduled task " + this + " for " + TimeUnit.SECONDS.convert(then, TimeUnit.NANOSECONDS) + "s. A bunch of these messages -may- indicate you have a blocked task.");
+                        logger.debug("Rescheduled task {} for {}s. Repeated messages may indicate a blocked task.", this, TimeUnit.SECONDS.convert(then, TimeUnit.NANOSECONDS));
                     } else {
                         context.waitingTask.nextschedule = 0;
                     }
@@ -158,10 +153,11 @@ public class SingletonTask {
         }
 
         if (needQueue) {
-            if (delay <= 0) 
+            if (delay <= 0) {
                 ses.execute(stw);
-            else
+            } else {
                 ses.schedule(stw, delay, unit);
+            }
         }
     }
-}
+}
\ No newline at end of file
diff --git a/src/main/java/net/floodlightcontroller/core/web/ControllerRoleResource.java b/src/main/java/net/floodlightcontroller/core/web/ControllerRoleResource.java
index c64f1a821be38a7c7fa49f1c7f21c51f106e6185..69c3723ae1e7dbde817c29991849cb9b6cd57115 100644
--- a/src/main/java/net/floodlightcontroller/core/web/ControllerRoleResource.java
+++ b/src/main/java/net/floodlightcontroller/core/web/ControllerRoleResource.java
@@ -23,7 +23,6 @@ import org.restlet.resource.ServerResource;
 
 import net.floodlightcontroller.core.IFloodlightProviderService;
 import net.floodlightcontroller.core.RoleInfo;
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
 
 import org.restlet.resource.Get;
 import org.restlet.resource.Post;
@@ -57,11 +56,6 @@ public class ControllerRoleResource extends ServerResource {
     }
 
     @Post
-    @LogMessageDoc(level="WARN",
-                   message="Invalid role value specified in REST API to " +
-                      "set controller role",
-                   explanation="An HA role change request was malformed.",
-                   recommendation=LogMessageDoc.CHECK_CONTROLLER)
     public Map<String, String> setRole(String json) {
     	Map<String, String> retValue = new HashMap<String, String>();
 
diff --git a/src/main/java/net/floodlightcontroller/core/web/SwitchResourceBase.java b/src/main/java/net/floodlightcontroller/core/web/SwitchResourceBase.java
index 358d7371710c331e5d0f08c47d3355a3fc37f289..0ade22924fe5683e3dce87c29508d7db0f220c03 100644
--- a/src/main/java/net/floodlightcontroller/core/web/SwitchResourceBase.java
+++ b/src/main/java/net/floodlightcontroller/core/web/SwitchResourceBase.java
@@ -22,7 +22,6 @@ import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 
 import net.floodlightcontroller.core.IOFSwitch;
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
 import net.floodlightcontroller.core.internal.IOFSwitchService;
 
 import org.projectfloodlight.openflow.protocol.OFFeaturesReply;
@@ -71,12 +70,6 @@ public class SwitchResourceBase extends ServerResource {
 	 * @return
 	 */
 	@SuppressWarnings("unchecked")
-	@LogMessageDoc(level="ERROR",
-	message="Failure retrieving statistics from switch {switch}",
-	explanation="An error occurred while retrieving statistics" +
-			"from the switch",
-			recommendation=LogMessageDoc.CHECK_SWITCH + " " +
-					LogMessageDoc.GENERIC_ACTION)
 	protected List<OFStatsReply> getSwitchStatistics(DatapathId switchId,
 			OFStatsType statType) {
 		IOFSwitchService switchService = (IOFSwitchService) getContext().getAttributes().get(IOFSwitchService.class.getCanonicalName());
diff --git a/src/main/java/net/floodlightcontroller/flowcache/PortDownReconciliation.java b/src/main/java/net/floodlightcontroller/flowcache/PortDownReconciliation.java
index 73da272ee58388a2f850ab784710c28608901461..0278ab5545f444a03ee344d9121d83fee055def6 100644
--- a/src/main/java/net/floodlightcontroller/flowcache/PortDownReconciliation.java
+++ b/src/main/java/net/floodlightcontroller/flowcache/PortDownReconciliation.java
@@ -49,8 +49,8 @@ import net.floodlightcontroller.flowcache.IFlowReconcileService;
 import net.floodlightcontroller.flowcache.OFMatchReconcile;
 import net.floodlightcontroller.flowcache.PriorityPendingQueue.EventPriority;
 import net.floodlightcontroller.linkdiscovery.ILinkDiscovery;
-import net.floodlightcontroller.linkdiscovery.LinkInfo;
 import net.floodlightcontroller.linkdiscovery.ILinkDiscovery.LDUpdate;
+import net.floodlightcontroller.linkdiscovery.internal.LinkInfo;
 import net.floodlightcontroller.linkdiscovery.ILinkDiscoveryService;
 import net.floodlightcontroller.routing.Link;
 import net.floodlightcontroller.topology.ITopologyListener;
diff --git a/src/main/java/net/floodlightcontroller/forwarding/Forwarding.java b/src/main/java/net/floodlightcontroller/forwarding/Forwarding.java
index 7f07b1a595a9bc7bc6098be628133b81b2adb0f8..32f0ff09cd11ffe2c71d57dd553c35871b895dc7 100644
--- a/src/main/java/net/floodlightcontroller/forwarding/Forwarding.java
+++ b/src/main/java/net/floodlightcontroller/forwarding/Forwarding.java
@@ -19,17 +19,14 @@ package net.floodlightcontroller.forwarding;
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 import net.floodlightcontroller.core.FloodlightContext;
 import net.floodlightcontroller.core.IFloodlightProviderService;
 import net.floodlightcontroller.core.IOFSwitch;
-import net.floodlightcontroller.devicemanager.IDevice;
-import net.floodlightcontroller.devicemanager.IDeviceService;
-import net.floodlightcontroller.devicemanager.SwitchPort;
 import net.floodlightcontroller.core.annotations.LogMessageCategory;
 import net.floodlightcontroller.core.annotations.LogMessageDoc;
 import net.floodlightcontroller.core.annotations.LogMessageDocs;
@@ -40,6 +37,9 @@ import net.floodlightcontroller.core.module.IFloodlightModule;
 import net.floodlightcontroller.core.module.IFloodlightService;
 import net.floodlightcontroller.core.util.AppCookie;
 import net.floodlightcontroller.debugcounter.IDebugCounterService;
+import net.floodlightcontroller.devicemanager.IDevice;
+import net.floodlightcontroller.devicemanager.IDeviceService;
+import net.floodlightcontroller.devicemanager.SwitchPort;
 import net.floodlightcontroller.packet.Ethernet;
 import net.floodlightcontroller.packet.IPv4;
 import net.floodlightcontroller.packet.IPv6;
@@ -52,12 +52,13 @@ import net.floodlightcontroller.routing.Route;
 import net.floodlightcontroller.topology.ITopologyService;
 
 import org.projectfloodlight.openflow.protocol.OFFlowMod;
-import org.projectfloodlight.openflow.protocol.match.Match;
-import org.projectfloodlight.openflow.protocol.match.MatchField;
 import org.projectfloodlight.openflow.protocol.OFFlowModCommand;
 import org.projectfloodlight.openflow.protocol.OFPacketIn;
 import org.projectfloodlight.openflow.protocol.OFPacketOut;
 import org.projectfloodlight.openflow.protocol.OFVersion;
+import org.projectfloodlight.openflow.protocol.action.OFAction;
+import org.projectfloodlight.openflow.protocol.match.Match;
+import org.projectfloodlight.openflow.protocol.match.MatchField;
 import org.projectfloodlight.openflow.types.DatapathId;
 import org.projectfloodlight.openflow.types.EthType;
 import org.projectfloodlight.openflow.types.IPv4Address;
@@ -69,7 +70,6 @@ import org.projectfloodlight.openflow.types.OFPort;
 import org.projectfloodlight.openflow.types.OFVlanVidMatch;
 import org.projectfloodlight.openflow.types.U64;
 import org.projectfloodlight.openflow.types.VlanVid;
-import org.projectfloodlight.openflow.protocol.action.OFAction;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -136,7 +136,7 @@ public class Forwarding extends ForwardingBase implements IFloodlightModule {
 		OFFlowMod.Builder fmb = sw.getOFFactory().buildFlowAdd(); // this will be a drop-flow; a flow that will not output to any ports
 		List<OFAction> actions = new ArrayList<OFAction>(); // set no action to drop
 		U64 cookie = AppCookie.makeCookie(FORWARDING_APP_ID, 0);
-
+		log.info("Installing drop flow (no output actions)");
 		fmb.setCookie(cookie)
 		.setHardTimeout(FLOWMOD_DEFAULT_HARD_TIMEOUT)
 		.setIdleTimeout(FLOWMOD_DEFAULT_IDLE_TIMEOUT)
@@ -162,111 +162,82 @@ public class Forwarding extends ForwardingBase implements IFloodlightModule {
 		// Check if we have the location of the destination
 		IDevice dstDevice = IDeviceService.fcStore.get(cntx, IDeviceService.CONTEXT_DST_DEVICE);
 
+		DatapathId source = sw.getId();
+		log.info("Packet arrived on switch {}", source);
+		log.info("Ingress port {}", inPort);
 		if (dstDevice != null) {
 			IDevice srcDevice = IDeviceService.fcStore.get(cntx, IDeviceService.CONTEXT_SRC_DEVICE);
-			DatapathId srcIsland = topologyService.getL2DomainId(sw.getId());
+			DatapathId srcIsland = topologyService.getL2DomainId(source);
 
 			if (srcDevice == null) {
-				log.debug("No device entry found for source device");
+				log.info("No device entry found for source device");
 				return;
 			}
 			if (srcIsland == null) {
-				log.debug("No openflow island found for source {}/{}",
+				log.info("No openflow island found for source {}/{}",
 						sw.getId().toString(), inPort);
 				return;
 			}
 
-			// Validate that we have a destination known on the same island
 			// Validate that the source and destination are not on the same switchport
-			boolean on_same_island = false;
+
 			boolean on_same_if = false;
 			for (SwitchPort dstDap : dstDevice.getAttachmentPoints()) {
-				DatapathId dstSwDpid = dstDap.getSwitchDPID();
-				DatapathId dstIsland = topologyService.getL2DomainId(dstSwDpid);
-				log.trace("Source Island: {}, Destination Island: {}", srcIsland, dstIsland);
-				log.trace("Source Device: {}, Destination Device: {}", srcDevice, dstDevice);
-				if ((dstIsland != null) && dstIsland.equals(srcIsland)) {
-					on_same_island = true;
-					if (sw.getId().equals(dstSwDpid) && inPort.equals(dstDap.getPort())) {
-						on_same_if = true;
-					}
-					break;
+				if (sw.getId().equals(dstDap.getSwitchDPID()) && inPort.equals(dstDap.getPort())) {
+					on_same_if = true;
 				}
-			}
-
-			if (!on_same_island) {
-				// Flood since we don't know the dst device
-				if (log.isTraceEnabled()) {
-					log.trace("No first hop island found for destination " +
-							"device {}, Action = flooding", dstDevice);
-				}
-				doFlood(sw, pi, cntx);
-				return;
+				if (on_same_if) break;
 			}
 
 			if (on_same_if) {
-				if (log.isTraceEnabled()) {
-					log.trace("Both source and destination are on the same " +
-							"switch/port {}/{}, Action = NOP",
-							sw.toString(), inPort);
-				}
+				log.info("Both source and destination are on the same " +
+						"switch/port {}/{}, Action = NOP",
+						sw.toString(), inPort);
 				return;
 			}
 
-			// Install all the routes where both src and dst have attachment
-			// points.  Since the lists are stored in sorted order we can
-			// traverse the attachment points in O(m+n) time
-			SwitchPort[] srcDaps = srcDevice.getAttachmentPoints();
-			Arrays.sort(srcDaps, clusterIdComparator);
 			SwitchPort[] dstDaps = dstDevice.getAttachmentPoints();
-			Arrays.sort(dstDaps, clusterIdComparator);
-
-			int iSrcDaps = 0, iDstDaps = 0;
-
-			while ((iSrcDaps < srcDaps.length) && (iDstDaps < dstDaps.length)) {
-				SwitchPort srcDap = srcDaps[iSrcDaps];
-				SwitchPort dstDap = dstDaps[iDstDaps];
-
-				// srcCluster and dstCluster here cannot be null as
-				// every switch will be at least in its own L2 domain.
-				DatapathId srcCluster = topologyService.getL2DomainId(srcDap.getSwitchDPID());
-				DatapathId dstCluster = topologyService.getL2DomainId(dstDap.getSwitchDPID());
-
-				int srcVsDest = srcCluster.compareTo(dstCluster);
-				if (srcVsDest == 0) {
-					if (!srcDap.equals(dstDap)) {
-						Route route =
-								routingEngineService.getRoute(srcDap.getSwitchDPID(), 
-										srcDap.getPort(),
-										dstDap.getSwitchDPID(),
-										dstDap.getPort(), U64.of(0)); //cookie = 0, i.e., default route
-						if (route != null) {
-							if (log.isTraceEnabled()) {
-								log.trace("pushRoute inPort={} route={} " +
-										"destination={}:{}",
-										new Object[] { inPort, route,
-										dstDap.getSwitchDPID(),
-										dstDap.getPort()});
-							}
-
-							U64 cookie = AppCookie.makeCookie(FORWARDING_APP_ID, 0);
-
-							Match m = createMatchFromPacket(sw, inPort, cntx);
-
-							pushRoute(route, m, pi, sw.getId(), cookie,
-									cntx, requestFlowRemovedNotifn, false,
-									OFFlowModCommand.ADD);
-						}
-					}
-					iSrcDaps++;
-					iDstDaps++;
-				} else if (srcVsDest < 0) {
-					iSrcDaps++;
-				} else {
-					iDstDaps++;
+
+			SwitchPort dstDap = null;
+
+			// look for an edge-facing attachment point for the destination
+			for (SwitchPort ap : dstDaps) {
+				if (topologyService.isEdge(ap.getSwitchDPID(), ap.getPort())) {
+					dstDap = ap;
+					break;
+				}
+			}
+
+			if (!topologyService.isEdge(source, inPort)) {
+				// It's possible that we learned the packet's destination while it was in flight
+				log.info("Packet destination is known, but the packet was not received on an edge port. Flooding.");
+				doFlood(sw, pi, cntx);
+				return;
+			}
+			Route route = (dstDap == null) ? null : routingEngineService.getRoute(source,
+					inPort,
+					dstDap.getSwitchDPID(),
+					dstDap.getPort(), U64.of(0)); //cookie = 0, i.e., default route
+
+			if (route != null) {
+
+				log.info("pushRoute inPort={} route={} " +
+						"destination={}:{}",
+						new Object[] { inPort, route,
+						dstDap.getSwitchDPID(),
+						dstDap.getPort()});
+
+
+				U64 cookie = AppCookie.makeCookie(FORWARDING_APP_ID, 0);
+
+				Match m = createMatchFromPacket(sw, inPort, cntx);
+				log.info("Creating flow rules on the route, match rule: {}", m);
+				pushRoute(route, m, pi, sw.getId(), cookie,
+						cntx, requestFlowRemovedNotifn, false,
+						OFFlowModCommand.ADD);
 			}
 		} else {
+			log.info("Destination unknown, flooding");
 			// Flood since we don't know the dst device
 			doFlood(sw, pi, cntx);
 		}
@@ -306,7 +277,6 @@ public class Forwarding extends ForwardingBase implements IFloodlightModule {
 		}
 
 		// TODO Detect switch type and match to create hardware-implemented flow
-		// TODO Allow for IPv6 matches
 		if (eth.getEtherType() == EthType.IPv4) { /* shallow check for equality is okay for EthType */
 			IPv4 ip = (IPv4) eth.getPayload();
 			IPv4Address srcIp = ip.getSourceAddress();
@@ -393,25 +363,17 @@ public class Forwarding extends ForwardingBase implements IFloodlightModule {
 							recommendation=LogMessageDoc.CHECK_SWITCH)
 	protected void doFlood(IOFSwitch sw, OFPacketIn pi, FloodlightContext cntx) {
 		OFPort inPort = (pi.getVersion().compareTo(OFVersion.OF_12) < 0 ? pi.getInPort() : pi.getMatch().get(MatchField.IN_PORT));
-		if (topologyService.isIncomingBroadcastAllowed(sw.getId(), inPort) == false) {
-			if (log.isTraceEnabled()) {
-				log.trace("doFlood, drop broadcast packet, pi={}, " +
-						"from a blocked port, srcSwitch=[{},{}], linkInfo={}",
-						new Object[] {pi, sw.getId(), inPort});
-			}
-			return;
-		}
-
 		// Set Action to flood
 		OFPacketOut.Builder pob = sw.getOFFactory().buildPacketOut();
 		List<OFAction> actions = new ArrayList<OFAction>();
-		if (sw.hasAttribute(IOFSwitch.PROP_SUPPORTS_OFPP_FLOOD)) {
-			actions.add(sw.getOFFactory().actions().output(OFPort.FLOOD, Integer.MAX_VALUE)); // FLOOD is a more selective/efficient version of ALL
-		} else {
-			actions.add(sw.getOFFactory().actions().output(OFPort.ALL, Integer.MAX_VALUE));
+		Set<OFPort> broadcastPorts = this.topologyService.getSwitchBroadcastPorts(sw.getId());
+
+		for (OFPort p : broadcastPorts) {
+			if (p.equals(inPort)) continue;
+			actions.add(sw.getOFFactory().actions().output(p, Integer.MAX_VALUE));
 		}
 		pob.setActions(actions);
-
+		// log.info("actions {}",actions);
 		// set buffer-id, in-port and packet-data based on packet-in
 		pob.setBufferId(OFBufferId.NO_BUFFER);
 		pob.setInPort(inPort);
@@ -501,7 +463,7 @@ public class Forwarding extends ForwardingBase implements IFloodlightModule {
 		}
 		tmp = configParameters.get("priority");
 		if (tmp != null) {
 			FLOWMOD_DEFAULT_PRIORITY = Integer.parseInt(tmp);
 			log.info("Default priority set to {}.", FLOWMOD_DEFAULT_PRIORITY);
 		} else {
 			log.info("Default priority not configured. Using {}.", FLOWMOD_DEFAULT_PRIORITY);
@@ -537,4 +499,4 @@ public class Forwarding extends ForwardingBase implements IFloodlightModule {
 	public void startUp(FloodlightModuleContext context) {
 		super.startUp();
 	}
-}
\ No newline at end of file
+}
diff --git a/src/main/java/net/floodlightcontroller/linkdiscovery/ILinkDiscovery.java b/src/main/java/net/floodlightcontroller/linkdiscovery/ILinkDiscovery.java
index 051f2d41267616703829963e1d3991948f4b474f..20ebbc2ebc72ba62cedd42af8091954d4ec910a4 100644
--- a/src/main/java/net/floodlightcontroller/linkdiscovery/ILinkDiscovery.java
+++ b/src/main/java/net/floodlightcontroller/linkdiscovery/ILinkDiscovery.java
@@ -86,6 +86,7 @@ public interface ILinkDiscovery {
             this.operation = oper;
             this.src = switchId;
             this.srcType = stype;
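+            /* No latency measurement applies to this update type; use zero (i.e., unknown) so getLatency() is never null */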
+            this.latency = U64.ZERO;
         }
 
         // For port up or port down; and tunnel port added and removed.
@@ -93,6 +94,7 @@ public interface ILinkDiscovery {
             this.src = sw;
             this.srcPort = port;
             this.operation = operation;
+            this.latency = U64.ZERO;
         }
 
         public DatapathId getSrc() {
diff --git a/src/main/java/net/floodlightcontroller/linkdiscovery/ILinkDiscoveryService.java b/src/main/java/net/floodlightcontroller/linkdiscovery/ILinkDiscoveryService.java
index df179c5952c4c7b738ed17065afe98549c94be87..096795d94ba5020a68e28870d41d9c5736ad3696 100644
--- a/src/main/java/net/floodlightcontroller/linkdiscovery/ILinkDiscoveryService.java
+++ b/src/main/java/net/floodlightcontroller/linkdiscovery/ILinkDiscoveryService.java
@@ -27,6 +27,7 @@ import org.projectfloodlight.openflow.types.OFPort;
 
 import net.floodlightcontroller.core.IOFSwitch;
 import net.floodlightcontroller.core.module.IFloodlightService;
+import net.floodlightcontroller.linkdiscovery.internal.LinkInfo;
 import net.floodlightcontroller.routing.Link;
 import net.floodlightcontroller.topology.NodePortTuple;
 
diff --git a/src/main/java/net/floodlightcontroller/linkdiscovery/LinkInfo.java b/src/main/java/net/floodlightcontroller/linkdiscovery/LinkInfo.java
deleted file mode 100644
index 6c2d7334e436cabe3f44eca1547d4f1425c3d440..0000000000000000000000000000000000000000
--- a/src/main/java/net/floodlightcontroller/linkdiscovery/LinkInfo.java
+++ /dev/null
@@ -1,156 +0,0 @@
-/**
-*    Copyright 2011, Big Switch Networks, Inc.*    Originally created by David Erickson, Stanford University
-**    Licensed under the Apache License, Version 2.0 (the "License"); you may
-*    not use this file except in compliance with the License. You may obtain
-*    a copy of the License at
-*
-*         http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-*    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-*    License for the specific language governing permissions and limitations
-*    under the License.
-**/
-
-package net.floodlightcontroller.linkdiscovery;
-
-import java.util.Date;
-
-import net.floodlightcontroller.linkdiscovery.ILinkDiscovery.LinkType;
-
-import com.fasterxml.jackson.annotation.JsonIgnore;
-
-public class LinkInfo {
-
-    public LinkInfo(Date firstSeenTime,
-                    Date lastLldpReceivedTime,
-                    Date lastBddpReceivedTime) {
-        super();
-        this.firstSeenTime = firstSeenTime;
-        this.lastLldpReceivedTime = lastLldpReceivedTime;
-        this.lastBddpReceivedTime = lastBddpReceivedTime;
-    }
-
-    /*
-     * Do not use this constructor. Used primarily for JSON
-     * Serialization/Deserialization
-     */
-    public LinkInfo() {
-        this.firstSeenTime = null;
-        this.lastLldpReceivedTime = null;
-        this.lastBddpReceivedTime = null;
-    }
-
-    public LinkInfo(LinkInfo fromLinkInfo) {
-        this.firstSeenTime = fromLinkInfo.getFirstSeenTime();
-        this.lastLldpReceivedTime = fromLinkInfo.getUnicastValidTime();
-        this.lastBddpReceivedTime = fromLinkInfo.getMulticastValidTime();
-    }
-
-    protected Date firstSeenTime;
-    protected Date lastLldpReceivedTime; /* Standard LLLDP received time */
-    protected Date lastBddpReceivedTime; /* Modified LLDP received time  */
-
-    /** The port states stored here are topology's last knowledge of
-     * the state of the port. This mostly mirrors the state
-     * maintained in the port list in IOFSwitch (i.e. the one returned
-     * from getPort), except that during a port status message the
-     * IOFSwitch port state will already have been updated with the
-     * new port state, so topology needs to keep its own copy so that
-     * it can determine if the port state has changed and therefore
-     * requires the new state to be written to storage.
-     */
-
-    public Date getFirstSeenTime() {
-        return firstSeenTime;
-    }
-
-    public void setFirstSeenTime(Date firstSeenTime) {
-        this.firstSeenTime = firstSeenTime;
-    }
-
-    public Date getUnicastValidTime() {
-        return lastLldpReceivedTime;
-    }
-
-    public void setUnicastValidTime(Date unicastValidTime) {
-        this.lastLldpReceivedTime = unicastValidTime;
-    }
-
-    public Date getMulticastValidTime() {
-        return lastBddpReceivedTime;
-    }
-
-    public void setMulticastValidTime(Date multicastValidTime) {
-        this.lastBddpReceivedTime = multicastValidTime;
-    }
-
-    @JsonIgnore
-    public LinkType getLinkType() {
-        if (lastLldpReceivedTime != null) {
-            return LinkType.DIRECT_LINK;
-        } else if (lastBddpReceivedTime != null) {
-            return LinkType.MULTIHOP_LINK;
-        }
-        return LinkType.INVALID_LINK;
-    }
-
-    /* (non-Javadoc)
-     * @see java.lang.Object#hashCode()
-     */
-    @Override
-    public int hashCode() {
-        final int prime = 5557;
-        int result = 1;
-        result = prime * result + ((firstSeenTime == null) ? 0 : firstSeenTime.hashCode());
-        result = prime * result + ((lastLldpReceivedTime == null) ? 0 : lastLldpReceivedTime.hashCode());
-        result = prime * result + ((lastBddpReceivedTime == null) ? 0 : lastBddpReceivedTime.hashCode());
-        return result;
-    }
-
-    /* (non-Javadoc)
-     * @see java.lang.Object#equals(java.lang.Object)
-     */
-    @Override
-    public boolean equals(Object obj) {
-        if (this == obj)
-            return true;
-        if (obj == null)
-            return false;
-        if (!(obj instanceof LinkInfo))
-            return false;
-        LinkInfo other = (LinkInfo) obj;
-
-        if (firstSeenTime == null) {
-            if (other.firstSeenTime != null)
-                return false;
-        } else if (!firstSeenTime.equals(other.firstSeenTime))
-            return false;
-
-        if (lastLldpReceivedTime == null) {
-            if (other.lastLldpReceivedTime != null)
-                return false;
-        } else if (!lastLldpReceivedTime.equals(other.lastLldpReceivedTime))
-            return false;
-
-        if (lastBddpReceivedTime == null) {
-            if (other.lastBddpReceivedTime != null)
-                return false;
-        } else if (!lastBddpReceivedTime.equals(other.lastBddpReceivedTime))
-            return false;
-
-        return true;
-    }
-
-
-    /* (non-Javadoc)
-     * @see java.lang.Object#toString()
-     */
-    @Override
-    public String toString() {
-        return "LinkInfo [unicastValidTime=" + ((lastLldpReceivedTime == null) ? "null" : lastLldpReceivedTime.getTime())
-                + ", multicastValidTime=" + ((lastBddpReceivedTime == null) ? "null" : lastBddpReceivedTime.getTime())
-                + "]";
-    }
-}
diff --git a/src/main/java/net/floodlightcontroller/linkdiscovery/internal/LinkDiscoveryManager.java b/src/main/java/net/floodlightcontroller/linkdiscovery/internal/LinkDiscoveryManager.java
index 26dd12e59f4bbe46364e9e669d0fb941d86a9656..a26d8d8fd4a6c39539b2c92c347d0848a3d6ecdd 100644
--- a/src/main/java/net/floodlightcontroller/linkdiscovery/internal/LinkDiscoveryManager.java
+++ b/src/main/java/net/floodlightcontroller/linkdiscovery/internal/LinkDiscoveryManager.java
@@ -40,6 +40,8 @@ import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
+import javax.annotation.Nonnull;
+
 import net.floodlightcontroller.core.FloodlightContext;
 import net.floodlightcontroller.core.HAListenerTypeMarker;
 import net.floodlightcontroller.core.HARole;
@@ -51,9 +53,6 @@ import net.floodlightcontroller.core.IInfoProvider;
 import net.floodlightcontroller.core.IOFMessageListener;
 import net.floodlightcontroller.core.IOFSwitch;
 import net.floodlightcontroller.core.IOFSwitchListener;
-import net.floodlightcontroller.core.annotations.LogMessageCategory;
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
-import net.floodlightcontroller.core.annotations.LogMessageDocs;
 import net.floodlightcontroller.core.internal.IOFSwitchService;
 import net.floodlightcontroller.core.module.FloodlightModuleContext;
 import net.floodlightcontroller.core.module.FloodlightModuleException;
@@ -74,7 +73,6 @@ import net.floodlightcontroller.linkdiscovery.ILinkDiscovery.SwitchType;
 import net.floodlightcontroller.linkdiscovery.ILinkDiscovery.UpdateOperation;
 import net.floodlightcontroller.linkdiscovery.ILinkDiscoveryListener;
 import net.floodlightcontroller.linkdiscovery.ILinkDiscoveryService;
-import net.floodlightcontroller.linkdiscovery.LinkInfo;
 import net.floodlightcontroller.linkdiscovery.web.LinkDiscoveryWebRoutable;
 import net.floodlightcontroller.notification.INotificationManager;
 import net.floodlightcontroller.notification.NotificationManagerFactory;
@@ -128,7 +126,6 @@ import org.slf4j.LoggerFactory;
  * 
  * @edited Ryan Izard, rizard@g.clemson.edu, ryan.izard@bigswitch.com
  */
-@LogMessageCategory("Network Topology")
 public class LinkDiscoveryManager implements IOFMessageListener,
 IOFSwitchListener, IStorageSourceListener, ILinkDiscoveryService,
 IFloodlightModule, IInfoProvider {
@@ -210,6 +207,12 @@ IFloodlightModule, IInfoProvider {
 	protected ReentrantReadWriteLock lock;
 	int lldpTimeCount = 0;
 
+	/*
+	 * Latency tracking
+	 */
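+	/* Number of latency samples (one per LLDP round) kept per link; configurable via "latency-history-size" */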
+	protected static int LATENCY_HISTORY_SIZE = 10;
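+	/* Fraction by which the rolling average must differ from the current latency before it is updated; configurable via "latency-update-threshold" */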
+	protected static double LATENCY_UPDATE_THRESHOLD = 0.50;
+
 	/**
 	 * Flag to indicate if automatic port fast is enabled or not. Default is set
 	 * to false -- Initialized in the init method as well.
@@ -296,7 +299,7 @@ IFloodlightModule, IInfoProvider {
 
 		if (log.isTraceEnabled()) {
 			log.trace("Sending LLDP packet out of swich: {}, port: {}, reverse: {}",
-					new Object[] {iofSwitch.getId().toString(), port.toString(), Boolean.toString(isReverse)});
+				new Object[] {iofSwitch.getId().toString(), port.toString(), Boolean.toString(isReverse)});
 		}
 
 		// using "nearest customer bridge" MAC address for broadest possible
@@ -362,7 +365,7 @@ IFloodlightModule, IInfoProvider {
 		} else {
 			lldp.getOptionalTLVList().add(forwardTLV);
 		}
-		
+
 		/* 
 		 * Introduce a new TLV for med-granularity link latency detection.
 		 * If same controller, can assume system clock is the same, but
@@ -373,19 +376,24 @@ IFloodlightModule, IInfoProvider {
 		 * 
 		 * Note Long.SIZE is in bits (64).
 		 */
+		long time = System.currentTimeMillis();
+		long swLatency = iofSwitch.getLatency().getValue();
+		if (log.isTraceEnabled()) {
+			log.trace("SETTING LLDP LATENCY TLV: Current Time {}; {} control plane latency {}; sum {}", new Object[] { time, iofSwitch.getId(), swLatency, time + swLatency });
+		}
 		byte[] timestampTLVValue = ByteBuffer.allocate(Long.SIZE / 8 + 4)
 				.put((byte) 0x00)
 				.put((byte) 0x26)
 				.put((byte) 0xe1)
 				.put((byte) 0x01) /* 0x01 is what we'll use to differentiate DPID (0x00) from time (0x01) */
-				.putLong(System.currentTimeMillis() + iofSwitch.getLatency().getValue() /* account for our switch's one-way latency */)
+				.putLong(time + swLatency /* account for our switch's one-way latency */)
 				.array();
 
 		LLDPTLV timestampTLV = new LLDPTLV()
 		.setType((byte) 127)
 		.setLength((short) timestampTLVValue.length)
 		.setValue(timestampTLVValue);
-		
+
 		/* Now add TLV to our LLDP packet */
 		lldp.getOptionalTLVList().add(timestampTLV);
 
@@ -546,7 +554,7 @@ IFloodlightModule, IInfoProvider {
 		LinkInfo linkInfo = links.get(link);
 		LinkInfo retLinkInfo = null;
 		if (linkInfo != null) {
-			retLinkInfo  = new LinkInfo(linkInfo);
+			retLinkInfo = new LinkInfo(linkInfo);
 		}
 		lock.readLock().unlock();
 		return retLinkInfo;
@@ -655,7 +663,7 @@ IFloodlightModule, IInfoProvider {
 		// If LLDP is suppressed on this port, ignore received packet as well
 		IOFSwitch iofSwitch = switchService.getSwitch(sw);
 
-		log.warn("Received LLDP packet on sw {}, port {}", sw, inPort);
+		log.debug("Received LLDP packet on sw {}, port {}", sw, inPort);
 
 		if (!isIncomingDiscoveryAllowed(sw, inPort, isStandard))
 			return Command.STOP;
@@ -676,7 +684,7 @@ IFloodlightModule, IInfoProvider {
 		OFPort remotePort = OFPort.of(portBB.getShort());
 		IOFSwitch remoteSwitch = null;
 		long timestamp = 0;
-		
+
 		// Verify this LLDP packet matches what we're looking for
 		for (LLDPTLV lldptlv : lldp.getOptionalTLVList()) {
 			if (lldptlv.getType() == 127 && lldptlv.getLength() == 12
@@ -692,7 +700,12 @@ IFloodlightModule, IInfoProvider {
 					&& lldptlv.getValue()[2] == (byte) 0xe1
 					&& lldptlv.getValue()[3] == 0x01) { /* 0x01 for timestamp */
 				ByteBuffer tsBB = ByteBuffer.wrap(lldptlv.getValue()); /* skip OpenFlow OUI (4 bytes above) */
-				timestamp = tsBB.getLong(4) + iofSwitch.getLatency().getValue(); /* include the RX switch latency to "subtract" it */
+				long swLatency = iofSwitch.getLatency().getValue();
+				timestamp = tsBB.getLong(4);
+				if (log.isTraceEnabled()) {
+					log.trace("RECEIVED LLDP LATENCY TLV: Got timestamp of {}; Switch {} latency of {}", new Object[] { timestamp, iofSwitch.getId(), iofSwitch.getLatency().getValue() }); 
+				}
+				timestamp = timestamp + swLatency; /* include the RX switch latency so it can be "subtracted" out when the total latency is computed */
 			} else if (lldptlv.getType() == 12 && lldptlv.getLength() == 8) {
 				otherId = ByteBuffer.wrap(lldptlv.getValue()).getLong();
 				if (myId == otherId) myLLDP = true;
@@ -768,9 +781,14 @@ IFloodlightModule, IInfoProvider {
 
 		// Store the time of update to this link, and push it out to
 		// routingEngine
-		U64 latency = timestamp != 0 ? U64.of(System.currentTimeMillis() - timestamp) : U64.ZERO;
+		long time = System.currentTimeMillis();
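+		/* A missing timestamp TLV (timestamp == 0) or a non-positive delta (e.g. unsynchronized clocks) is recorded as zero, i.e. unknown latency */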
+		U64 latency = (timestamp != 0 && (time - timestamp) > 0) ? U64.of(time - timestamp) : U64.ZERO;
+		if (log.isTraceEnabled()) {
+			log.trace("COMPUTING FINAL DATAPLANE LATENCY: Current time {}; RX switch {} adjusted TX timestamp {}; overall latency from {} to {} is {}ms",
+					new Object[] { time, iofSwitch.getId(), timestamp, remoteSwitch.getId(), iofSwitch.getId(), String.valueOf(latency.getValue()) });
+		}
 		Link lt = new Link(remoteSwitch.getId(), remotePort,
-				iofSwitch.getId(), inPort, latency); /* we assume 0 latency is undefined */
+				iofSwitch.getId(), inPort, latency);
 
 		if (!isLinkAllowed(lt.getSrc(), lt.getSrcPort(),
 				lt.getDst(), lt.getDstPort()))
@@ -782,13 +800,13 @@ IFloodlightModule, IInfoProvider {
 
 		Date firstSeenTime = new Date(System.currentTimeMillis());
 
-		if (isStandard)
-			lastLldpTime = new Date(System.currentTimeMillis());
-		else
-			lastBddpTime = new Date(System.currentTimeMillis());
+		if (isStandard) {
+			lastLldpTime = new Date(firstSeenTime.getTime());
+		} else {
+			lastBddpTime = new Date(firstSeenTime.getTime());
+		}
 
-		LinkInfo newLinkInfo = new LinkInfo(firstSeenTime, lastLldpTime,
-				lastBddpTime);
+		LinkInfo newLinkInfo = new LinkInfo(firstSeenTime, lastLldpTime, lastBddpTime);
 
 		addOrUpdateLink(lt, newLinkInfo);
 
@@ -805,6 +823,7 @@ IFloodlightModule, IInfoProvider {
 				// the reverse link does not exist.
 				if (newLinkInfo.getFirstSeenTime().getTime() > System.currentTimeMillis()
 						- LINK_TIMEOUT) {
+					log.debug("Sending reverse LLDP for link {}", lt);
 					this.sendDiscoveryMessage(lt.getDst(), lt.getDstPort(),
 							isStandard, true);
 				}
@@ -872,11 +891,6 @@ IFloodlightModule, IInfoProvider {
 	//  Internal Methods - Discovery Related
 	//***********************************
 
-	@LogMessageDoc(level = "ERROR",
-			message = "Error in link discovery updates loop",
-			explanation = "An unknown error occured while dispatching "
-					+ "link update notifications",
-					recommendation = LogMessageDoc.GENERIC_ACTION)
 	private void doUpdatesThread() throws InterruptedException {
 		do {
 			LDUpdate update = updates.take();
@@ -888,15 +902,16 @@ IFloodlightModule, IInfoProvider {
 				updateList.add(updates.remove());
 			}
 
-			if (linkDiscoveryAware != null) {
-				if (log.isTraceEnabled()) {
-					log.trace("Dispatching link discovery update {} {} {} {} {} for {}",
+			if (linkDiscoveryAware != null && !updateList.isEmpty()) {
+				if (log.isDebugEnabled()) {
+					log.debug("Dispatching link discovery update {} {} {} {} {} {}ms for {}",
 							new Object[] {
 							update.getOperation(),
-							update.getSrc().toString(),
+							update.getSrc(),
 							update.getSrcPort(),
-							update.getDst().toString(),
+							update.getDst(),
 							update.getDstPort(),
+							update.getLatency().getValue(),
 							linkDiscoveryAware });
 				}
 				try {
@@ -1180,11 +1195,6 @@ IFloodlightModule, IInfoProvider {
 	 * @param isReverse
 	 *            indicates whether the LLDP was sent as a response
 	 */
-	@LogMessageDoc(level = "ERROR",
-			message = "Failure sending LLDP out port {port} on switch {switch}",
-			explanation = "An I/O error occured while sending LLDP message "
-					+ "to the switch.",
-					recommendation = LogMessageDoc.CHECK_SWITCH)
 	protected void sendDiscoveryMessage(DatapathId sw, OFPort port,
 			boolean isStandard, boolean isReverse) {
 
@@ -1222,17 +1232,18 @@ IFloodlightModule, IInfoProvider {
 			IOFSwitch iofSwitch = switchService.getSwitch(sw);
 			if (iofSwitch == null) continue;
 			if (!iofSwitch.isActive()) continue; /* can't do anything if the switch is SLAVE */
-			if (iofSwitch.getEnabledPorts() != null) {
-				for (OFPortDesc ofp : iofSwitch.getEnabledPorts()) {
-					if (isLinkDiscoverySuppressed(sw, ofp.getPortNo())) {			
+			Collection<OFPort> c = iofSwitch.getEnabledPortNumbers();
+			if (c != null) {
+				for (OFPort ofp : c) {
+					if (isLinkDiscoverySuppressed(sw, ofp)) {			
 						continue;
 					}
 					log.trace("Enabled port: {}", ofp);
-					sendDiscoveryMessage(sw, ofp.getPortNo(), true, false);
+					sendDiscoveryMessage(sw, ofp, true, false);
 
 					// If the switch port is not already in the maintenance
 					// queue, add it.
-					NodePortTuple npt = new NodePortTuple(sw, ofp.getPortNo());
+					NodePortTuple npt = new NodePortTuple(sw, ofp);
 					addToMaintenanceQueue(npt);
 				}
 			}
@@ -1300,72 +1311,119 @@ IFloodlightModule, IInfoProvider {
 					new HashSet<Link>());
 		portLinks.get(dstNpt).add(lt);
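+		/* Seed the new link's latency history with its first observed latency value */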
 
+		newInfo.addObservedLatency(lt.getLatency());
+
 		return true;
 	}
 
-	protected boolean updateLink(Link lt, LinkInfo oldInfo, LinkInfo newInfo) {
+	/**
+	 * Determine if a link should be updated and set the time stamps if it should.
+	 * Also, determine the correct latency value for the link. An existing link
+	 * will have a list of latencies associated with its LinkInfo. If enough time has
+	 * elapsed to determine a good latency baseline average and the new average is
+	 * greater or less than the existing latency value by a set threshold, then the
+	 * latency should be updated. This allows for latencies to be smoothed and reduces
+	 * the number of link updates due to small fluctuations (or outliers) in instantaneous
+	 * link latency values.
+	 * 
+	 * @param lk link carrying the newly observed latency; its latency will be replaced with the latency to use
+	 * @param existingInfo with past observed latencies and time stamps
+	 * @param newInfo with updated time stamps
+	 * @return true if update occurred; false if no update should be dispatched
+	 */
+	protected boolean updateLink(@Nonnull Link lk, @Nonnull LinkInfo existingInfo, @Nonnull LinkInfo newInfo) {
 		boolean linkChanged = false;
-		// Since the link info is already there, we need to
-		// update the right fields.
-		if (newInfo.getUnicastValidTime() == null) {
-			// This is due to a multicast LLDP, so copy the old unicast
-			// value.
-			if (oldInfo.getUnicastValidTime() != null) {
-				newInfo.setUnicastValidTime(oldInfo.getUnicastValidTime());
-			}
-		} else if (newInfo.getMulticastValidTime() == null) {
-			// This is due to a unicast LLDP, so copy the old multicast
-			// value.
-			if (oldInfo.getMulticastValidTime() != null) {
-				newInfo.setMulticastValidTime(oldInfo.getMulticastValidTime());
+		boolean ignoreBDDP_haveLLDPalready = false;
+		
+		/*
+		 * Check if we are transitioning from one link type to another.
+		 * A transition is:
+		 * -- going from no LLDP time to an LLDP time (is OpenFlow link)
+		 * -- going from an LLDP time to a BDDP time (is non-OpenFlow link)
+		 * 
+		 * Note: Going from LLDP to BDDP means our LLDP link must have timed
+		 * out already (null in existing LinkInfo). Otherwise, we'll flap
+		 * between multicast and unicast links.
+		 */
+		if (existingInfo.getMulticastValidTime() == null && newInfo.getMulticastValidTime() != null) {
+			if (existingInfo.getUnicastValidTime() == null) { /* unicast must be null to go to multicast */
+				log.debug("Link is BDDP. Changed.");
+				linkChanged = true; /* detected BDDP */
+			} else {
+				ignoreBDDP_haveLLDPalready = true;
 			}
+		} else if (existingInfo.getUnicastValidTime() == null && newInfo.getUnicastValidTime() != null) {
+			log.debug("Link is LLDP. Changed.");
+			linkChanged = true; /* detected LLDP */
 		}
 
-		Date oldTime = oldInfo.getUnicastValidTime();
-		Date newTime = newInfo.getUnicastValidTime();
-		// the link has changed its state between openflow and
-		// non-openflow
-		// if the unicastValidTimes are null or not null
-		if (oldTime != null & newTime == null) {
-			linkChanged = true;
-		} else if (oldTime == null & newTime != null) {
+		/* 
+		 * If we're undergoing an LLDP update (non-null time), grab the new LLDP time.
+		 * If we're undergoing a BDDP update (non-null time), grab the new BDDP time.
+		 * 
+		 * Only do this if the new LinkInfo is non-null for each respective field.
+		 * We don't want to overwrite an existing LLDP/BDDP time stamp with
+		 * null if it's still valid.
+		 */
+		if (newInfo.getUnicastValidTime() != null) {
+			existingInfo.setUnicastValidTime(newInfo.getUnicastValidTime());
+		} else if (newInfo.getMulticastValidTime() != null) {
+			existingInfo.setMulticastValidTime(newInfo.getMulticastValidTime());
+		}	
+
+		/*
+		 * Update Link latency if we've accumulated enough latency data points
+		 * and if the average exceeds +/- the current stored latency by the
+		 * defined update threshold.
+		 */
+		U64 currentLatency = existingInfo.getCurrentLatency();
+		U64 latencyToUse = existingInfo.addObservedLatency(lk.getLatency());
+
+		if (currentLatency == null) {
+			/* no-op; already 'changed' as this is a new link */
+		} else if (!latencyToUse.equals(currentLatency) && !ignoreBDDP_haveLLDPalready) {
+			log.debug("Updating link {} latency to {}ms", lk.toKeyString(), latencyToUse.getValue());
+			lk.setLatency(latencyToUse);
 			linkChanged = true;
+		} else {
+			log.trace("No need to update link latency {}", lk.toString());
 		}
 
 		return linkChanged;
 	}
 
-	@LogMessageDocs({
-		@LogMessageDoc(message="Inter-switch link detected:",
-				explanation="Detected a new link between two openflow switches," +
-				"use show link to find current status"),
-				@LogMessageDoc(message="Inter-switch link updated:",
-				explanation="Detected a link change between two openflow switches, " +
-						"use show link to find current status")
-	})
 	protected boolean addOrUpdateLink(Link lt, LinkInfo newInfo) {
-
 		boolean linkChanged = false;
 
 		lock.writeLock().lock();
 		try {
-			// put the new info. if an old info exists, it will be returned.
-			LinkInfo oldInfo = links.put(lt, newInfo);
-			if (oldInfo != null
-					&& oldInfo.getFirstSeenTime().getTime() < newInfo.getFirstSeenTime().getTime())
-				newInfo.setFirstSeenTime(oldInfo.getFirstSeenTime());
+			/*
+			 * Put the new info only if new. We want a single LinkInfo
+			 * to exist per Link. This will allow us to track latencies
+			 * without having to conduct a deep, potentially expensive
+			 * copy each time a link is updated.
+			 */
+			LinkInfo existingInfo = null;
+			if (links.get(lt) == null) {
+				links.put(lt, newInfo); /* Only put if doesn't exist or null value */
+			} else {
+				existingInfo = links.get(lt);
+			}
+
+			/* Update existing LinkInfo with most recent time stamp */
+			if (existingInfo != null && existingInfo.getFirstSeenTime().before(newInfo.getFirstSeenTime())) {
+				existingInfo.setFirstSeenTime(newInfo.getFirstSeenTime());
+			}
 
 			if (log.isTraceEnabled()) {
-				log.trace("addOrUpdateLink: {} {}",
-						lt,
-						(newInfo.getMulticastValidTime() != null) ? "multicast"
-								: "unicast");
+				log.trace("addOrUpdateLink: {} {}", lt,
+						(newInfo.getMulticastValidTime() != null) ? "multicast" : "unicast");
 			}
 
 			UpdateOperation updateOperation = null;
 			linkChanged = false;
 
-			if (oldInfo == null) {
+			if (existingInfo == null) {
 				addLink(lt, newInfo);
 				updateOperation = UpdateOperation.LINK_UPDATED;
 				linkChanged = true;
@@ -1374,18 +1432,18 @@ IFloodlightModule, IInfoProvider {
 				// Add all to event history
 				LinkType linkType = getLinkType(lt, newInfo);
 				if (linkType == ILinkDiscovery.LinkType.DIRECT_LINK) {
-					log.info("Inter-switch link detected: {}", lt);
+					log.debug("Inter-switch link detected: {}", lt);
 					eventCategory.newEventNoFlush(new DirectLinkEvent(lt.getSrc(),
 							lt.getSrcPort(), lt.getDst(), lt.getDstPort(), "direct-link-added::rcvd LLDP"));
 				}
 				notifier.postNotification("Link added: " + lt.toString());
 			} else {
-				linkChanged = updateLink(lt, oldInfo, newInfo);
+				linkChanged = updateLink(lt, existingInfo, newInfo);
 				if (linkChanged) {
 					updateOperation = UpdateOperation.LINK_UPDATED;
 					LinkType linkType = getLinkType(lt, newInfo);
 					if (linkType == ILinkDiscovery.LinkType.DIRECT_LINK) {
-						log.info("Inter-switch link updated: {}", lt);
+						log.debug("Inter-switch link updated: {}", lt);
 						eventCategory.newEventNoFlush(new DirectLinkEvent(lt.getSrc(),
 								lt.getSrcPort(), lt.getDst(), lt.getDstPort(),
 								"link-port-state-updated::rcvd LLDP"));
@@ -1394,11 +1452,6 @@ IFloodlightModule, IInfoProvider {
 				}
 			}
 
-			// Write changes to storage. This will always write the updated
-			// valid time, plus the port states if they've changed (i.e. if
-			// they weren't set to null in the previous block of code.
-			writeLinkToStorage(lt, newInfo);
-
 			if (linkChanged) {
 				// find out if the link was added or removed here.
 				updates.add(new LDUpdate(lt.getSrc(), lt.getSrcPort(),
@@ -1406,7 +1459,22 @@ IFloodlightModule, IInfoProvider {
 						lt.getLatency(),
 						getLinkType(lt, newInfo),
 						updateOperation));
+				/* Update link structure (FIXME shouldn't have to do this, since it should be the same object) */
+				Iterator<Entry<Link, LinkInfo>> it = links.entrySet().iterator();
+				while (it.hasNext()) {
+					Entry<Link, LinkInfo> entry = it.next();
+					if (entry.getKey().equals(lt)) {
+						entry.getKey().setLatency(lt.getLatency());
+						break;
+					}
+				}
 			}
+			
+			// Write changes to storage. This will always write the updated
+			// valid time, plus the port states if they've changed (i.e. if
+			// they weren't set to null in the previous block of code.
+			writeLinkToStorage(lt, newInfo);
+			
 		} finally {
 			lock.writeLock().unlock();
 		}
@@ -1445,9 +1513,6 @@ IFloodlightModule, IInfoProvider {
 	 * @param links
 	 *            The List of @LinkTuple to delete.
 	 */
-	@LogMessageDoc(message="Inter-switch link removed:",
-			explanation="A previously detected link between two openflow switches no longer exists, " +
-			"use show link to find current status")
 	protected void deleteLinks(List<Link> links, String reason,
 			List<LDUpdate> updateList) {
 
@@ -1546,38 +1611,41 @@ IFloodlightModule, IInfoProvider {
 	protected void timeoutLinks() {
 		List<Link> eraseList = new ArrayList<Link>();
 		Long curTime = System.currentTimeMillis();
-		boolean linkChanged = false;
-
-		// reentrant required here because deleteLink also write locks
+		boolean unicastTimedOut = false;
+		
+		/* Reentrant required here because deleteLink also write locks. */
 		lock.writeLock().lock();
 		try {
-			Iterator<Entry<Link, LinkInfo>> it = this.links.entrySet()
-					.iterator();
+			Iterator<Entry<Link, LinkInfo>> it = this.links.entrySet().iterator();
 			while (it.hasNext()) {
 				Entry<Link, LinkInfo> entry = it.next();
 				Link lt = entry.getKey();
 				LinkInfo info = entry.getValue();
 
-				// Timeout the unicast and multicast LLDP valid times
-				// independently.
+				/* Timeout the unicast and multicast LLDP valid times independently. */
 				if ((info.getUnicastValidTime() != null)
 						&& (info.getUnicastValidTime().getTime()
 								+ (this.LINK_TIMEOUT * 1000) < curTime)) {
+					unicastTimedOut = true;
 					info.setUnicastValidTime(null);
-					linkChanged = true;
 				}
 				if ((info.getMulticastValidTime() != null)
 						&& (info.getMulticastValidTime().getTime()
 								+ (this.LINK_TIMEOUT * 1000) < curTime)) {
 					info.setMulticastValidTime(null);
-					linkChanged = true;
 				}
-				// Add to the erase list only if the unicast
-				// time is null.
-				if (info.getUnicastValidTime() == null
+				/* 
+				 * Add to the erase list only if the unicast time is null
+				 * and the multicast time is null as well. Otherwise, if
+				 * only the unicast time is null and we just set it to 
+				 * null (meaning it just timed out), then we transition
+				 * from unicast to multicast.
+				 */
+				if (info.getUnicastValidTime() == null 
 						&& info.getMulticastValidTime() == null) {
 					eraseList.add(entry.getKey());
-				} else if (linkChanged) {
+				} else if (unicastTimedOut) {
+					/* Just moved from unicast to multicast. */
 					updates.add(new LDUpdate(lt.getSrc(), lt.getSrcPort(),
 							lt.getDst(), lt.getDstPort(), lt.getLatency(),
 							getLinkType(lt, info),
@@ -1585,8 +1653,7 @@ IFloodlightModule, IInfoProvider {
 				}
 			}
 
-			// if any link was deleted or any link was changed.
-			if ((eraseList.size() > 0) || linkChanged) {
+			if (!eraseList.isEmpty()) {
 				deleteLinks(eraseList, "LLDP timeout");
 			}
 		} finally {
@@ -1597,14 +1664,6 @@ IFloodlightModule, IInfoProvider {
 	//******************
 	// Internal Helper Methods
 	//******************
-	@LogMessageDoc(level="WARN",
-			message="Could not get list of interfaces of local machine to " +
-					"encode in TLV: {detail-msg}",
-					explanation="Outgoing LLDP packets encode a unique hash to " +
-							"identify the local machine. The list of network " +
-							"interfaces is used as input and the controller failed " +
-							"to query this list",
-							recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG)
 	protected void setControllerTLV() {
 		// Setting the controllerTLVValue based on current nano time,
 		// controller's IP address, and the network interface object hash
@@ -1946,15 +2005,35 @@ IFloodlightModule, IInfoProvider {
 		// read our config options
 		Map<String, String> configOptions = context.getConfigParams(this);
 		try {
-			String histSize = configOptions.get("eventhistorysize");
+			String histSize = configOptions.get("event-history-size");
 			if (histSize != null) {
 				EVENT_HISTORY_SIZE = Short.parseShort(histSize);
 			}
 		} catch (NumberFormatException e) {
-			log.warn("Error event history size, using default of {} seconds", EVENT_HISTORY_SIZE);
+			log.warn("Error parsing event history size. Using default of {}", EVENT_HISTORY_SIZE);
 		}
 		log.debug("Event history size set to {}", EVENT_HISTORY_SIZE);
 
+		try {
+			String latencyHistorySize = configOptions.get("latency-history-size");
+			if (latencyHistorySize != null) {
+				LATENCY_HISTORY_SIZE = Integer.parseInt(latencyHistorySize);
+			}
+		} catch (NumberFormatException e) {
+			log.warn("Error in latency history size. Using default of {} LLDP intervals", LATENCY_HISTORY_SIZE);
+		}
+		log.info("Link latency history set to {} LLDP data points", LATENCY_HISTORY_SIZE);
+
+		try {
+			String latencyUpdateThreshold = configOptions.get("latency-update-threshold");
+			if (latencyUpdateThreshold != null) {
+				LATENCY_UPDATE_THRESHOLD = Double.parseDouble(latencyUpdateThreshold);
+			}
+		} catch (NumberFormatException e) {
+			log.warn("Error in latency update threshold. Must be between 0 and 1. Using default of {}", LATENCY_UPDATE_THRESHOLD);
+		}
+		log.info("Latency update threshold set to +/-{} ({}%) of rolling historical average", LATENCY_UPDATE_THRESHOLD, LATENCY_UPDATE_THRESHOLD * 100);
+
 		// Set the autoportfast feature to false.
 		this.autoPortFastFeature = AUTOPORTFAST_DEFAULT;
 
@@ -1979,28 +2058,6 @@ IFloodlightModule, IInfoProvider {
 	}
 
 	@Override
-	@LogMessageDocs({
-		@LogMessageDoc(level = "ERROR",
-				message = "No storage source found.",
-				explanation = "Storage source was not initialized; cannot initialize "
-						+ "link discovery.",
-						recommendation = LogMessageDoc.REPORT_CONTROLLER_BUG),
-						@LogMessageDoc(level = "ERROR",
-						message = "Error in installing listener for "
-								+ "switch config table {table}",
-								explanation = "Failed to install storage notification for the "
-										+ "switch config table",
-										recommendation = LogMessageDoc.REPORT_CONTROLLER_BUG),
-										@LogMessageDoc(level = "ERROR",
-										message = "No storage source found.",
-										explanation = "Storage source was not initialized; cannot initialize "
-												+ "link discovery.",
-												recommendation = LogMessageDoc.REPORT_CONTROLLER_BUG),
-												@LogMessageDoc(level = "ERROR",
-												message = "Exception in LLDP send timer.",
-												explanation = "An unknown error occured while sending LLDP "
-														+ "messages to switches.",
-														recommendation = LogMessageDoc.CHECK_SWITCH) })
 	public void startUp(FloodlightModuleContext context) throws FloodlightModuleException {
 
 		// Initialize role to floodlight provider role.
@@ -2159,7 +2216,6 @@ IFloodlightModule, IInfoProvider {
 		}
 	}
 
-
 	//*********************
 	//  IInfoProvider
 	//*********************
@@ -2232,5 +2288,4 @@ IFloodlightModule, IInfoProvider {
 			//no-op
 		}
 	}
-
-}
+}
\ No newline at end of file
diff --git a/src/main/java/net/floodlightcontroller/linkdiscovery/internal/LinkInfo.java b/src/main/java/net/floodlightcontroller/linkdiscovery/internal/LinkInfo.java
new file mode 100644
index 0000000000000000000000000000000000000000..ee7a1fce484595114b25b68e150d7b2c5479e251
--- /dev/null
+++ b/src/main/java/net/floodlightcontroller/linkdiscovery/internal/LinkInfo.java
@@ -0,0 +1,254 @@
+/**
+ *    Copyright 2011, Big Switch Networks, Inc.*    Originally created by David Erickson, Stanford University
+ **    Licensed under the Apache License, Version 2.0 (the "License"); you may
+ *    not use this file except in compliance with the License. You may obtain
+ *    a copy of the License at
+ *
+ *         http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *    Unless required by applicable law or agreed to in writing, software
+ *    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ *    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ *    License for the specific language governing permissions and limitations
+ *    under the License.
+ **/
+
+package net.floodlightcontroller.linkdiscovery.internal;
+
+import java.util.ArrayDeque;
+import java.util.Date;
+
+import org.projectfloodlight.openflow.types.U64;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import net.floodlightcontroller.linkdiscovery.ILinkDiscovery.LinkType;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+
+public class LinkInfo {
+	private static final Logger log = LoggerFactory.getLogger(LinkInfo.class);
+	
+	private Date firstSeenTime;
+	private Date lastLldpReceivedTime; /* Standard LLDP received time */
+	private Date lastBddpReceivedTime; /* Modified LLDP received time  */
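+	/* Latency smoothing state: the latency currently advertised for this link, a rolling
+	 * window of recent observations, and the window size and update threshold taken from
+	 * LinkDiscoveryManager's configuration. */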
+	private U64 currentLatency;
+	private ArrayDeque<U64> latencyHistory;
+	private int latencyHistoryWindow;
+	private double latencyUpdateThreshold;
+	
+	public LinkInfo(Date firstSeenTime, Date lastLldpReceivedTime, Date lastBddpReceivedTime) {
+		this.firstSeenTime = firstSeenTime;
+		this.lastLldpReceivedTime = lastLldpReceivedTime;
+		this.lastBddpReceivedTime = lastBddpReceivedTime;
+		this.currentLatency = null;
+		this.latencyHistory = new ArrayDeque<U64>(LinkDiscoveryManager.LATENCY_HISTORY_SIZE);
+		this.latencyHistoryWindow = LinkDiscoveryManager.LATENCY_HISTORY_SIZE;
+		this.latencyUpdateThreshold = LinkDiscoveryManager.LATENCY_UPDATE_THRESHOLD;
+	}
+
+	public LinkInfo(LinkInfo fromLinkInfo) {
+		this.firstSeenTime = fromLinkInfo.getFirstSeenTime();
+		this.lastLldpReceivedTime = fromLinkInfo.getUnicastValidTime();
+		this.lastBddpReceivedTime = fromLinkInfo.getMulticastValidTime();
+		this.currentLatency = fromLinkInfo.currentLatency;
+		this.latencyHistory = new ArrayDeque<U64>(fromLinkInfo.getLatencyHistory());
+		this.latencyHistoryWindow = fromLinkInfo.getLatencyHistoryWindow();
+		this.latencyUpdateThreshold = fromLinkInfo.getLatencyUpdateThreshold();
+	}
+
+	/** 
+	 * The port states stored here are topology's last knowledge of
+	 * the state of the port. This mostly mirrors the state
+	 * maintained in the port list in IOFSwitch (i.e. the one returned
+	 * from getPort), except that during a port status message the
+	 * IOFSwitch port state will already have been updated with the
+	 * new port state, so topology needs to keep its own copy so that
+	 * it can determine if the port state has changed and therefore
+	 * requires the new state to be written to storage.
+	 */
+
+	private int getLatencyHistoryWindow() {
+		return latencyHistoryWindow;
+	}
+
+	private double getLatencyUpdateThreshold() {
+		return latencyUpdateThreshold;
+	}
+	
+	private ArrayDeque<U64> getLatencyHistory() {
+		return latencyHistory;
+	}
+
+	private U64 getLatencyHistoryAverage() {
+		if (!isLatencyHistoryFull()) {
+			return null;
+		} else { /* guaranteed to be at latencyHistoryWindow capacity */
+			double avg = 0;
+			for (U64 l : latencyHistory) {
+				avg = avg + l.getValue();
+			}
+			avg = avg / latencyHistoryWindow;
+			return U64.of((long) avg);
+		}
+	}
+	
+	/**
+	 * Retrieve the current latency, and if necessary
+	 * compute and replace the current latency with an
+	 * updated latency based on the historical average.
+	 * @return the most up-to-date latency as permitted by algorithm
+	 */
+	private U64 getLatency() {
+		U64 newLatency = getLatencyHistoryAverage();
+		if (newLatency != null) {
+			/* Update only if the new average differs from the current latency by at least the threshold fraction */
+			double currentValue = (currentLatency.getValue() == 0 ? 1 : currentLatency.getValue());
+			double relativeChange = Math.abs(newLatency.getValue() - currentLatency.getValue()) / currentValue;
+			if (relativeChange >= latencyUpdateThreshold) {
+				/* perform update */
+				log.debug("Updating link latency from {} to {}", currentLatency.getValue(), newLatency.getValue());
+				currentLatency = newLatency;
+			}
+		}
+		return currentLatency;
+	}
+
+	/**
+	 * Determine if we've observed enough latency values
+	 * to consider computing a new latency value based
+	 * on the historical average. A minimum history size
+	 * must be met prior to updating a latency.
+	 * 
+	 * @return true if full; false if not full
+	 */
+	private boolean isLatencyHistoryFull() {
+		return (latencyHistory.size() == latencyHistoryWindow);
+	}
+	
+	/**
+	 * Append a new (presumably most recent) latency
+	 * to the list. Sets the current latency if this
+	 * is the first latency update performed. Note
+	 * the latter serves as a latency initializer.
+	 * 
+	 * @param latency
+	 * @return latency to use for the link; either initial or historical average
+	 */
+	public U64 addObservedLatency(U64 latency) {
+		if (isLatencyHistoryFull()) {
+			latencyHistory.removeFirst();
+		}
+		latencyHistory.addLast(latency);
+
+		if (currentLatency == null) {
+			currentLatency = latency;
+			return currentLatency;
+		} else {
+			return getLatency();
+		}
+	}
+	
+	/**
+	 * Read-only. Retrieve the currently-assigned
+	 * latency for this link. Does not attempt to
+	 * update or compute an average.
+	 * @return the latency; null if an initial latency has not been set
+	 */
+	public U64 getCurrentLatency() {
+		return currentLatency;
+	}
+
+	public Date getFirstSeenTime() {
+		return firstSeenTime;
+	}
+
+	public void setFirstSeenTime(Date firstSeenTime) {
+		this.firstSeenTime = firstSeenTime;
+	}
+
+	public Date getUnicastValidTime() {
+		return lastLldpReceivedTime;
+	}
+
+	public void setUnicastValidTime(Date unicastValidTime) {
+		this.lastLldpReceivedTime = unicastValidTime;
+	}
+
+	public Date getMulticastValidTime() {
+		return lastBddpReceivedTime;
+	}
+
+	public void setMulticastValidTime(Date multicastValidTime) {
+		this.lastBddpReceivedTime = multicastValidTime;
+	}
+
+	@JsonIgnore
+	public LinkType getLinkType() {
+		if (lastLldpReceivedTime != null) {
+			return LinkType.DIRECT_LINK;
+		} else if (lastBddpReceivedTime != null) {
+			return LinkType.MULTIHOP_LINK;
+		}
+		return LinkType.INVALID_LINK;
+	}
+
+	/* (non-Javadoc)
+	 * @see java.lang.Object#hashCode()
+	 */
+	 @Override
+	 public int hashCode() {
+		final int prime = 5557;
+		int result = 1;
+		result = prime * result + ((firstSeenTime == null) ? 0 : firstSeenTime.hashCode());
+		result = prime * result + ((lastLldpReceivedTime == null) ? 0 : lastLldpReceivedTime.hashCode());
+		result = prime * result + ((lastBddpReceivedTime == null) ? 0 : lastBddpReceivedTime.hashCode());
+		return result;
+	 }
+
+	 /* (non-Javadoc)
+	  * @see java.lang.Object#equals(java.lang.Object)
+	  */
+	 @Override
+	 public boolean equals(Object obj) {
+		 if (this == obj)
+			 return true;
+		 if (obj == null)
+			 return false;
+		 if (!(obj instanceof LinkInfo))
+			 return false;
+		 LinkInfo other = (LinkInfo) obj;
+
+		 if (firstSeenTime == null) {
+			 if (other.firstSeenTime != null)
+				 return false;
+		 } else if (!firstSeenTime.equals(other.firstSeenTime))
+			 return false;
+
+		 if (lastLldpReceivedTime == null) {
+			 if (other.lastLldpReceivedTime != null)
+				 return false;
+		 } else if (!lastLldpReceivedTime.equals(other.lastLldpReceivedTime))
+			 return false;
+
+		 if (lastBddpReceivedTime == null) {
+			 if (other.lastBddpReceivedTime != null)
+				 return false;
+		 } else if (!lastBddpReceivedTime.equals(other.lastBddpReceivedTime))
+			 return false;
+
+		 return true;
+	 }
+
+
+	 /* (non-Javadoc)
+	  * @see java.lang.Object#toString()
+	  */
+	 @Override
+	 public String toString() {
+		 return "LinkInfo [unicastValidTime=" + ((lastLldpReceivedTime == null) ? "null" : lastLldpReceivedTime.getTime())
+				 + ", multicastValidTime=" + ((lastBddpReceivedTime == null) ? "null" : lastBddpReceivedTime.getTime())
+				 + "]";
+	 }
+}
diff --git a/src/main/java/net/floodlightcontroller/linkdiscovery/web/DirectedLinksResource.java b/src/main/java/net/floodlightcontroller/linkdiscovery/web/DirectedLinksResource.java
index 37dbd3953ee32287d8b1076e843faec154e1e15d..c6262ef6f3d959d0f2f60bb1a504cdbff7563188 100644
--- a/src/main/java/net/floodlightcontroller/linkdiscovery/web/DirectedLinksResource.java
+++ b/src/main/java/net/floodlightcontroller/linkdiscovery/web/DirectedLinksResource.java
@@ -23,8 +23,8 @@ import java.util.Set;
 
 import net.floodlightcontroller.linkdiscovery.ILinkDiscovery.LinkDirection;
 import net.floodlightcontroller.linkdiscovery.ILinkDiscovery.LinkType;
+import net.floodlightcontroller.linkdiscovery.internal.LinkInfo;
 import net.floodlightcontroller.linkdiscovery.ILinkDiscoveryService;
-import net.floodlightcontroller.linkdiscovery.LinkInfo;
 import net.floodlightcontroller.routing.Link;
 
 import org.restlet.resource.Get;
diff --git a/src/main/java/net/floodlightcontroller/linkdiscovery/web/ExternalLinksResource.java b/src/main/java/net/floodlightcontroller/linkdiscovery/web/ExternalLinksResource.java
index daf46642e0b2de4b50bcf545b0cab498475ebdb6..d25c693466947caa352ad522cb0d516f234564d5 100644
--- a/src/main/java/net/floodlightcontroller/linkdiscovery/web/ExternalLinksResource.java
+++ b/src/main/java/net/floodlightcontroller/linkdiscovery/web/ExternalLinksResource.java
@@ -23,8 +23,8 @@ import java.util.Set;
 
 import net.floodlightcontroller.linkdiscovery.ILinkDiscovery.LinkDirection;
 import net.floodlightcontroller.linkdiscovery.ILinkDiscovery.LinkType;
+import net.floodlightcontroller.linkdiscovery.internal.LinkInfo;
 import net.floodlightcontroller.linkdiscovery.ILinkDiscoveryService;
-import net.floodlightcontroller.linkdiscovery.LinkInfo;
 import net.floodlightcontroller.routing.Link;
 
 import org.projectfloodlight.openflow.types.DatapathId;
diff --git a/src/main/java/net/floodlightcontroller/linkdiscovery/web/LinksResource.java b/src/main/java/net/floodlightcontroller/linkdiscovery/web/LinksResource.java
index e7d85f79ffe4bee5d76de5f0e81523c3a43f8201..aa52be4a7138746bdaf38b95a4be06de5a91bea0 100644
--- a/src/main/java/net/floodlightcontroller/linkdiscovery/web/LinksResource.java
+++ b/src/main/java/net/floodlightcontroller/linkdiscovery/web/LinksResource.java
@@ -23,8 +23,8 @@ import java.util.Set;
 
 import net.floodlightcontroller.linkdiscovery.ILinkDiscovery.LinkDirection;
 import net.floodlightcontroller.linkdiscovery.ILinkDiscovery.LinkType;
+import net.floodlightcontroller.linkdiscovery.internal.LinkInfo;
 import net.floodlightcontroller.linkdiscovery.ILinkDiscoveryService;
-import net.floodlightcontroller.linkdiscovery.LinkInfo;
 import net.floodlightcontroller.routing.Link;
 
 import org.projectfloodlight.openflow.types.DatapathId;
diff --git a/src/main/java/net/floodlightcontroller/packetstreamer/PacketStreamerHandler.java b/src/main/java/net/floodlightcontroller/packetstreamer/PacketStreamerHandler.java
index a492cb82932bd02f3bcddab5cadf28f8e05e83bb..d5d92869671fbedc014413aa93adeffb91b9ba1e 100644
--- a/src/main/java/net/floodlightcontroller/packetstreamer/PacketStreamerHandler.java
+++ b/src/main/java/net/floodlightcontroller/packetstreamer/PacketStreamerHandler.java
@@ -16,9 +16,6 @@
 
 package net.floodlightcontroller.packetstreamer;
 
-import net.floodlightcontroller.core.annotations.LogMessageCategory;
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
-import net.floodlightcontroller.core.annotations.LogMessageDocs;
 import net.floodlightcontroller.packetstreamer.thrift.*;
 
 import java.nio.ByteBuffer;
@@ -35,7 +32,6 @@ import org.slf4j.LoggerFactory;
 /**
  * The PacketStreamer handler class that implements the service APIs.
  */
-@LogMessageCategory("OpenFlow Message Tracing")
 public class PacketStreamerHandler implements PacketStreamer.Iface {
 
 	/**
@@ -85,18 +81,6 @@ public class PacketStreamerHandler implements PacketStreamer.Iface {
      * @return A list of packets associated with the session
      */
     @Override
-    @LogMessageDocs({
-        @LogMessageDoc(level="ERROR",
-                message="Interrupted while waiting for session start",
-                explanation="The thread was interrupted waiting " +
-                     "for the packet streamer session to start",
-                recommendation=LogMessageDoc.CHECK_CONTROLLER),
-        @LogMessageDoc(level="ERROR",
-                message="Interrupted while waiting for packets",
-                explanation="The thread was interrupted waiting " +
-                        "for packets",
-                recommendation=LogMessageDoc.CHECK_CONTROLLER)
-    })
     public List<ByteBuffer> getPackets(String sessionid)
             throws org.apache.thrift.TException {
         List<ByteBuffer> packets = new ArrayList<ByteBuffer>();
@@ -134,18 +118,6 @@ public class PacketStreamerHandler implements PacketStreamer.Iface {
      * @throws TException
      */
     @Override
-    @LogMessageDocs({
-        @LogMessageDoc(level="ERROR",
-                message="Could not push empty message",
-                explanation="An empty message was sent to the packet streamer",
-                recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG),
-        @LogMessageDoc(level="ERROR",
-                message="queue for session {sessionId} is null",
-                explanation="The queue for the packet streamer session " +
-                		"is missing",
-                recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG)
-    })
-
     public int pushMessageSync(Message msg)
             throws org.apache.thrift.TException {
 
diff --git a/src/main/java/net/floodlightcontroller/packetstreamer/PacketStreamerServer.java b/src/main/java/net/floodlightcontroller/packetstreamer/PacketStreamerServer.java
index e709ff7c75ebe417c5ad0a4a604a19079ce3fe42..c0c19c47c807adfb541b73ffbdeacbeab4eec01a 100644
--- a/src/main/java/net/floodlightcontroller/packetstreamer/PacketStreamerServer.java
+++ b/src/main/java/net/floodlightcontroller/packetstreamer/PacketStreamerServer.java
@@ -27,14 +27,12 @@ import org.apache.thrift.transport.TNonblockingServerTransport;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import net.floodlightcontroller.core.annotations.LogMessageCategory;
 // Generated code
 import net.floodlightcontroller.packetstreamer.thrift.*;
 
 /**
  * The PacketStreamer Server that brokers the packet streaming service.
  */
-@LogMessageCategory("OpenFlow Message Tracing")
 public class PacketStreamerServer {
     protected static Logger log = LoggerFactory.getLogger(PacketStreamerServer.class);
     protected static int port = 9090;
diff --git a/src/main/java/net/floodlightcontroller/perfmon/PktInProcessingTime.java b/src/main/java/net/floodlightcontroller/perfmon/PktInProcessingTime.java
index 9e754469d917481db367d2afb67e014866b28c1e..74c79e35302cdfc7fa75ed82ffc102e2bbb0b4d5 100644
--- a/src/main/java/net/floodlightcontroller/perfmon/PktInProcessingTime.java
+++ b/src/main/java/net/floodlightcontroller/perfmon/PktInProcessingTime.java
@@ -29,8 +29,6 @@ import net.floodlightcontroller.core.FloodlightContext;
 import net.floodlightcontroller.core.IFloodlightProviderService;
 import net.floodlightcontroller.core.IOFMessageListener;
 import net.floodlightcontroller.core.IOFSwitch;
-import net.floodlightcontroller.core.annotations.LogMessageCategory;
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
 import net.floodlightcontroller.core.module.FloodlightModuleContext;
 import net.floodlightcontroller.core.module.FloodlightModuleException;
 import net.floodlightcontroller.core.module.IFloodlightModule;
@@ -66,7 +64,6 @@ import org.slf4j.LoggerFactory;
  *    syslog is generated instead
  * 
  */
-@LogMessageCategory("Performance Monitoring")
 public class PktInProcessingTime
     implements IFloodlightModule, IPktInProcessingTimeService {
 
@@ -149,11 +146,6 @@ public class PktInProcessingTime
     }
     
     @Override
-    @LogMessageDoc(level="WARN",
-            message="Time to process packet-in exceeded threshold: {}",
-            explanation="Time to process packet-in exceeded the configured " +
-            		"performance threshold",
-            recommendation=LogMessageDoc.CHECK_CONTROLLER)
     public void recordEndTimePktIn(IOFSwitch sw, OFMessage m, FloodlightContext cntx) {
         if (isEnabled()) {
             long procTimeNs = System.nanoTime() - startTimePktNs;
@@ -207,11 +199,6 @@ public class PktInProcessingTime
     }
     
     @Override
-    @LogMessageDoc(level="INFO",
-        message="Packet processing time threshold for warning" +
-            " set to {time} ms.",
-        explanation="Performance monitoring will log a warning if " +
-    		"packet processing time exceeds the configured threshold")
     public void startUp(FloodlightModuleContext context) {
         // Add our REST API
         restApi.addRestletRoutable(new PerfWebRoutable());
diff --git a/src/main/java/net/floodlightcontroller/routing/ForwardingBase.java b/src/main/java/net/floodlightcontroller/routing/ForwardingBase.java
index 175f464b25a974709caf1c47cbffe0916d132d7e..981090eaa0a9ae521f50a6279b02dbd5d02ebf8e 100644
--- a/src/main/java/net/floodlightcontroller/routing/ForwardingBase.java
+++ b/src/main/java/net/floodlightcontroller/routing/ForwardingBase.java
@@ -30,9 +30,6 @@ import net.floodlightcontroller.core.FloodlightContext;
 import net.floodlightcontroller.core.IFloodlightProviderService;
 import net.floodlightcontroller.core.IOFMessageListener;
 import net.floodlightcontroller.core.IOFSwitch;
-import net.floodlightcontroller.core.annotations.LogMessageCategory;
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
-import net.floodlightcontroller.core.annotations.LogMessageDocs;
 import net.floodlightcontroller.core.internal.IOFSwitchService;
 import net.floodlightcontroller.core.util.AppCookie;
 import net.floodlightcontroller.debugcounter.IDebugCounterService;
@@ -73,7 +70,6 @@ import org.slf4j.LoggerFactory;
  * responsible for programming flows to a switch in response to a policy
  * decision.
  */
-@LogMessageCategory("Flow Programming")
 public abstract class ForwardingBase implements IOFMessageListener {
 
 	protected static Logger log =
@@ -202,18 +198,6 @@ public abstract class ForwardingBase implements IOFMessageListener {
 	 *        OFFlowMod.OFPFC_MODIFY etc.
 	 * @return srcSwitchIncluded True if the source switch is included in this route
 	 */
-	@LogMessageDocs({
-		@LogMessageDoc(level="WARN",
-				message="Unable to push route, switch at DPID {dpid} not available",
-				explanation="A switch along the calculated path for the " +
-						"flow has disconnected.",
-						recommendation=LogMessageDoc.CHECK_SWITCH),
-						@LogMessageDoc(level="ERROR",
-						message="Failure writing flow mod",
-						explanation="An I/O error occurred while writing a " +
-								"flow modification to a switch",
-								recommendation=LogMessageDoc.CHECK_SWITCH)
-	})
 	public boolean pushRoute(Route route, Match match, OFPacketIn pi,
 			DatapathId pinSwitch, U64 cookie, FloodlightContext cntx,
 			boolean reqeustFlowRemovedNotifn, boolean doFlush,
@@ -327,20 +311,6 @@ public abstract class ForwardingBase implements IOFMessageListener {
 	 * @param cntx      context of the packet
 	 * @param flush     force to flush the packet.
 	 */
-	@LogMessageDocs({
-		@LogMessageDoc(level="ERROR",
-				message="BufferId is not and packet data is null. " +
-						"Cannot send packetOut. " +
-						"srcSwitch={dpid} inPort={port} outPort={port}",
-						explanation="The switch send a malformed packet-in." +
-								"The packet will be dropped",
-								recommendation=LogMessageDoc.REPORT_SWITCH_BUG),
-								@LogMessageDoc(level="ERROR",
-								message="Failure writing packet out",
-								explanation="An I/O error occurred while writing a " +
-										"packet out to a switch",
-										recommendation=LogMessageDoc.CHECK_SWITCH)
-	})
 
 	/**
 	 * Pushes a packet-out to a switch.  The assumption here is that
@@ -468,13 +438,6 @@ public abstract class ForwardingBase implements IOFMessageListener {
 		packetOutMultiPort(packet.serialize(), sw, inPort, outPorts, cntx);
 	}
 
-	@LogMessageDocs({
-		@LogMessageDoc(level="ERROR",
-				message="Failure writing deny flow mod",
-				explanation="An I/O error occurred while writing a " +
-						"deny flow mod to a switch",
-						recommendation=LogMessageDoc.CHECK_SWITCH)
-	})
 	public static boolean blockHost(IOFSwitchService switchService,
 			SwitchPort sw_tup, MacAddress host_mac, short hardTimeout, U64 cookie) {
 
diff --git a/src/main/java/net/floodlightcontroller/routing/Link.java b/src/main/java/net/floodlightcontroller/routing/Link.java
index a4017836cbbf93527721320eb6e3fbd22d7b229b..349fb3c81841a16d799c7db208f73174df475539 100755
--- a/src/main/java/net/floodlightcontroller/routing/Link.java
+++ b/src/main/java/net/floodlightcontroller/routing/Link.java
@@ -131,6 +131,8 @@ public class Link implements Comparable<Link> {
                 + ", dst=" + this.dst.toString()
                 + ", inPort="
                 + dstPort.toString()
+                + ", latency="
+                + String.valueOf(latency.getValue())
                 + "]";
     }
     
@@ -158,5 +160,4 @@ public class Link implements Comparable<Link> {
         
         return this.getDstPort().compareTo(a.getDstPort());
     }
-}
-
+}
\ No newline at end of file
diff --git a/src/main/java/net/floodlightcontroller/staticflowentry/StaticFlowEntries.java b/src/main/java/net/floodlightcontroller/staticflowentry/StaticFlowEntries.java
index c7b421c4a6cfd6aedce9248adb299c08d20c35be..12f24d8cc4a65c4a10506a37701e9d81c0f86df2 100644
--- a/src/main/java/net/floodlightcontroller/staticflowentry/StaticFlowEntries.java
+++ b/src/main/java/net/floodlightcontroller/staticflowentry/StaticFlowEntries.java
@@ -23,7 +23,6 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 
-import net.floodlightcontroller.core.annotations.LogMessageCategory;
 import net.floodlightcontroller.core.util.AppCookie;
 import net.floodlightcontroller.staticflowentry.web.StaticFlowEntryPusherResource;
 import net.floodlightcontroller.util.ActionUtils;
@@ -57,7 +56,6 @@ import org.projectfloodlight.openflow.types.U64;
  * Represents static flow entries to be maintained by the controller on the 
  * switches. 
  */
-@LogMessageCategory("Static Flow Pusher")
 public class StaticFlowEntries {
 	protected static Logger log = LoggerFactory.getLogger(StaticFlowEntries.class);
 	private static final int INFINITE_TIMEOUT = 0;
diff --git a/src/main/java/net/floodlightcontroller/staticflowentry/StaticFlowEntryPusher.java b/src/main/java/net/floodlightcontroller/staticflowentry/StaticFlowEntryPusher.java
index e0f33f14be22fa12358503e06007abd561068f85..d4e95df918bb5792906e633c6bf5cab14c9e3780 100644
--- a/src/main/java/net/floodlightcontroller/staticflowentry/StaticFlowEntryPusher.java
+++ b/src/main/java/net/floodlightcontroller/staticflowentry/StaticFlowEntryPusher.java
@@ -36,8 +36,6 @@ import net.floodlightcontroller.core.IOFMessageListener;
 import net.floodlightcontroller.core.IOFSwitch;
 import net.floodlightcontroller.core.IOFSwitchListener;
 import net.floodlightcontroller.core.PortChangeType;
-import net.floodlightcontroller.core.annotations.LogMessageCategory;
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
 import net.floodlightcontroller.core.internal.IOFSwitchService;
 import net.floodlightcontroller.core.module.FloodlightModuleContext;
 import net.floodlightcontroller.core.module.FloodlightModuleException;
@@ -77,7 +75,6 @@ import org.projectfloodlight.openflow.types.U64;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-@LogMessageCategory("Static Flow Pusher")
 /**
  * This module is responsible for maintaining a set of static flows on
  * switches. This is just a big 'ol dumb list of flows and something external
@@ -309,11 +306,6 @@ implements IOFSwitchListener, IFloodlightModule, IStaticFlowEntryPusherService,
 	 *
 	 * @return
 	 */
-	@LogMessageDoc(level="ERROR",
-			message="failed to access storage: {reason}",
-			explanation="Could not retrieve static flows from the system " +
-					"database",
-					recommendation=LogMessageDoc.CHECK_CONTROLLER)
 	private Map<String, Map<String, OFFlowMod>> readEntriesFromStorage() {
 		Map<String, Map<String, OFFlowMod>> entries = new ConcurrentHashMap<String, Map<String, OFFlowMod>>();
 		try {
@@ -575,11 +567,6 @@ implements IOFSwitchListener, IFloodlightModule, IStaticFlowEntryPusherService,
 		}
 	}
 
-	@LogMessageDoc(level="ERROR",
-			message="inconsistent internal state: no switch has rule {rule}",
-			explanation="Inconsistent internat state discovered while " +
-					"deleting a static flow rule",
-					recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG)
 	private void deleteStaticFlowEntry(String entryName) {
 		String dpid = entry2dpid.remove(entryName);
 
@@ -616,11 +603,6 @@ implements IOFSwitchListener, IFloodlightModule, IStaticFlowEntryPusherService,
 	 * @param dpid The datapath ID of the switch to write to
 	 * @param messages The list of OFMessages to write.
 	 */
-	@LogMessageDoc(level="ERROR",
-			message="Tried to write to switch {switch} but got {error}",
-			explanation="An I/O error occured while trying to write a " +
-					"static flow to a switch",
-					recommendation=LogMessageDoc.CHECK_SWITCH)
 	private void writeOFMessagesToSwitch(DatapathId dpid, List<OFMessage> messages) {
 		IOFSwitch ofswitch = switchService.getSwitch(dpid);
 		if (ofswitch != null) {  // is the switch connected
@@ -637,11 +619,6 @@ implements IOFSwitchListener, IFloodlightModule, IStaticFlowEntryPusherService,
 	 * @param dpid The datapath ID of the switch to write to
 	 * @param message The OFMessage to write.
 	 */
-	@LogMessageDoc(level="ERROR",
-			message="Tried to write to switch {switch} but got {error}",
-			explanation="An I/O error occured while trying to write a " +
-					"static flow to a switch",
-					recommendation=LogMessageDoc.CHECK_SWITCH)
 	private void writeOFMessageToSwitch(DatapathId dpid, OFMessage message) {
 		IOFSwitch ofswitch = switchService.getSwitch(dpid);
 		if (ofswitch != null) {  // is the switch connected
@@ -675,11 +652,6 @@ implements IOFSwitchListener, IFloodlightModule, IStaticFlowEntryPusherService,
 	 * @param sw The IOFSwitch to write to
 	 * @param flowMod The OFFlowMod to write
 	 */
-	@LogMessageDoc(level="ERROR",
-			message="Tried to write OFFlowMod to {switch} but got {error}",
-			explanation="An I/O error occured while trying to write a " +
-					"static flow to a switch",
-					recommendation=LogMessageDoc.CHECK_SWITCH)
 	private void writeFlowModToSwitch(IOFSwitch sw, OFFlowMod flowMod) {
 		sw.write(flowMod);
 		sw.flush();
@@ -777,12 +749,6 @@ implements IOFSwitchListener, IFloodlightModule, IStaticFlowEntryPusherService,
 	}
 
 	@Override
-	@LogMessageDoc(level="ERROR",
-	message="Got a FlowRemove message for a infinite " +
-			"timeout flow: {flow} from switch {switch}",
-			explanation="Flows with infinite timeouts should not expire. " +
-					"The switch has expired the flow anyway.",
-					recommendation=LogMessageDoc.REPORT_SWITCH_BUG)
 	public Command receive(IOFSwitch sw, OFMessage msg, FloodlightContext cntx) {
 		switch (msg.getType()) {
 		case FLOW_REMOVED:
diff --git a/src/main/java/net/floodlightcontroller/staticflowentry/web/StaticFlowEntryDeleteResource.java b/src/main/java/net/floodlightcontroller/staticflowentry/web/StaticFlowEntryDeleteResource.java
index 8c8baa3e3fd4dc0a7bac6210a366cb4e617b0574..d64b97103c24fdcfa385adf0f52195426a3632db 100644
--- a/src/main/java/net/floodlightcontroller/staticflowentry/web/StaticFlowEntryDeleteResource.java
+++ b/src/main/java/net/floodlightcontroller/staticflowentry/web/StaticFlowEntryDeleteResource.java
@@ -24,9 +24,6 @@ import org.restlet.resource.ServerResource;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-
-import net.floodlightcontroller.core.annotations.LogMessageCategory;
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
 import net.floodlightcontroller.staticflowentry.StaticFlowEntries;
 import net.floodlightcontroller.staticflowentry.StaticFlowEntryPusher;
 import net.floodlightcontroller.storage.IStorageSourceService;
@@ -41,15 +38,10 @@ import net.floodlightcontroller.storage.IStorageSourceService;
  * 		REST compliant.
  * 
  */
-@LogMessageCategory("Static Flow Pusher Delete Resource")
 public class StaticFlowEntryDeleteResource extends ServerResource {
     protected static Logger log = LoggerFactory.getLogger(StaticFlowEntryDeleteResource.class);
 
     @Post
-    @LogMessageDoc(level="ERROR",
-        message="Error deleting flow mod request: {request}",
-        explanation="An invalid delete request was sent to static flow pusher",
-        recommendation="Fix the format of the static flow mod request")
     public String del(String fmJson) {
         IStorageSourceService storageSource =
                 (IStorageSourceService)getContext().getAttributes().
diff --git a/src/main/java/net/floodlightcontroller/staticflowentry/web/StaticFlowEntryPusherResource.java b/src/main/java/net/floodlightcontroller/staticflowentry/web/StaticFlowEntryPusherResource.java
index cfce15d2bfe239b8cae657e5963eca212344a0ae..6c0897ba24636b3ee8f5102df3604dd34d0a4da6 100644
--- a/src/main/java/net/floodlightcontroller/staticflowentry/web/StaticFlowEntryPusherResource.java
+++ b/src/main/java/net/floodlightcontroller/staticflowentry/web/StaticFlowEntryPusherResource.java
@@ -27,8 +27,6 @@ import org.restlet.resource.ServerResource;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import net.floodlightcontroller.core.annotations.LogMessageCategory;
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
 import net.floodlightcontroller.staticflowentry.StaticFlowEntries;
 import net.floodlightcontroller.staticflowentry.StaticFlowEntryPusher;
 import net.floodlightcontroller.storage.IStorageSourceService;
@@ -39,7 +37,6 @@ import net.floodlightcontroller.util.MatchUtils;
  * @author alexreimers
  *
  */
-@LogMessageCategory("Static Flow Pusher")
 public class StaticFlowEntryPusherResource extends ServerResource {
 	protected static Logger log = LoggerFactory.getLogger(StaticFlowEntryPusherResource.class);
 
@@ -282,10 +279,6 @@ public class StaticFlowEntryPusherResource extends ServerResource {
 	 * @return A string status message
 	 */
 	@Post
-	@LogMessageDoc(level="ERROR",
-	message="Error parsing push flow mod request: {request}",
-	explanation="An invalid request was sent to static flow pusher",
-	recommendation="Fix the format of the static flow mod request")
 	public String store(String fmJson) {
 		IStorageSourceService storageSource =
 				(IStorageSourceService)getContext().getAttributes().
@@ -331,10 +324,6 @@ public class StaticFlowEntryPusherResource extends ServerResource {
 	}
 
 	@Delete
-	@LogMessageDoc(level="ERROR",
-	message="Error deleting flow mod request: {request}",
-	explanation="An invalid delete request was sent to static flow pusher",
-	recommendation="Fix the format of the static flow mod request")
 	public String del(String fmJson) {
 		IStorageSourceService storageSource =
 				(IStorageSourceService)getContext().getAttributes().
diff --git a/src/main/java/net/floodlightcontroller/storage/AbstractStorageSource.java b/src/main/java/net/floodlightcontroller/storage/AbstractStorageSource.java
index d7a6d88d6aeef5d53c2b70e557fe7e931470dc62..f26bdce87ec53c2bd5544dc5cd66e5f206405824 100644
--- a/src/main/java/net/floodlightcontroller/storage/AbstractStorageSource.java
+++ b/src/main/java/net/floodlightcontroller/storage/AbstractStorageSource.java
@@ -30,8 +30,6 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 
-import net.floodlightcontroller.core.annotations.LogMessageCategory;
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
 import net.floodlightcontroller.core.module.FloodlightModuleContext;
 import net.floodlightcontroller.core.module.FloodlightModuleException;
 import net.floodlightcontroller.core.module.IFloodlightModule;
@@ -45,8 +43,6 @@ import net.floodlightcontroller.storage.web.StorageWebRoutable;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-
-@LogMessageCategory("System Database")
 public abstract class AbstractStorageSource 
 implements IStorageSourceService, IFloodlightModule {
 	protected static Logger logger = LoggerFactory.getLogger(AbstractStorageSource.class);
@@ -82,10 +78,6 @@ implements IStorageSourceService, IFloodlightModule {
 			"An unknown error occurred while executing asynchronous " +
 					"database operation";
 
-	@LogMessageDoc(level="ERROR",
-			message="Failure in asynchronous call to executeQuery",
-			explanation=DB_ERROR_EXPLANATION,
-			recommendation=LogMessageDoc.GENERIC_ACTION)
 	abstract class StorageCallable<V> implements Callable<V> {
 		public V call() {
 			try {
@@ -101,10 +93,6 @@ implements IStorageSourceService, IFloodlightModule {
 		abstract protected V doStorageOperation();
 	}
 
-	@LogMessageDoc(level="ERROR",
-			message="Failure in asynchronous call to updateRows",
-			explanation=DB_ERROR_EXPLANATION,
-			recommendation=LogMessageDoc.GENERIC_ACTION)
 	abstract class StorageRunnable implements Runnable {
 		public void run() {
 			try {
@@ -459,11 +447,6 @@ implements IStorageSourceService, IFloodlightModule {
 		}
 	}
 
-	@LogMessageDoc(level="ERROR",
-			message="Exception caught handling storage notification",
-			explanation="An unknown error occured while trying to notify" +
-					" storage listeners",
-					recommendation=LogMessageDoc.GENERIC_ACTION)
 	protected synchronized void notifyListeners(StorageSourceNotification notification) {
 		if (logger.isTraceEnabled()) {
 			logger.trace("Notifying storage listeneres: {}", notification);
diff --git a/src/main/java/net/floodlightcontroller/topology/ITopologyService.java b/src/main/java/net/floodlightcontroller/topology/ITopologyService.java
index e1330bc70d62236ab1737bb2158669b4eb6c374b..77246df3fcd09de59561641e7c58779448a38299 100644
--- a/src/main/java/net/floodlightcontroller/topology/ITopologyService.java
+++ b/src/main/java/net/floodlightcontroller/topology/ITopologyService.java
@@ -17,12 +17,14 @@
 package net.floodlightcontroller.topology;
 
 import java.util.Date;
+import java.util.Map;
 import java.util.Set;
 
 import org.projectfloodlight.openflow.types.DatapathId;
 import org.projectfloodlight.openflow.types.OFPort;
 
 import net.floodlightcontroller.core.module.IFloodlightService;
+import net.floodlightcontroller.routing.Link;
 
 public interface ITopologyService extends IFloodlightService  {
 
@@ -39,7 +41,22 @@ public interface ITopologyService extends IFloodlightService  {
 
 	public DatapathId getOpenflowDomainId(DatapathId switchId);
 	public DatapathId getOpenflowDomainId(DatapathId switchId, boolean tunnelEnabled);
-
+
+	/**
+	 * Returns all links in the network, including both intra-cluster and
+	 * inter-cluster links. Links are grouped per DatapathId.
+	 */
+	public Map<DatapathId, Set<Link>> getAllLinks();
+
+	/**
+	 * Returns true if the given switch port is an edge port, i.e., it does not
+	 * belong to any of the detected network links.
+	 */
+	public boolean isEdge(DatapathId sw, OFPort p);
+
+	/**
+	 * Returns the set of ports on the given switch that are allowed for broadcast.
+	 */
+	public Set<OFPort> getSwitchBroadcastPorts(DatapathId sw);
+
 	/**
 	 * Returns the identifier of the L2 domain of a given switch.
 	 * @param switchId The DPID of the switch in long form
@@ -210,4 +227,4 @@ public interface ITopologyService extends IFloodlightService  {
 	 * has only quarantined ports. Will never return null.
 	 */
 	public Set<OFPort> getPorts(DatapathId sw);
-}
\ No newline at end of file
+}
diff --git a/src/main/java/net/floodlightcontroller/topology/TopologyInstance.java b/src/main/java/net/floodlightcontroller/topology/TopologyInstance.java
index 4122bfb4c23be6b34a36858841dd4bb5a9e0cba9..8aadb71ae3cd3d9a141132e0a8234d7d6fd81687 100644
--- a/src/main/java/net/floodlightcontroller/topology/TopologyInstance.java
+++ b/src/main/java/net/floodlightcontroller/topology/TopologyInstance.java
@@ -17,6 +17,7 @@
 package net.floodlightcontroller.topology;
 
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.LinkedList;
@@ -25,6 +26,13 @@ import java.util.Map;
 import java.util.PriorityQueue;
 import java.util.Set;
 
+import net.floodlightcontroller.routing.BroadcastTree;
+import net.floodlightcontroller.routing.Link;
+import net.floodlightcontroller.routing.Route;
+import net.floodlightcontroller.routing.RouteId;
+import net.floodlightcontroller.servicechaining.ServiceChain;
+import net.floodlightcontroller.util.ClusterDFS;
+
 import org.projectfloodlight.openflow.types.DatapathId;
 import org.projectfloodlight.openflow.types.OFPort;
 import org.projectfloodlight.openflow.types.U64;
@@ -35,20 +43,10 @@ import com.google.common.cache.CacheBuilder;
 import com.google.common.cache.CacheLoader;
 import com.google.common.cache.LoadingCache;
 
-import net.floodlightcontroller.util.ClusterDFS;
-import net.floodlightcontroller.core.annotations.LogMessageCategory;
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
-import net.floodlightcontroller.routing.BroadcastTree;
-import net.floodlightcontroller.routing.Link;
-import net.floodlightcontroller.routing.Route;
-import net.floodlightcontroller.routing.RouteId;
-import net.floodlightcontroller.servicechaining.ServiceChain;
-
 /**
  * A representation of a network topology.  Used internally by
  * {@link TopologyManager}
  */
-@LogMessageCategory("Network Topology")
 public class TopologyInstance {
 
     public static final short LT_SH_LINK = 1;
@@ -70,7 +68,7 @@ public class TopologyInstance {
     protected Map<NodePortTuple, Set<Link>> switchPortLinks; // Set of links organized by node port tuple
     /** Set of links that are blocked. */
     protected Set<Link> blockedLinks;
 
     protected Set<DatapathId> switches;
     protected Set<NodePortTuple> broadcastDomainPorts;
     protected Set<NodePortTuple> tunnelPorts;
@@ -80,8 +78,24 @@ public class TopologyInstance {
 
     // States for routing
     protected Map<DatapathId, BroadcastTree> destinationRootedTrees;
-    protected Map<DatapathId, Set<NodePortTuple>> clusterBroadcastNodePorts;
+
+    protected Map<DatapathId, Set<NodePortTuple>> clusterPorts;
     protected Map<DatapathId, BroadcastTree> clusterBroadcastTrees;
+
+    protected Map<DatapathId, Set<NodePortTuple>> clusterBroadcastNodePorts;
+    // Broadcast tree over the whole topology, which may consist of multiple clusters
+    protected BroadcastTree finiteBroadcastTree;
+    // Set of NodePortTuples of the finiteBroadcastTree
+    protected Set<NodePortTuple> broadcastNodePorts;
+    // Destination-rooted trees over the whole topology (not only the intra-cluster trees)
+    protected Map<DatapathId, BroadcastTree> destinationRootedFullTrees;
+    // Set of all links organized by node port tuple. Note that switchPortLinks does not contain all links of a multi-cluster topology.
+    protected Map<NodePortTuple, Set<Link>> allLinks;
+    // Set of all ports organized by DatapathId. Note that the switchPorts map contains only ports that are part of links.
+    protected Map<DatapathId, Set<OFPort>> allPorts;
+    // Maps each DatapathId to its set of broadcast ports
+    protected Map<DatapathId, Set<OFPort>> broadcastPortMap;
+
 
     protected class PathCacheLoader extends CacheLoader<RouteId, Route> {
         TopologyInstance ti;
@@ -91,7 +105,7 @@ public class TopologyInstance {
 
         @Override
         public Route load(RouteId rid) {
-        	return ti.buildroute(rid);
+            return ti.buildroute(rid);
         }
     }
 
@@ -99,61 +113,50 @@ public class TopologyInstance {
     // in the cache.
     private final PathCacheLoader pathCacheLoader = new PathCacheLoader(this);
     protected LoadingCache<RouteId, Route> pathcache;
-
-    public TopologyInstance() {
-        this.switches = new HashSet<DatapathId>();
-        this.switchPorts = new HashMap<DatapathId, Set<OFPort>>();
-        this.switchPortLinks = new HashMap<NodePortTuple, Set<Link>>();
-        this.broadcastDomainPorts = new HashSet<NodePortTuple>();
-        this.tunnelPorts = new HashSet<NodePortTuple>();
-        this.blockedPorts = new HashSet<NodePortTuple>();
-        this.blockedLinks = new HashSet<Link>();
-    }
-
-    public TopologyInstance(Map<DatapathId, Set<OFPort>> switchPorts,
-                            Map<NodePortTuple, Set<Link>> switchPortLinks,
-                            Set<NodePortTuple> broadcastDomainPorts)
-    {
-        this.switches = new HashSet<DatapathId>(switchPorts.keySet());
-        this.switchPorts = new HashMap<DatapathId, Set<OFPort>>(switchPorts);
-        this.switchPortLinks = new HashMap<NodePortTuple,
-                Set<Link>>(switchPortLinks);
-        this.broadcastDomainPorts = new HashSet<NodePortTuple>(broadcastDomainPorts);
-        this.tunnelPorts = new HashSet<NodePortTuple>();
-        this.blockedPorts = new HashSet<NodePortTuple>();
-        this.blockedLinks = new HashSet<Link>();
-
-        clusters = new HashSet<Cluster>();
-        switchClusterMap = new HashMap<DatapathId, Cluster>();
-    }
+	
     public TopologyInstance(Map<DatapathId, Set<OFPort>> switchPorts,
                             Set<NodePortTuple> blockedPorts,
                             Map<NodePortTuple, Set<Link>> switchPortLinks,
                             Set<NodePortTuple> broadcastDomainPorts,
-                            Set<NodePortTuple> tunnelPorts){
+                            Set<NodePortTuple> tunnelPorts,
+                            Map<NodePortTuple, Set<Link>> allLinks,
+                            Map<DatapathId, Set<OFPort>> allPorts) {
 
         // copy these structures
+	
         this.switches = new HashSet<DatapathId>(switchPorts.keySet());
         this.switchPorts = new HashMap<DatapathId, Set<OFPort>>();
         for(DatapathId sw: switchPorts.keySet()) {
             this.switchPorts.put(sw, new HashSet<OFPort>(switchPorts.get(sw)));
         }
+
+        this.allPorts = new HashMap<DatapathId, Set<OFPort>>();
+        for (DatapathId sw : allPorts.keySet()) {
+            this.allPorts.put(sw, new HashSet<OFPort>(allPorts.get(sw)));
+        }
 
         this.blockedPorts = new HashSet<NodePortTuple>(blockedPorts);
         this.switchPortLinks = new HashMap<NodePortTuple, Set<Link>>();
         for(NodePortTuple npt: switchPortLinks.keySet()) {
             this.switchPortLinks.put(npt,
                                      new HashSet<Link>(switchPortLinks.get(npt)));
+        }
+
+        this.allLinks = new HashMap<NodePortTuple, Set<Link>>();
+        for (NodePortTuple npt : allLinks.keySet()) {
+            this.allLinks.put(npt, new HashSet<Link>(allLinks.get(npt)));
         }
         this.broadcastDomainPorts = new HashSet<NodePortTuple>(broadcastDomainPorts);
         this.tunnelPorts = new HashSet<NodePortTuple>(tunnelPorts);
 
-        blockedLinks = new HashSet<Link>();
-        clusters = new HashSet<Cluster>();
-        switchClusterMap = new HashMap<DatapathId, Cluster>();
-        destinationRootedTrees = new HashMap<DatapathId, BroadcastTree>();
-        clusterBroadcastTrees = new HashMap<DatapathId, BroadcastTree>();
-        clusterBroadcastNodePorts = new HashMap<DatapathId, Set<NodePortTuple>>();
+        this.blockedLinks = new HashSet<Link>();
+
+        this.clusters = new HashSet<Cluster>();
+        this.switchClusterMap = new HashMap<DatapathId, Cluster>();
+        this.destinationRootedTrees = new HashMap<DatapathId, BroadcastTree>();
+        this.destinationRootedFullTrees = new HashMap<DatapathId, BroadcastTree>();
+        this.broadcastNodePorts = new HashSet<NodePortTuple>();
+        this.broadcastPortMap = new HashMap<DatapathId, Set<OFPort>>();
+        this.clusterBroadcastTrees = new HashMap<DatapathId, BroadcastTree>();
+        this.clusterBroadcastNodePorts = new HashMap<DatapathId, Set<NodePortTuple>>();
 
         pathcache = CacheBuilder.newBuilder().concurrencyLevel(4)
                     .maximumSize(1000L)
@@ -164,9 +167,8 @@ public class TopologyInstance {
                                 }
                             });
     }
 
     public void compute() {
-
         // Step 1: Compute clusters ignoring broadcast domain links
         // Create nodes for clusters in the higher level topology
         // Must ignore blocked links.
@@ -180,29 +182,64 @@ public class TopologyInstance {
         // unicast routing.  The trees are rooted at the destination.
         // Cost for tunnel links and direct links are the same.
         calculateShortestPathTreeInClusters();
 
         // Step 3. Compute broadcast tree in each cluster.
         // Cost for tunnel links are high to discourage use of
         // tunnel links.  The cost is set to the number of nodes
         // in the cluster + 1, to use as minimum number of
         // clusters as possible.
         calculateBroadcastNodePortsInClusters();
+
+        // Step 4. Compute end-to-end shortest path trees on the entire topology
+        // for unicast routing.  The trees are rooted at the destination.
+        // Cost for tunnel links and direct links are the same.
+        calculateAllShortestPaths();
+
+        // Step 5. Compute the broadcast tree over the whole topology (needed to
+        // avoid loops).  The tree is taken from the destination-rooted trees of
+        // Step 4, where tunnel links carry a high cost (number of switches + 1)
+        // to discourage their use.
+        calculateAllBroadcastNodePorts();
 
-        // Step 4. print topology.
+        // Step 6. Compute the set of ports used for broadcasting. Edge ports are included.
+        calculateBroadcastPortMap();
+
+        // Step 7. Print the topology.
         printTopology();
     }
 
+    /*
+     * Checks whether the given OF port is an edge port, i.e., it is not part
+     * of any link in the topology.
+     */
+    public boolean isEdge(DatapathId sw, OFPort portId) {
+        NodePortTuple np = new NodePortTuple(sw, portId);
+        return (allLinks.get(np) == null);
+    }
+
+    /*
+     * Returns the broadcast ports for the given DatapathId.
+     */
+    public Set<OFPort> swBroadcastPorts(DatapathId sw) {
+        return this.broadcastPortMap.get(sw);
+    }
+
     public void printTopology() {
-        if (log.isTraceEnabled()) {
-            log.trace("-----------------------------------------------");
-            log.trace("Links: {}",this.switchPortLinks);
-            log.trace("broadcastDomainPorts: {}", broadcastDomainPorts);
-            log.trace("tunnelPorts: {}", tunnelPorts);
-            log.trace("clusters: {}", clusters);
-            log.trace("destinationRootedTrees: {}", destinationRootedTrees);
-            log.trace("clusterBroadcastNodePorts: {}", clusterBroadcastNodePorts);
-            log.trace("-----------------------------------------------");
-        }
+        log.debug("-----------------Topology-----------------------");
+        log.debug("All Links: {}", allLinks);
+		log.debug("Broadcast Tree: {}", finiteBroadcastTree);
+        log.debug("Broadcast Domain Ports: {}", broadcastDomainPorts);
+        log.debug("Tunnel Ports: {}", tunnelPorts);
+        log.debug("Clusters: {}", clusters);
+        log.debug("Destination Rooted Full Trees: {}", destinationRootedFullTrees);
+        log.debug("Broadcast Node Ports: {}", broadcastNodePorts);
+        log.debug("-----------------------------------------------");  
     }
 
     protected void addLinksToOpenflowDomains() {
@@ -239,10 +276,6 @@ public class TopologyInstance {
      *
      * http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
      */
-    @LogMessageDoc(level="ERROR",
-            message="No DFS object for switch {} found.",
-            explanation="The internal state of the topology module is corrupt",
-            recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG)
     public void identifyOpenflowDomains() {
         Map<DatapathId, ClusterDFS> dfsList = new HashMap<DatapathId, ClusterDFS>();
 
@@ -480,7 +513,8 @@ public class TopologyInstance {
         }
     }
 
-    protected BroadcastTree dijkstra(Cluster c, DatapathId root,
+    // Per-cluster Dijkstra (the original implementation), used for the intra-cluster trees.
+    protected BroadcastTree clusterDijkstra(Cluster c, DatapathId root,
                                      Map<Link, Integer> linkCost,
                                      boolean isDstRooted) {
         HashMap<DatapathId, Link> nexthoplinks = new HashMap<DatapathId, Link>();
@@ -539,12 +573,118 @@ public class TopologyInstance {
         BroadcastTree ret = new BroadcastTree(nexthoplinks, cost);
         return ret;
     }
+
+    /*
+     * Dijkstra that calculates destination-rooted trees over the entire topology.
+     */
+    protected BroadcastTree dijkstra(Map<DatapathId, Set<Link>> links, DatapathId root,
+                                     Map<Link, Integer> linkCost,
+                                     boolean isDstRooted) {
+    	HashMap<DatapathId, Link> nexthoplinks = new HashMap<DatapathId, Link>();
+    	HashMap<DatapathId, Integer> cost = new HashMap<DatapathId, Integer>();
+    	int w;
+    	
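+    	// Initialize every node with no next-hop link and the maximum path cost.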
+    	for (DatapathId node: links.keySet()) {
+    		nexthoplinks.put(node, null);
+    		cost.put(node, MAX_PATH_WEIGHT);
+    	}
+		
+    	HashMap<DatapathId, Boolean> seen = new HashMap<DatapathId, Boolean>();
+    	PriorityQueue<NodeDist> nodeq = new PriorityQueue<NodeDist>();
+    	nodeq.add(new NodeDist(root, 0));
+    	cost.put(root, 0);
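+    	// Standard Dijkstra loop: settle the closest unvisited node and relax its links.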
+    	while (nodeq.peek() != null) {
+    		NodeDist n = nodeq.poll();
+    		DatapathId cnode = n.getNode();
+    		int cdist = n.getDist();
+    		if (cdist >= MAX_PATH_WEIGHT) break;
+    		if (seen.containsKey(cnode)) continue;
+    		seen.put(cnode, true);
+
+    		for (Link link: links.get(cnode)) {
+    			DatapathId neighbor;
+
+    			if (isDstRooted) neighbor = link.getSrc();
+    			else neighbor = link.getDst();
+        
+    			// links directed toward cnode will result in this condition
+    			if (neighbor.equals(cnode)) continue;
+
+    			if (seen.containsKey(neighbor)) continue;
+
+    			if (linkCost == null || linkCost.get(link) == null) w = 1;
+    			else w = linkCost.get(link);
+    			int ndist = cdist + w; // the link weight; 1 unless a cost is given (e.g., for tunnel links)
+    			if (ndist < cost.get(neighbor)) {
+    				cost.put(neighbor, ndist);
+    				nexthoplinks.put(neighbor, link);
+    				NodeDist ndTemp = new NodeDist(neighbor, ndist);
+    				// Remove an object that's already in there.
+    				// Note that the comparison is based on only the node id,
+    				// and not node id and distance.
+    				nodeq.remove(ndTemp);
+    				// add the current object to the queue.
+    				nodeq.add(ndTemp);
+    			}
+    		}
+    	}
+
+    	BroadcastTree ret = new BroadcastTree(nexthoplinks, cost);
+
+    	return ret;
+    }
+
+    /*
+     * Variant of calculateShortestPathTreeInClusters that operates on the whole
+     * topology rather than on individual clusters.
+     */
+    public void calculateAllShortestPaths() {
+    	this.broadcastNodePorts.clear();
+    	this.destinationRootedFullTrees.clear();
+    	Map<Link, Integer> linkCost = new HashMap<Link, Integer>();
+        int tunnel_weight = switchPorts.size() + 1;
+		
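+        // Make tunnel links expensive so they are used only when no cheaper path exists.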
+        for(NodePortTuple npt : tunnelPorts) {
+            if (allLinks.get(npt) == null) continue;
+            for(Link link : allLinks.get(npt)) {
+                if (link == null) continue;
+                linkCost.put(link, tunnel_weight);
+            }
+        }
+        
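+        // Group all links by the switch they attach to, so dijkstra() can walk a per-node adjacency.
+        // allLinks is used so that links outside the local cluster are included as well.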
+        Map<DatapathId, Set<Link>> linkDpidMap = new HashMap<DatapathId, Set<Link>>();
+        for(DatapathId s : switches) {
+            if (switchPorts.get(s) == null) continue;
+            for (OFPort p: switchPorts.get(s)) {
+                NodePortTuple np = new NodePortTuple(s, p);
+                if (allLinks.get(np) == null) continue;
+                for (Link l : allLinks.get(np)) {
+                    if (linkDpidMap.containsKey(s)) {
+                        linkDpidMap.get(s).add(l);
+                    } else {
+                        linkDpidMap.put(s, new HashSet<Link>(Arrays.asList(l)));
+                    }
+                }
+            }
+        }   
+        
+        for (DatapathId node : linkDpidMap.keySet()) {
+            BroadcastTree tree = dijkstra(linkDpidMap, node, linkCost, true);
+            destinationRootedFullTrees.put(node, tree);
+        }
+
+        // The finiteBroadcastTree is chosen arbitrarily (the first destination-rooted tree) in this implementation.
+        if (this.destinationRootedFullTrees.size() > 0) {
+            this.finiteBroadcastTree = destinationRootedFullTrees.values().iterator().next();
+        }
+    }
 
     protected void calculateShortestPathTreeInClusters() {
         pathcache.invalidateAll();
         destinationRootedTrees.clear();
 
         Map<Link, Integer> linkCost = new HashMap<Link, Integer>();
         int tunnel_weight = switchPorts.size() + 1;
 
         for(NodePortTuple npt: tunnelPorts) {
@@ -557,7 +697,7 @@ public class TopologyInstance {
 
         for(Cluster c: clusters) {
             for (DatapathId node : c.links.keySet()) {
-                BroadcastTree tree = dijkstra(c, node, linkCost, true);
+                BroadcastTree tree = clusterDijkstra(c, node, linkCost, true);
                 destinationRootedTrees.put(node, tree);
             }
         }
@@ -570,11 +710,49 @@ public class TopologyInstance {
             clusterBroadcastTrees.put(c.id, tree);
         }
     }
-
+    
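+    /*
+     * Returns the set of node-port tuples that form the topology-wide broadcast tree.
+     */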
+    protected Set<NodePortTuple> getAllBroadcastNodePorts() {
+        return this.broadcastNodePorts;
+    }
+
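+    /*
+     * Collects the node-port tuples on both ends of every link of the
+     * finiteBroadcastTree into broadcastNodePorts.
+     */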
+    protected void calculateAllBroadcastNodePorts() {
+        if (this.destinationRootedFullTrees.size() > 0) {
+            this.finiteBroadcastTree = destinationRootedFullTrees.values().iterator().next();
+            Map<DatapathId, Link> links = finiteBroadcastTree.getLinks();
+            if (links == null) return;
+            for (DatapathId nodeId : links.keySet()) {
+                Link l = links.get(nodeId);
+                if (l == null) continue;
+                NodePortTuple npt1 = new NodePortTuple(l.getSrc(), l.getSrcPort());
+                NodePortTuple npt2 = new NodePortTuple(l.getDst(), l.getDstPort());
+                this.broadcastNodePorts.add(npt1);
+                this.broadcastNodePorts.add(npt2);
+            }
+        }
+    }
+
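+    /*
+     * Computes, per switch, the ports on which broadcast is allowed: all edge
+     * ports plus the ports that are part of the topology-wide broadcast tree.
+     */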
+    protected void calculateBroadcastPortMap() {
+        this.broadcastPortMap.clear();
+        if (destinationRootedFullTrees.size() == 0) return;
+
+        for (DatapathId sw : this.switches) {
+            for (OFPort p : this.allPorts.get(sw)) {
+                NodePortTuple npt = new NodePortTuple(sw, p);
+                if (isEdge(sw, p) || broadcastNodePorts.contains(npt)) {
+                    if (broadcastPortMap.containsKey(sw)) {
+                        broadcastPortMap.get(sw).add(p);
+                    } else {
+                        broadcastPortMap.put(sw, new HashSet<OFPort>(Arrays.asList(p)));
+                    }
+                }
+            }
+        }
+    }
+
     protected void calculateBroadcastNodePortsInClusters() {
-
         clusterBroadcastTrees.clear();
-
+        
         calculateBroadcastTreeInClusters();
 
         for(Cluster c: clusters) {
@@ -585,7 +763,7 @@ public class TopologyInstance {
             Set<NodePortTuple> nptSet = new HashSet<NodePortTuple>();
             Map<DatapathId, Link> links = tree.getLinks();
             if (links == null) continue;
-            for(DatapathId nodeId: links.keySet()) {
+            for (DatapathId nodeId: links.keySet()) {
                 Link l = links.get(nodeId);
                 if (l == null) continue;
                 NodePortTuple npt1 = new NodePortTuple(l.getSrc(), l.getSrcPort());
@@ -601,19 +779,19 @@ public class TopologyInstance {
         NodePortTuple npt;
         DatapathId srcId = id.getSrc();
         DatapathId dstId = id.getDst();
+        // NodePortTuples along the route, in traversal order
+        LinkedList<NodePortTuple> sPorts = new LinkedList<NodePortTuple>();
 
-        LinkedList<NodePortTuple> switchPorts = new LinkedList<NodePortTuple>();
-
-        if (destinationRootedTrees == null) return null;
-        if (destinationRootedTrees.get(dstId) == null) return null;
+        if (destinationRootedFullTrees == null) return null;
+        if (destinationRootedFullTrees.get(dstId) == null) return null;
 
-        Map<DatapathId, Link> nexthoplinks = destinationRootedTrees.get(dstId).getLinks();
+        Map<DatapathId, Link> nexthoplinks = destinationRootedFullTrees.get(dstId).getLinks();
 
         if (!switches.contains(srcId) || !switches.contains(dstId)) {
             // This is a switch that is not connected to any other switch
             // hence there was no update for links (and hence it is not
             // in the network)
-            log.debug("buildroute: Standalone switch: {}", srcId);
+            log.info("buildroute: Standalone switch: {}", srcId);
 
             // The only possible non-null path for this case is
             // if srcId equals dstId --- and that too is an 'empty' path []
@@ -621,41 +799,39 @@ public class TopologyInstance {
         } else if ((nexthoplinks!=null) && (nexthoplinks.get(srcId) != null)) {
             while (!srcId.equals(dstId)) {
                 Link l = nexthoplinks.get(srcId);
-
                 npt = new NodePortTuple(l.getSrc(), l.getSrcPort());
-                switchPorts.addLast(npt);
+                sPorts.addLast(npt);
                 npt = new NodePortTuple(l.getDst(), l.getDstPort());
-                switchPorts.addLast(npt);
+                sPorts.addLast(npt);
                 srcId = nexthoplinks.get(srcId).getDst();
             }
         }
         // else, no path exists, and path equals null
 
         Route result = null;
-        if (switchPorts != null && !switchPorts.isEmpty()) {
-            result = new Route(id, switchPorts);
+        if (sPorts != null && !sPorts.isEmpty()) {
+            result = new Route(id, sPorts);
         }
         if (log.isTraceEnabled()) {
             log.trace("buildroute: {}", result);
         }
-        return result; 
+        return result;
     }
 
+    /*
+     * Getter Functions
+     */
+
     protected int getCost(DatapathId srcId, DatapathId dstId) {
         BroadcastTree bt = destinationRootedTrees.get(dstId);
         if (bt == null) return -1;
         return (bt.getCost(srcId));
     }
-
-    /*
-     * Getter Functions
-     */
-
+    
     protected Set<Cluster> getClusters() {
         return clusters;
     }
 
-    // IRoutingEngineService interfaces
     protected boolean routeExists(DatapathId srcId, DatapathId dstId) {
         BroadcastTree bt = destinationRootedTrees.get(dstId);
         if (bt == null) return false;
@@ -664,12 +840,13 @@ public class TopologyInstance {
         return true;
     }
 
+    /*
+     * Calculates an end-to-end route between the given switch ports.
+     */
     protected Route getRoute(ServiceChain sc, DatapathId srcId, OFPort srcPort,
             DatapathId dstId, OFPort dstPort, U64 cookie) {
-
-
-        // Return null the route source and desitnation are the
-        // same switchports.
+        // Return null if the route source and destination are the
+        // same switch ports.
         if (srcId.equals(dstId) && srcPort.equals(dstPort))
             return null;
 
@@ -692,6 +869,7 @@ public class TopologyInstance {
         r = new Route(id, nptList);
         return r;
     }
+    
 
     // NOTE: Return a null route if srcId equals dstId.  The null route
     // need not be stored in the cache.  Moreover, the LoadingCache will
@@ -700,7 +878,6 @@ public class TopologyInstance {
         // Return null route if srcId equals dstId
         if (srcId.equals(dstId)) return null;
 
-
         RouteId id = new RouteId(srcId, dstId);
         Route result = null;
 
@@ -770,26 +947,26 @@ public class TopologyInstance {
         return true;
     }
 
-    protected boolean
-    isIncomingBroadcastAllowedOnSwitchPort(DatapathId sw, OFPort portId) {
-        if (isInternalToOpenflowDomain(sw, portId)) {
-            DatapathId clusterId = getOpenflowDomainId(sw);
+    /*
+     * Uses the topology-wide broadcast tree (finiteBroadcastTree) to prevent loops:
+     * on non-edge ports, incoming broadcast is allowed only if the port is part
+     * of the broadcast tree.
+     */
+    protected boolean isIncomingBroadcastAllowedOnSwitchPort(DatapathId sw, OFPort portId) {
+        if (!isEdge(sw, portId)) {
             NodePortTuple npt = new NodePortTuple(sw, portId);
-            if (clusterBroadcastNodePorts.get(clusterId).contains(npt))
+            if (broadcastNodePorts.contains(npt))
                 return true;
             else return false;
         }
         return true;
     }
 
-    public boolean isConsistent(DatapathId oldSw, OFPort oldPort, DatapathId newSw,
-                                OFPort newPort) {
+
+    public boolean isConsistent(DatapathId oldSw, OFPort oldPort, DatapathId newSw, OFPort newPort) {
         if (isInternalToOpenflowDomain(newSw, newPort)) return true;
         return (oldSw.equals(newSw) && oldPort.equals(newPort));
     }
 
-    protected Set<NodePortTuple>
-    getBroadcastNodePortsInCluster(DatapathId sw) {
+    protected Set<NodePortTuple> getBroadcastNodePortsInCluster(DatapathId sw) {
         DatapathId clusterId = getOpenflowDomainId(sw);
         return clusterBroadcastNodePorts.get(clusterId);
     }
@@ -826,7 +1003,7 @@ public class TopologyInstance {
     public Set<OFPort> getBroadcastPorts(DatapathId targetSw, DatapathId src, OFPort srcPort) {
         Set<OFPort> result = new HashSet<OFPort>();
         DatapathId clusterId = getOpenflowDomainId(targetSw);
-        for(NodePortTuple npt: clusterBroadcastNodePorts.get(clusterId)) {
+        for (NodePortTuple npt : clusterBroadcastNodePorts.get(clusterId)) {
             if (npt.getNodeId().equals(targetSw)) {
                 result.add(npt.getPortId());
             }
@@ -834,14 +1011,11 @@ public class TopologyInstance {
         return result;
     }
 
-    public NodePortTuple
-    getAllowedOutgoingBroadcastPort(DatapathId src, OFPort srcPort, DatapathId dst,
-                                    OFPort dstPort) {
+    public NodePortTuple getAllowedOutgoingBroadcastPort(DatapathId src, OFPort srcPort, DatapathId dst, OFPort dstPort) {
         return null;
     }
 
-    public NodePortTuple
-    getAllowedIncomingBroadcastPort(DatapathId src, OFPort srcPort) {
+    public NodePortTuple getAllowedIncomingBroadcastPort(DatapathId src, OFPort srcPort) {
         return null;
     }
-}
+}
\ No newline at end of file
diff --git a/src/main/java/net/floodlightcontroller/topology/TopologyManager.java b/src/main/java/net/floodlightcontroller/topology/TopologyManager.java
index 4d23837bd28d397ebcfb3be0bd18ce0a88b57a56..303ab84908db2775fd6d07ce19ac5209bba69b8b 100644
--- a/src/main/java/net/floodlightcontroller/topology/TopologyManager.java
+++ b/src/main/java/net/floodlightcontroller/topology/TopologyManager.java
@@ -17,6 +17,7 @@
 package net.floodlightcontroller.topology;
 
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Date;
@@ -38,8 +39,6 @@ import net.floodlightcontroller.core.IHAListener;
 import net.floodlightcontroller.core.IOFMessageListener;
 import net.floodlightcontroller.core.IOFSwitch;
 import net.floodlightcontroller.core.LogicalOFMessageCategory;
-import net.floodlightcontroller.core.annotations.LogMessageCategory;
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
 import net.floodlightcontroller.core.internal.IOFSwitchService;
 import net.floodlightcontroller.core.module.FloodlightModuleContext;
 import net.floodlightcontroller.core.module.FloodlightModuleException;
@@ -84,7 +83,6 @@ import org.slf4j.LoggerFactory;
  * of the network graph, as well as implementing tools for finding routes
  * through the topology.
  */
-@LogMessageCategory("Network Topology")
 public class TopologyManager implements IFloodlightModule, ITopologyService, IRoutingService, ILinkDiscoveryListener, IOFMessageListener {
 
 	protected static Logger log = LoggerFactory.getLogger(TopologyManager.class);
@@ -268,25 +266,21 @@ public class TopologyManager implements IFloodlightModule, ITopologyService, IRo
 	 * Thread for recomputing topology.  The thread is always running,
 	 * however the function applyUpdates() has a blocking call.
 	 */
-	@LogMessageDoc(level="ERROR",
-			message="Error in topology instance task thread",
-			explanation="An unknown error occured in the topology " +
-					"discovery module.",
-					recommendation=LogMessageDoc.CHECK_CONTROLLER)
 	protected class UpdateTopologyWorker implements Runnable {
 		@Override
 		public void run() {
 			try {
-				if (ldUpdates.peek() != null)
+				if (ldUpdates.peek() != null) {
 					updateTopology();
+				}
 				handleMiscellaneousPeriodicEvents();
 			}
 			catch (Exception e) {
 				log.error("Error in topology instance task thread", e);
 			} finally {
-				if (floodlightProviderService.getRole() != HARole.STANDBY)
-					newInstanceTask.reschedule(TOPOLOGY_COMPUTE_INTERVAL_MS,
-							TimeUnit.MILLISECONDS);
+				if (floodlightProviderService.getRole() != HARole.STANDBY) {
+					newInstanceTask.reschedule(TOPOLOGY_COMPUTE_INTERVAL_MS, TimeUnit.MILLISECONDS);
+				}
 			}
 		}
 	}
@@ -311,6 +305,7 @@ public class TopologyManager implements IFloodlightModule, ITopologyService, IRo
 	// **********************
 	// ILinkDiscoveryListener
 	// **********************
+
 	@Override
 	public void linkDiscoveryUpdate(List<LDUpdate> updateList) {
 		if (log.isTraceEnabled()) {
@@ -331,9 +326,45 @@ public class TopologyManager implements IFloodlightModule, ITopologyService, IRo
 	// ITopologyService
 	// ****************
 
-	//
-	// ITopologyService interface methods
-	//
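+	/**
+	 * Collects, per switch, all links attached to its ports, based on this
+	 * manager's switchPorts and switchPortLinks maps.
+	 */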
+	@Override
+	public Map<DatapathId, Set<Link>> getAllLinks() {
+		Map<DatapathId, Set<Link>> dpidLinks = new HashMap<DatapathId, Set<Link>>();
+		TopologyInstance ti = getCurrentInstance(true);
+		Set<DatapathId> switches = ti.getSwitches();
+
+		for (DatapathId s : switches) {
+			if (this.switchPorts.get(s) == null) continue;
+			for (OFPort p : switchPorts.get(s)) {
+				NodePortTuple np = new NodePortTuple(s, p);
+				if (this.switchPortLinks.get(np) == null) continue;
+				for (Link l : this.switchPortLinks.get(np)) {
+					if (dpidLinks.containsKey(s)) {
+						dpidLinks.get(s).add(l);
+					} else {
+						dpidLinks.put(s, new HashSet<Link>(Arrays.asList(l)));
+					}
+				}
+			}
+		}
+
+		return dpidLinks;
+	}
+
+	@Override
+	public boolean isEdge(DatapathId sw, OFPort p) {
+		TopologyInstance ti = getCurrentInstance(true);
+		return ti.isEdge(sw, p);
+	}
+
+	@Override
+	public Set<OFPort> getSwitchBroadcastPorts(DatapathId sw) {
+		TopologyInstance ti = getCurrentInstance(true);
+		return ti.swBroadcastPorts(sw);
+	}
+
 	@Override
 	public Date getLastUpdateTime() {
 		return lastUpdateTime;
@@ -884,9 +915,9 @@ public class TopologyManager implements IFloodlightModule, ITopologyService, IRo
 		ScheduledExecutorService ses = threadPoolService.getScheduledExecutor();
 		newInstanceTask = new SingletonTask(ses, new UpdateTopologyWorker());
 
-		if (role != HARole.STANDBY)
-			newInstanceTask.reschedule(TOPOLOGY_COMPUTE_INTERVAL_MS,
-					TimeUnit.MILLISECONDS);
+		if (role != HARole.STANDBY) {
+			newInstanceTask.reschedule(TOPOLOGY_COMPUTE_INTERVAL_MS, TimeUnit.MILLISECONDS);
+		}
 
 		linkDiscoveryService.addListener(this);
 		floodlightProviderService.addOFMessageListener(OFType.PACKET_IN, this);
@@ -945,11 +976,6 @@ public class TopologyManager implements IFloodlightModule, ITopologyService, IRo
 	 * @param ports
 	 * @param cntx
 	 */
-	@LogMessageDoc(level="ERROR",
-			message="Failed to clear all flows on switch {switch}",
-			explanation="An I/O error occured while trying send " +
-					"topology discovery packet",
-					recommendation=LogMessageDoc.CHECK_SWITCH)
 	public void doMultiActionPacketOut(byte[] packetData, IOFSwitch sw,
 			Set<OFPort> ports,
 			FloodlightContext cntx) {
@@ -1036,7 +1062,7 @@ public class TopologyManager implements IFloodlightModule, ITopologyService, IRo
 			switches.add(pinSwitch);
 		}
 
-		for(DatapathId sid: switches) {
+		for (DatapathId sid : switches) {
 			IOFSwitch sw = switchService.getSwitch(sid);
 			if (sw == null) continue;
 			Collection<OFPort> enabledPorts = sw.getEnabledPortNumbers();
@@ -1051,7 +1077,7 @@ public class TopologyManager implements IFloodlightModule, ITopologyService, IRo
 			Set<OFPort> portsKnownToTopo = ti.getPortsWithLinks(sid);
 
 			if (portsKnownToTopo != null) {
-				for(OFPort p: portsKnownToTopo) {
+				for (OFPort p : portsKnownToTopo) {
 					NodePortTuple npt =
 							new NodePortTuple(sid, p);
 					if (ti.isBroadcastDomainPort(npt) == false) {
@@ -1105,10 +1131,6 @@ public class TopologyManager implements IFloodlightModule, ITopologyService, IRo
 	 * multiple link removed messages.  However, all the updates from
 	 * LinkDiscoveryManager would be propagated to the listeners of topology.
 	 */
-	@LogMessageDoc(level="ERROR",
-			message="Error reading link discovery update.",
-			explanation="Unable to process link discovery update",
-			recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG)
 	public List<LDUpdate> applyUpdates() {
 		List<LDUpdate> appliedUpdates = new ArrayList<LDUpdate>();
 		LDUpdate update = null;
@@ -1118,15 +1140,15 @@ public class TopologyManager implements IFloodlightModule, ITopologyService, IRo
 			} catch (Exception e) {
 				log.error("Error reading link discovery update.", e);
 			}
-			if (log.isTraceEnabled()) {
-				log.trace("Applying update: {}", update);
+			if (log.isDebugEnabled()) {
+				log.debug("Applying update: {}", update);
 			}
 
 			switch (update.getOperation()) {
 			case LINK_UPDATED:
 				addOrUpdateLink(update.getSrc(), update.getSrcPort(),
-						update.getDst(), update.getDstPort(), update.getLatency(),
-						update.getType());
+						update.getDst(), update.getDstPort(),
+						update.getLatency(), update.getType());
 				break;
 			case LINK_REMOVED:
 				removeLink(update.getSrc(), update.getSrcPort(),
@@ -1205,27 +1227,34 @@ public class TopologyManager implements IFloodlightModule, ITopologyService, IRo
 				identifyBroadcastDomainPorts();
 
 		// Remove all links incident on broadcast domain ports.
-		for(NodePortTuple npt: broadcastDomainPorts) {
+		for (NodePortTuple npt : broadcastDomainPorts) {
 			if (switchPortLinks.get(npt) == null) continue;
-			for(Link link: switchPortLinks.get(npt)) {
+			for (Link link : switchPortLinks.get(npt)) {
 				removeLinkFromStructure(openflowLinks, link);
 			}
 		}
 
 		// Remove all tunnel links.
-		for(NodePortTuple npt: tunnelPorts) {
+		for (NodePortTuple npt: tunnelPorts) {
 			if (switchPortLinks.get(npt) == null) continue;
-			for(Link link: switchPortLinks.get(npt)) {
+			for (Link link : switchPortLinks.get(npt)) {
 				removeLinkFromStructure(openflowLinks, link);
 			}
 		}
+
+		// switchPorts contains only ports that are part of links. The calculation of
+		// broadcast ports needs the set of all ports on each switch.
+		Map<DatapathId, Set<OFPort>> allPorts = new HashMap<DatapathId, Set<OFPort>>();
+		for (DatapathId sw : switchPorts.keySet()) {
+			allPorts.put(sw, this.getPorts(sw));
+		}
 
 		TopologyInstance nt = new TopologyInstance(switchPorts,
 				blockedPorts,
 				openflowLinks,
 				broadcastDomainPorts,
-				tunnelPorts);
+				tunnelPorts, switchPortLinks, allPorts);
+
 		nt.compute();
+
 		// We set the instances with and without tunnels to be identical.
 		// If needed, we may compute them differently.
 		currentInstance = nt;
@@ -1236,6 +1265,7 @@ public class TopologyManager implements IFloodlightModule, ITopologyService, IRo
 						new HashMap<DatapathId, List<NodePortTuple>>(),
 						0);
 		eventCategory.newEventWithFlush(new TopologyEvent(reason, topologyInfo));
+
 		return true;
 	}
 
@@ -1256,18 +1286,18 @@ public class TopologyManager implements IFloodlightModule, ITopologyService, IRo
 		// Copy switchPortLinks
 		Map<NodePortTuple, Set<Link>> spLinks =
 				new HashMap<NodePortTuple, Set<Link>>();
-		for(NodePortTuple npt: switchPortLinks.keySet()) {
+		for (NodePortTuple npt : switchPortLinks.keySet()) {
 			spLinks.put(npt, new HashSet<Link>(switchPortLinks.get(npt)));
 		}
 
-		for(NodePortTuple npt: spLinks.keySet()) {
+		for (NodePortTuple npt : spLinks.keySet()) {
 			Set<Link> links = spLinks.get(npt);
 			boolean bdPort = false;
 			ArrayList<Link> linkArray = new ArrayList<Link>();
 			if (links.size() > 2) {
 				bdPort = true;
 			} else if (links.size() == 2) {
-				for(Link l: links) {
+				for (Link l : links) {
 					linkArray.add(l);
 				}
 				// now, there should be two links in [0] and [1].
@@ -1348,40 +1378,40 @@ public class TopologyManager implements IFloodlightModule, ITopologyService, IRo
 	}
 
 	/**
-	 * Add the given link to the data structure.  Returns true if a link was
-	 * added.
+	 * Add the given link to the data structure.
 	 * @param s
 	 * @param l
-	 * @return
 	 */
-	private boolean addLinkToStructure(Map<NodePortTuple,
-			Set<Link>> s, Link l) {
-		boolean result1 = false, result2 = false;
-
+	private void addLinkToStructure(Map<NodePortTuple, Set<Link>> s, Link l) {
 		NodePortTuple n1 = new NodePortTuple(l.getSrc(), l.getSrcPort());
 		NodePortTuple n2 = new NodePortTuple(l.getDst(), l.getDstPort());
 
 		if (s.get(n1) == null) {
 			s.put(n1, new HashSet<Link>());
 		}
 		if (s.get(n2) == null) {
 			s.put(n2, new HashSet<Link>());
 		}
-		result1 = s.get(n1).add(l);
-		result2 = s.get(n2).add(l);
-
-		return (result1 || result2);
+		/* 
+		 * Since we don't include latency in .equals(), we need
+		 * to explicitly remove the existing link (if present).
+		 * Otherwise, new latency values for existing links will
+		 * never be accepted.
+		 */
+		s.get(n1).remove(l);
+		s.get(n2).remove(l);
+		s.get(n1).add(l);
+		s.get(n2).add(l);
 	}
 
 	/**
-	 * Delete the given link from the data strucure.  Returns true if the
+	 * Delete the given link from the data structure.  Returns true if the
 	 * link was deleted.
 	 * @param s
 	 * @param l
 	 * @return
 	 */
-	private boolean removeLinkFromStructure(Map<NodePortTuple,
-			Set<Link>> s, Link l) {
+	private boolean removeLinkFromStructure(Map<NodePortTuple, Set<Link>> s, Link l) {
 
 		boolean result1 = false, result2 = false;
 		NodePortTuple n1 = new NodePortTuple(l.getSrc(), l.getSrcPort());
@@ -1463,7 +1493,7 @@ public class TopologyManager implements IFloodlightModule, ITopologyService, IRo
 
 	public void removeLink(DatapathId srcId, OFPort srcPort,
 			DatapathId dstId, OFPort dstPort) {
-		Link link = new Link(srcId, srcPort, dstId, dstPort, U64.ZERO /* not needed for lookup */);
+		Link link = new Link(srcId, srcPort, dstId, dstPort, U64.ZERO /* does not matter for remove (not included in .equals() of Link) */);
 		removeLink(link);
 	}
 
@@ -1531,4 +1561,4 @@ public class TopologyManager implements IFloodlightModule, ITopologyService, IRo
 
 		return ports;
 	}
-}
\ No newline at end of file
+}
diff --git a/src/main/java/net/floodlightcontroller/util/ActionUtils.java b/src/main/java/net/floodlightcontroller/util/ActionUtils.java
index a32cfc103351ae418d65419069eb45f737f61e8a..1f8c8269f19ac092adf2d8dfb526eafff7564031 100644
--- a/src/main/java/net/floodlightcontroller/util/ActionUtils.java
+++ b/src/main/java/net/floodlightcontroller/util/ActionUtils.java
@@ -6,8 +6,6 @@ import java.util.List;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
-
 import org.projectfloodlight.openflow.protocol.OFFactories;
 import org.projectfloodlight.openflow.protocol.OFFlowMod;
 import org.projectfloodlight.openflow.protocol.OFVersion;
@@ -149,10 +147,6 @@ public class ActionUtils {
 	 * @param actions; A list of OFActions to encode into one string
 	 * @return A dpctl-style string of the actions
 	 */
-	@LogMessageDoc(level="ERROR",
-			message="Could not decode action {action}",
-			explanation="A static flow entry contained an invalid action",
-			recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG)
 	public static String actionsToString(List<OFAction> actions, Logger log) {
 		StringBuilder sb = new StringBuilder();
 		for (OFAction a : actions) {
@@ -354,10 +348,6 @@ public class ActionUtils {
 	 * @param bigString The string containing all the actions
 	 * @param log A logger to log for errors.
 	 */
-	@LogMessageDoc(level="ERROR",
-			message="Unexpected action '{action}', '{subaction}'",
-			explanation="A static flow entry contained an invalid action",
-			recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG)
 	public static void fromString(OFFlowMod.Builder fmb, String bigString, Logger log) {
 		List<OFAction> actions = new LinkedList<OFAction>();
 		if (bigString != null && !bigString.trim().isEmpty()) {
@@ -877,10 +867,6 @@ public class ActionUtils {
 	 * @param log
 	 * @return
 	 */
-	@LogMessageDoc(level="ERROR",
-			message="Invalid subaction: '{subaction}'",
-			explanation="A static flow entry contained an invalid subaction",
-			recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG)
 	private static OFActionOutput decode_output(String actionToDecode, OFVersion version, Logger log) {
 		Matcher n = Pattern.compile("((all)|(controller)|(local)|(ingress-port)|(normal)|(flood))").matcher(actionToDecode);
 		OFActionOutput.Builder ab = OFFactories.getFactory(version).actions().buildOutput();
diff --git a/src/main/java/net/floodlightcontroller/util/LoadMonitor.java b/src/main/java/net/floodlightcontroller/util/LoadMonitor.java
index 5b234cd5e01113cf4311a6eaf56d6cf48b4843a4..39dbcb3e5908e60191a1af39dc7569c311ea7b8d 100644
--- a/src/main/java/net/floodlightcontroller/util/LoadMonitor.java
+++ b/src/main/java/net/floodlightcontroller/util/LoadMonitor.java
@@ -27,9 +27,6 @@ import java.util.concurrent.TimeUnit;
 
 import org.slf4j.Logger;
 
-import net.floodlightcontroller.core.annotations.LogMessageDocs;
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
-
 public class LoadMonitor implements Runnable {
 
     public enum LoadLevel {
@@ -99,16 +96,6 @@ public class LoadMonitor implements Runnable {
     }
 
     @Override
-    @LogMessageDocs({
-        @LogMessageDoc(
-            message="System under very heavy load, dropping some packet-ins",
-            explanation="We detcted that the system was under very heavy" +
-                        "  load, dropping some packet-ins temporarily"),
-        @LogMessageDoc(
-            message="System under heavy load, dropping some new flows",
-            explanation="We detcted that the system was under heavy load," +
-                        " dropping some new flows temporarily")
-    })
     public void run() {
         if (!isLinux) return;
 
@@ -164,11 +151,7 @@ public class LoadMonitor implements Runnable {
         itersLoaded = 0;
         return;
     }
-
-    @LogMessageDoc(
-        message="Exception in reading load monitor params, using defaults",
-        explanation="There was an error in inializing load monitor's props," +
-                    " using default parameters")
+
     protected String runcmd(String cmd) {
         String line;
         StringBuilder ret = new StringBuilder();
diff --git a/src/main/java/org/sdnplatform/sync/internal/DefaultStoreClient.java b/src/main/java/org/sdnplatform/sync/internal/DefaultStoreClient.java
index 56d000d4f74a6000de763ea3f8efa9a4e2f732b3..6dc0211f24817a437b8afa159e4a4752dacfe525 100644
--- a/src/main/java/org/sdnplatform/sync/internal/DefaultStoreClient.java
+++ b/src/main/java/org/sdnplatform/sync/internal/DefaultStoreClient.java
@@ -42,7 +42,6 @@ public class DefaultStoreClient<K, V> extends AbstractStoreClient<K, V> {
     private Class<K> keyClass;
     private TypeReference<K> keyType;
 
-    @SuppressWarnings("unchecked")
     public DefaultStoreClient(IStore<K, V> delegate,
                               IInconsistencyResolver<Versioned<V>> resolver,
                               AbstractSyncManager syncManager,
diff --git a/src/main/java/org/sdnplatform/sync/internal/StoreRegistry.java b/src/main/java/org/sdnplatform/sync/internal/StoreRegistry.java
index cb71bd396fc3816ab5fc3b5ed1c8e9eed01099ba..b83ed1a3d4790559e10d0eb9eed988449a69ca83 100644
--- a/src/main/java/org/sdnplatform/sync/internal/StoreRegistry.java
+++ b/src/main/java/org/sdnplatform/sync/internal/StoreRegistry.java
@@ -10,8 +10,6 @@ import java.util.concurrent.locks.ReentrantLock;
 
 import javax.sql.ConnectionPoolDataSource;
 
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
-
 import org.sdnplatform.sync.Versioned;
 import org.sdnplatform.sync.ISyncService.Scope;
 import org.sdnplatform.sync.error.PersistException;
@@ -138,11 +136,6 @@ public class StoreRegistry {
      * @param key the key
      * @param value the value
      */
-    @LogMessageDoc(level="ERROR",
-                   message="Failed to queue hint for store {storeName}",
-                   explanation="There was an error synchronizing data to " + 
-                               "remote nodes",
-                   recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG)
     public void queueHint(String storeName, 
                           ByteArray key, Versioned<byte[]> value) {
         try {
diff --git a/src/main/java/org/sdnplatform/sync/internal/SyncManager.java b/src/main/java/org/sdnplatform/sync/internal/SyncManager.java
index cfe27e00e7738d7daeab5cd02162512e1ab19f3b..9db240c9c665b3da816a1b3f8803d1fd49914dc2 100644
--- a/src/main/java/org/sdnplatform/sync/internal/SyncManager.java
+++ b/src/main/java/org/sdnplatform/sync/internal/SyncManager.java
@@ -51,9 +51,6 @@ import org.sdnplatform.sync.thrift.SyncValueMessage;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import net.floodlightcontroller.core.annotations.LogMessageCategory;
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
-import net.floodlightcontroller.core.annotations.LogMessageDocs;
 import net.floodlightcontroller.core.module.FloodlightModuleContext;
 import net.floodlightcontroller.core.module.FloodlightModuleException;
 import net.floodlightcontroller.core.module.IFloodlightService;
@@ -70,7 +67,6 @@ import net.floodlightcontroller.threadpool.IThreadPoolService;
  * @author readams
  * @see ISyncService
  */
-@LogMessageCategory("State Synchronization")
 public class SyncManager extends AbstractSyncManager {
     protected static final Logger logger =
             LoggerFactory.getLogger(SyncManager.class.getName());
@@ -210,9 +206,6 @@ public class SyncManager extends AbstractSyncManager {
     /**
      * Perform a synchronization with the node specified
      */
-    @LogMessageDoc(level="INFO",
-                   message="[{id}->{id}] Synchronizing local state to remote node",
-                   explanation="Normal state resynchronization is occurring")
     public void antientropy(Node node) {
         if (!rpcService.isConnected(node.getNodeId())) return;
 
@@ -359,10 +352,6 @@ public class SyncManager extends AbstractSyncManager {
      * @param e the storage engine for the values
      * @param kv the values to synchronize
      */
-    @LogMessageDoc(level="WARN",
-            message="Sync task queue full and not emptying",
-            explanation="The synchronization service is overloaded",
-            recommendation=LogMessageDoc.CHECK_CONTROLLER)
     public void queueSyncTask(SynchronizingStorageEngine e,
                               ByteArray key, Versioned<byte[]> value) {
         storeRegistry.queueHint(e.getName(), key, value);
@@ -592,15 +581,6 @@ public class SyncManager extends AbstractSyncManager {
     // Local methods
     // ***************
 
-    @LogMessageDocs({
-        @LogMessageDoc(level="INFO",
-                message="[{id}] Updating sync configuration {config}",
-                explanation="The sync service cluster configuration has been updated"),
-        @LogMessageDoc(level="INFO",
-                message="[{id}] Local node configuration changed; restarting sync" +
-                        "service",
-                explanation="The sync service must be restarted to update its configuration")
-    })
     protected void doUpdateConfiguration()
             throws FloodlightModuleException {
 
@@ -674,10 +654,6 @@ public class SyncManager extends AbstractSyncManager {
      * Periodically perform cleanup
      * @author readams
      */
-    @LogMessageDoc(level="ERROR",
-            message="Cleanup task failed",
-            explanation="Failed to clean up deleted data in the store",
-            recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG)
     protected class CleanupTask implements Runnable {
         @Override
         public void run() {
@@ -699,10 +675,6 @@ public class SyncManager extends AbstractSyncManager {
      * Periodically perform antientropy
      * @author readams
      */
-    @LogMessageDoc(level="ERROR",
-            message="Antientropy task failed",
-            explanation="Failed to synchronize state between two nodes",
-            recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG)
     protected class AntientropyTask implements Runnable {
         @Override
         public void run() {
@@ -725,10 +697,6 @@ public class SyncManager extends AbstractSyncManager {
      * Worker task to periodically rescan the configuration
      * @author readams
      */
-    @LogMessageDoc(level="ERROR",
-            message="Failed to update configuration",
-            explanation="An error occured while updating sync service configuration",
-            recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG)
     protected class UpdateConfigTask implements Runnable {
         @Override
         public void run() {
@@ -750,10 +718,6 @@ public class SyncManager extends AbstractSyncManager {
      * appropriate messages to the node I/O channels
      * @author readams
      */
-    @LogMessageDoc(level="ERROR",
-            message="Error occured in synchronization worker",
-            explanation="Failed to synchronize state to remote node",
-            recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG)
     protected class HintWorker implements Runnable {
         ArrayList<Hint> tasks = new ArrayList<Hint>(50);
         protected Map<String, SyncMessage> messages =
diff --git a/src/main/java/org/sdnplatform/sync/internal/config/FallbackCCProvider.java b/src/main/java/org/sdnplatform/sync/internal/config/FallbackCCProvider.java
index 09278ae2c76ae9182858cd7f3bc7afd8e4f9ebe2..3217e2a91e3044c4fc5095f73fc77f3196ad660a 100644
--- a/src/main/java/org/sdnplatform/sync/internal/config/FallbackCCProvider.java
+++ b/src/main/java/org/sdnplatform/sync/internal/config/FallbackCCProvider.java
@@ -3,8 +3,6 @@ package org.sdnplatform.sync.internal.config;
 import java.util.Collections;
 import java.util.Map;
 
-import net.floodlightcontroller.core.annotations.LogMessageCategory;
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
 import net.floodlightcontroller.core.module.FloodlightModuleContext;
 
 import org.sdnplatform.sync.error.SyncException;
@@ -17,7 +15,6 @@ import org.slf4j.LoggerFactory;
  * Provide a fallback local configuration
  * @author readams
  */
-@LogMessageCategory("State Synchronization")
 public class FallbackCCProvider implements IClusterConfigProvider {
     protected static final Logger logger =
             LoggerFactory.getLogger(FallbackCCProvider.class.getName());
@@ -32,10 +29,6 @@ public class FallbackCCProvider implements IClusterConfigProvider {
     }
 
     @Override
-    @LogMessageDoc(level="INFO",
-        message="Cluster not yet configured; using fallback " + 
-                "local configuration",
-        explanation="No other nodes are known")
     public ClusterConfig getConfig() throws SyncException {
         if (!warned) {
             logger.info("Cluster not yet configured; using fallback local " + 
diff --git a/src/main/java/org/sdnplatform/sync/internal/remote/RemoteSyncManager.java b/src/main/java/org/sdnplatform/sync/internal/remote/RemoteSyncManager.java
index 5bf2e888595249860f8594ee6f3ec43039311b7b..efcc3ec67d602d722420d0b1989a77b97045a720 100644
--- a/src/main/java/org/sdnplatform/sync/internal/remote/RemoteSyncManager.java
+++ b/src/main/java/org/sdnplatform/sync/internal/remote/RemoteSyncManager.java
@@ -37,8 +37,6 @@ import org.sdnplatform.sync.thrift.Store;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import net.floodlightcontroller.core.annotations.LogMessageCategory;
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
 import net.floodlightcontroller.core.module.FloodlightModuleContext;
 import net.floodlightcontroller.core.module.FloodlightModuleException;
 import net.floodlightcontroller.core.module.IFloodlightService;
@@ -48,7 +46,6 @@ import net.floodlightcontroller.core.module.IFloodlightService;
  * remote sync manager over a TCP connection
  * @author readams
  */
-@LogMessageCategory("State Synchronization")
 public class RemoteSyncManager extends AbstractSyncManager {
     protected static final Logger logger =
             LoggerFactory.getLogger(RemoteSyncManager.class.getName());
@@ -271,10 +268,6 @@ public class RemoteSyncManager extends AbstractSyncManager {
         return future;
     }
 
-    @LogMessageDoc(level="WARN",
-                   message="Unexpected sync message reply type={type} id={id}",
-                   explanation="An error occurred in the sync protocol",
-                   recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG)
     public void dispatchReply(int xid,
                               SyncReply reply) {
         RemoteSyncFuture future = futureMap.get(Integer.valueOf(xid));
diff --git a/src/main/java/org/sdnplatform/sync/internal/rpc/AbstractRPCChannelHandler.java b/src/main/java/org/sdnplatform/sync/internal/rpc/AbstractRPCChannelHandler.java
index 3ea5f1a947f6a167de023a705ec48b18c4e19851..8fe90ff43552e1dbec4e9e9191039bddc89c8767 100644
--- a/src/main/java/org/sdnplatform/sync/internal/rpc/AbstractRPCChannelHandler.java
+++ b/src/main/java/org/sdnplatform/sync/internal/rpc/AbstractRPCChannelHandler.java
@@ -11,10 +11,6 @@ import javax.crypto.Mac;
 import javax.crypto.spec.SecretKeySpec;
 import javax.xml.bind.DatatypeConverter;
 
-import net.floodlightcontroller.core.annotations.LogMessageCategory;
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
-import net.floodlightcontroller.core.annotations.LogMessageDocs;
-
 import org.jboss.netty.channel.Channel;
 import org.jboss.netty.channel.ChannelHandlerContext;
 import org.jboss.netty.channel.ChannelStateEvent;
@@ -65,7 +61,6 @@ import org.slf4j.LoggerFactory;
  * a {@link SyncMessage} which will provide specific type information. 
  * @author readams
  */
-@LogMessageCategory("State Synchronization")
 public abstract class AbstractRPCChannelHandler 
     extends IdleStateAwareChannelHandler {
     protected static final Logger logger =
@@ -132,32 +127,6 @@ public abstract class AbstractRPCChannelHandler
     }
 
     @Override
-    @LogMessageDocs({
-        @LogMessageDoc(level="ERROR",
-                message="[{id}->{id}] Disconnecting client due to read timeout",
-                explanation="The connected client has failed to send any " +
-                            "messages or respond to echo requests",
-                recommendation=LogMessageDoc.CHECK_CONTROLLER),
-        @LogMessageDoc(level="ERROR",
-                message="[{id}->{id}] Disconnecting RPC node due to " +
-                    "handshake timeout",
-                explanation="The remote node did not complete the handshake",
-                recommendation=LogMessageDoc.CHECK_CONTROLLER),
-                @LogMessageDoc(level="ERROR",
-                message="[{id}->{id}] IOException: {message}",
-                explanation="There was an error communicating with the " + 
-                            "remote client",
-                recommendation=LogMessageDoc.GENERIC_ACTION),
-                @LogMessageDoc(level="ERROR",
-                message="[{id}->{id}] ConnectException: {message} {error}",
-                explanation="There was an error connecting to the " + 
-                            "remote node",
-                recommendation=LogMessageDoc.GENERIC_ACTION),
-        @LogMessageDoc(level="ERROR",
-                message="[{}->{}] An error occurred on RPC channel",
-                explanation="An error occurred processing the message",
-                recommendation=LogMessageDoc.GENERIC_ACTION),
-    })
     public void exceptionCaught(ChannelHandlerContext ctx,
                                 ExceptionEvent e) throws Exception {
         if (e.getCause() instanceof ReadTimeoutException) {
@@ -219,10 +188,6 @@ public abstract class AbstractRPCChannelHandler
      * @param ctx the context
      * @param message the message object
      */
-    @LogMessageDoc(level="WARN",
-                   message="[{id}->{id}] Unhandled message: {message type}",
-                   explanation="An unrecognized event occurred",
-                   recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG)
     protected void handleUnknownMessage(ChannelHandlerContext ctx, 
                                         Object message) {
         logger.warn("[{}->{}] Unhandled message: {}", 
@@ -343,11 +308,6 @@ public abstract class AbstractRPCChannelHandler
         
     }
 
-    @LogMessageDoc(level="WARN",
-                   message="Failed to authenticate connection from {remote}: {message}",
-                   explanation="Challenge/Response authentication failed",
-                   recommendation="Check the included error message, and " + 
-                           "verify the shared secret is correctly-configured")
     protected void handshake(HelloMessage request, Channel channel) {
         try {
             switch (getAuthScheme()) {
@@ -527,11 +487,6 @@ public abstract class AbstractRPCChannelHandler
                           MessageType.CLUSTER_JOIN_RESPONSE, channel);
     }
 
-    @LogMessageDoc(level="ERROR",
-                   message="[{id}->{id}] Error for message {id} ({type}): " + 
-                           "{message} {error code}",
-                   explanation="Remote client sent an error",
-                   recommendation=LogMessageDoc.GENERIC_ACTION)
     protected void handleError(ErrorMessage error, Channel channel) {
         logger.error("[{}->{}] Error for message {} ({}): {} ({})", 
                      new Object[]{getLocalNodeIdString(), 
@@ -554,11 +509,6 @@ public abstract class AbstractRPCChannelHandler
      * @param type the type of the message that generated the error
      * @return the {@link SyncError} message
      */
-    @LogMessageDoc(level="ERROR",
-                   message="Unexpected error processing message {} ({})",
-                   explanation="An error occurred while processing an " + 
-                               "RPC message",
-                   recommendation=LogMessageDoc.GENERIC_ACTION)
     protected SyncMessage getError(int transactionId, Exception error, 
                                    MessageType type) {
         int ec = SyncException.ErrorType.GENERIC.getValue();
@@ -590,11 +540,6 @@ public abstract class AbstractRPCChannelHandler
      * @param type The type of the message that generated the error
      * @param channel the channel to write the error
      */
-    @LogMessageDoc(level="WARN",
-                    message="[{id}->{id}] Received unexpected message: {type}",
-                    explanation="A inappriopriate message was sent by the remote" +
-                            "client",
-                    recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG)
     protected void unexpectedMessage(int transactionId,
                                      MessageType type,
                                      Channel channel) {
diff --git a/src/main/java/org/sdnplatform/sync/internal/rpc/RPCChannelHandler.java b/src/main/java/org/sdnplatform/sync/internal/rpc/RPCChannelHandler.java
index 516f387a291f2b655c1a6d0af2cd4f2fc7abed29..bb7f942cf14abf72bd7aefba05b6d46291f3756a 100644
--- a/src/main/java/org/sdnplatform/sync/internal/rpc/RPCChannelHandler.java
+++ b/src/main/java/org/sdnplatform/sync/internal/rpc/RPCChannelHandler.java
@@ -6,8 +6,6 @@ import java.util.List;
 import java.util.Random;
 import java.util.Map.Entry;
 
-import net.floodlightcontroller.core.annotations.LogMessageCategory;
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
 import net.floodlightcontroller.debugcounter.IDebugCounter;
 
 import org.jboss.netty.channel.Channel;
@@ -42,7 +40,6 @@ import org.slf4j.LoggerFactory;
  * Channel handler for the RPC service
  * @author readams
  */
-@LogMessageCategory("State Synchronization")
 public class RPCChannelHandler extends AbstractRPCChannelHandler {
     protected static final Logger logger =
             LoggerFactory.getLogger(RPCChannelHandler.class);
@@ -88,14 +85,6 @@ public class RPCChannelHandler extends AbstractRPCChannelHandler {
     }
 
     @Override
-    @LogMessageDoc(level="ERROR",
-              message="[{id}->{id}] Attempted connection from unrecognized " +
-                      "floodlight node {id}; disconnecting",
-              explanation="A unknown node connected.  This can happen " +
-                      "transiently if new nodes join the cluster.",
-              recommendation="If the problem persists, verify your cluster" +
-                "configuration and that you don't have unauthorized agents " +
-                "in your network.")
     protected void handleHello(HelloMessage hello, Channel channel) {
         if (!hello.isSetNodeId()) {
             // this is a client connection.  Don't set this up as a node
diff --git a/src/main/java/org/sdnplatform/sync/internal/rpc/RPCService.java b/src/main/java/org/sdnplatform/sync/internal/rpc/RPCService.java
index 97f498a050b064e8effb85d46f69ff5463ffab23..60dbcbbc8e9089476db022de386e70c4bd51162f 100644
--- a/src/main/java/org/sdnplatform/sync/internal/rpc/RPCService.java
+++ b/src/main/java/org/sdnplatform/sync/internal/rpc/RPCService.java
@@ -16,9 +16,6 @@ import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 import java.util.concurrent.LinkedTransferQueue;
 
-import net.floodlightcontroller.core.annotations.LogMessageCategory;
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
-import net.floodlightcontroller.core.annotations.LogMessageDocs;
 import net.floodlightcontroller.core.util.SingletonTask;
 import net.floodlightcontroller.debugcounter.IDebugCounterService;
 
@@ -40,13 +37,10 @@ import org.sdnplatform.sync.thrift.MessageType;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-
-
 /**
  * A lightweight RPC mechanism built on netty.
  * @author readams
  */
-@LogMessageCategory("State Synchronization")
 public class RPCService {
     protected static final Logger logger =
             LoggerFactory.getLogger(RPCService.class);
@@ -223,14 +217,6 @@ public class RPCService {
     /**
      * Stop the RPC service
      */
-    @LogMessageDocs({
-        @LogMessageDoc(level="WARN",
-                message="Failed to cleanly shut down RPC server",
-                explanation="Could not close all open sockets cleanly"),
-        @LogMessageDoc(level="WARN",
-        message="Interrupted while shutting down RPC server",
-        explanation="Could not close all open sockets cleanly")
-    })
     public void shutdown() {
         shutDown = true;
         try {
@@ -436,9 +422,6 @@ public class RPCService {
     /**
      * Start listening sockets
      */
-    @LogMessageDoc(level="INFO",
-                   message="Listening for internal floodlight RPC on {port}",
-                   explanation="The internal RPC service is ready for connections")
     protected void startServer(ChannelPipelineFactory pipelineFactory) {
         final ServerBootstrap bootstrap =
                 new ServerBootstrap(
diff --git a/src/main/java/org/sdnplatform/sync/internal/store/JavaDBStorageEngine.java b/src/main/java/org/sdnplatform/sync/internal/store/JavaDBStorageEngine.java
index 2acc2fc222d6489520e2266bc12d3da0a7bf0e69..c29b88ddaed94520cdc4bcda54db897726746d8a 100644
--- a/src/main/java/org/sdnplatform/sync/internal/store/JavaDBStorageEngine.java
+++ b/src/main/java/org/sdnplatform/sync/internal/store/JavaDBStorageEngine.java
@@ -18,10 +18,7 @@ import java.util.NoSuchElementException;
 import javax.sql.ConnectionPoolDataSource;
 import javax.xml.bind.DatatypeConverter;
 
-import net.floodlightcontroller.core.annotations.LogMessageCategory;
-
 import org.apache.derby.jdbc.EmbeddedConnectionPoolDataSource40;
-
 import org.sdnplatform.sync.IClosableIterator;
 import org.sdnplatform.sync.IVersion;
 import org.sdnplatform.sync.Versioned;
@@ -46,7 +43,6 @@ import com.fasterxml.jackson.dataformat.smile.SmileFactory;
  * Persistent storage engine that keeps its data in a JDB database
  * @author readams
  */
-@LogMessageCategory("State Synchronization")
 public class JavaDBStorageEngine implements IStorageEngine<ByteArray, byte[]> {
     protected static final Logger logger =
             LoggerFactory.getLogger(JavaDBStorageEngine.class.getName());
@@ -154,7 +150,8 @@ public class JavaDBStorageEngine implements IStorageEngine<ByteArray, byte[]> {
         }
     }
 
-    @Override
+    @SuppressWarnings("resource")
+	@Override
     public void put(ByteArray key, Versioned<byte[]> value) 
             throws SyncException {
         StoreUtils.assertValidKey(key);
diff --git a/src/main/java/org/sdnplatform/sync/internal/store/ListenerStorageEngine.java b/src/main/java/org/sdnplatform/sync/internal/store/ListenerStorageEngine.java
index 9d6f9d79e9e1612eaaf9a98620d3add0941e5b95..0bd1a439a2af18ef82a5fd85c57a9cb4a6a673d2 100644
--- a/src/main/java/org/sdnplatform/sync/internal/store/ListenerStorageEngine.java
+++ b/src/main/java/org/sdnplatform/sync/internal/store/ListenerStorageEngine.java
@@ -6,8 +6,6 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map.Entry;
 
-import net.floodlightcontroller.core.annotations.LogMessageCategory;
-import net.floodlightcontroller.core.annotations.LogMessageDoc;
 import net.floodlightcontroller.debugcounter.IDebugCounter;
 import net.floodlightcontroller.debugcounter.IDebugCounterService;
 
@@ -26,7 +24,6 @@ import org.slf4j.LoggerFactory;
  * registered listeners of changes
  * @author readams
  */
-@LogMessageCategory("State Synchronization")
 public class ListenerStorageEngine
     implements IStorageEngine<ByteArray, byte[]> {
     protected static Logger logger =
@@ -144,10 +141,6 @@ public class ListenerStorageEngine
         notifyListeners(Collections.singleton(key).iterator(), type);
     }
 
-    @LogMessageDoc(level="ERROR",
-                   message="An error occurred in a sync listener",
-                   explanation="An unexpected error occured in a handler for " +
-                               "an update to shared state.")
     protected void notifyListeners(Iterator<ByteArray> keys, UpdateType type) {
         for (MappingStoreListener msl : listeners) {
             try {
diff --git a/src/main/resources/floodlightdefault.properties b/src/main/resources/floodlightdefault.properties
index 8c644c730d806b8140cd24cd19409d23d4ccc00c..a838b13188514ddc2db698122563ab3387fe3364 100644
--- a/src/main/resources/floodlightdefault.properties
+++ b/src/main/resources/floodlightdefault.properties
@@ -23,6 +23,8 @@ org.sdnplatform.sync.internal.SyncManager.port=6642
 net.floodlightcontroller.forwarding.Forwarding.match=vlan, mac, ip, transport
 net.floodlightcontroller.core.internal.FloodlightProvider.openflowPort=6653
 net.floodlightcontroller.core.internal.FloodlightProvider.role=ACTIVE
+net.floodlightcontroller.linkdiscovery.internal.LinkDiscoveryManager.latency-history-size=10
+net.floodlightcontroller.linkdiscovery.internal.LinkDiscoveryManager.latency-update-threshold=0.5
 net.floodlightcontroller.core.internal.OFSwitchManager.defaultMaxTablesToReceiveTableMissFlow=2
 net.floodlightcontroller.core.internal.OFSwitchManager.maxTablesToReceiveTableMissFlowPerDpid={"00:00:00:00:00:00:00:01":"1","2":"1"}
 net.floodlightcontroller.core.internal.OFSwitchManager.clearTablesOnInitialHandshakeAsMaster=YES
diff --git a/src/test/java/net/floodlightcontroller/forwarding/ForwardingTest.java b/src/test/java/net/floodlightcontroller/forwarding/ForwardingTest.java
index 51f1faefb541b21acefbe0151385aa1a7277ad9f..f95bbd522bf3c96c5cfb32df15844402f9a3341f 100644
--- a/src/test/java/net/floodlightcontroller/forwarding/ForwardingTest.java
+++ b/src/test/java/net/floodlightcontroller/forwarding/ForwardingTest.java
@@ -23,8 +23,10 @@ import static org.junit.Assert.*;
 import java.util.ArrayList;
 import java.util.Date;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 import net.floodlightcontroller.core.FloodlightContext;
 import net.floodlightcontroller.core.IFloodlightProviderService;
@@ -269,9 +271,9 @@ public class ForwardingTest extends FloodlightTestCase {
 				.setXid(15)
 				.build();
 
-		// Mock Packet-out with OFPP_FLOOD action
+		// Mock Packet-out flooded on the switch's broadcast ports (here, port 10) instead of the OFPP_FLOOD pseudo-port
 		poactions = new ArrayList<OFAction>();
-		poactions.add(factory.actions().output(OFPort.FLOOD, Integer.MAX_VALUE));
+		poactions.add(factory.actions().output(OFPort.of(10), Integer.MAX_VALUE));
 		packetOutFlooded = factory.buildPacketOut()
 				.setBufferId(this.packetIn.getBufferId())
 				.setInPort(packetIn.getMatch().get(MatchField.IN_PORT))
@@ -459,6 +461,8 @@ public class ForwardingTest extends FloodlightTestCase {
 		expect(topology.isAttachmentPointPort(DatapathId.of(1L),  OFPort.of(1))).andReturn(true).anyTimes();
 		expect(topology.isAttachmentPointPort(DatapathId.of(2L),  OFPort.of(3))).andReturn(true).anyTimes();
 		expect(topology.isIncomingBroadcastAllowed(DatapathId.of(anyLong()), OFPort.of(anyShort()))).andReturn(true).anyTimes();
+		expect(topology.isEdge(DatapathId.of(1L), OFPort.of(1))).andReturn(true).anyTimes();
+		expect(topology.isEdge(DatapathId.of(2L), OFPort.of(3))).andReturn(true).anyTimes();
 
 		// Reset mocks, trigger the packet in, and validate results
 		replay(sw1, sw2, routingEngine, topology);
@@ -529,7 +533,9 @@ public class ForwardingTest extends FloodlightTestCase {
 		expect(topology.isAttachmentPointPort(DatapathId.of(1L),  OFPort.of(1))).andReturn(true).anyTimes();
 		expect(topology.isAttachmentPointPort(DatapathId.of(2L),  OFPort.of(3))).andReturn(true).anyTimes();
 		expect(topology.isIncomingBroadcastAllowed(DatapathId.of(anyLong()), OFPort.of(anyShort()))).andReturn(true).anyTimes();
-
+		expect(topology.isEdge(DatapathId.of(1L), OFPort.of(1))).andReturn(true).anyTimes();
+		expect(topology.isEdge(DatapathId.of(2L), OFPort.of(3))).andReturn(true).anyTimes();
+
 		// Reset mocks, trigger the packet in, and validate results
 		replay(sw1, sw2, routingEngine, topology);
 		forwarding.receive(sw1, this.packetInIPv6, cntx);
@@ -592,8 +598,10 @@ public class ForwardingTest extends FloodlightTestCase {
 		reset(topology);
 		expect(topology.isIncomingBroadcastAllowed(DatapathId.of(anyLong()), OFPort.of(anyShort()))).andReturn(true).anyTimes();
 		expect(topology.getL2DomainId(DatapathId.of(1L))).andReturn(DatapathId.of(1L)).anyTimes();
-		expect(topology.isAttachmentPointPort(DatapathId.of(1L),  OFPort.of(1))).andReturn(true).anyTimes();
-		expect(topology.isAttachmentPointPort(DatapathId.of(1L),  OFPort.of(3))).andReturn(true).anyTimes();
+		expect(topology.isAttachmentPointPort(DatapathId.of(1L), OFPort.of(1))).andReturn(true).anyTimes();
+		expect(topology.isAttachmentPointPort(DatapathId.of(1L), OFPort.of(3))).andReturn(true).anyTimes();
+		expect(topology.isEdge(DatapathId.of(1L), OFPort.of(1))).andReturn(true).anyTimes();
+		expect(topology.isEdge(DatapathId.of(1L), OFPort.of(3))).andReturn(true).anyTimes();
 
 		// Reset mocks, trigger the packet in, and validate results
 		replay(sw1, sw2, routingEngine, topology);
@@ -648,7 +656,9 @@ public class ForwardingTest extends FloodlightTestCase {
 		expect(topology.getL2DomainId(DatapathId.of(1L))).andReturn(DatapathId.of(1L)).anyTimes();
 		expect(topology.isAttachmentPointPort(DatapathId.of(1L),  OFPort.of(1))).andReturn(true).anyTimes();
 		expect(topology.isAttachmentPointPort(DatapathId.of(1L),  OFPort.of(3))).andReturn(true).anyTimes();
-
+		expect(topology.isEdge(DatapathId.of(1L), OFPort.of(1))).andReturn(true).anyTimes();
+		expect(topology.isEdge(DatapathId.of(1L), OFPort.of(3))).andReturn(true).anyTimes();
+
 		// Reset mocks, trigger the packet in, and validate results
 		replay(sw1, sw2, routingEngine, topology);
 		forwarding.receive(sw1, this.packetInIPv6, cntx);
@@ -729,17 +739,18 @@ public class ForwardingTest extends FloodlightTestCase {
 		// expect no Flow-mod but expect the packet to be flooded
 
 		Capture<OFMessage> wc1 = new Capture<OFMessage>(CaptureType.ALL);
+
+		Set<OFPort> bcastPorts = new HashSet<OFPort>();
+		bcastPorts.add(OFPort.of(10));
 
 		// Reset mocks, trigger the packet in, and validate results
 		reset(topology);
-		expect(topology.isIncomingBroadcastAllowed(DatapathId.of(1L), OFPort.of(1))).andReturn(true).anyTimes();
+		expect(topology.getSwitchBroadcastPorts(DatapathId.of(1L))).andReturn(bcastPorts).once();
 		expect(topology.isAttachmentPointPort(DatapathId.of(anyLong()),
 				OFPort.of(anyShort())))
 				.andReturn(true)
 				.anyTimes();
-		expect(sw1.hasAttribute(IOFSwitch.PROP_SUPPORTS_OFPP_FLOOD))
-		.andReturn(true).anyTimes();
-		// Reset XID to expected (dependent on prior unit tests)
+		expect(sw1.hasAttribute(IOFSwitch.PROP_SUPPORTS_OFPP_FLOOD)).andReturn(true).anyTimes();
 		sw1.write(capture(wc1));
 		expectLastCall().once();
 		replay(sw1, sw2, routingEngine, topology);
@@ -760,10 +771,13 @@ public class ForwardingTest extends FloodlightTestCase {
 		// expect no Flow-mod but expect the packet to be flooded
 
 		Capture<OFMessage> wc1 = new Capture<OFMessage>(CaptureType.ALL);
+
+		Set<OFPort> bcastPorts = new HashSet<OFPort>();
+		bcastPorts.add(OFPort.of(10));
 
 		// Reset mocks, trigger the packet in, and validate results
 		reset(topology);
-		expect(topology.isIncomingBroadcastAllowed(DatapathId.of(1L), OFPort.of(1))).andReturn(true).anyTimes();
+		expect(topology.getSwitchBroadcastPorts(DatapathId.of(1L))).andReturn(bcastPorts).once();
 		expect(topology.isAttachmentPointPort(DatapathId.of(anyLong()),
 				OFPort.of(anyShort())))
 				.andReturn(true)
diff --git a/src/test/java/net/floodlightcontroller/linkdiscovery/internal/LinkDiscoveryManagerTest.java b/src/test/java/net/floodlightcontroller/linkdiscovery/internal/LinkDiscoveryManagerTest.java
index cd7fe7ae38caec6fa499d4fcec08155cf2631ce3..a4c8d244785fcccf952d5db5133bd7e01e72264f 100644
--- a/src/test/java/net/floodlightcontroller/linkdiscovery/internal/LinkDiscoveryManagerTest.java
+++ b/src/test/java/net/floodlightcontroller/linkdiscovery/internal/LinkDiscoveryManagerTest.java
@@ -48,7 +48,6 @@ import net.floodlightcontroller.debugevent.IDebugEventService;
 import net.floodlightcontroller.debugevent.MockDebugEventService;
 import net.floodlightcontroller.linkdiscovery.ILinkDiscoveryListener;
 import net.floodlightcontroller.linkdiscovery.ILinkDiscoveryService;
-import net.floodlightcontroller.linkdiscovery.LinkInfo;
 import net.floodlightcontroller.packet.Data;
 import net.floodlightcontroller.packet.Ethernet;
 import net.floodlightcontroller.packet.IPacket;
@@ -171,16 +170,44 @@ public class LinkDiscoveryManagerTest extends FloodlightTestCase {
         replay(sw1, sw2);
     }
 
+    @Test
+    public void testLinkLatency() throws Exception {
+        LinkDiscoveryManager.LATENCY_HISTORY_SIZE = 5;
+        LinkDiscoveryManager.LATENCY_UPDATE_THRESHOLD = 0.25;
+
+        LinkInfo info = new LinkInfo(new Date(), new Date(), null);
+
+        /*
+         * Should retain initial latency until LATENCY_HISTORY_SIZE
+         * data points are accumulated.
+         */
+        assertEquals(U64.of(0), info.addObservedLatency(U64.of(0)));
+        assertEquals(U64.of(0), info.addObservedLatency(U64.of(10)));
+        assertEquals(U64.of(0), info.addObservedLatency(U64.of(20)));
+        assertEquals(U64.of(0), info.addObservedLatency(U64.of(30)));
+        assertEquals(U64.of(20), info.addObservedLatency(U64.of(40)));
+
+        /*
+         * The history is capped at LATENCY_HISTORY_SIZE entries; each new
+         * value evicts the oldest. A new average is computed on every
+         * addition, but the reported latency only changes once the
+         * difference between the current latency and the historical
+         * average again exceeds the LATENCY_UPDATE_THRESHOLD fraction.
+         */
+        assertEquals(U64.of(20), info.addObservedLatency(U64.of(20))); /* avg = 24; diff = 4; 4/24 = 1/6 = 17% !>= 25% --> no update */
+        assertEquals(U64.of(26), info.addObservedLatency(U64.of(20))); /* avg = 26; diff = 6; 6/20 = 30% >= 25% --> update */
+        assertEquals(U64.of(26), info.addObservedLatency(U64.of(20))); /* avg = 26; diff = 0; 0% !>= 25% --> no update */
+    }
+
     @Test
     public void testAddOrUpdateLink() throws Exception {
         LinkDiscoveryManager linkDiscovery = getLinkDiscoveryManager();
-
-        Link lt = new Link(DatapathId.of(1L), OFPort.of(2), DatapathId.of(2L), OFPort.of(1), U64.ZERO);
+        U64 latency = U64.of(100);
+        Link lt = new Link(DatapathId.of(1L), OFPort.of(2), DatapathId.of(2L), OFPort.of(1), latency);
         LinkInfo info = new LinkInfo(new Date(),
                                      new Date(), null);
         linkDiscovery.addOrUpdateLink(lt, info);
 
-
         NodePortTuple srcNpt = new NodePortTuple(DatapathId.of(1L), OFPort.of(2));
         NodePortTuple dstNpt = new NodePortTuple(DatapathId.of(2L), OFPort.of(1));
 
@@ -192,6 +219,7 @@ public class LinkDiscoveryManagerTest extends FloodlightTestCase {
         assertNotNull(linkDiscovery.portLinks.get(dstNpt));
         assertTrue(linkDiscovery.portLinks.get(dstNpt).contains(lt));
         assertTrue(linkDiscovery.links.containsKey(lt));
+        assertTrue(linkDiscovery.switchLinks.get(lt.getSrc()).iterator().next().getLatency().equals(latency));
     }
 
     @Test
diff --git a/src/test/java/net/floodlightcontroller/topology/TopologyInstanceTest.java b/src/test/java/net/floodlightcontroller/topology/TopologyInstanceTest.java
index 445e4c00cae2b951928c2919ad7c4a449deb9933..634ebf689b959f50eed1412e804f8b367bf53392 100644
--- a/src/test/java/net/floodlightcontroller/topology/TopologyInstanceTest.java
+++ b/src/test/java/net/floodlightcontroller/topology/TopologyInstanceTest.java
@@ -24,8 +24,10 @@ import java.util.Set;
 
 import static org.junit.Assert.*;
 import net.floodlightcontroller.core.IFloodlightProviderService;
+import net.floodlightcontroller.core.internal.IOFSwitchService;
 import net.floodlightcontroller.core.module.FloodlightModuleContext;
 import net.floodlightcontroller.core.test.MockFloodlightProvider;
+import net.floodlightcontroller.core.test.MockSwitchManager;
 import net.floodlightcontroller.core.test.MockThreadPoolService;
 import net.floodlightcontroller.debugcounter.IDebugCounterService;
 import net.floodlightcontroller.debugcounter.MockDebugCounterService;
@@ -64,11 +66,12 @@ public class TopologyInstanceTest {
         linkDiscovery = EasyMock.createMock(ILinkDiscoveryService.class);
         mockFloodlightProvider = new MockFloodlightProvider();
         fmc.addService(IFloodlightProviderService.class, mockFloodlightProvider);
+        fmc.addService(IOFSwitchService.class, new MockSwitchManager());
         fmc.addService(ILinkDiscoveryService.class, linkDiscovery);
         fmc.addService(IDebugCounterService.class, new MockDebugCounterService());
         fmc.addService(IDebugEventService.class, new MockDebugEventService());
         MockThreadPoolService tp = new MockThreadPoolService();
-        topologyManager  = new TopologyManager();
+        topologyManager = new TopologyManager();
         fmc.addService(IThreadPoolService.class, tp);
         topologyManager.init(fmc);
         tp.init(fmc);
@@ -178,7 +181,6 @@ public class TopologyInstanceTest {
                                          {1,2,3}, 
                                          {4}
             };
-            //tm.recompute();
             createTopologyFromLinks(linkArray);
             verifyClusters(expectedClusters);
         }