Commit 397b50a1 authored by Srinivasan Ramasubramanian's avatar Srinivasan Ramasubramanian

Rewriting switch cluster computation to return strongly connected components in the presence of unidirectional links.  The notion of internal switchPort is also changed.
parent 69aa2b25
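
For orientation (not part of the commit): the new dfsTraverse/ClusterDFS code in the diff below follows the classic lowpoint-based DFS for strongly connected components. The sketch here is a minimal, standalone rendering of that idea in plain Java; the class, its members, and the integer switch IDs are illustrative only and are not the controller's API. Its example graph mirrors the first topology of the updated unit test further down, where only switches 1, 2, and 3 are mutually reachable.

import java.util.*;

public class SccSketch {
    private final Map<Integer, List<Integer>> adj;          // directed adjacency: switch -> outgoing neighbors
    private final Map<Integer, Integer> dfsIndex = new HashMap<>();
    private final Map<Integer, Integer> lowpoint = new HashMap<>();
    private final Deque<Integer> stack = new ArrayDeque<>();
    private final Set<Integer> onStack = new HashSet<>();
    private final List<Set<Integer>> sccs = new ArrayList<>();
    private int counter = 0;

    SccSketch(Map<Integer, List<Integer>> adj) {
        this.adj = adj;
    }

    List<Set<Integer>> run() {
        for (Integer v : adj.keySet()) {
            if (!dfsIndex.containsKey(v)) {
                dfs(v);
            }
        }
        return sccs;
    }

    private void dfs(int v) {
        dfsIndex.put(v, counter);
        lowpoint.put(v, counter);
        counter++;
        stack.push(v);
        onStack.add(v);
        for (int w : adj.get(v)) {
            if (!dfsIndex.containsKey(w)) {
                // Tree edge: recurse, then fold the child's lowpoint into ours.
                dfs(w);
                lowpoint.put(v, Math.min(lowpoint.get(v), lowpoint.get(w)));
            } else if (onStack.contains(w)) {
                // Back edge into the current DFS stack.
                lowpoint.put(v, Math.min(lowpoint.get(v), dfsIndex.get(w)));
            }
        }
        // v is the root of a strongly connected component when nothing in its
        // subtree reaches a vertex older than v itself.
        if (lowpoint.get(v).equals(dfsIndex.get(v))) {
            Set<Integer> scc = new HashSet<>();
            int w;
            do {
                w = stack.pop();
                onStack.remove(w);
                scc.add(w);
            } while (w != v);
            sccs.add(scc);
        }
    }

    public static void main(String[] args) {
        // Unidirectional links from the first topology in the updated test:
        // 1->2, 2->3, 3->1 form a cycle; 2->4 and 3->4 are one-way only;
        // switches 5 and 6 have no inter-switch links.
        Map<Integer, List<Integer>> adj = new HashMap<>();
        adj.put(1, Arrays.asList(2));
        adj.put(2, Arrays.asList(3, 4));
        adj.put(3, Arrays.asList(1, 4));
        adj.put(4, Collections.<Integer>emptyList());
        adj.put(5, Collections.<Integer>emptyList());
        adj.put(6, Collections.<Integer>emptyList());
        // Prints the components {1,2,3}, {4}, {5}, {6} (in some order),
        // matching the expected clusters in the updated test.
        System.out.println(new SccSketch(adj).run());
    }
}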
@@ -56,6 +56,7 @@ import net.floodlightcontroller.topology.LinkInfo;
 import net.floodlightcontroller.topology.LinkTuple;
 import net.floodlightcontroller.topology.SwitchCluster;
 import net.floodlightcontroller.topology.SwitchPortTuple;
+import net.floodlightcontroller.util.ClusterDFS;
 import org.openflow.protocol.OFMessage;
 import org.openflow.protocol.OFPacketIn;
@@ -508,8 +509,10 @@ public class TopologyImpl implements IOFMessageListener, IOFSwitchListener,
     }

     protected void sendLLDPs(IOFSwitch sw) {
-        for (OFPhysicalPort port : sw.getEnabledPorts()) {
-            sendLLDPs(sw, port);
+        if (sw.getEnabledPorts() != null) {
+            for (OFPhysicalPort port : sw.getEnabledPorts()) {
+                sendLLDPs(sw, port);
+            }
         }
     }
@@ -905,16 +908,40 @@ public class TopologyImpl implements IOFMessageListener, IOFSwitchListener,
     @Override
     public boolean isInternal(SwitchPortTuple idPort) {
         lock.readLock().lock();
-        boolean result;
+        boolean result = false;
+        // A SwitchPortTuple is internal if the switch is a core switch
+        // or the current switch and the switch connected on the switch
+        // port tuple are in the same cluster.
         try {
-            result = this.portLinks.containsKey(idPort) ||
-                idPort.getSw().hasAttribute(IOFSwitch.SWITCH_IS_CORE_SWITCH);
+            // If it is a core switch, then return true
+            if (idPort.getSw().hasAttribute(IOFSwitch.SWITCH_IS_CORE_SWITCH))
+                result = true;
+            else {
+                Set<LinkTuple> ltSet = this.portLinks.get(idPort);
+                // The assumption is there could be at most two links for this
+                // switch port: one incoming and one outgoing to the same
+                // other switch.  So, verifying one of these two links is
+                // sufficient.
+                if (ltSet != null) {
+                    for (LinkTuple lt: ltSet) {
+                        Long c1 = lt.getSrc().getSw().getSwitchClusterId();
+                        Long c2 = lt.getDst().getSw().getSwitchClusterId();
+                        result = c1.equals(c2);
+                        break;
+                    }
+                }
+            }
         } finally {
             lock.readLock().unlock();
         }
         return result;
     }

     @Override
     public LinkInfo getLinkInfo(SwitchPortTuple idPort, boolean isSrcPort) {
         Set<LinkTuple> links = this.portLinks.get(idPort);
@@ -966,27 +993,6 @@ public class TopologyImpl implements IOFMessageListener, IOFSwitchListener,
         return linkInfo.linkStpBlocked();
     }

-    private void traverseCluster(Set<LinkTuple> links, SwitchCluster cluster) {
-        // NOTE: This function assumes that the caller has already acquired
-        // a write lock on the "lock" data member.
-        // FIXME: To handle large networks we probably should recode this to not
-        // use recursion to avoid stack overflow.
-        // FIXME: See if we can optimize not creating a new SwitchCluster object for
-        // each switch.
-        for (LinkTuple link: links) {
-            // FIXME: Is this the right check for handling STP correctly?
-            if (!linkStpBlocked(link)) {
-                IOFSwitch dstSw = link.getDst().getSw();
-                if (switchClusterMap.get(dstSw) == null) {
-                    cluster.add(dstSw);
-                    switchClusterMap.put(dstSw, cluster);
-                    Set<LinkTuple> dstLinks = switchLinks.get(dstSw);
-                    traverseCluster(dstLinks, cluster);
-                }
-            }
-        }
-    }
-
     protected void updateClusters() {
         // NOTE: This function assumes that the caller has already acquired
         // a write lock on the "lock" data member.
@@ -998,32 +1004,128 @@ public class TopologyImpl implements IOFMessageListener, IOFSwitchListener,
             log.trace("Updating topology cluster info");
         }

+        // Initialize all the new structures.
         switchClusterMap = new HashMap<IOFSwitch, SwitchCluster>();
         clusters = new HashSet<SwitchCluster>();
         Map<Long, IOFSwitch> switches = floodlightProvider.getSwitches();
         Set<Long> switchKeys = new HashSet<Long>(switches.keySet());
+        Map<IOFSwitch, ClusterDFS> dfsList = new HashMap<IOFSwitch, ClusterDFS>();

         for (Map.Entry<IOFSwitch, Set<LinkTuple>> entry: switchLinks.entrySet()) {
             IOFSwitch sw = entry.getKey();
             switchKeys.remove(sw.getId());
-            if (switchClusterMap.get(sw) == null) {
-                SwitchCluster cluster = new SwitchCluster();
-                cluster.add(sw);
-                switchClusterMap.put(sw, cluster);
-                clusters.add(cluster);
-                traverseCluster(entry.getValue(), cluster);
+            ClusterDFS cdfs = new ClusterDFS();
+            dfsList.put(sw, cdfs);
+        }
+
+        // Get a set of switch keys in a set
+        Set<IOFSwitch> currSet = new HashSet<IOFSwitch>();
+
+        for (Map.Entry<IOFSwitch, Set<LinkTuple>> entry: switchLinks.entrySet()) {
+            // Check if the switch has been DFS-visited already; if not,
+            // start a new DFS.
+            //dfsTraverse(parentIndex, currIndex, key, switches, dfsList);
+            IOFSwitch sw = entry.getKey();
+            ClusterDFS cdfs = dfsList.get(sw);
+            if (cdfs == null) {
+                log.error("No DFS object found for key.");
+            } else if (!cdfs.isVisited()) {
+                this.dfsTraverse(0, 1, sw, switches, dfsList, currSet, clusters);
             }
         }

         // switchKeys contains switches that have no links to other switches
         // Each of these switches would be in their own one-switch cluster
         for (Long key: switchKeys) {
             IOFSwitch sw = switches.get(key);
-            if (sw != null)
-                sw.setSwitchClusterId(sw.getId());
+            if (sw != null) {
+                SwitchCluster sc = new SwitchCluster();
+                sc.add(sw);
+                switchClusterMap.put(sw, sc);
+                clusters.add(sc);
+            }
         }

         updates.add(new Update(UpdateOperation.CLUSTER_MERGED));
     }

+    protected long dfsTraverse(long parentIndex, long currIndex,
+            IOFSwitch currSw, Map<Long, IOFSwitch> switches,
+            Map<IOFSwitch, ClusterDFS> dfsList, Set<IOFSwitch> currSet,
+            Set<SwitchCluster> clusters) {
+
+        // Get the DFS object corresponding to the current switch
+        ClusterDFS currDFS = dfsList.get(currSw);
+        // Get all the links corresponding to this switch
+        Set<LinkTuple> links = switchLinks.get(currSw);
+
+        // Assign the DFS object the right values.
+        currDFS.setVisited(true);
+        currDFS.setParentDFSIndex(parentIndex);
+        currDFS.setDfsIndex(currIndex);
+        currIndex++;
+
+        // Traverse the graph through every outgoing link.
+        for (LinkTuple lt: links) {
+            IOFSwitch dstSw = lt.getDst().getSw();
+
+            // Ignore incoming links.
+            if (dstSw == currSw) continue;
+
+            // Ignore the outgoing link if it is STP-blocked.
+            if (linkStpBlocked(lt)) continue;
+
+            // Get the DFS object corresponding to dstSw
+            ClusterDFS dstDFS = dfsList.get(dstSw);
+
+            if (dstDFS.getDfsIndex() < currDFS.getDfsIndex()) {
+                // Could be a potential lowpoint
+                if (dstDFS.getDfsIndex() < currDFS.getLowpoint())
+                    currDFS.setLowpoint(dstDFS.getDfsIndex());
+            } else if (!dstDFS.isVisited()) {
+                // Make a DFS visit
+                currIndex = dfsTraverse(currDFS.getDfsIndex(), currIndex, dstSw,
+                                        switches, dfsList, currSet, clusters);
+                // Update the lowpoint after the visit
+                if (dstDFS.getLowpoint() < currDFS.getLowpoint())
+                    currDFS.setLowpoint(dstDFS.getLowpoint());
+            }
+            // else: the node was already visited with a higher DFS index;
+            // just ignore it.
+        }
+
+        // Add the current node to currSet.
+        currSet.add(currSw);
+
+        // Cluster computation.
+        // If the node's lowpoint is greater than its parent's DFS index,
+        // we need to form a new cluster with all the switches in currSet.
+        if (currDFS.getLowpoint() > currDFS.getParentDFSIndex()) {
+            // The switches visited so far form a strongly connected component.
+            // Create a new switch cluster and add the switches in the current
+            // set to it.
+            SwitchCluster sc = new SwitchCluster();
+            Iterator<IOFSwitch> e = currSet.iterator();
+            while (e.hasNext()) {
+                IOFSwitch sw = e.next();
+                sc.add(sw);
+                switchClusterMap.put(sw, sc);
+            }
+            // Delete all the nodes in the current set.
+            currSet.clear();
+            // Add the newly formed switch cluster to the cluster set.
+            clusters.add(sc);
+        }
+
+        return currIndex;
+    }

     public Set<IOFSwitch> getSwitchesInCluster(IOFSwitch sw) {
         SwitchCluster cluster = switchClusterMap.get(sw);
         if (cluster == null) {
......
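
The hunks above mean that cluster membership queries now reflect strong connectivity. A hedged caller sketch (hypothetical fragment, not part of this commit) of getSwitchesInCluster after updateClusters has run:

    // Hypothetical caller sketch.  Switches in the returned set are mutually
    // reachable with 'a' over non-STP-blocked links; a switch reachable only
    // across a unidirectional link is no longer reported as a cluster peer.
    boolean inSameCluster(TopologyImpl topology, IOFSwitch a, IOFSwitch b) {
        Set<IOFSwitch> members = topology.getSwitchesInCluster(a);
        return members != null && members.contains(b);
    }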
New file: net/floodlightcontroller/util/ClusterDFS.java

package net.floodlightcontroller.util;

public class ClusterDFS {
    long dfsIndex;
    long parentDFSIndex;
    long lowpoint;
    boolean visited;

    public ClusterDFS() {
        visited = false;
        dfsIndex = Long.MAX_VALUE;
        parentDFSIndex = Long.MAX_VALUE;
        lowpoint = Long.MAX_VALUE;
    }

    public long getDfsIndex() {
        return dfsIndex;
    }

    public void setDfsIndex(long dfsIndex) {
        this.dfsIndex = dfsIndex;
    }

    public long getParentDFSIndex() {
        return parentDFSIndex;
    }

    public void setParentDFSIndex(long parentDFSIndex) {
        this.parentDFSIndex = parentDFSIndex;
    }

    public long getLowpoint() {
        return lowpoint;
    }

    public void setLowpoint(long lowpoint) {
        this.lowpoint = lowpoint;
    }

    public boolean isVisited() {
        return visited;
    }

    public void setVisited(boolean visited) {
        this.visited = visited;
    }
}
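
A minimal, standalone illustration (not controller code; the values are illustrative) of how dfsTraverse above seeds and tests these fields:

import net.floodlightcontroller.util.ClusterDFS;

public class ClusterDFSUsage {
    public static void main(String[] args) {
        ClusterDFS root = new ClusterDFS();

        // The initial call in updateClusters is dfsTraverse(0, 1, sw, ...):
        root.setVisited(true);
        root.setParentDFSIndex(0);   // DFS index of the (virtual) parent
        root.setDfsIndex(1);         // first DFS index handed out

        // While scanning outgoing, non-blocked links, dfsTraverse lowers
        // lowpoint whenever it can reach a switch with a smaller DFS index;
        // if it never can, lowpoint keeps its Long.MAX_VALUE initial value.

        // Root test used by dfsTraverse: when lowpoint stays greater than the
        // parent's DFS index, the switches accumulated in currSet form one
        // strongly connected cluster.
        System.out.println(root.getLowpoint() > root.getParentDFSIndex()); // true
    }
}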
...@@ -59,7 +59,6 @@ import org.openflow.protocol.OFMatch; ...@@ -59,7 +59,6 @@ import org.openflow.protocol.OFMatch;
import org.openflow.protocol.OFMessage; import org.openflow.protocol.OFMessage;
import org.openflow.protocol.OFPacketIn; import org.openflow.protocol.OFPacketIn;
import org.openflow.protocol.OFPacketOut; import org.openflow.protocol.OFPacketOut;
import org.openflow.protocol.OFPort;
import org.openflow.protocol.OFType; import org.openflow.protocol.OFType;
import org.openflow.protocol.OFPacketIn.OFPacketInReason; import org.openflow.protocol.OFPacketIn.OFPacketInReason;
import org.openflow.protocol.action.OFAction; import org.openflow.protocol.action.OFAction;
......
@@ -265,41 +265,47 @@ public class TopologyImplTest extends FloodlightTestCase {
         // Create several switches
         IOFSwitch[] switches = new IOFSwitch[6];
+        Map<Long, IOFSwitch> switchMap = new HashMap<Long, IOFSwitch>();
         for (int i = 0; i < 6; i++) {
             switches[i] = createMockSwitch((long)i+1);
             switches[i].setSwitchClusterId((long)i+1);
             replay(switches[i]);
+            switchMap.put(new Long(switches[i].getId()), switches[i]);
         }
+        mockFloodlightProvider.setSwitches(switchMap);

         // Create links among the switches
         int linkInfoArray1[][] = {
             // SrcSw#, SrcPort#, SrcPortState, DstSw#, DstPort#, DstPortState
             { 1, 1, 0, 2, 1, 0},
-            { 2, 1, 0, 1, 1, 0},
-            { 1, 2, 0, 3, 1, 0},
-            { 3, 1, 0, 1, 2, 0},
             { 2, 2, 0, 3, 2, 0},
-            { 3, 2, 0, 2, 2, 0},
+            { 3, 1, 0, 1, 2, 0},
             { 2, 3, 0, 4, 2, 0},
-            { 4, 2, 0, 2, 3, 0},
             { 3, 3, 0, 4, 1, 0},
-            { 4, 1, 0, 3, 3, 0},
-            { 5, 3, 0, 6, 1, 0},
-            { 6, 1, 0, 5, 3, 0},
         };
         createLinks(topology, switches, linkInfoArray1);
         int expectedClusters1[][] = {
-            {1,2,3,4},
-            {5,6}
+            {1,2,3},
+            {4},
+            {5},
+            {6}
         };
         verifyClusters(topology, switches, expectedClusters1);

         int linkInfoArray2[][] = {
-            { 4, 3, 0, 5, 1, 0},
-            { 5, 1, 0, 4, 3, 0},
-            { 2, 4, 0, 5, 2, 0},
-            { 5, 2, 0, 2, 4, 0},
+            { 3, 2, 0, 2, 2, 0},
+            { 2, 1, 0, 1, 1, 0},
+            { 1, 2, 0, 3, 1, 0},
+            { 4, 2, 0, 2, 3, 0},
+            { 4, 1, 0, 3, 3, 0},
+            { 4, 3, 0, 5, 1, 0},
+            { 5, 1, 0, 4, 3, 0},
+            { 2, 4, 0, 5, 2, 0},
+            { 5, 2, 0, 2, 4, 0},
+            { 6, 1, 0, 5, 3, 0},
+            { 5, 3, 0, 6, 1, 0},
         };
         createLinks(topology, switches, linkInfoArray2);
         int expectedClusters2[][] = {
......