diff --git a/.gitignore b/.gitignore index 638f2ea666d0127a1cd5c45673558ed9074cc950..1205d8a1aee3d42aa13e0789222f04dca116f4cd 100644 --- a/.gitignore +++ b/.gitignore @@ -9,3 +9,4 @@ target thrift *.swp *.pyc +findbugs-results diff --git a/build.xml b/build.xml index 18eebc9aed874eaa100844dc7a1566e1d0e01335..0f95be469b708e6b9763abb7c48783df3a302d98 100644 --- a/build.xml +++ b/build.xml @@ -46,6 +46,8 @@ <property name="thrift.package" value="net/floodlightcontroller/packetstreamer/thrift"/> <property name="ant.build.javac.source" value="1.6"/> <property name="ant.build.javac.target" value="1.6"/> + <property name="findbugs.home" value="../build/findbugs-2.0.2"/> + <property name="findbugs.results" value="findbugs-results" /> <property name="lib" location="lib"/> <patternset id="lib"> @@ -65,6 +67,10 @@ <include name="jython-2.5.2.jar"/> <include name="libthrift-0.7.0.jar"/> <include name="guava-13.0.1.jar" /> + <include name="commons-logging-1.1.1.jar" /> + <include name="httpclient-4.2.2.jar" /> + <include name="httpcore-4.2.2.jar" /> + <include name="json-simple-1.1.1.jar" /> </patternset> <path id="classpath"> @@ -276,4 +282,34 @@ </exec> </target> + <target name="findbugs-xml" depends="init,compile"> + <taskdef name="findbugs" classname="edu.umd.cs.findbugs.anttask.FindBugsTask" classpath="${findbugs.home}/lib/findbugs-ant.jar"/> + <mkdir dir="${findbugs.results}"/> + <findbugs home="${findbugs.home}" + output="xml" + outputFile="${findbugs.results}/results.xml" > + <sourcePath path="${source}" /> + <sourcePath path="${thrift.out.dir}" /> + <class location="${build}" /> + <auxClasspath> + <path refid="classpath" /> + </auxClasspath> + </findbugs> + </target> + + <target name="findbugs" depends="init,compile"> + <taskdef name="findbugs" classname="edu.umd.cs.findbugs.anttask.FindBugsTask" classpath="${findbugs.home}/lib/findbugs-ant.jar"/> + <mkdir dir="${findbugs.results}"/> + <findbugs home="${findbugs.home}" + output="html" + outputFile="${findbugs.results}/results.html" > + <sourcePath path="${source}" /> + <sourcePath path="${thrift.out.dir}" /> + <class location="${build}" /> + <auxClasspath> + <path refid="classpath" /> + </auxClasspath> + </findbugs> + </target> + </project> diff --git a/lib/commons-logging-1.1.1.jar b/lib/commons-logging-1.1.1.jar new file mode 100644 index 0000000000000000000000000000000000000000..1deef144cb17ed2c11c6cdcdcb2d9530fa8d0b47 Binary files /dev/null and b/lib/commons-logging-1.1.1.jar differ diff --git a/lib/httpclient-4.2.2.jar b/lib/httpclient-4.2.2.jar new file mode 100644 index 0000000000000000000000000000000000000000..5f768c46407a6506d46766d1011f2fac342ec333 Binary files /dev/null and b/lib/httpclient-4.2.2.jar differ diff --git a/lib/httpcore-4.2.2.jar b/lib/httpcore-4.2.2.jar new file mode 100644 index 0000000000000000000000000000000000000000..a64cd2f5e3eff5ea0ff91a545ba2da4dc3fd612b Binary files /dev/null and b/lib/httpcore-4.2.2.jar differ diff --git a/lib/json-simple-1.1.1.jar b/lib/json-simple-1.1.1.jar new file mode 100644 index 0000000000000000000000000000000000000000..66347a6c86b7d6442358ca7643e4dc484fb01866 Binary files /dev/null and b/lib/json-simple-1.1.1.jar differ diff --git a/src/main/java/net/floodlightcontroller/devicemanager/internal/Device.java b/src/main/java/net/floodlightcontroller/devicemanager/internal/Device.java index 19cc0271bc38381ea4ff769e1f4a16200669f80a..9d59caac4a54fc004dd42ff70c209527ae2d9377 100755 --- a/src/main/java/net/floodlightcontroller/devicemanager/internal/Device.java +++ 
b/src/main/java/net/floodlightcontroller/devicemanager/internal/Device.java @@ -458,11 +458,14 @@ entity.getLastSeenTimestamp().getTime()); } else if (oldAPFlag) { // retain oldAP as is. Put the newAP in oldAPs for flagging // possible duplicates. - oldAPList = new ArrayList<AttachmentPoint>(); - if (oldAPs != null) oldAPList.addAll(oldAPs); - // Add ot oldAPList only if it was picked up from the oldAPList - oldAPList.add(newAP); - this.oldAPs = oldAPList; + oldAPList = new ArrayList<AttachmentPoint>(); + if (oldAPs != null) oldAPList.addAll(oldAPs); + // Add to oldAPList only if it was picked up from the oldAPList + oldAPList.add(newAP); + this.oldAPs = oldAPList; + if (!topology.isInSameBroadcastDomain(oldAP.getSw(), oldAP.getPort(), + newAP.getSw(), newAP.getPort())) + return true; // attachment point changed. } return false; } diff --git a/src/main/java/net/floodlightcontroller/devicemanager/internal/DeviceManagerImpl.java b/src/main/java/net/floodlightcontroller/devicemanager/internal/DeviceManagerImpl.java index ad418a5087462777625d782a4d146a59e918490a..8f9281243b563713c2196dd80616e1019f96628b 100755 --- a/src/main/java/net/floodlightcontroller/devicemanager/internal/DeviceManagerImpl.java +++ b/src/main/java/net/floodlightcontroller/devicemanager/internal/DeviceManagerImpl.java @@ -75,6 +75,7 @@ DeviceManagerImpl.DeviceUpdate.Change.*; import org.openflow.protocol.OFMatchWithSwDpid; import org.openflow.protocol.OFMessage; import org.openflow.protocol.OFPacketIn; +import org.openflow.protocol.OFPort; import org.openflow.protocol.OFType; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -253,8 +254,8 @@ IFlowReconcileListener, IInfoProvider, IHAListener { @Override public int compare(AttachmentPoint oldAP, AttachmentPoint newAP) { - //First compare based on L2 domain ID; + long oldSw = oldAP.getSw(); short oldPort = oldAP.getPort(); long oldDomain = topology.getL2DomainId(oldSw); @@ -268,6 +269,16 @@ IFlowReconcileListener, IInfoProvider, IHAListener { if (oldDomain < newDomain) return -1; else if (oldDomain > newDomain) return 1; + + // Give preference to OFPP_LOCAL always + if (oldPort != OFPort.OFPP_LOCAL.getValue() && + newPort == OFPort.OFPP_LOCAL.getValue()) { + return -1; + } else if (oldPort == OFPort.OFPP_LOCAL.getValue() && + newPort != OFPort.OFPP_LOCAL.getValue()) { + return 1; + } + // We expect that the last seen of the new AP is higher than // old AP, if it is not, just reverse and send the negative // of the result. diff --git a/src/main/java/net/floodlightcontroller/flowcache/PortDownReconciliation.java b/src/main/java/net/floodlightcontroller/flowcache/PortDownReconciliation.java index 8ec19a636fdbd812997fe074c0b26fa4b686fdda..7360b4f5af30eb2eb228d03cb38027ce920f259e 100644 --- a/src/main/java/net/floodlightcontroller/flowcache/PortDownReconciliation.java +++ b/src/main/java/net/floodlightcontroller/flowcache/PortDownReconciliation.java @@ -1,18 +1,18 @@ /** -* Copyright 2012, Jason Parraga, Marist College -* -* Licensed under the Apache License, Version 2.0 (the "License"); you may -* not use this file except in compliance with the License. You may obtain -* a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -* License for the specific language governing permissions and limitations -* under the License. 
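For illustration only: a self-contained sketch of the ordering that the patched attachment-point comparator above implements (compare L2 domain IDs first, then always prefer the switch-local port OFPP_LOCAL, then fall back to the last-seen time, simplified here). The class and field names below are hypothetical and are not part of this patch.

import java.util.Comparator;

public class AttachmentPointOrderSketch {
    // OpenFlow 1.0 value of OFPort.OFPP_LOCAL
    static final short OFPP_LOCAL = (short) 0xfffe;

    /** Hypothetical, simplified attachment-point record. */
    static class Ap {
        long l2DomainId;
        short port;
        long lastSeen;
        Ap(long domain, short port, long lastSeen) {
            this.l2DomainId = domain;
            this.port = port;
            this.lastSeen = lastSeen;
        }
    }

    static final Comparator<Ap> ORDER = new Comparator<Ap>() {
        @Override
        public int compare(Ap oldAP, Ap newAP) {
            // 1. compare L2 (OpenFlow island) domain IDs first
            if (oldAP.l2DomainId < newAP.l2DomainId) return -1;
            if (oldAP.l2DomainId > newAP.l2DomainId) return 1;
            // 2. always give preference to OFPP_LOCAL, mirroring the patch
            if (oldAP.port != OFPP_LOCAL && newAP.port == OFPP_LOCAL) return -1;
            if (oldAP.port == OFPP_LOCAL && newAP.port != OFPP_LOCAL) return 1;
            // 3. otherwise order by last-seen timestamp (simplification)
            if (oldAP.lastSeen < newAP.lastSeen) return -1;
            if (oldAP.lastSeen > newAP.lastSeen) return 1;
            return 0;
        }
    };

    public static void main(String[] args) {
        Ap plain = new Ap(1L, (short) 3, 200L);
        Ap local = new Ap(1L, OFPP_LOCAL, 100L);
        // prints -1: the non-local port sorts before the local one,
        // so the OFPP_LOCAL attachment point wins the comparison
        System.out.println(ORDER.compare(plain, local));
    }
}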
-**/ + * Copyright 2012, Jason Parraga, Marist College + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. You may obtain + * a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + **/ package net.floodlightcontroller.flowcache; import java.util.ArrayList; @@ -69,280 +69,281 @@ import net.floodlightcontroller.topology.ITopologyService; */ public class PortDownReconciliation implements IFloodlightModule, - ITopologyListener, IFlowReconcileListener { - protected static Logger log = LoggerFactory - .getLogger(PortDownReconciliation.class); - - protected ITopologyService topology; - protected IFloodlightProviderService floodlightProvider; - protected IFlowReconcileService frm; - protected ILinkDiscoveryService lds; - protected Map<Link, LinkInfo> links; - protected FloodlightContext cntx; - protected static boolean waiting = false; - protected int statsQueryXId; - protected static List<OFFlowStatisticsReply> statsReply; - - // ITopologyListener - @Override - public void topologyChanged() { - for (LDUpdate ldu : topology.getLastLinkUpdates()) { - if (ldu.getOperation().equals( - ILinkDiscovery.UpdateOperation.PORT_DOWN)) { - - // Get the switch ID for the OFMatchWithSwDpid object - long affectedSwitch = floodlightProvider.getSwitches() - .get(ldu.getSrc()).getId(); - - // Create an OFMatchReconcile object - OFMatchReconcile ofmr = new OFMatchReconcile(); - - // Generate an OFMatch objects for the OFMatchWithSwDpid object - OFMatch match = new OFMatch().setWildcards(OFMatch.OFPFW_ALL); - - // Generate the OFMatchWithSwDpid - OFMatchWithSwDpid ofmatchsw = new OFMatchWithSwDpid(match, - affectedSwitch); - - // Set the action to update the path to remove flows routing - // towards the downed port - ofmr.rcAction = OFMatchReconcile.ReconcileAction.UPDATE_PATH; - - // Set the match, with the switch dpid - ofmr.ofmWithSwDpid = ofmatchsw; - - // Assign the downed port to the OFMatchReconcile's outPort data member (I added this to - // the OFMatchReconcile class) - ofmr.outPort = ldu.getSrcPort(); - - // Tell the reconcile manager to reconcile matching flows - frm.reconcileFlow(ofmr); - } - } - } - - @Override - public Collection<Class<? extends IFloodlightService>> getModuleServices() { - return null; - } - - @Override - public Map<Class<? extends IFloodlightService>, IFloodlightService> getServiceImpls() { - return null; - } - - @Override - public Collection<Class<? extends IFloodlightService>> getModuleDependencies() { - Collection<Class<? extends IFloodlightService>> l = new ArrayList<Class<? 
extends IFloodlightService>>(); - l.add(IFloodlightProviderService.class); - l.add(ITopologyService.class); - l.add(IFlowReconcileService.class); - l.add(ILinkDiscoveryService.class); - return l; - } - - @Override - public void init(FloodlightModuleContext context) - throws FloodlightModuleException { - floodlightProvider = context - .getServiceImpl(IFloodlightProviderService.class); - topology = context.getServiceImpl(ITopologyService.class); - frm = context.getServiceImpl(IFlowReconcileService.class); - lds = context.getServiceImpl(ILinkDiscoveryService.class); - cntx = new FloodlightContext(); - } - - @Override - public void startUp(FloodlightModuleContext context) { - topology.addListener(this); - frm.addFlowReconcileListener(this); - } - - @Override - public String getName() { - return "portdownreconciliation"; - } - - @Override - public boolean isCallbackOrderingPrereq(OFType type, String name) { - return false; - } - - @Override - public boolean isCallbackOrderingPostreq(OFType type, String name) { - return true; - } - - /** - * Base case for the reconciliation of flows. This is triggered at the - * switch which is immediately affected by the PORT_DOWN event - * - * @return the Command whether to STOP or Continue - */ - @Override - public net.floodlightcontroller.core.IListener.Command reconcileFlows( - ArrayList<OFMatchReconcile> ofmRcList) { - if (lds != null) { - links = new HashMap<Link, LinkInfo>(); - // Get all the switch links from the topology - if (lds.getLinks() != null) - links.putAll(lds.getLinks()); - - for (OFMatchReconcile ofmr : ofmRcList) { - // We only care about OFMatchReconcile objects that wish to - // update the path to a switch - if (ofmr.rcAction - .equals(OFMatchReconcile.ReconcileAction.UPDATE_PATH)) { - // Get the switch object from the OFMatchReconcile - IOFSwitch sw = floodlightProvider.getSwitches().get( - ofmr.ofmWithSwDpid.getSwitchDataPathId()); - - // Map data structure that holds the invalid matches and the ingress ports of those matches - Map<Short, List<OFMatch>> invalidBaseIngressAndMatches = new HashMap<Short, List<OFMatch>>(); - - // Get the invalid flows - List<OFFlowStatisticsReply> flows = getFlows(sw, - ofmr.outPort); - - // Analyze all the flows with outPorts equaling the downed - // port and extract OFMatch's to trace back to neighbors - for (OFFlowStatisticsReply flow : flows) { - // Create a reference to the match for ease - OFMatch match = flow.getMatch(); - - // Here we utilize an index of input ports which point - // to multiple invalid matches - if (invalidBaseIngressAndMatches.containsKey(match - .getInputPort())) - // If the input port is already in the index, add - // the match to it's list - invalidBaseIngressAndMatches.get( - match.getInputPort()).add(match); - else { - // Otherwise create a new list and add it to the - // index - List<OFMatch> matches = new ArrayList<OFMatch>(); - matches.add(match); - invalidBaseIngressAndMatches.put( - match.getInputPort(), matches); - } - } - - // Remove invalid flows from the base switch, if they exist - if (!flows.isEmpty()) { - log.debug("Removing flows on switch : " + sw.getId() - + " with outport: " + ofmr.outPort); - clearFlowMods(sw, ofmr.outPort); - } - - // Create a list of neighboring switches we need to remove - // invalid flows from - Map<IOFSwitch, Map<Short, List<OFMatch>>> neighborSwitches = new HashMap<IOFSwitch, Map<Short, List<OFMatch>>>(); - - // Loop through all the links - for (Link link : links.keySet()) { - // Filter out links we care about - if (link.getDst() == 
sw.getId()) { - // Loop through the links to neighboring switches - // which have invalid flows - for (Entry<Short, List<OFMatch>> invalidBaseIngressAndMatch : invalidBaseIngressAndMatches - .entrySet()) { - // Find links on the network which link to the - // ingress ports that have invalidly routed - // flows - if (link.getDstPort() == invalidBaseIngressAndMatch - .getKey()) { - Map<Short, List<OFMatch>> invalidNeighborOutportAndMatch = new HashMap<Short, List<OFMatch>>(); - // Insert the neighbor's outPort to the base - // switch and the invalid match - invalidNeighborOutportAndMatch.put(link - .getSrcPort(), - invalidBaseIngressAndMatch - .getValue()); - // Link a neighbor switch's invalid match - // and outport to their Switch object - neighborSwitches.put(floodlightProvider - .getSwitches().get(link.getSrc()), - invalidNeighborOutportAndMatch); - } - } - } - } - log.debug("We have " + neighborSwitches.size() - + " neighboring switches to deal with!"); - // Loop through all the switches we found to have potential - // issues - for (IOFSwitch neighborSwitch : neighborSwitches.keySet()) { - log.debug("NeighborSwitch ID : " - + neighborSwitch.getId()); - if (neighborSwitches.get(neighborSwitch) != null) - deleteInvalidFlows(neighborSwitch, - neighborSwitches.get(neighborSwitch)); - } - } - return Command.CONTINUE; - } - } else { - log.error("Link Discovery Service Is Null"); - } - return Command.CONTINUE; - } - /** - * - * @param sw - * the switch object that we wish to get flows from - * @param outPort - * the output action port we wish to find flows with - * @return a list of OFFlowStatisticsReply objects or essentially flows - */ - public List<OFFlowStatisticsReply> getFlows(IOFSwitch sw, Short outPort) { - - statsReply = new ArrayList<OFFlowStatisticsReply>(); - List<OFStatistics> values = null; - Future<List<OFStatistics>> future; - - // Statistics request object for getting flows + ITopologyListener, IFlowReconcileListener { + protected static Logger log = LoggerFactory.getLogger(PortDownReconciliation.class); + + protected ITopologyService topology; + protected IFloodlightProviderService floodlightProvider; + protected IFlowReconcileService frm; + protected ILinkDiscoveryService lds; + protected Map<Link, LinkInfo> links; + protected FloodlightContext cntx; + protected static boolean waiting = false; + protected int statsQueryXId; + protected static List<OFFlowStatisticsReply> statsReply; + + // ITopologyListener + @Override + public void topologyChanged() { + for (LDUpdate ldu : topology.getLastLinkUpdates()) { + if (ldu.getOperation() + .equals(ILinkDiscovery.UpdateOperation.PORT_DOWN)) { + + // Get the switch ID for the OFMatchWithSwDpid object + long affectedSwitch = floodlightProvider.getSwitches() + .get(ldu.getSrc()) + .getId(); + + // Create an OFMatchReconcile object + OFMatchReconcile ofmr = new OFMatchReconcile(); + + // Generate an OFMatch objects for the OFMatchWithSwDpid object + OFMatch match = new OFMatch().setWildcards(OFMatch.OFPFW_ALL); + + // Generate the OFMatchWithSwDpid + OFMatchWithSwDpid ofmatchsw = new OFMatchWithSwDpid(match, + affectedSwitch); + + // Set the action to update the path to remove flows routing + // towards the downed port + ofmr.rcAction = OFMatchReconcile.ReconcileAction.UPDATE_PATH; + + // Set the match, with the switch dpid + ofmr.ofmWithSwDpid = ofmatchsw; + + // Assign the downed port to the OFMatchReconcile's outPort data + // member (I added this to + // the OFMatchReconcile class) + ofmr.outPort = ldu.getSrcPort(); + + // Tell the 
reconcile manager to reconcile matching flows + frm.reconcileFlow(ofmr); + } + } + } + + @Override + public Collection<Class<? extends IFloodlightService>> + getModuleServices() { + return null; + } + + @Override + public Map<Class<? extends IFloodlightService>, IFloodlightService> + getServiceImpls() { + return null; + } + + @Override + public Collection<Class<? extends IFloodlightService>> + getModuleDependencies() { + Collection<Class<? extends IFloodlightService>> l = new ArrayList<Class<? extends IFloodlightService>>(); + l.add(IFloodlightProviderService.class); + l.add(ITopologyService.class); + l.add(IFlowReconcileService.class); + l.add(ILinkDiscoveryService.class); + return l; + } + + @Override + public + void + init(FloodlightModuleContext context) + throws FloodlightModuleException { + floodlightProvider = context.getServiceImpl(IFloodlightProviderService.class); + topology = context.getServiceImpl(ITopologyService.class); + frm = context.getServiceImpl(IFlowReconcileService.class); + lds = context.getServiceImpl(ILinkDiscoveryService.class); + cntx = new FloodlightContext(); + } + + @Override + public void startUp(FloodlightModuleContext context) { + topology.addListener(this); + frm.addFlowReconcileListener(this); + } + + @Override + public String getName() { + return "portdownreconciliation"; + } + + @Override + public boolean isCallbackOrderingPrereq(OFType type, String name) { + return false; + } + + @Override + public boolean isCallbackOrderingPostreq(OFType type, String name) { + return true; + } + + /** + * Base case for the reconciliation of flows. This is triggered at the + * switch which is immediately affected by the PORT_DOWN event + * + * @return the Command whether to STOP or Continue + */ + @Override + public net.floodlightcontroller.core.IListener.Command + reconcileFlows(ArrayList<OFMatchReconcile> ofmRcList) { + if (lds != null) { + links = new HashMap<Link, LinkInfo>(); + // Get all the switch links from the topology + if (lds.getLinks() != null) links.putAll(lds.getLinks()); + + for (OFMatchReconcile ofmr : ofmRcList) { + // We only care about OFMatchReconcile objects that wish to + // update the path to a switch + if (ofmr.rcAction.equals(OFMatchReconcile.ReconcileAction.UPDATE_PATH)) { + // Get the switch object from the OFMatchReconcile + IOFSwitch sw = floodlightProvider.getSwitches() + .get(ofmr.ofmWithSwDpid.getSwitchDataPathId()); + + // Map data structure that holds the invalid matches and the + // ingress ports of those matches + Map<Short, List<OFMatch>> invalidBaseIngressAndMatches = new HashMap<Short, List<OFMatch>>(); + + // Get the invalid flows + List<OFFlowStatisticsReply> flows = getFlows(sw, + ofmr.outPort); + + // Analyze all the flows with outPorts equaling the downed + // port and extract OFMatch's to trace back to neighbors + for (OFFlowStatisticsReply flow : flows) { + // Create a reference to the match for ease + OFMatch match = flow.getMatch(); + + // Here we utilize an index of input ports which point + // to multiple invalid matches + if (invalidBaseIngressAndMatches.containsKey(match.getInputPort())) + // If the input port is already in the index, add + // the match to it's list + invalidBaseIngressAndMatches.get(match.getInputPort()) + .add(match); + else { + // Otherwise create a new list and add it to the + // index + List<OFMatch> matches = new ArrayList<OFMatch>(); + matches.add(match); + invalidBaseIngressAndMatches.put(match.getInputPort(), + matches); + } + } + + // Remove invalid flows from the base switch, if they exist 
+ if (!flows.isEmpty()) { + log.debug("Removing flows on switch : " + sw.getId() + + " with outport: " + ofmr.outPort); + clearFlowMods(sw, ofmr.outPort); + } + + // Create a list of neighboring switches we need to remove + // invalid flows from + Map<IOFSwitch, Map<Short, List<OFMatch>>> neighborSwitches = new HashMap<IOFSwitch, Map<Short, List<OFMatch>>>(); + + // Loop through all the links + for (Link link : links.keySet()) { + // Filter out links we care about + if (link.getDst() == sw.getId()) { + // Loop through the links to neighboring switches + // which have invalid flows + for (Entry<Short, List<OFMatch>> invalidBaseIngressAndMatch : invalidBaseIngressAndMatches.entrySet()) { + // Find links on the network which link to the + // ingress ports that have invalidly routed + // flows + if (link.getDstPort() == invalidBaseIngressAndMatch.getKey()) { + Map<Short, List<OFMatch>> invalidNeighborOutportAndMatch = new HashMap<Short, List<OFMatch>>(); + // Insert the neighbor's outPort to the base + // switch and the invalid match + invalidNeighborOutportAndMatch.put(link.getSrcPort(), + invalidBaseIngressAndMatch.getValue()); + // Link a neighbor switch's invalid match + // and outport to their Switch object + neighborSwitches.put(floodlightProvider.getSwitches() + .get(link.getSrc()), + invalidNeighborOutportAndMatch); + } + } + } + } + log.debug("We have " + neighborSwitches.size() + + " neighboring switches to deal with!"); + // Loop through all the switches we found to have potential + // issues + for (IOFSwitch neighborSwitch : neighborSwitches.keySet()) { + log.debug("NeighborSwitch ID : " + + neighborSwitch.getId()); + if (neighborSwitches.get(neighborSwitch) != null) + deleteInvalidFlows(neighborSwitch, + neighborSwitches.get(neighborSwitch)); + } + } + return Command.CONTINUE; + } + } else { + log.error("Link Discovery Service Is Null"); + } + return Command.CONTINUE; + } + + /** + * @param sw + * the switch object that we wish to get flows from + * @param outPort + * the output action port we wish to find flows with + * @return a list of OFFlowStatisticsReply objects or essentially flows + */ + public List<OFFlowStatisticsReply> getFlows(IOFSwitch sw, Short outPort) { + + statsReply = new ArrayList<OFFlowStatisticsReply>(); + List<OFStatistics> values = null; + Future<List<OFStatistics>> future; + + // Statistics request object for getting flows OFStatisticsRequest req = new OFStatisticsRequest(); req.setStatisticType(OFStatisticsType.FLOW); int requestLength = req.getLengthU(); - OFFlowStatisticsRequest specificReq = new OFFlowStatisticsRequest(); + OFFlowStatisticsRequest specificReq = new OFFlowStatisticsRequest(); specificReq.setMatch(new OFMatch().setWildcards(0xffffffff)); specificReq.setOutPort(outPort); specificReq.setTableId((byte) 0xff); - req.setStatistics(Collections.singletonList((OFStatistics)specificReq)); + req.setStatistics(Collections.singletonList((OFStatistics) specificReq)); requestLength += specificReq.getLength(); req.setLengthU(requestLength); - + try { - //System.out.println(sw.getStatistics(req)); - future = sw.getStatistics(req); + // System.out.println(sw.getStatistics(req)); + future = sw.getStatistics(req); values = future.get(10, TimeUnit.SECONDS); - if(values != null){ - for(OFStatistics stat : values){ - statsReply.add((OFFlowStatisticsReply)stat); - } + if (values != null) { + for (OFStatistics stat : values) { + statsReply.add((OFFlowStatisticsReply) stat); + } } } catch (Exception e) { log.error("Failure retrieving statistics from switch " + 
sw, e); } - + return statsReply; - } - - /** - * - * @param sw The switch we wish to remove flows from - * @param outPort The specific Output Action OutPort of specific flows we wish to delete - */ - public void clearFlowMods(IOFSwitch sw, Short outPort) { - // Delete all pre-existing flows with the same output action port or outPort - OFMatch match = new OFMatch().setWildcards(OFMatch.OFPFW_ALL); + } + + /** + * @param sw + * The switch we wish to remove flows from + * @param outPort + * The specific Output Action OutPort of specific flows we wish + * to delete + */ + public void clearFlowMods(IOFSwitch sw, Short outPort) { + // Delete all pre-existing flows with the same output action port or + // outPort + OFMatch match = new OFMatch().setWildcards(OFMatch.OFPFW_ALL); OFMessage fm = ((OFFlowMod) floodlightProvider.getOFMessageFactory() - .getMessage(OFType.FLOW_MOD)) - .setMatch(match) - .setCommand(OFFlowMod.OFPFC_DELETE) - .setOutPort(outPort) - .setLength(U16.t(OFFlowMod.MINIMUM_LENGTH)); + .getMessage(OFType.FLOW_MOD)).setMatch(match) + .setCommand(OFFlowMod.OFPFC_DELETE) + .setOutPort(outPort) + .setLength(U16.t(OFFlowMod.MINIMUM_LENGTH)); try { List<OFMessage> msglist = new ArrayList<OFMessage>(1); msglist.add(fm); @@ -351,22 +352,26 @@ public class PortDownReconciliation implements IFloodlightModule, log.error("Failed to clear flows on switch {} - {}", this, e); } } - - /** - * - * @param sw The switch we wish to remove flows from - * @param match The specific OFMatch object of specific flows we wish to delete - * @param outPort The specific Output Action OutPort of specific flows we wish to delete - */ - public void clearFlowMods(IOFSwitch sw, OFMatch match, Short outPort) { - // Delete pre-existing flows with the same match, and output action port or outPort - match.setWildcards(OFMatch.OFPFW_ALL); + + /** + * @param sw + * The switch we wish to remove flows from + * @param match + * The specific OFMatch object of specific flows we wish to + * delete + * @param outPort + * The specific Output Action OutPort of specific flows we wish + * to delete + */ + public void clearFlowMods(IOFSwitch sw, OFMatch match, Short outPort) { + // Delete pre-existing flows with the same match, and output action port + // or outPort + match.setWildcards(OFMatch.OFPFW_ALL); OFMessage fm = ((OFFlowMod) floodlightProvider.getOFMessageFactory() - .getMessage(OFType.FLOW_MOD)) - .setMatch(match) - .setCommand(OFFlowMod.OFPFC_DELETE) - .setOutPort(outPort) - .setLength(U16.t(OFFlowMod.MINIMUM_LENGTH)); + .getMessage(OFType.FLOW_MOD)).setMatch(match) + .setCommand(OFFlowMod.OFPFC_DELETE) + .setOutPort(outPort) + .setLength(U16.t(OFFlowMod.MINIMUM_LENGTH)); try { List<OFMessage> msglist = new ArrayList<OFMessage>(1); msglist.add(fm); @@ -376,124 +381,115 @@ public class PortDownReconciliation implements IFloodlightModule, } } - /** - * Deletes flows with similar matches and output action ports on the - * specified switch - * - * @param sw - * the switch to query flows on - * @param match - * the problematic OFMatch from the base switch which we wish to - * find and remove - * @param outPort - * the output action port wanted from the flows, which follows - * the route to the base switch - */ - public void deleteInvalidFlows(IOFSwitch sw, - Map<Short, List<OFMatch>> invalidOutportAndMatch) { - log.debug("Deleting invalid flows on switch : " + sw.getId()); - - // A map that holds the input ports and invalid matches on a switch - Map<Short, List<OFMatch>> invalidNeighborIngressAndMatches = new HashMap<Short, 
List<OFMatch>>(); - - for (Short outPort : invalidOutportAndMatch.keySet()) { - // Get the flows on the switch - List<OFFlowStatisticsReply> flows = getFlows(sw, outPort); - - // Analyze all the flows with outPorts pointing to problematic route - for (OFFlowStatisticsReply flow : flows) { - // Loop through all the problematic matches - for (OFMatch match : invalidOutportAndMatch.get(outPort)) { - // Compare the problematic matches with the match of the flow on the switch - if (HexString.toHexString( - flow.getMatch().getDataLayerDestination()).equals( - HexString.toHexString(match - .getDataLayerDestination())) - && HexString.toHexString( - flow.getMatch().getDataLayerSource()) - .equals(HexString.toHexString(match - .getDataLayerSource())) - && flow.getMatch().getDataLayerType() == match - .getDataLayerType() - && flow.getMatch().getDataLayerVirtualLan() == match - .getDataLayerVirtualLan() - && flow.getMatch().getNetworkDestination() == match - .getNetworkDestination() - && flow.getMatch().getNetworkDestinationMaskLen() == match - .getNetworkDestinationMaskLen() - && flow.getMatch().getNetworkProtocol() == match - .getNetworkProtocol() - && flow.getMatch().getNetworkSource() == match - .getNetworkSource() - && flow.getMatch().getNetworkSourceMaskLen() == match - .getNetworkSourceMaskLen() - && flow.getMatch().getNetworkTypeOfService() == match - .getNetworkTypeOfService()) { - - // Here we utilize an index of input ports which point - // to multiple invalid matches - if (invalidNeighborIngressAndMatches.containsKey(match - .getInputPort())) - // If the input port is already in the index, add - // the match to it's list - invalidNeighborIngressAndMatches.get( - match.getInputPort()).add(match); - else { - // Otherwise create a new list and add it to the - // index - List<OFMatch> matches = new ArrayList<OFMatch>(); - matches.add(match); - invalidNeighborIngressAndMatches.put( - match.getInputPort(), matches); - } - // Remove flows from the switch with the invalid match and outPort - clearFlowMods(sw, flow.getMatch(), outPort); - } - } - } - - // Create a list of neighboring switches we need to check for - // invalid flows - Map<IOFSwitch, Map<Short, List<OFMatch>>> neighborSwitches = new HashMap<IOFSwitch, Map<Short, List<OFMatch>>>(); - - // Loop through all the links - for (Link link : links.keySet()) { - // Filter out links we care about - if (link.getDst() == sw.getId()) { - // Loop through the ingressPorts that are involved in - // invalid flows on neighboring switches - for (Entry<Short, List<OFMatch>> ingressPort : invalidNeighborIngressAndMatches - .entrySet()) { - // Filter out invalid links by matching the link - // destination port to our invalid flows ingress port - if (link.getDstPort() == ingressPort.getKey()) { - // Generate a match and outPort map since I don't - // want to create an object - Map<Short, List<OFMatch>> invalidNeighborOutportAndMatch = new HashMap<Short, List<OFMatch>>(); - invalidNeighborOutportAndMatch.put( - link.getSrcPort(), ingressPort.getValue()); - // Link a neighbor switch's invalid match and - // outport to their Switch object - neighborSwitches.put(floodlightProvider - .getSwitches().get(link.getSrc()), - invalidNeighborOutportAndMatch); - } - } - } - } - log.debug("We have " + neighborSwitches.size() - + " neighbors to deal with!"); - - // Loop through all the neighbor switches we found to have - // invalid matches - for (IOFSwitch neighborSwitch : neighborSwitches.keySet()) { - log.debug("NeighborSwitch ID : " - + neighborSwitch.getId()); - // 
Recursively seek out and delete invalid flows on the - // neighbor switch - deleteInvalidFlows(neighborSwitch, - neighborSwitches.get(neighborSwitch)); - } - } - } -} \ No newline at end of file + /** + * Deletes flows with similar matches and output action ports on the + * specified switch + * + * @param sw + * the switch to query flows on + * @param match + * the problematic OFMatch from the base switch which we wish to + * find and remove + * @param outPort + * the output action port wanted from the flows, which follows + * the route to the base switch + */ + public + void + deleteInvalidFlows(IOFSwitch sw, + Map<Short, List<OFMatch>> invalidOutportAndMatch) { + log.debug("Deleting invalid flows on switch : " + sw.getId()); + + // A map that holds the input ports and invalid matches on a switch + Map<Short, List<OFMatch>> invalidNeighborIngressAndMatches = new HashMap<Short, List<OFMatch>>(); + + for (Short outPort : invalidOutportAndMatch.keySet()) { + // Get the flows on the switch + List<OFFlowStatisticsReply> flows = getFlows(sw, outPort); + + // Analyze all the flows with outPorts pointing to problematic route + for (OFFlowStatisticsReply flow : flows) { + // Loop through all the problematic matches + for (OFMatch match : invalidOutportAndMatch.get(outPort)) { + // Compare the problematic matches with the match of the + // flow on the switch + if (HexString.toHexString(flow.getMatch() + .getDataLayerDestination()) + .equals(HexString.toHexString(match.getDataLayerDestination())) + && HexString.toHexString(flow.getMatch() + .getDataLayerSource()) + .equals(HexString.toHexString(match.getDataLayerSource())) + && flow.getMatch().getDataLayerType() == match.getDataLayerType() + && flow.getMatch().getDataLayerVirtualLan() == match.getDataLayerVirtualLan() + && flow.getMatch().getNetworkDestination() == match.getNetworkDestination() + && flow.getMatch().getNetworkDestinationMaskLen() == match.getNetworkDestinationMaskLen() + && flow.getMatch().getNetworkProtocol() == match.getNetworkProtocol() + && flow.getMatch().getNetworkSource() == match.getNetworkSource() + && flow.getMatch().getNetworkSourceMaskLen() == match.getNetworkSourceMaskLen() + && flow.getMatch().getNetworkTypeOfService() == match.getNetworkTypeOfService()) { + + // Here we utilize an index of input ports which point + // to multiple invalid matches + if (invalidNeighborIngressAndMatches.containsKey(match.getInputPort())) + // If the input port is already in the index, add + // the match to it's list + invalidNeighborIngressAndMatches.get(match.getInputPort()) + .add(match); + else { + // Otherwise create a new list and add it to the + // index + List<OFMatch> matches = new ArrayList<OFMatch>(); + matches.add(match); + invalidNeighborIngressAndMatches.put(match.getInputPort(), + matches); + } + // Remove flows from the switch with the invalid match + // and outPort + clearFlowMods(sw, flow.getMatch(), outPort); + } + } + } + + // Create a list of neighboring switches we need to check for + // invalid flows + Map<IOFSwitch, Map<Short, List<OFMatch>>> neighborSwitches = new HashMap<IOFSwitch, Map<Short, List<OFMatch>>>(); + + // Loop through all the links + for (Link link : links.keySet()) { + // Filter out links we care about + if (link.getDst() == sw.getId()) { + // Loop through the ingressPorts that are involved in + // invalid flows on neighboring switches + for (Entry<Short, List<OFMatch>> ingressPort : invalidNeighborIngressAndMatches.entrySet()) { + // Filter out invalid links by matching the link + // destination 
port to our invalid flows ingress port + if (link.getDstPort() == ingressPort.getKey()) { + // Generate a match and outPort map since I don't + // want to create an object + Map<Short, List<OFMatch>> invalidNeighborOutportAndMatch = new HashMap<Short, List<OFMatch>>(); + invalidNeighborOutportAndMatch.put(link.getSrcPort(), + ingressPort.getValue()); + // Link a neighbor switch's invalid match and + // outport to their Switch object + neighborSwitches.put(floodlightProvider.getSwitches() + .get(link.getSrc()), + invalidNeighborOutportAndMatch); + } + } + } + } + log.debug("We have " + neighborSwitches.size() + + " neighbors to deal with!"); + + // Loop through all the neighbor switches we found to have + // invalid matches + for (IOFSwitch neighborSwitch : neighborSwitches.keySet()) { + log.debug("NeighborSwitch ID : " + neighborSwitch.getId()); + // Recursively seek out and delete invalid flows on the + // neighbor switch + deleteInvalidFlows(neighborSwitch, + neighborSwitches.get(neighborSwitch)); + } + } + } +} diff --git a/src/main/java/net/floodlightcontroller/loadbalancer/ILoadBalancerService.java b/src/main/java/net/floodlightcontroller/loadbalancer/ILoadBalancerService.java new file mode 100644 index 0000000000000000000000000000000000000000..c88ee382a232f4113520227925a57a8f41d1ee07 --- /dev/null +++ b/src/main/java/net/floodlightcontroller/loadbalancer/ILoadBalancerService.java @@ -0,0 +1,142 @@ +package net.floodlightcontroller.loadbalancer; + +import java.util.Collection; + +import net.floodlightcontroller.core.module.IFloodlightService; + +public interface ILoadBalancerService extends IFloodlightService { + + /** + * List all current Vips. + */ + public Collection<LBVip> listVips(); + + /** + * List selected Vip by its ID. + * @param vipId Id of requested Vip + */ + public Collection<LBVip> listVip(String vipId); + + /** + * Create and return a new Vip. + * @param LBVip vip: data structure with caller provided Vip attributes + * @return LBVip: Created Vip + */ + public LBVip createVip(LBVip vip); + + /** + * Update and return an existing Vip. + * @param LBVip vip: data structure with caller provided Vip attributes + * @return LBVip: Updated Vip + */ + public LBVip updateVip(LBVip vip); + + /** + * Remove an existing Vip. + * @param String vipId + * @return int: removal status + */ + public int removeVip(String vipId); + + /** + * List all current pools. + */ + public Collection<LBPool> listPools(); + + /** + * List selected pool by its ID. + * @param poolId Id of requested pool + */ + public Collection<LBPool> listPool(String poolId); + + /** + * Create and return a new pool. + * @param LBPool pool: data structure with caller provided pool attributes + * @return LBPool: Created pool + */ + public LBPool createPool(LBPool pool); + + /** + * Update and return an existing pool. + * @param LBPool pool: data structure with caller provided pool attributes + * @return LBPool: Updated pool + */ + public LBPool updatePool(LBPool pool); + + /** + * Remove an existing pool. + * @param String poolId + * @return int: removal status + */ + public int removePool(String poolId); + + /** + * List all current members. + */ + public Collection<LBMember> listMembers(); + + /** + * List selected member by its ID. + * @param memberId Id of requested member + */ + public Collection<LBMember> listMember(String memberId); + + /** + * List all members in a specified pool. + */ + public Collection<LBMember> listMembersByPool(String poolId); + + /** + * Create and return a new member. 
+ * @param LBMember member: data structure with caller provided member attributes + * @return LBMember: Created member + */ + public LBMember createMember(LBMember member); + + /** + * Update and return an existing member. + * @param LBMember member: data structure with caller provided member attributes + * @return LBMember: Updated member + */ + public LBMember updateMember(LBMember member); + + /** + * Remove an existing member. + * @param String memberId + * @return int: removal status + */ + public int removeMember(String memberId); + + /** + * List all current monitors. + */ + public Collection<LBMonitor> listMonitors(); + + /** + * List selected monitor by its ID. + * @param monitorId Id of requested monitor + */ + public Collection<LBMonitor> listMonitor(String monitorId); + + /** + * Create and return a new monitor. + * @param LBMonitor monitor: data structure with caller provided monitor attributes + * @return LBMonitor: Created monitor + */ + public LBMonitor createMonitor(LBMonitor monitor); + + /** + * Update and return an existing monitor. + * @param LBMonitor monitor: data structure with caller provided pool attributes + * @return LBMonitor: Updated monitor + */ + public LBMonitor updateMonitor(LBMonitor monitor); + + /** + * Remove an existing monitor. + * @param String monitorId + * @return int: removal status + */ + public int removeMonitor(String monitorId); + +} diff --git a/src/main/java/net/floodlightcontroller/loadbalancer/LBMember.java b/src/main/java/net/floodlightcontroller/loadbalancer/LBMember.java new file mode 100644 index 0000000000000000000000000000000000000000..d1109dea262c7b111876353968dd7096b2039188 --- /dev/null +++ b/src/main/java/net/floodlightcontroller/loadbalancer/LBMember.java @@ -0,0 +1,38 @@ +package net.floodlightcontroller.loadbalancer; + +import org.codehaus.jackson.map.annotate.JsonSerialize; + +/** + * Data structure for Load Balancer based on + * Quantum proposal http://wiki.openstack.org/LBaaS/CoreResourceModel/proposal + * + * @author KC Wang + */ + +@JsonSerialize(using=LBMemberSerializer.class) +public class LBMember { + protected String id; + protected int address; + protected short port; + protected String macString; + + protected int connectionLimit; + protected short adminState; + protected short status; + + protected String poolId; + protected String vipId; + + public LBMember() { + id = String.valueOf((int) (Math.random()*10000)); + address = 0; + macString = null; + port = 0; + + connectionLimit = 0; + adminState = 0; + status = 0; + poolId = null; + vipId = null; + } +} diff --git a/src/main/java/net/floodlightcontroller/loadbalancer/LBMemberSerializer.java b/src/main/java/net/floodlightcontroller/loadbalancer/LBMemberSerializer.java new file mode 100644 index 0000000000000000000000000000000000000000..a0726df2eb1b4a28b9d7fb817c281c1b65d5f64a --- /dev/null +++ b/src/main/java/net/floodlightcontroller/loadbalancer/LBMemberSerializer.java @@ -0,0 +1,27 @@ +package net.floodlightcontroller.loadbalancer; + +import java.io.IOException; + +import org.codehaus.jackson.JsonGenerator; +import org.codehaus.jackson.JsonProcessingException; +import org.codehaus.jackson.map.JsonSerializer; +import org.codehaus.jackson.map.SerializerProvider; + +public class LBMemberSerializer extends JsonSerializer<LBMember>{ + + @Override + public void serialize(LBMember member, JsonGenerator jGen, + SerializerProvider serializer) throws IOException, + JsonProcessingException { + jGen.writeStartObject(); + + jGen.writeStringField("id", member.id); + 
jGen.writeStringField("address", String.valueOf(member.address)); + jGen.writeStringField("port", Short.toString(member.port)); + jGen.writeStringField("poolId", member.poolId); + jGen.writeStringField("vipId", member.vipId); + + jGen.writeEndObject(); + } + +} diff --git a/src/main/java/net/floodlightcontroller/loadbalancer/LBMonitor.java b/src/main/java/net/floodlightcontroller/loadbalancer/LBMonitor.java new file mode 100644 index 0000000000000000000000000000000000000000..f246f962266554236bdeea9ef1bc486012159cdd --- /dev/null +++ b/src/main/java/net/floodlightcontroller/loadbalancer/LBMonitor.java @@ -0,0 +1,44 @@ +package net.floodlightcontroller.loadbalancer; + +/** + * Data structure for Load Balancer based on + * Quantum proposal http://wiki.openstack.org/LBaaS/CoreResourceModel/proposal + * + * @author KC Wang + */ + +public class LBMonitor { + protected String id; + protected String name; + protected short type; + protected short delay; + protected short timeout; + protected short attemptsBeforeDeactivation; + + protected String netId; + protected int address; + protected byte protocol; + protected short port; + + //protected path?? + + protected short adminState; + protected short status; + + public LBMonitor() { + id = null; + name = null; + type = 0; + delay = 0; + timeout = 0; + attemptsBeforeDeactivation = 0; + netId = null; + address = 0; + protocol = 0; + port = 0; + adminState = 0; + status = 0; + + } + +} diff --git a/src/main/java/net/floodlightcontroller/loadbalancer/LBPool.java b/src/main/java/net/floodlightcontroller/loadbalancer/LBPool.java new file mode 100644 index 0000000000000000000000000000000000000000..f628765ed2ccdc98b90572e0dd990029ffe4eee6 --- /dev/null +++ b/src/main/java/net/floodlightcontroller/loadbalancer/LBPool.java @@ -0,0 +1,58 @@ +package net.floodlightcontroller.loadbalancer; + +import java.util.ArrayList; + +import org.codehaus.jackson.map.annotate.JsonSerialize; + +import net.floodlightcontroller.loadbalancer.LoadBalancer.IPClient; + +/** + * Data structure for Load Balancer based on + * Quantum proposal http://wiki.openstack.org/LBaaS/CoreResourceModel/proposal + * + * @author KC Wang + */ + + +@JsonSerialize(using=LBPoolSerializer.class) +public class LBPool { + protected String id; + protected String name; + protected String tenantId; + protected String netId; + protected short lbMethod; + protected byte protocol; + protected ArrayList<String> members; + protected ArrayList<String> monitors; + protected short adminState; + protected short status; + + protected String vipId; + + protected int previousMemberIndex; + + public LBPool() { + id = String.valueOf((int) (Math.random()*10000)); + name = null; + tenantId = null; + netId = null; + lbMethod = 0; + protocol = 0; + members = new ArrayList<String>(); + monitors = new ArrayList<String>(); + adminState = 0; + status = 0; + previousMemberIndex = -1; + } + + public String pickMember(IPClient client) { + // simple round robin for now; add different lbmethod later + if (members.size() > 0) { + previousMemberIndex = (previousMemberIndex + 1) % members.size(); + return members.get(previousMemberIndex); + } else { + return null; + } + } + +} diff --git a/src/main/java/net/floodlightcontroller/loadbalancer/LBPoolSerializer.java b/src/main/java/net/floodlightcontroller/loadbalancer/LBPoolSerializer.java new file mode 100644 index 0000000000000000000000000000000000000000..b637eb53b151409c326ea5b76eac78f7a1f959e3 --- /dev/null +++ b/src/main/java/net/floodlightcontroller/loadbalancer/LBPoolSerializer.java 
@@ -0,0 +1,28 @@ +package net.floodlightcontroller.loadbalancer; + +import java.io.IOException; + +import org.codehaus.jackson.JsonGenerator; +import org.codehaus.jackson.JsonProcessingException; +import org.codehaus.jackson.map.JsonSerializer; +import org.codehaus.jackson.map.SerializerProvider; + +public class LBPoolSerializer extends JsonSerializer<LBPool>{ + + @Override + public void serialize(LBPool pool, JsonGenerator jGen, + SerializerProvider serializer) throws IOException, + JsonProcessingException { + jGen.writeStartObject(); + + jGen.writeStringField("name", pool.name); + jGen.writeStringField("id", pool.id); + jGen.writeStringField("vipId", pool.vipId); + + for (int i=0; i<pool.members.size(); i++) + jGen.writeStringField("pool", pool.members.get(i)); + + jGen.writeEndObject(); + } + +} diff --git a/src/main/java/net/floodlightcontroller/loadbalancer/LBStats.java b/src/main/java/net/floodlightcontroller/loadbalancer/LBStats.java new file mode 100644 index 0000000000000000000000000000000000000000..74e55d47df410f79046e6861dc8fd90f1d870b37 --- /dev/null +++ b/src/main/java/net/floodlightcontroller/loadbalancer/LBStats.java @@ -0,0 +1,22 @@ +package net.floodlightcontroller.loadbalancer; + +/** + * Data structure for Load Balancer based on + * Quantum proposal http://wiki.openstack.org/LBaaS/CoreResourceModel/proposal + * + * @author KC Wang + */ + +public class LBStats { + protected int bytesIn; + protected int bytesOut; + protected int activeConnections; + protected int totalConnections; + + public LBStats() { + bytesIn = 0; + bytesOut = 0; + activeConnections = 0; + totalConnections = 0; + } +} diff --git a/src/main/java/net/floodlightcontroller/loadbalancer/LBVip.java b/src/main/java/net/floodlightcontroller/loadbalancer/LBVip.java new file mode 100644 index 0000000000000000000000000000000000000000..fd8c0bd451ab87c75bdecbcc94016126006bb945 --- /dev/null +++ b/src/main/java/net/floodlightcontroller/loadbalancer/LBVip.java @@ -0,0 +1,61 @@ +package net.floodlightcontroller.loadbalancer; + +import java.util.ArrayList; + +import org.codehaus.jackson.map.annotate.JsonSerialize; + +import net.floodlightcontroller.loadbalancer.LoadBalancer.IPClient; +import net.floodlightcontroller.util.MACAddress; + +/** + * Data structure for Load Balancer based on + * Quantum proposal http://wiki.openstack.org/LBaaS/CoreResourceModel/proposal + * + * @author KC Wang + */ + +@JsonSerialize(using=LBVipSerializer.class) +public class LBVip { + protected String id; + protected String name; + protected String tenantId; + protected String netId; + protected int address; + protected byte protocol; + protected short lbMethod; + protected short port; + protected ArrayList<String> pools; + protected boolean sessionPersistence; + protected int connectionLimit; + protected short adminState; + protected short status; + + protected MACAddress proxyMac; + + public LBVip() { + this.id = String.valueOf((int) (Math.random()*10000)); + this.name = null; + this.tenantId = null; + this.netId = null; + this.address = 0; + this.protocol = 0; + this.lbMethod = 0; + this.port = 0; + this.pools = new ArrayList<String>(); + this.sessionPersistence = false; + this.connectionLimit = 0; + this.address = 0; + this.status = 0; + + this.proxyMac = MACAddress.valueOf("12:34:56:78:90:12"); + } + + public String pickPool(IPClient client) { + // for now, return the first pool; consider different pool choice policy later + if (pools.size() > 0) + return pools.get(0); + else + return null; + } + +} diff --git 
a/src/main/java/net/floodlightcontroller/loadbalancer/LBVipSerializer.java b/src/main/java/net/floodlightcontroller/loadbalancer/LBVipSerializer.java new file mode 100644 index 0000000000000000000000000000000000000000..acbecf614ce627efa89cd67b25871cdf8211d912 --- /dev/null +++ b/src/main/java/net/floodlightcontroller/loadbalancer/LBVipSerializer.java @@ -0,0 +1,27 @@ +package net.floodlightcontroller.loadbalancer; + +import java.io.IOException; +import org.codehaus.jackson.JsonGenerator; +import org.codehaus.jackson.JsonProcessingException; +import org.codehaus.jackson.map.JsonSerializer; +import org.codehaus.jackson.map.SerializerProvider; + +public class LBVipSerializer extends JsonSerializer<LBVip>{ + + @Override + public void serialize(LBVip vip, JsonGenerator jGen, + SerializerProvider serializer) throws IOException, + JsonProcessingException { + jGen.writeStartObject(); + + jGen.writeStringField("name", vip.name); + jGen.writeStringField("id", vip.id); + jGen.writeStringField("address", String.valueOf(vip.address)); + jGen.writeStringField("protocol", Byte.toString(vip.protocol)); + jGen.writeStringField("port", Short.toString(vip.port)); + + jGen.writeEndObject(); + } + + +} diff --git a/src/main/java/net/floodlightcontroller/loadbalancer/LoadBalancer.java b/src/main/java/net/floodlightcontroller/loadbalancer/LoadBalancer.java new file mode 100644 index 0000000000000000000000000000000000000000..a091ed8ca814bee942cb411393330205cca506a1 --- /dev/null +++ b/src/main/java/net/floodlightcontroller/loadbalancer/LoadBalancer.java @@ -0,0 +1,810 @@ +package net.floodlightcontroller.loadbalancer; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.UnsupportedEncodingException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Comparator; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; + +import org.apache.http.HttpResponse; +import org.apache.http.client.ClientProtocolException; +import org.apache.http.client.HttpClient; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.entity.StringEntity; +import org.apache.http.impl.client.DefaultHttpClient; +import org.apache.http.message.BasicHeader; +import org.apache.http.protocol.HTTP; +import org.json.simple.JSONObject; +import org.openflow.protocol.OFMessage; +import org.openflow.protocol.OFPacketIn; +import org.openflow.protocol.OFPacketOut; +import org.openflow.protocol.OFPort; +import org.openflow.protocol.OFType; +import org.openflow.protocol.action.OFAction; +import org.openflow.protocol.action.OFActionOutput; +import org.openflow.util.HexString; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import net.floodlightcontroller.core.FloodlightContext; +import net.floodlightcontroller.core.IFloodlightProviderService; +import net.floodlightcontroller.core.IOFMessageListener; +import net.floodlightcontroller.core.IOFSwitch; +import net.floodlightcontroller.core.module.FloodlightModuleContext; +import net.floodlightcontroller.core.module.FloodlightModuleException; +import net.floodlightcontroller.core.module.IFloodlightModule; +import net.floodlightcontroller.core.module.IFloodlightService; +import net.floodlightcontroller.counter.ICounterStoreService; +import net.floodlightcontroller.devicemanager.IDevice; +import net.floodlightcontroller.devicemanager.IDeviceService; +import 
net.floodlightcontroller.devicemanager.SwitchPort; +import net.floodlightcontroller.packet.ARP; +import net.floodlightcontroller.packet.Ethernet; +import net.floodlightcontroller.packet.IPacket; +import net.floodlightcontroller.packet.IPv4; +import net.floodlightcontroller.packet.TCP; +import net.floodlightcontroller.packet.UDP; +import net.floodlightcontroller.restserver.IRestApiService; +import net.floodlightcontroller.routing.IRoutingService; +import net.floodlightcontroller.routing.Route; +import net.floodlightcontroller.topology.ITopologyService; +import net.floodlightcontroller.topology.NodePortTuple; +import net.floodlightcontroller.util.MACAddress; +import net.floodlightcontroller.util.OFMessageDamper; + +/** + * A simple load balancer module for ping, tcp, and udp flows. This module is accessed + * via a REST API defined close to the OpenStack Quantum LBaaS (Load-balancer-as-a-Service) + * v1.0 API proposal. Since the proposal has not been finalized, no efforts have yet been + * made to confirm compatibility at this time. + * + * Limitations: + * - client records and static flows not purged after use, will exhaust switch flow tables over time + * - round robin policy among servers based on connections, not traffic volume + * - health monitoring feature not implemented yet + * + * @author kcwang + */ +public class LoadBalancer implements IFloodlightModule, + ILoadBalancerService, IOFMessageListener { + + protected static Logger log = LoggerFactory.getLogger(LoadBalancer.class); + + // Our dependencies + protected IFloodlightProviderService floodlightProvider; + protected IRestApiService restApi; + + protected ICounterStoreService counterStore; + protected OFMessageDamper messageDamper; + protected IDeviceService deviceManager; + protected IRoutingService routingEngine; + protected ITopologyService topology; + + protected HashMap<String, LBVip> vips; + protected HashMap<String, LBPool> pools; + protected HashMap<String, LBMember> members; + protected HashMap<Integer, String> vipIpToId; + protected HashMap<Integer, MACAddress> vipIpToMac; + protected HashMap<Integer, String> memberIpToId; + protected HashMap<IPClient, LBMember> clientToMember; + + //Copied from Forwarding with message damper routine for pushing proxy ARP + protected static int OFMESSAGE_DAMPER_CAPACITY = 10000; // max number of messages to track + protected static int OFMESSAGE_DAMPER_TIMEOUT = 250; // ms + // Comparator for sorting by SwitchCluster + public Comparator<SwitchPort> clusterIdComparator = + new Comparator<SwitchPort>() { + @Override + public int compare(SwitchPort d1, SwitchPort d2) { + Long d1ClusterId = + topology.getL2DomainId(d1.getSwitchDPID()); + Long d2ClusterId = + topology.getL2DomainId(d2.getSwitchDPID()); + return d1ClusterId.compareTo(d2ClusterId); + } + }; + + // data structure for storing connected clients + public class IPClient { + int ipAddress; + byte nw_proto; + short srcPort; // tcp/udp src port. 
icmp type (OFMatch convention) + short targetPort; // tcp/udp dst port, icmp code (OFMatch convention) + + public IPClient() { + ipAddress = 0; + nw_proto = 0; + srcPort = -1; + targetPort = -1; + } + } + + @Override + public String getName() { + return "loadbalancer"; + } + + @Override + public boolean isCallbackOrderingPrereq(OFType type, String name) { + return (type.equals(OFType.PACKET_IN) && + (name.equals("topology") || + name.equals("devicemanager"))); + } + + @Override + public boolean isCallbackOrderingPostreq(OFType type, String name) { + return (type.equals(OFType.PACKET_IN) && name.equals("forwarding")); + } + + @Override + public net.floodlightcontroller.core.IListener.Command + receive(IOFSwitch sw, OFMessage msg, FloodlightContext cntx) { + switch (msg.getType()) { + case PACKET_IN: + return processPacketIn(sw, (OFPacketIn)msg, cntx); + default: + break; + } + log.warn("Received unexpected message {}", msg); + return Command.CONTINUE; + } + + private net.floodlightcontroller.core.IListener.Command + processPacketIn(IOFSwitch sw, OFPacketIn pi, + FloodlightContext cntx) { + + Ethernet eth = IFloodlightProviderService.bcStore.get(cntx, + IFloodlightProviderService.CONTEXT_PI_PAYLOAD); + IPacket pkt = eth.getPayload(); + + if (eth.isBroadcast() || eth.isMulticast()) { + // handle ARP for VIP + if (eth.getEtherType() == Ethernet.TYPE_ARP) { + // retrieve arp to determine target IP address + ARP arpRequest = (ARP) eth.getPayload(); + + int targetProtocolAddress = IPv4.toIPv4Address(arpRequest + .getTargetProtocolAddress()); + + if (vipIpToId.containsKey(targetProtocolAddress)) { + String vipId = vipIpToId.get(targetProtocolAddress); + vipProxyArpReply(sw, pi, cntx, vipId); + return Command.STOP; + } + } + } else { + // currently only load balance IPv4 packets - no-op for other traffic + if (pkt instanceof IPv4) { + IPv4 ip_pkt = (IPv4) pkt; + + // If match Vip and port, check pool and choose member + int destIpAddress = ip_pkt.getDestinationAddress(); + + if (vipIpToId.containsKey(destIpAddress)){ + IPClient client = new IPClient(); + client.ipAddress = ip_pkt.getSourceAddress(); + client.nw_proto = ip_pkt.getProtocol(); + if (client.nw_proto == IPv4.PROTOCOL_TCP) { + TCP tcp_pkt = (TCP) ip_pkt.getPayload(); + client.srcPort = tcp_pkt.getSourcePort(); + client.targetPort = tcp_pkt.getDestinationPort(); + } + if (client.nw_proto == IPv4.PROTOCOL_UDP) { + UDP udp_pkt = (UDP) ip_pkt.getPayload(); + client.srcPort = udp_pkt.getSourcePort(); + client.targetPort = udp_pkt.getDestinationPort(); + } + if (client.nw_proto == IPv4.PROTOCOL_ICMP) { + client.srcPort = 8; + client.targetPort = 0; + } + + LBVip vip = vips.get(vipIpToId.get(destIpAddress)); + LBPool pool = pools.get(vip.pickPool(client)); + LBMember member = members.get(pool.pickMember(client)); + + // for chosen member, check device manager and find and push routes, in both directions + pushBidirectionalVipRoutes(sw, pi, cntx, client, member); + + // packet out based on table rule + pushPacket(pkt, sw, pi.getBufferId(), pi.getInPort(), OFPort.OFPP_TABLE.getValue(), + cntx, true); + + return Command.STOP; + } + } + } + // bypass non-load-balanced traffic for normal processing (forwarding) + return Command.CONTINUE; + } + + /** + * used to send proxy Arp for load balanced service requests + * @param IOFSwitch sw + * @param OFPacketIn pi + * @param FloodlightContext cntx + * @param String vipId + */ + + protected void vipProxyArpReply(IOFSwitch sw, OFPacketIn pi, FloodlightContext cntx, String vipId) { + 
log.debug("vipProxyArpReply"); + + Ethernet eth = IFloodlightProviderService.bcStore.get(cntx, + IFloodlightProviderService.CONTEXT_PI_PAYLOAD); + + // retrieve original arp to determine host configured gw IP address + ARP arpRequest = (ARP) eth.getPayload(); + + // have to do proxy arp reply since at this point we cannot determine the requesting application type + byte[] vipProxyMacBytes = vips.get(vipId).proxyMac.toBytes(); + + // generate proxy ARP reply + IPacket arpReply = new Ethernet() + .setSourceMACAddress(vipProxyMacBytes) + .setDestinationMACAddress(eth.getSourceMACAddress()) + .setEtherType(Ethernet.TYPE_ARP) + .setVlanID(eth.getVlanID()) + .setPriorityCode(eth.getPriorityCode()) + .setPayload( + new ARP() + .setHardwareType(ARP.HW_TYPE_ETHERNET) + .setProtocolType(ARP.PROTO_TYPE_IP) + .setHardwareAddressLength((byte) 6) + .setProtocolAddressLength((byte) 4) + .setOpCode(ARP.OP_REPLY) + .setSenderHardwareAddress(vipProxyMacBytes) + .setSenderProtocolAddress( + arpRequest.getTargetProtocolAddress()) + .setTargetHardwareAddress( + eth.getSourceMACAddress()) + .setTargetProtocolAddress( + arpRequest.getSenderProtocolAddress())); + + // push ARP reply out + pushPacket(arpReply, sw, OFPacketOut.BUFFER_ID_NONE, OFPort.OFPP_NONE.getValue(), + pi.getInPort(), cntx, true); + log.debug("proxy ARP reply pushed as {}", IPv4.fromIPv4Address(vips.get(vipId).address)); + + return; + } + + /** + * used to push any packet - borrowed routine from Forwarding + * + * @param OFPacketIn pi + * @param IOFSwitch sw + * @param int bufferId + * @param short inPort + * @param short outPort + * @param FloodlightContext cntx + * @param boolean flush + */ + public void pushPacket(IPacket packet, + IOFSwitch sw, + int bufferId, + short inPort, + short outPort, + FloodlightContext cntx, + boolean flush) { + if (log.isTraceEnabled()) { + log.trace("PacketOut srcSwitch={} inPort={} outPort={}", + new Object[] {sw, inPort, outPort}); + } + + OFPacketOut po = + (OFPacketOut) floodlightProvider.getOFMessageFactory() + .getMessage(OFType.PACKET_OUT); + + // set actions + List<OFAction> actions = new ArrayList<OFAction>(); + actions.add(new OFActionOutput(outPort, (short) 0xffff)); + + po.setActions(actions) + .setActionsLength((short) OFActionOutput.MINIMUM_LENGTH); + short poLength = + (short) (po.getActionsLength() + OFPacketOut.MINIMUM_LENGTH); + + // set buffer_id, in_port + po.setBufferId(bufferId); + po.setInPort(inPort); + + // set data - only if buffer_id == -1 + if (po.getBufferId() == OFPacketOut.BUFFER_ID_NONE) { + if (packet == null) { + log.error("BufferId is not set and packet data is null. " + + "Cannot send packetOut. 
" + + "srcSwitch={} inPort={} outPort={}", + new Object[] {sw, inPort, outPort}); + return; + } + byte[] packetData = packet.serialize(); + poLength += packetData.length; + po.setPacketData(packetData); + } + + po.setLength(poLength); + + try { + counterStore.updatePktOutFMCounterStoreLocal(sw, po); + messageDamper.write(sw, po, cntx, flush); + } catch (IOException e) { + log.error("Failure writing packet out", e); + } + } + + /** + * used to find and push in-bound and out-bound routes using StaticFlowEntryPusher + * @param IOFSwitch sw + * @param OFPacketIn pi + * @param FloodlightContext cntx + * @param IPClient client + * @param LBMember member + */ + protected void pushBidirectionalVipRoutes(IOFSwitch sw, OFPacketIn pi, FloodlightContext cntx, IPClient client, LBMember member) { + + // borrowed code from Forwarding to retrieve src and dst device entities + // Check if we have the location of the destination + IDevice srcDevice = null; + IDevice dstDevice = null; + + // retrieve all known devices + Collection<? extends IDevice> allDevices = deviceManager + .getAllDevices(); + + for (IDevice d : allDevices) { + for (int j = 0; j < d.getIPv4Addresses().length; j++) { + if (srcDevice == null && client.ipAddress == d.getIPv4Addresses()[j]) + srcDevice = d; + if (dstDevice == null && member.address == d.getIPv4Addresses()[j]) { + dstDevice = d; + member.macString = dstDevice.getMACAddressString(); + } + if (srcDevice != null && dstDevice != null) + break; + } + } + + Long srcIsland = topology.getL2DomainId(sw.getId()); + + if (srcIsland == null) { + log.debug("No openflow island found for source {}/{}", + sw.getStringId(), pi.getInPort()); + return; + } + + // Validate that we have a destination known on the same island + // Validate that the source and destination are not on the same switchport + boolean on_same_island = false; + boolean on_same_if = false; + for (SwitchPort dstDap : dstDevice.getAttachmentPoints()) { + long dstSwDpid = dstDap.getSwitchDPID(); + Long dstIsland = topology.getL2DomainId(dstSwDpid); + if ((dstIsland != null) && dstIsland.equals(srcIsland)) { + on_same_island = true; + if ((sw.getId() == dstSwDpid) && + (pi.getInPort() == dstDap.getPort())) { + on_same_if = true; + } + break; + } + } + + if (!on_same_island) { + // Flood since we don't know the dst device + if (log.isTraceEnabled()) { + log.trace("No first hop island found for destination " + + "device {}, Action = flooding", dstDevice); + } + return; + } + + if (on_same_if) { + if (log.isTraceEnabled()) { + log.trace("Both source and destination are on the same " + + "switch/port {}/{}, Action = NOP", + sw.toString(), pi.getInPort()); + } + return; + } + + // Install all the routes where both src and dst have attachment + // points. Since the lists are stored in sorted order we can + // traverse the attachment points in O(m+n) time + SwitchPort[] srcDaps = srcDevice.getAttachmentPoints(); + Arrays.sort(srcDaps, clusterIdComparator); + SwitchPort[] dstDaps = dstDevice.getAttachmentPoints(); + Arrays.sort(dstDaps, clusterIdComparator); + + int iSrcDaps = 0, iDstDaps = 0; + + // following Forwarding's same routing routine, retrieve both in-bound and out-bound routes for + // all clusters. 
+ while ((iSrcDaps < srcDaps.length) && (iDstDaps < dstDaps.length)) { + SwitchPort srcDap = srcDaps[iSrcDaps]; + SwitchPort dstDap = dstDaps[iDstDaps]; + Long srcCluster = + topology.getL2DomainId(srcDap.getSwitchDPID()); + Long dstCluster = + topology.getL2DomainId(dstDap.getSwitchDPID()); + + int srcVsDest = srcCluster.compareTo(dstCluster); + if (srcVsDest == 0) { + if (!srcDap.equals(dstDap) && + (srcCluster != null) && + (dstCluster != null)) { + Route routeIn = + routingEngine.getRoute(srcDap.getSwitchDPID(), + (short)srcDap.getPort(), + dstDap.getSwitchDPID(), + (short)dstDap.getPort()); + Route routeOut = + routingEngine.getRoute(dstDap.getSwitchDPID(), + (short)dstDap.getPort(), + srcDap.getSwitchDPID(), + (short)srcDap.getPort()); + + // use static flow entry pusher to push flow mod along in and out path + // in: match src client (ip, port), rewrite dest from vip ip/port to member ip/port, forward + // out: match dest client (ip, port), rewrite src from member ip/port to vip ip/port, forward + + if (routeIn != null) { + pushStaticVipRoute(true, routeIn, client, member, sw.getId()); + } + + if (routeOut != null) { + pushStaticVipRoute(false, routeOut, client, member, sw.getId()); + } + + } + iSrcDaps++; + iDstDaps++; + } else if (srcVsDest < 0) { + iSrcDaps++; + } else { + iDstDaps++; + } + } + return; + } + + /** + * used to push given route using static flow entry pusher + * @param boolean inBound + * @param Route route + * @param IPClient client + * @param LBMember member + * @param long pinSwitch + */ + @SuppressWarnings("unchecked") + public void pushStaticVipRoute(boolean inBound, Route route, IPClient client, LBMember member, long pinSwitch) { + List<NodePortTuple> path = route.getPath(); + if (path.size()>0) { + + for (int i = 0; i < path.size(); i+=2) { + JSONObject json = new JSONObject(); + long sw = path.get(i).getNodeId(); + + json.put("switch", HexString.toHexString(sw)); + if (inBound) { + json.put("name","inbound-vip-"+ member.vipId+"client-"+client.ipAddress+"-port-"+client.targetPort + +"srcswitch-"+path.get(0).getNodeId()); + json.put("src-ip",IPv4.fromIPv4Address(client.ipAddress)); + json.put("protocol",String.valueOf(client.nw_proto)); + json.put("src-port",String.valueOf(client.srcPort & 0xffff)); + json.put("ether-type","0x800"); + json.put("priority","32768"); + json.put("ingress-port",String.valueOf(path.get(i).getPortId())); + json.put("active","true"); + if (sw == pinSwitch) { + json.put("actions","set-dst-ip="+IPv4.fromIPv4Address(member.address)+"," + + "set-dst-mac="+member.macString+"," + + "output="+path.get(i+1).getPortId()); + } else { + json.put("actions", + "output="+path.get(i+1).getPortId()); + } + } else { + json.put("name","outbound-vip-"+ member.vipId+"client-"+client.ipAddress+"-port-"+client.targetPort + +"srcswitch-"+path.get(0).getNodeId()); + json.put("dst-ip",IPv4.fromIPv4Address(client.ipAddress)); + json.put("protocol",String.valueOf(client.nw_proto)); + json.put("dst-port",String.valueOf(client.srcPort & 0xffff)); + json.put("ether-type","0x800"); + json.put("priority","32768"); + json.put("ingress-port",String.valueOf(path.get(i).getPortId())); + json.put("active","true"); + if (sw == pinSwitch) { + json.put("actions","set-src-ip="+IPv4.fromIPv4Address(vips.get(member.vipId).address)+"," + + "set-src-mac="+vips.get(member.vipId).proxyMac.toString()+"," + + "output="+path.get(i+1).getPortId()); + } else { + json.put("actions", + "output="+path.get(i+1).getPortId()); + } + + } + try { + HttpClient httpclient = new 
DefaultHttpClient(); + HttpPost httpPost = new HttpPost("http://127.0.0.1:8080/wm/staticflowentrypusher/json"); + StringEntity se = new StringEntity(json.toString()); + se.setContentEncoding(new BasicHeader(HTTP.CONTENT_TYPE, "application/json")); + httpPost.setEntity(se); + + HttpResponse response = null; + + try { + response = httpclient.execute(httpPost); + } catch (ClientProtocolException cliente) { + cliente.printStackTrace(); + } catch (IOException ioe) { + ioe.printStackTrace(); + } + + BufferedReader rd = null; + try { + if (response !=null) + rd = new BufferedReader(new InputStreamReader(response.getEntity().getContent())); + } catch (IllegalStateException statee) { + statee.printStackTrace(); + } catch (IOException ioe) { + ioe.printStackTrace(); + } + + String line = ""; + try { + if (rd!= null) { + while ((line = rd.readLine()) != null) { + System.out.println(line); + } + } + } catch (IOException e) { + e.printStackTrace(); + } + } catch (UnsupportedEncodingException e) { + e.printStackTrace(); + } + } + } + + return; + } + + + @Override + public Collection<LBVip> listVips() { + return vips.values(); + } + + @Override + public Collection<LBVip> listVip(String vipId) { + Collection<LBVip> result = new HashSet<LBVip>(); + result.add(vips.get(vipId)); + return result; } + + @Override + public LBVip createVip(LBVip vip) { + if (vip == null) + vip = new LBVip(); + + vips.put(vip.id, vip); + vipIpToId.put(vip.address, vip.id); + vipIpToMac.put(vip.address, vip.proxyMac); + + return vip; + } + + @Override + public LBVip updateVip(LBVip vip) { + vips.put(vip.id, vip); + return vip; + } + + @Override + public int removeVip(String vipId) { + if(vips.containsKey(vipId)){ + vips.remove(vipId); + return 0; + } else { + return -1; + } + } + + @Override + public Collection<LBPool> listPools() { + return pools.values(); + } + + @Override + public Collection<LBPool> listPool(String poolId) { + Collection<LBPool> result = new HashSet<LBPool>(); + result.add(pools.get(poolId)); + return result; + } + + @Override + public LBPool createPool(LBPool pool) { + if (pool==null) + pool = new LBPool(); + + pools.put(pool.id, pool); + if (pool.vipId != null) + vips.get(pool.vipId).pools.add(pool.id); + else + log.error("pool must be specified with non-null vip-id"); + + return pool; + } + + @Override + public LBPool updatePool(LBPool pool) { + pools.put(pool.id, pool); + return null; + } + + @Override + public int removePool(String poolId) { + if(pools.containsKey(poolId)){ + pools.remove(poolId); + return 0; + } else { + return -1; + } + } + + @Override + public Collection<LBMember> listMembers() { + return members.values(); + } + + @Override + public Collection<LBMember> listMember(String memberId) { + Collection<LBMember> result = new HashSet<LBMember>(); + result.add(members.get(memberId)); + return result; + } + + @Override + public Collection<LBMember> listMembersByPool(String poolId) { + Collection<LBMember> result = new HashSet<LBMember>(); + + if(pools.containsKey(poolId)) { + ArrayList<String> memberIds = pools.get(poolId).members; + for (int i=0; i<memberIds.size(); i++) + result.add(members.get(memberIds.get(i))); + } + return result; + } + + @Override + public LBMember createMember(LBMember member) { + if (member == null) + member = new LBMember(); + + if (member.poolId != null && pools.get(member.poolId) != null) { + member.vipId = pools.get(member.poolId).vipId; + members.put(member.id, member); + pools.get(member.poolId).members.add(member.id); + memberIpToId.put(member.address, member.id); 
+ } else + log.error("member must be specified with non-null pool_id"); + + return member; + } + + @Override + public LBMember updateMember(LBMember member) { + members.put(member.id, member); + return member; + } + + @Override + public int removeMember(String memberId) { + if(members.containsKey(memberId)){ + members.remove(memberId); + return 0; + } else { + return -1; + } + } + + @Override + public Collection<LBMonitor> listMonitors() { + // TODO Auto-generated method stub + return null; + } + + @Override + public Collection<LBMonitor> listMonitor(String monitorId) { + // TODO Auto-generated method stub + return null; + } + + @Override + public LBMonitor createMonitor(LBMonitor monitor) { + // TODO Auto-generated method stub + return null; + } + + @Override + public LBMonitor updateMonitor(LBMonitor monitor) { + // TODO Auto-generated method stub + return null; + } + + @Override + public int removeMonitor(String monitorId) { + // TODO Auto-generated method stub + return 0; + } + + @Override + public Collection<Class<? extends IFloodlightService>> + getModuleServices() { + Collection<Class<? extends IFloodlightService>> l = + new ArrayList<Class<? extends IFloodlightService>>(); + l.add(ILoadBalancerService.class); + return l; + } + + @Override + public Map<Class<? extends IFloodlightService>, IFloodlightService> + getServiceImpls() { + Map<Class<? extends IFloodlightService>, IFloodlightService> m = + new HashMap<Class<? extends IFloodlightService>, + IFloodlightService>(); + m.put(ILoadBalancerService.class, this); + return m; + } + + @Override + public Collection<Class<? extends IFloodlightService>> + getModuleDependencies() { + Collection<Class<? extends IFloodlightService>> l = + new ArrayList<Class<? extends IFloodlightService>>(); + l.add(IFloodlightProviderService.class); + l.add(IRestApiService.class); + l.add(ICounterStoreService.class); + l.add(IDeviceService.class); + l.add(ITopologyService.class); + l.add(IRoutingService.class); + + return l; + } + + @Override + public void init(FloodlightModuleContext context) + throws FloodlightModuleException { + floodlightProvider = context.getServiceImpl(IFloodlightProviderService.class); + restApi = context.getServiceImpl(IRestApiService.class); + counterStore = context.getServiceImpl(ICounterStoreService.class); + deviceManager = context.getServiceImpl(IDeviceService.class); + routingEngine = context.getServiceImpl(IRoutingService.class); + topology = context.getServiceImpl(ITopologyService.class); + + messageDamper = new OFMessageDamper(OFMESSAGE_DAMPER_CAPACITY, + EnumSet.of(OFType.FLOW_MOD), + OFMESSAGE_DAMPER_TIMEOUT); + + vips = new HashMap<String, LBVip>(); + pools = new HashMap<String, LBPool>(); + members = new HashMap<String, LBMember>(); + vipIpToId = new HashMap<Integer, String>(); + vipIpToMac = new HashMap<Integer, MACAddress>(); + memberIpToId = new HashMap<Integer, String>(); + } + + @Override + public void startUp(FloodlightModuleContext context) { + floodlightProvider.addOFMessageListener(OFType.PACKET_IN, this); + restApi.addRestletRoutable(new LoadBalancerWebRoutable()); + } + +} diff --git a/src/main/java/net/floodlightcontroller/loadbalancer/LoadBalancerWebRoutable.java b/src/main/java/net/floodlightcontroller/loadbalancer/LoadBalancerWebRoutable.java new file mode 100644 index 0000000000000000000000000000000000000000..170346aa13529cd9032a834605a9cd1db130d410 --- /dev/null +++ b/src/main/java/net/floodlightcontroller/loadbalancer/LoadBalancerWebRoutable.java @@ -0,0 +1,33 @@ +package 
net.floodlightcontroller.loadbalancer; + +import org.restlet.Context; +import org.restlet.Restlet; +import org.restlet.routing.Router; + +import net.floodlightcontroller.restserver.RestletRoutable; +import net.floodlightcontroller.virtualnetwork.NoOp; + +public class LoadBalancerWebRoutable implements RestletRoutable { + + @Override + public Restlet getRestlet(Context context) { + Router router = new Router(context); + router.attach("/vips/", VipsResource.class); // GET, POST + router.attach("/vips/{vip}", VipsResource.class); // GET, PUT, DELETE + router.attach("/pools/", PoolsResource.class); // GET, POST + router.attach("/pools/{pool}", PoolsResource.class); // GET, PUT, DELETE + router.attach("/members/", MembersResource.class); // GET, POST + router.attach("/members/{member}", MembersResource.class); // GET, PUT, DELETE + router.attach("/pools/{pool}/members", PoolMemberResource.class); //GET + router.attach("/health_monitors/", MonitorsResource.class); //GET, POST + router.attach("/health_monitors/{monitor}", MonitorsResource.class); //GET, PUT, DELETE + router.attachDefault(NoOp.class); + return router; + } + + @Override + public String basePath() { + return "/quantum/v1.0"; + } + +} diff --git a/src/main/java/net/floodlightcontroller/loadbalancer/MembersResource.java b/src/main/java/net/floodlightcontroller/loadbalancer/MembersResource.java new file mode 100644 index 0000000000000000000000000000000000000000..0cd9a3d5668f8bb0a75ea34a54d08e1d5e3eec38 --- /dev/null +++ b/src/main/java/net/floodlightcontroller/loadbalancer/MembersResource.java @@ -0,0 +1,133 @@ +package net.floodlightcontroller.loadbalancer; + +import java.io.IOException; +import java.util.Collection; + +import net.floodlightcontroller.packet.IPv4; + +import org.codehaus.jackson.JsonParseException; +import org.codehaus.jackson.JsonParser; +import org.codehaus.jackson.JsonToken; +import org.codehaus.jackson.map.MappingJsonFactory; +import org.restlet.resource.Delete; +import org.restlet.resource.Get; +import org.restlet.resource.Post; +import org.restlet.resource.Put; +import org.restlet.resource.ServerResource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class MembersResource extends ServerResource { + + protected static Logger log = LoggerFactory.getLogger(MembersResource.class); + + @Get("json") + public Collection <LBMember> retrieve() { + ILoadBalancerService lbs = + (ILoadBalancerService)getContext().getAttributes(). + get(ILoadBalancerService.class.getCanonicalName()); + + String memberId = (String) getRequestAttributes().get("member"); + if (memberId!=null) + return lbs.listMember(memberId); + else + return lbs.listMembers(); + } + + @Put + @Post + public LBMember createMember(String postData) { + + LBMember member=null; + try { + member=jsonToMember(postData); + } catch (IOException e) { + log.error("Could not parse JSON {}", e.getMessage()); + } + + ILoadBalancerService lbs = + (ILoadBalancerService)getContext().getAttributes(). + get(ILoadBalancerService.class.getCanonicalName()); + + String memberId = (String) getRequestAttributes().get("member"); + if (memberId != null) + return lbs.updateMember(member); + else + return lbs.createMember(member); + } + + @Delete + public int removeMember() { + + String memberId = (String) getRequestAttributes().get("member"); + + ILoadBalancerService lbs = + (ILoadBalancerService)getContext().getAttributes(). 
+ get(ILoadBalancerService.class.getCanonicalName()); + + return lbs.removeMember(memberId); + } + + protected LBMember jsonToMember(String json) throws IOException { + MappingJsonFactory f = new MappingJsonFactory(); + JsonParser jp; + LBMember member = new LBMember(); + + try { + jp = f.createJsonParser(json); + } catch (JsonParseException e) { + throw new IOException(e); + } + + jp.nextToken(); + if (jp.getCurrentToken() != JsonToken.START_OBJECT) { + throw new IOException("Expected START_OBJECT"); + } + + while (jp.nextToken() != JsonToken.END_OBJECT) { + if (jp.getCurrentToken() != JsonToken.FIELD_NAME) { + throw new IOException("Expected FIELD_NAME"); + } + + String n = jp.getCurrentName(); + jp.nextToken(); + if (jp.getText().equals("")) + continue; + if (n.equals("id")) { + member.id = jp.getText(); + continue; + } else + if (n.equals("address")) { + member.address = IPv4.toIPv4Address(jp.getText()); + continue; + } else + if (n.equals("port")) { + member.port = Short.parseShort(jp.getText()); + continue; + } else + if (n.equals("connection_limit")) { + member.connectionLimit = Integer.parseInt(jp.getText()); + continue; + } else + if (n.equals("admin_state")) { + member.adminState = Short.parseShort(jp.getText()); + continue; + } else + if (n.equals("status")) { + member.status = Short.parseShort(jp.getText()); + continue; + } else + if (n.equals("pool_id")) { + member.poolId = jp.getText(); + continue; + } + + log.warn("Unrecognized field {} in " + + "parsing Members", + jp.getText()); + } + jp.close(); + + return member; + } +} diff --git a/src/main/java/net/floodlightcontroller/loadbalancer/MonitorsResource.java b/src/main/java/net/floodlightcontroller/loadbalancer/MonitorsResource.java new file mode 100644 index 0000000000000000000000000000000000000000..a726325454dda0de57b98e1ed4f1d9c409eb81b6 --- /dev/null +++ b/src/main/java/net/floodlightcontroller/loadbalancer/MonitorsResource.java @@ -0,0 +1,156 @@ +package net.floodlightcontroller.loadbalancer; + +import java.io.IOException; +import java.util.Collection; + +import org.codehaus.jackson.JsonParseException; +import org.codehaus.jackson.JsonParser; +import org.codehaus.jackson.JsonToken; +import org.codehaus.jackson.map.MappingJsonFactory; +import org.restlet.resource.Delete; +import org.restlet.resource.Get; +import org.restlet.resource.Post; +import org.restlet.resource.Put; +import org.restlet.resource.ServerResource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class MonitorsResource extends ServerResource { + protected static Logger log = LoggerFactory.getLogger(MonitorsResource.class); + + @Get("json") + public Collection <LBMonitor> retrieve() { + ILoadBalancerService lbs = + (ILoadBalancerService)getContext().getAttributes(). + get(ILoadBalancerService.class.getCanonicalName()); + + String monitorId = (String) getRequestAttributes().get("monitor"); + if (monitorId!=null) + return lbs.listMonitor(monitorId); + else + return lbs.listMonitors(); + } + + @Put + @Post + public LBMonitor createMonitor(String postData) { + + LBMonitor monitor=null; + try { + monitor=jsonToMonitor(postData); + } catch (IOException e) { + log.error("Could not parse JSON {}", e.getMessage()); + } + + ILoadBalancerService lbs = + (ILoadBalancerService)getContext().getAttributes(). 
+ get(ILoadBalancerService.class.getCanonicalName()); + + String monitorId = (String) getRequestAttributes().get("monitor"); + if (monitorId != null) + return lbs.updateMonitor(monitor); + else + return lbs.createMonitor(monitor); + } + + @Delete + public int removeMonitor() { + + String monitorId = (String) getRequestAttributes().get("monitor"); + + ILoadBalancerService lbs = + (ILoadBalancerService)getContext().getAttributes(). + get(ILoadBalancerService.class.getCanonicalName()); + + return lbs.removeMonitor(monitorId); + } + + protected LBMonitor jsonToMonitor(String json) throws IOException { + MappingJsonFactory f = new MappingJsonFactory(); + JsonParser jp; + LBMonitor monitor = new LBMonitor(); + + try { + jp = f.createJsonParser(json); + } catch (JsonParseException e) { + throw new IOException(e); + } + + jp.nextToken(); + if (jp.getCurrentToken() != JsonToken.START_OBJECT) { + throw new IOException("Expected START_OBJECT"); + } + + while (jp.nextToken() != JsonToken.END_OBJECT) { + if (jp.getCurrentToken() != JsonToken.FIELD_NAME) { + throw new IOException("Expected FIELD_NAME"); + } + + String n = jp.getCurrentName(); + jp.nextToken(); + if (jp.getText().equals("")) + continue; + else if (n.equals("monitor")) { + while (jp.nextToken() != JsonToken.END_OBJECT) { + String field = jp.getCurrentName(); + + if (field.equals("id")) { + monitor.id = jp.getText(); + continue; + } + if (field.equals("name")) { + monitor.name = jp.getText(); + continue; + } + if (field.equals("type")) { + monitor.type = Short.parseShort(jp.getText()); + continue; + } + if (field.equals("delay")) { + monitor.delay = Short.parseShort(jp.getText()); + continue; + } + if (field.equals("timeout")) { + monitor.timeout = Short.parseShort(jp.getText()); + continue; + } + if (field.equals("attempts_before_deactivation")) { + monitor.attemptsBeforeDeactivation = Short.parseShort(jp.getText()); + continue; + } + if (field.equals("network_id")) { + monitor.netId = jp.getText(); + continue; + } + if (field.equals("address")) { + monitor.address = Integer.parseInt(jp.getText()); + continue; + } + if (field.equals("protocol")) { + monitor.protocol = Byte.parseByte(jp.getText()); + continue; + } + if (field.equals("port")) { + monitor.port = Short.parseShort(jp.getText()); + continue; + } + if (field.equals("admin_state")) { + monitor.adminState = Short.parseShort(jp.getText()); + continue; + } + if (field.equals("status")) { + monitor.status = Short.parseShort(jp.getText()); + continue; + } + + log.warn("Unrecognized field {} in " + + "parsing Vips", + jp.getText()); + } + } + } + jp.close(); + + return monitor; + } +} diff --git a/src/main/java/net/floodlightcontroller/loadbalancer/PoolMemberResource.java b/src/main/java/net/floodlightcontroller/loadbalancer/PoolMemberResource.java new file mode 100644 index 0000000000000000000000000000000000000000..21f0444122e574c3fa0a6e289c292ee66861a854 --- /dev/null +++ b/src/main/java/net/floodlightcontroller/loadbalancer/PoolMemberResource.java @@ -0,0 +1,25 @@ +package net.floodlightcontroller.loadbalancer; + +import java.util.Collection; + +import org.restlet.resource.Get; +import org.restlet.resource.ServerResource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class PoolMemberResource extends ServerResource { + protected static Logger log = LoggerFactory.getLogger(PoolMemberResource.class); + + @Get("json") + public Collection <LBMember> retrieve() { + ILoadBalancerService lbs = + (ILoadBalancerService)getContext().getAttributes(). 
+ get(ILoadBalancerService.class.getCanonicalName()); + + String poolId = (String) getRequestAttributes().get("pool"); + if (poolId!=null) + return lbs.listMembersByPool(poolId); + else + return null; + } +} diff --git a/src/main/java/net/floodlightcontroller/loadbalancer/PoolsResource.java b/src/main/java/net/floodlightcontroller/loadbalancer/PoolsResource.java new file mode 100644 index 0000000000000000000000000000000000000000..9efb50c451bb7e976fdf18b114af7d90bb0bc88b --- /dev/null +++ b/src/main/java/net/floodlightcontroller/loadbalancer/PoolsResource.java @@ -0,0 +1,143 @@ +package net.floodlightcontroller.loadbalancer; + +import java.io.IOException; +import java.util.Collection; + +import net.floodlightcontroller.packet.IPv4; + +import org.codehaus.jackson.JsonParseException; +import org.codehaus.jackson.JsonParser; +import org.codehaus.jackson.JsonToken; +import org.codehaus.jackson.map.MappingJsonFactory; +import org.restlet.resource.Delete; +import org.restlet.resource.Get; +import org.restlet.resource.Post; +import org.restlet.resource.Put; +import org.restlet.resource.ServerResource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class PoolsResource extends ServerResource { + + protected static Logger log = LoggerFactory.getLogger(PoolsResource.class); + + @Get("json") + public Collection <LBPool> retrieve() { + ILoadBalancerService lbs = + (ILoadBalancerService)getContext().getAttributes(). + get(ILoadBalancerService.class.getCanonicalName()); + + String poolId = (String) getRequestAttributes().get("pool"); + if (poolId!=null) + return lbs.listPool(poolId); + else + return lbs.listPools(); + } + + @Put + @Post + public LBPool createPool(String postData) { + + LBPool pool=null; + try { + pool=jsonToPool(postData); + } catch (IOException e) { + log.error("Could not parse JSON {}", e.getMessage()); + } + + ILoadBalancerService lbs = + (ILoadBalancerService)getContext().getAttributes(). + get(ILoadBalancerService.class.getCanonicalName()); + + String poolId = (String) getRequestAttributes().get("pool"); + if (poolId != null) + return lbs.updatePool(pool); + else + return lbs.createPool(pool); + } + + @Delete + public int removePool() { + + String poolId = (String) getRequestAttributes().get("pool"); + + ILoadBalancerService lbs = + (ILoadBalancerService)getContext().getAttributes(). 
+ get(ILoadBalancerService.class.getCanonicalName()); + + return lbs.removePool(poolId); + } + + protected LBPool jsonToPool(String json) throws IOException { + if (json==null) return null; + + MappingJsonFactory f = new MappingJsonFactory(); + JsonParser jp; + LBPool pool = new LBPool(); + + try { + jp = f.createJsonParser(json); + } catch (JsonParseException e) { + throw new IOException(e); + } + + jp.nextToken(); + if (jp.getCurrentToken() != JsonToken.START_OBJECT) { + throw new IOException("Expected START_OBJECT"); + } + + while (jp.nextToken() != JsonToken.END_OBJECT) { + if (jp.getCurrentToken() != JsonToken.FIELD_NAME) { + throw new IOException("Expected FIELD_NAME"); + } + + String n = jp.getCurrentName(); + jp.nextToken(); + if (jp.getText().equals("")) + continue; + if (n.equals("id")) { + pool.id = jp.getText(); + continue; + } + if (n.equals("tenant_id")) { + pool.tenantId = jp.getText(); + continue; + } + if (n.equals("name")) { + pool.name = jp.getText(); + continue; + } + if (n.equals("network_id")) { + pool.netId = jp.getText(); + continue; + } + if (n.equals("lb_method")) { + pool.lbMethod = Short.parseShort(jp.getText()); + continue; + } + if (n.equals("protocol")) { + String tmp = jp.getText(); + if (tmp.equalsIgnoreCase("TCP")) { + pool.protocol = IPv4.PROTOCOL_TCP; + } else if (tmp.equalsIgnoreCase("UDP")) { + pool.protocol = IPv4.PROTOCOL_UDP; + } else if (tmp.equalsIgnoreCase("ICMP")) { + pool.protocol = IPv4.PROTOCOL_ICMP; + } + continue; + } + if (n.equals("vip_id")) { + pool.vipId = jp.getText(); + continue; + } + + log.warn("Unrecognized field {} in " + + "parsing Pools", + jp.getText()); + } + jp.close(); + + return pool; + } + +} diff --git a/src/main/java/net/floodlightcontroller/loadbalancer/VipsResource.java b/src/main/java/net/floodlightcontroller/loadbalancer/VipsResource.java new file mode 100644 index 0000000000000000000000000000000000000000..20bf882cdbc2d2f1c72954875a674902d97da1fd --- /dev/null +++ b/src/main/java/net/floodlightcontroller/loadbalancer/VipsResource.java @@ -0,0 +1,148 @@ +package net.floodlightcontroller.loadbalancer; + +import java.io.IOException; +import java.util.Collection; + +import net.floodlightcontroller.packet.IPv4; + +import org.codehaus.jackson.JsonParseException; +import org.codehaus.jackson.JsonParser; +import org.codehaus.jackson.JsonToken; +import org.codehaus.jackson.map.MappingJsonFactory; +import org.restlet.resource.Delete; +import org.restlet.resource.Get; +import org.restlet.resource.Post; +import org.restlet.resource.Put; +import org.restlet.resource.ServerResource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class VipsResource extends ServerResource { + protected static Logger log = LoggerFactory.getLogger(VipsResource.class); + + @Get("json") + public Collection <LBVip> retrieve() { + ILoadBalancerService lbs = + (ILoadBalancerService)getContext().getAttributes(). + get(ILoadBalancerService.class.getCanonicalName()); + + String vipId = (String) getRequestAttributes().get("vip"); + if (vipId!=null) + return lbs.listVip(vipId); + else + return lbs.listVips(); + } + + @Put + @Post + public LBVip createVip(String postData) { + + LBVip vip=null; + try { + vip=jsonToVip(postData); + } catch (IOException e) { + log.error("Could not parse JSON {}", e.getMessage()); + } + + ILoadBalancerService lbs = + (ILoadBalancerService)getContext().getAttributes(). 
+ get(ILoadBalancerService.class.getCanonicalName()); + + String vipId = (String) getRequestAttributes().get("vip"); + if (vipId != null) + return lbs.updateVip(vip); + else + return lbs.createVip(vip); + } + + @Delete + public int removeVip() { + + String vipId = (String) getRequestAttributes().get("vip"); + + ILoadBalancerService lbs = + (ILoadBalancerService)getContext().getAttributes(). + get(ILoadBalancerService.class.getCanonicalName()); + + return lbs.removeVip(vipId); + } + + protected LBVip jsonToVip(String json) throws IOException { + + if (json==null) return null; + + MappingJsonFactory f = new MappingJsonFactory(); + JsonParser jp; + LBVip vip = new LBVip(); + + try { + jp = f.createJsonParser(json); + } catch (JsonParseException e) { + throw new IOException(e); + } + + jp.nextToken(); + if (jp.getCurrentToken() != JsonToken.START_OBJECT) { + throw new IOException("Expected START_OBJECT"); + } + + while (jp.nextToken() != JsonToken.END_OBJECT) { + if (jp.getCurrentToken() != JsonToken.FIELD_NAME) { + throw new IOException("Expected FIELD_NAME"); + } + + String n = jp.getCurrentName(); + jp.nextToken(); + if (jp.getText().equals("")) + continue; + + if (n.equals("id")) { + vip.id = jp.getText(); + continue; + } + if (n.equals("tenant_id")) { + vip.tenantId = jp.getText(); + continue; + } + if (n.equals("name")) { + vip.name = jp.getText(); + continue; + } + if (n.equals("network_id")) { + vip.netId = jp.getText(); + continue; + } + if (n.equals("protocol")) { + String tmp = jp.getText(); + if (tmp.equalsIgnoreCase("TCP")) { + vip.protocol = IPv4.PROTOCOL_TCP; + } else if (tmp.equalsIgnoreCase("UDP")) { + vip.protocol = IPv4.PROTOCOL_UDP; + } else if (tmp.equalsIgnoreCase("ICMP")) { + vip.protocol = IPv4.PROTOCOL_ICMP; + } + continue; + } + if (n.equals("address")) { + vip.address = IPv4.toIPv4Address(jp.getText()); + continue; + } + if (n.equals("port")) { + vip.port = Short.parseShort(jp.getText()); + continue; + } + if (n.equals("pool_id")) { + vip.pools.add(jp.getText()); + continue; + } + + log.warn("Unrecognized field {} in " + + "parsing Vips", + jp.getText()); + } + jp.close(); + + return vip; + } + +} diff --git a/src/main/java/net/floodlightcontroller/topology/ITopologyService.java b/src/main/java/net/floodlightcontroller/topology/ITopologyService.java index cc62e82ac86f78eb63c20da7b70833d5786248e7..6cfc679a80b2bfb8cfa66c34703c9c490cf9688a 100644 --- a/src/main/java/net/floodlightcontroller/topology/ITopologyService.java +++ b/src/main/java/net/floodlightcontroller/topology/ITopologyService.java @@ -41,6 +41,10 @@ public interface ITopologyService extends IFloodlightService { public boolean inSameOpenflowDomain(long switch1, long switch2, boolean tunnelEnabled); + public Set<Long> getSwitchesInOpenflowDomain(long switchDPID); + public Set<Long> getSwitchesInOpenflowDomain(long switchDPID, + boolean tunnelEnabled); + /** * Queries whether two switches are in the same island. * Currently, island and cluster are the same. 
In future, diff --git a/src/main/java/net/floodlightcontroller/topology/TopologyInstance.java b/src/main/java/net/floodlightcontroller/topology/TopologyInstance.java index 7fea75b348d6362c3968c71f78bb1ba0abd8d3c9..129f18bfb6ae6aadc3e5485f68beb5bb70928b4c 100644 --- a/src/main/java/net/floodlightcontroller/topology/TopologyInstance.java +++ b/src/main/java/net/floodlightcontroller/topology/TopologyInstance.java @@ -727,7 +727,13 @@ public class TopologyInstance { protected Set<Long> getSwitchesInOpenflowDomain(long switchId) { Cluster c = switchClusterMap.get(switchId); - if (c == null) return null; + if (c == null) { + // The switch is not known to topology as there + // are no links connected to it. + Set<Long> nodes = new HashSet<Long>(); + nodes.add(switchId); + return nodes; + } return (c.getNodes()); } diff --git a/src/main/java/net/floodlightcontroller/topology/TopologyManager.java b/src/main/java/net/floodlightcontroller/topology/TopologyManager.java index 0d2e974428b619c092fd6b81739dc827da0e6ae6..179db39b2518c4790fe883a390b71ccc5148784b 100644 --- a/src/main/java/net/floodlightcontroller/topology/TopologyManager.java +++ b/src/main/java/net/floodlightcontroller/topology/TopologyManager.java @@ -377,7 +377,8 @@ public class TopologyManager implements } - + //////////////////////////////////////////////////////////////////////// + //////////////////////////////////////////////////////////////////////// /** * Checks if the switchport is a broadcast domain port or not. */ @@ -393,7 +394,8 @@ public class TopologyManager implements return ti.isBroadcastDomainPort(new NodePortTuple(sw, port)); } - + //////////////////////////////////////////////////////////////////////// + //////////////////////////////////////////////////////////////////////// /** * Checks if the new attachment point port is consistent with the * old attachment point port. 
@@ -467,6 +469,22 @@ public class TopologyManager implements return ti.getAllowedIncomingBroadcastPort(src,srcPort); } + //////////////////////////////////////////////////////////////////////// + //////////////////////////////////////////////////////////////////////// + @Override + public Set<Long> getSwitchesInOpenflowDomain(long switchDPID) { + return getSwitchesInOpenflowDomain(switchDPID, true); + } + + @Override + public Set<Long> getSwitchesInOpenflowDomain(long switchDPID, + boolean tunnelEnabled) { + TopologyInstance ti = getCurrentInstance(tunnelEnabled); + return ti.getSwitchesInOpenflowDomain(switchDPID); + } + //////////////////////////////////////////////////////////////////////// + //////////////////////////////////////////////////////////////////////// + @Override public Set<NodePortTuple> getBroadcastDomainPorts() { return portBroadcastDomainLinks.keySet(); diff --git a/src/main/resources/META-INF/services/net.floodlightcontroller.core.module.IFloodlightModule b/src/main/resources/META-INF/services/net.floodlightcontroller.core.module.IFloodlightModule index 07985a1268a9a66755c1745fc9696c9272c290ec..935fe105c9f6a40b0415d21ef18816bc084042bf 100644 --- a/src/main/resources/META-INF/services/net.floodlightcontroller.core.module.IFloodlightModule +++ b/src/main/resources/META-INF/services/net.floodlightcontroller.core.module.IFloodlightModule @@ -23,4 +23,5 @@ net.floodlightcontroller.devicemanager.internal.DefaultEntityClassifier net.floodlightcontroller.devicemanager.test.MockDeviceManager net.floodlightcontroller.core.test.MockFloodlightProvider net.floodlightcontroller.core.test.MockThreadPoolService -net.floodlightcontroller.firewall.Firewall \ No newline at end of file +net.floodlightcontroller.firewall.Firewall +net.floodlightcontroller.loadbalancer.LoadBalancer \ No newline at end of file diff --git a/src/main/resources/floodlightdefault.properties b/src/main/resources/floodlightdefault.properties index 5bb2e28832f44fd7c4db3984002c102f71bc7933..30dfdcaaa06d8e73d98a80a0fc9cb43b827ef347 100644 --- a/src/main/resources/floodlightdefault.properties +++ b/src/main/resources/floodlightdefault.properties @@ -13,7 +13,8 @@ net.floodlightcontroller.flowcache.FlowReconcileManager, \ net.floodlightcontroller.jython.JythonDebugInterface,\ net.floodlightcontroller.counter.CounterStore,\ net.floodlightcontroller.perfmon.PktInProcessingTime,\ -net.floodlightcontroller.ui.web.StaticWebRoutable +net.floodlightcontroller.ui.web.StaticWebRoutable,\ +net.floodlightcontroller.loadbalancer.LoadBalancer net.floodlightcontroller.restserver.RestApiServer.port = 8080 net.floodlightcontroller.core.FloodlightProvider.openflowport = 6633 net.floodlightcontroller.jython.JythonDebugInterface.port = 6655 diff --git a/src/test/java/net/floodlightcontroller/devicemanager/internal/DeviceManagerImplTest.java b/src/test/java/net/floodlightcontroller/devicemanager/internal/DeviceManagerImplTest.java index 91e640d9e716dbac87c9c57db9ed8d90cdf7565d..2e64e99b34c8510de69ce615975912bc98ddbad4 100644 --- a/src/test/java/net/floodlightcontroller/devicemanager/internal/DeviceManagerImplTest.java +++ b/src/test/java/net/floodlightcontroller/devicemanager/internal/DeviceManagerImplTest.java @@ -80,6 +80,7 @@ import org.junit.Test; import org.openflow.protocol.OFPacketIn; import org.openflow.protocol.OFPacketIn.OFPacketInReason; import org.openflow.protocol.OFPhysicalPort; +import org.openflow.protocol.OFPort; import org.openflow.protocol.OFType; import org.openflow.util.HexString; import org.slf4j.Logger; @@ -868,6 
+869,73 @@ public class DeviceManagerImplTest extends FloodlightTestCase { assertArrayEquals(new SwitchPort[] { new SwitchPort(1L, 2) }, aps); } + /** + * This test verifies that the learning behavior on OFPP_LOCAL ports. + * Once a host is learned on OFPP_LOCAL, it is allowed to move only from + * one OFPP_LOCAL to another OFPP_LOCAL port. + * @throws Exception + */ + @Test + public void testLOCALAttachmentPointLearning() throws Exception { + ITopologyService mockTopology = createMock(ITopologyService.class); + expect(mockTopology.getL2DomainId(anyLong())). + andReturn(1L).anyTimes(); + expect(mockTopology.isAttachmentPointPort(anyLong(), anyShort())). + andReturn(true).anyTimes(); + expect(mockTopology.isBroadcastDomainPort(1L, (short)1)). + andReturn(false).anyTimes(); + expect(mockTopology.isBroadcastDomainPort(1L, OFPort.OFPP_LOCAL.getValue())). + andReturn(false).anyTimes(); + expect(mockTopology.isBroadcastDomainPort(1L, (short)2)). + andReturn(true).anyTimes(); + expect(mockTopology.isInSameBroadcastDomain(1L, (short)1, + 1L, OFPort.OFPP_LOCAL.getValue())).andReturn(true).anyTimes(); + expect(mockTopology.isInSameBroadcastDomain(1L, OFPort.OFPP_LOCAL.getValue(), + 1L, (short)2)).andReturn(true).anyTimes(); + expect(mockTopology.isInSameBroadcastDomain(1L, (short)2, + 1L, OFPort.OFPP_LOCAL.getValue())).andReturn(true).anyTimes(); + expect(mockTopology.isConsistent(anyLong(), anyShort(), anyLong(), anyShort())).andReturn(false).anyTimes(); + + Date topologyUpdateTime = new Date(); + expect(mockTopology.getLastUpdateTime()).andReturn(topologyUpdateTime). + anyTimes(); + + replay(mockTopology); + + deviceManager.topology = mockTopology; + + Calendar c = Calendar.getInstance(); + Entity entity1 = new Entity(1L, null, 1, 1L, 1, c.getTime()); + c.add(Calendar.MILLISECOND, + (int)AttachmentPoint.OPENFLOW_TO_EXTERNAL_TIMEOUT/ 2); + Entity entity2 = new Entity(1L, null, null, 1L, (int)OFPort.OFPP_LOCAL.getValue(), c.getTime()); + c.add(Calendar.MILLISECOND, + (int)AttachmentPoint.OPENFLOW_TO_EXTERNAL_TIMEOUT + 1); + Entity entity3 = new Entity(1L, null, null, 1L, 2, c.getTime()); + + IDevice d; + SwitchPort[] aps; + + d = deviceManager.learnDeviceByEntity(entity1); + assertEquals(1, deviceManager.getAllDevices().size()); + aps = d.getAttachmentPoints(); + assertArrayEquals(new SwitchPort[] { new SwitchPort(1L, 1) }, aps); + + // Ensure that the attachment point changes to OFPP_LOCAL + d = deviceManager.learnDeviceByEntity(entity2); + assertEquals(1, deviceManager.getAllDevices().size()); + aps = d.getAttachmentPoints(); + assertArrayEquals(new SwitchPort[] { new SwitchPort(1L, OFPort.OFPP_LOCAL.getValue()) }, aps); + + // Even though the new attachment point is consistent with old + // and the time has elapsed, OFPP_LOCAL attachment point should + // be maintained. 
+ d = deviceManager.learnDeviceByEntity(entity3); + assertEquals(1, deviceManager.getAllDevices().size()); + aps = d.getAttachmentPoints(); + assertArrayEquals(new SwitchPort[] { new SwitchPort(1L, OFPort.OFPP_LOCAL.getValue()) }, aps); + } + @Test public void testPacketIn() throws Exception { diff --git a/src/test/java/net/floodlightcontroller/flowcache/PortDownReconciliationTest.java b/src/test/java/net/floodlightcontroller/flowcache/PortDownReconciliationTest.java index bf209aeff94ae8d3c8a11e92b35ac37fc74dbf94..c8c767030dce5f69c0da1428e600d35000878e58 100644 --- a/src/test/java/net/floodlightcontroller/flowcache/PortDownReconciliationTest.java +++ b/src/test/java/net/floodlightcontroller/flowcache/PortDownReconciliationTest.java @@ -76,386 +76,386 @@ import net.floodlightcontroller.topology.ITopologyService; * information about the PORT_DOWN event is passed to the class, where it begins * breaking down the information,analyzing the switches for flows and deleting * those that are invalid. This Test specifically verifies that each switch is - * queried for flows once and is sent the appropriate OFFlowMod delete - * message. + * queried for flows once and is sent the appropriate OFFlowMod delete message. * * @author Jason Parraga */ public class PortDownReconciliationTest extends FloodlightTestCase { - protected MockFloodlightProvider mockFloodlightProvider; - protected FloodlightModuleContext fmc; - protected ILinkDiscoveryService lds; - protected FlowReconcileManager flowReconcileMgr; - protected MockThreadPoolService tps; - protected DefaultEntityClassifier entityClassifier; - protected PortDownReconciliation pdr; - protected ITopologyService topology; - protected IOFSwitch sw1, sw2, sw3, sw4; - protected Map<Long, IOFSwitch> switches; - protected Capture<List<OFMessage>> wc1, wc2, wc3, wc4; - protected Capture<FloodlightContext> bc1, bc2, bc3, bc4; - protected OFMessage fmd, fmd2; - protected ArrayList<OFMatchReconcile> lofmr; - protected OFMatchReconcile ofmr; - protected static Logger log; - protected FloodlightContext cntx; - protected List<OFStatistics> statsReply; - - @Override + protected MockFloodlightProvider mockFloodlightProvider; + protected FloodlightModuleContext fmc; + protected ILinkDiscoveryService lds; + protected FlowReconcileManager flowReconcileMgr; + protected MockThreadPoolService tps; + protected DefaultEntityClassifier entityClassifier; + protected PortDownReconciliation pdr; + protected ITopologyService topology; + protected IOFSwitch sw1, sw2, sw3, sw4; + protected Map<Long, IOFSwitch> switches; + protected Capture<List<OFMessage>> wc1, wc2, wc3, wc4; + protected Capture<FloodlightContext> bc1, bc2, bc3, bc4; + protected OFMessage fmd, fmd2; + protected ArrayList<OFMatchReconcile> lofmr; + protected OFMatchReconcile ofmr; + protected static Logger log; + protected FloodlightContext cntx; + protected List<OFStatistics> statsReply; + + @Override @Before - public void setUp() throws Exception { - super.setUp(); - - log = LoggerFactory.getLogger(PortDownReconciliation.class); - fmc = new FloodlightModuleContext(); - mockFloodlightProvider = getMockFloodlightProvider(); - pdr = new PortDownReconciliation(); - lds = createMock(ILinkDiscoveryService.class); - entityClassifier = new DefaultEntityClassifier(); - tps = new MockThreadPoolService(); - flowReconcileMgr = new FlowReconcileManager(); - topology = createMock(ITopologyService.class); - cntx = new FloodlightContext(); - statsReply = new ArrayList<OFStatistics>(); - - fmc.addService(IThreadPoolService.class, 
tps); - fmc.addService(IFloodlightProviderService.class, - getMockFloodlightProvider()); - fmc.addService(IFlowReconcileService.class, flowReconcileMgr); - fmc.addService(ITopologyService.class, topology); - fmc.addService(IEntityClassifierService.class, entityClassifier); - fmc.addService(ILinkDiscoveryService.class, lds); - - tps.init(fmc); - flowReconcileMgr.init(fmc); - entityClassifier.init(fmc); - getMockFloodlightProvider().init(fmc); - pdr.init(fmc); - - tps.startUp(fmc); - flowReconcileMgr.startUp(fmc); - entityClassifier.startUp(fmc); - getMockFloodlightProvider().startUp(fmc); - pdr.startUp(fmc); - - // The STATS_REQUEST object used when querying the switches for flows - OFStatisticsRequest req = new OFStatisticsRequest(); - req.setStatisticType(OFStatisticsType.FLOW); - int requestLength = req.getLengthU(); - OFFlowStatisticsRequest specificReq = new OFFlowStatisticsRequest(); - specificReq.setMatch(new OFMatch().setWildcards(0xffffffff)); - specificReq.setOutPort((short) 3); - specificReq.setTableId((byte) 0xff); - req.setStatistics(Collections.singletonList((OFStatistics) specificReq)); - requestLength += specificReq.getLength(); - req.setLengthU(requestLength); - - // Actions for the STATS_REPLY object - OFFlowMod flow = new OFFlowMod(); - OFActionOutput action = new OFActionOutput((short) 3, (short) 0xffff); - List<OFAction> actions = new ArrayList<OFAction>(); - actions.add(action); - - // Match for the STATS_REPLY object - OFMatch m = new OFMatch(); - // Set the incoming port to 1 so that it will find the connected - m.setInputPort((short) 1); - - // STATS_REPLY object - OFFlowStatisticsReply reply = new OFFlowStatisticsReply(); - reply.setActions(actions); - reply.setMatch(m); - // Add the reply to the list of OFStatistics - statsReply.add(reply); - - // Create the STATS_REPLY asynchronous reply object - Callable<List<OFStatistics>> replyFuture = new ReplyFuture(); - // Assign the callable object to a Futuretask so that it will produce future results - FutureTask<List<OFStatistics>> futureStats = new FutureTask<List<OFStatistics>>( - replyFuture); - - // Assign the results of calling the object (the asynchronous reply) - Future<List<OFStatistics>> results = getResults(futureStats); - - // SW1 -- Mock switch for base and multiple switch test case - sw1 = EasyMock.createNiceMock(IOFSwitch.class); - // Expect that the switch's ID is 1 - expect(sw1.getId()).andReturn(1L).anyTimes(); - expect(sw1.getStatistics(req)).andReturn(results).once(); - // Captures to hold resulting flowmod delete messages - wc1 = new Capture<List<OFMessage>>(CaptureType.ALL); - bc1 = new Capture<FloodlightContext>(CaptureType.ALL); - // Capture the parameters passed when sw1.write is invoked - sw1.write(capture(wc1), capture(bc1)); - expectLastCall().once(); - replay(sw1); - - // SW2 -- Mock switch for extended test cases - sw2 = EasyMock.createNiceMock(IOFSwitch.class); - // Expect that the switch's ID is 2 - expect(sw2.getId()).andReturn(2L).anyTimes(); - expect(sw2.getStatistics(req)).andReturn(results).once(); - wc2 = new Capture<List<OFMessage>>(CaptureType.ALL); - bc2 = new Capture<FloodlightContext>(CaptureType.ALL); - // Capture the parameters passwed when sw1.write is invoked - sw2.write(capture(wc2), capture(bc2)); - expectLastCall().anyTimes(); - replay(sw2); - - // SW3 -- Mock switch for extended test cases - sw3 = EasyMock.createNiceMock(IOFSwitch.class); - // Expect that the switch's ID is 3 - expect(sw3.getId()).andReturn(3L).anyTimes(); - 
expect(sw3.getStatistics(req)).andReturn(results).once(); - wc3 = new Capture<List<OFMessage>>(CaptureType.ALL); - bc3 = new Capture<FloodlightContext>(CaptureType.ALL); - // Capture the parameters passwed when sw1.write is invoked - sw3.write(capture(wc3), capture(bc3)); - expectLastCall().anyTimes(); - replay(sw3); - - // SW4 -- Mock switch for extended test cases - sw4 = EasyMock.createNiceMock(IOFSwitch.class); - // Expect that the switch's ID is 4 - expect(sw4.getId()).andReturn(4L).anyTimes(); - expect(sw4.getStatistics(req)).andReturn(results).once(); - wc4 = new Capture<List<OFMessage>>(CaptureType.ALL); - bc4 = new Capture<FloodlightContext>(CaptureType.ALL); - // Capture the parameters passed when sw1.write is invoked - sw4.write(capture(wc4), capture(bc4)); - expectLastCall().anyTimes(); - replay(sw4); - - // Here we create the OFMatch Reconcile list we wish to pass - lofmr = new ArrayList<OFMatchReconcile>(); - - // Create the only OFMatch Reconcile object that will be in the list - ofmr = new OFMatchReconcile(); - long affectedSwitch = sw1.getId(); - OFMatch match = new OFMatch().setWildcards(OFMatch.OFPFW_ALL); - OFMatchWithSwDpid ofmatchsw = new OFMatchWithSwDpid(match, - affectedSwitch); - ofmr.rcAction = OFMatchReconcile.ReconcileAction.UPDATE_PATH; - ofmr.ofmWithSwDpid = ofmatchsw; - - // We'll say port 3 went down - ofmr.outPort = 3; - - // Add the OFMatch Reconcile object to the list - lofmr.add(ofmr); - - // Expected Flow Mod Deletes Messages - // Flow Mod Delete for base switch - fmd = ((OFFlowMod) mockFloodlightProvider.getOFMessageFactory() - .getMessage(OFType.FLOW_MOD)) - .setMatch(new OFMatch().setWildcards(OFMatch.OFPFW_ALL)) - .setCommand(OFFlowMod.OFPFC_DELETE) - // Notice we specify an outPort - .setOutPort((short) 3) - .setLength(U16.t(OFFlowMod.MINIMUM_LENGTH)); - - // Flow Mod Delete for the neighborswitches - fmd2 = ((OFFlowMod) mockFloodlightProvider.getOFMessageFactory() - .getMessage(OFType.FLOW_MOD)) - // Notice we used the base switch's flow's match - .setMatch(flow.getMatch()) - .setCommand(OFFlowMod.OFPFC_DELETE) - // Notice we specific an outPort - .setOutPort((short) 3) - .setLength(U16.t(OFFlowMod.MINIMUM_LENGTH)); - - } - - // This generates the asynchronous reply to sw.getStatistics() - public Future<List<OFStatistics>> getResults( - FutureTask<List<OFStatistics>> futureStats) { - Thread t = new Thread(futureStats); - t.start(); - return futureStats; - - } - - // Class for the asynchronous reply - public class ReplyFuture implements Callable<List<OFStatistics>> { - @Override - public List<OFStatistics> call() throws Exception { - // return stats reply defined above - return statsReply; - } - } - - /** - * This tests the port down reconciliation in the event that the base switch - * is the only switch involved in the PORT_DOWN event. It simply deletes - * flows concerning the downed port. 
- * - * @verify checks to see that a general clearFlowMods(Short outPort) is - * called - * @throws Exception - */ - @Test - public void testSingleSwitchPortDownReconciliation() throws Exception { - log.debug("Starting single switch port down reconciliation test"); - // Load the switch map - switches = new HashMap<Long, IOFSwitch>(); - switches.put(1L, sw1); - mockFloodlightProvider.setSwitches(switches); - - // Reconcile flows with specified OFMatchReconcile - pdr.reconcileFlows(lofmr); - // Validate results - verify(sw1); - - assertTrue(wc1.hasCaptured()); - - List<List<OFMessage>> msglist = wc1.getValues(); - - // Make sure the messages we captures correct - for (List<OFMessage> m : msglist) { - if (m instanceof OFFlowMod) - assertEquals(fmd, m); - } - } - - /** - * This tests the port down reconciliation in the event that the base switch - * is connected to a chain of three switches. It discovers that is has 1 - * neighbor, which recursively finds out that it has 1 neighbor until the - * final switch "sw4" is evaluated, which has no neighbors. - * - * @verify checks to see that a general clearFlowMods(Short outPort) is - * called on the base switch while specific clearFlowMods(OFMatch - * match, Short outPort) are called on the neighboring switches - * @throws Exception - */ - @Test - public void testLinearLinkPortDownReconciliation() throws Exception { - log.debug("Starting linear link port down reconciliation test"); - - // Load the switch map - switches = new HashMap<Long, IOFSwitch>(); - switches.put(1L, sw1); - switches.put(2L, sw2); - switches.put(3L, sw3); - switches.put(4L, sw4); - mockFloodlightProvider.setSwitches(switches); - - // Create the links between the switches - // (Switch 4) --> (Switch 3) --> (Switch 2) --> (Switch 1) - Map<Link, LinkInfo> links = new HashMap<Link, LinkInfo>(); - Link link = new Link(2L, (short) 3, 1L, (short) 1); - Link link2 = new Link(3L, (short) 3, 2L, (short) 1); - Link link3 = new Link(4L, (short) 3, 3L, (short) 1); - LinkInfo linkinfo = null; - links.put(link, linkinfo); - links.put(link2, linkinfo); - links.put(link3, linkinfo); - - // Make sure that the link discovery service provides the link we made - expect(lds.getLinks()).andReturn(links).anyTimes(); - replay(lds); - - // Reconcile flows with specified OFMatchReconcile - pdr.reconcileFlows(lofmr); - // Validate results - verify(sw1, sw2, sw3, sw4); - - // Make sure each capture is not null - assertTrue(wc2.hasCaptured()); - assertTrue(wc3.hasCaptured()); - assertTrue(wc4.hasCaptured()); - - // Make sure each capture has captured the proper Flow Mod Delete - // message - List<List<OFMessage>> msglist = wc2.getValues(); - for (List<OFMessage> m : msglist) { - if (m instanceof OFFlowMod) - assertEquals(fmd2, m); - } - - msglist = wc3.getValues(); - for (List<OFMessage> m : msglist) { - if (m instanceof OFFlowMod) - assertEquals(fmd2, m); - } - - msglist = wc4.getValues(); - for (List<OFMessage> m : msglist) { - if (m instanceof OFFlowMod) - assertEquals(fmd2, m); - } - } - - /** - * This tests the port down reconciliation in the event that the base switch - * has three separate neighboring switches with invalid flows. It discovers - * that is has 3 neighbors and each of them delete flows with the specific - * OFMatch and outPort. 
- * - * @verify checks to see that a general clearFlowMods(Short outPort) is - * called on the base switch while specific clearFlowMods(OFMatch - * match, Short outPort) are called on the neighboring switches - * @throws Exception - */ - @Test - public void testMultipleLinkPortDownReconciliation() throws Exception { - log.debug("Starting multiple link port down reconciliation test"); - - // Load the switch map - switches = new HashMap<Long, IOFSwitch>(); - switches.put(1L, sw1); - switches.put(2L, sw2); - switches.put(3L, sw3); - switches.put(4L, sw4); - mockFloodlightProvider.setSwitches(switches); - - // Create the links between the switches - // (Switch 4 output port 3) --> (Switch 1 input port 1) - // (Switch 3 output port 3) --> (Switch 1 input port 1) - // (Switch 2 output port 3) --> (Switch 1 input port 1) - Map<Link, LinkInfo> links = new HashMap<Link, LinkInfo>(); - Link link = new Link(2L, (short) 3, 1L, (short) 1); - Link link2 = new Link(3L, (short) 3, 1L, (short) 1); - Link link3 = new Link(4L, (short) 3, 1L, (short) 1); - LinkInfo linkinfo = null; - links.put(link, linkinfo); - links.put(link2, linkinfo); - links.put(link3, linkinfo); - - // Make sure that the link discovery service provides the link we made - expect(lds.getLinks()).andReturn(links).anyTimes(); - replay(lds); - - // Reconcile flows with specified OFMatchReconcile - pdr.reconcileFlows(lofmr); - // Validate results - verify(sw1, sw2, sw3, sw4); - - // Make sure each capture is not null - assertTrue(wc2.hasCaptured()); - assertTrue(wc3.hasCaptured()); - assertTrue(wc4.hasCaptured()); - - // Make sure each capture has captured the proper Flow Mod Delete - // message - List<List<OFMessage>> msglist = wc2.getValues(); - for (List<OFMessage> m : msglist) { - if (m instanceof OFFlowMod) - assertEquals(fmd2, m); - } - - msglist = wc3.getValues(); - for (List<OFMessage> m : msglist) { - if (m instanceof OFFlowMod) - assertEquals(fmd2, m); - } - - msglist = wc4.getValues(); - for (List<OFMessage> m : msglist) { - if (m instanceof OFFlowMod) - assertEquals(fmd2, m); - } - } -} \ No newline at end of file + public void setUp() throws Exception { + super.setUp(); + + log = LoggerFactory.getLogger(PortDownReconciliation.class); + fmc = new FloodlightModuleContext(); + mockFloodlightProvider = getMockFloodlightProvider(); + pdr = new PortDownReconciliation(); + lds = createMock(ILinkDiscoveryService.class); + entityClassifier = new DefaultEntityClassifier(); + tps = new MockThreadPoolService(); + flowReconcileMgr = new FlowReconcileManager(); + topology = createMock(ITopologyService.class); + cntx = new FloodlightContext(); + statsReply = new ArrayList<OFStatistics>(); + + fmc.addService(IThreadPoolService.class, tps); + fmc.addService(IFloodlightProviderService.class, + getMockFloodlightProvider()); + fmc.addService(IFlowReconcileService.class, flowReconcileMgr); + fmc.addService(ITopologyService.class, topology); + fmc.addService(IEntityClassifierService.class, entityClassifier); + fmc.addService(ILinkDiscoveryService.class, lds); + + tps.init(fmc); + flowReconcileMgr.init(fmc); + entityClassifier.init(fmc); + getMockFloodlightProvider().init(fmc); + pdr.init(fmc); + + tps.startUp(fmc); + flowReconcileMgr.startUp(fmc); + entityClassifier.startUp(fmc); + getMockFloodlightProvider().startUp(fmc); + pdr.startUp(fmc); + + // The STATS_REQUEST object used when querying the switches for flows + OFStatisticsRequest req = new OFStatisticsRequest(); + req.setStatisticType(OFStatisticsType.FLOW); + int requestLength = 
req.getLengthU();
+        OFFlowStatisticsRequest specificReq = new OFFlowStatisticsRequest();
+        specificReq.setMatch(new OFMatch().setWildcards(0xffffffff));
+        specificReq.setOutPort((short) 3);
+        specificReq.setTableId((byte) 0xff);
+        req.setStatistics(Collections.singletonList((OFStatistics) specificReq));
+        requestLength += specificReq.getLength();
+        req.setLengthU(requestLength);
+
+        // Actions for the STATS_REPLY object
+        OFFlowMod flow = new OFFlowMod();
+        OFActionOutput action = new OFActionOutput((short) 3, (short) 0xffff);
+        List<OFAction> actions = new ArrayList<OFAction>();
+        actions.add(action);
+
+        // Match for the STATS_REPLY object
+        OFMatch m = new OFMatch();
+        // Set the incoming port to 1 so that it will find the connected
+        m.setInputPort((short) 1);
+
+        // STATS_REPLY object
+        OFFlowStatisticsReply reply = new OFFlowStatisticsReply();
+        reply.setActions(actions);
+        reply.setMatch(m);
+        // Add the reply to the list of OFStatistics
+        statsReply.add(reply);
+
+        // Create the STATS_REPLY asynchronous reply object
+        Callable<List<OFStatistics>> replyFuture = new ReplyFuture();
+        // Assign the callable object to a FutureTask so that it will produce
+        // future results
+        FutureTask<List<OFStatistics>> futureStats = new FutureTask<List<OFStatistics>>(
+                replyFuture);
+
+        // Assign the results of calling the object (the asynchronous reply)
+        Future<List<OFStatistics>> results = getResults(futureStats);
+
+        // SW1 -- Mock switch for base and multiple switch test case
+        sw1 = EasyMock.createNiceMock(IOFSwitch.class);
+        // Expect that the switch's ID is 1
+        expect(sw1.getId()).andReturn(1L).anyTimes();
+        expect(sw1.getStatistics(req)).andReturn(results).once();
+        // Captures to hold resulting flowmod delete messages
+        wc1 = new Capture<List<OFMessage>>(CaptureType.ALL);
+        bc1 = new Capture<FloodlightContext>(CaptureType.ALL);
+        // Capture the parameters passed when sw1.write is invoked
+        sw1.write(capture(wc1), capture(bc1));
+        expectLastCall().once();
+        replay(sw1);
+
+        // SW2 -- Mock switch for extended test cases
+        sw2 = EasyMock.createNiceMock(IOFSwitch.class);
+        // Expect that the switch's ID is 2
+        expect(sw2.getId()).andReturn(2L).anyTimes();
+        expect(sw2.getStatistics(req)).andReturn(results).once();
+        wc2 = new Capture<List<OFMessage>>(CaptureType.ALL);
+        bc2 = new Capture<FloodlightContext>(CaptureType.ALL);
+        // Capture the parameters passed when sw2.write is invoked
+        sw2.write(capture(wc2), capture(bc2));
+        expectLastCall().anyTimes();
+        replay(sw2);
+
+        // SW3 -- Mock switch for extended test cases
+        sw3 = EasyMock.createNiceMock(IOFSwitch.class);
+        // Expect that the switch's ID is 3
+        expect(sw3.getId()).andReturn(3L).anyTimes();
+        expect(sw3.getStatistics(req)).andReturn(results).once();
+        wc3 = new Capture<List<OFMessage>>(CaptureType.ALL);
+        bc3 = new Capture<FloodlightContext>(CaptureType.ALL);
+        // Capture the parameters passed when sw3.write is invoked
+        sw3.write(capture(wc3), capture(bc3));
+        expectLastCall().anyTimes();
+        replay(sw3);
+
+        // SW4 -- Mock switch for extended test cases
+        sw4 = EasyMock.createNiceMock(IOFSwitch.class);
+        // Expect that the switch's ID is 4
+        expect(sw4.getId()).andReturn(4L).anyTimes();
+        expect(sw4.getStatistics(req)).andReturn(results).once();
+        wc4 = new Capture<List<OFMessage>>(CaptureType.ALL);
+        bc4 = new Capture<FloodlightContext>(CaptureType.ALL);
+        // Capture the parameters passed when sw4.write is invoked
+        sw4.write(capture(wc4), capture(bc4));
+        expectLastCall().anyTimes();
+        replay(sw4);
+
+        // Here we create the OFMatch Reconcile list we wish to pass
+        lofmr = new ArrayList<OFMatchReconcile>();
+
+        // Create the only OFMatch Reconcile object that will be in the list
+        ofmr = new OFMatchReconcile();
+        long affectedSwitch = sw1.getId();
+        OFMatch match = new OFMatch().setWildcards(OFMatch.OFPFW_ALL);
+        OFMatchWithSwDpid ofmatchsw = new OFMatchWithSwDpid(match,
+                affectedSwitch);
+        ofmr.rcAction = OFMatchReconcile.ReconcileAction.UPDATE_PATH;
+        ofmr.ofmWithSwDpid = ofmatchsw;
+
+        // We'll say port 3 went down
+        ofmr.outPort = 3;
+
+        // Add the OFMatch Reconcile object to the list
+        lofmr.add(ofmr);
+
+        // Expected Flow Mod Delete messages
+        // Flow Mod Delete for base switch
+        fmd = ((OFFlowMod) mockFloodlightProvider.getOFMessageFactory()
+                .getMessage(OFType.FLOW_MOD)).setMatch(new OFMatch().setWildcards(OFMatch.OFPFW_ALL))
+                .setCommand(OFFlowMod.OFPFC_DELETE)
+                // Notice we specify an outPort
+                .setOutPort((short) 3)
+                .setLength(U16.t(OFFlowMod.MINIMUM_LENGTH));
+
+        // Flow Mod Delete for the neighbor switches
+        fmd2 = ((OFFlowMod) mockFloodlightProvider.getOFMessageFactory()
+                .getMessage(OFType.FLOW_MOD))
+                // Notice we used the base switch's flow's match
+                .setMatch(flow.getMatch())
+                .setCommand(OFFlowMod.OFPFC_DELETE)
+                // Notice we specify an outPort
+                .setOutPort((short) 3)
+                .setLength(U16.t(OFFlowMod.MINIMUM_LENGTH));
+
+    }
+
+    // This generates the asynchronous reply to sw.getStatistics()
+    public Future<List<OFStatistics>>
+            getResults(FutureTask<List<OFStatistics>> futureStats) {
+        Thread t = new Thread(futureStats);
+        t.start();
+        return futureStats;
+
+    }
+
+    // Class for the asynchronous reply
+    public class ReplyFuture implements Callable<List<OFStatistics>> {
+        @Override
+        public List<OFStatistics> call() throws Exception {
+            // return stats reply defined above
+            return statsReply;
+        }
+    }
+
+    /**
+     * This tests the port down reconciliation in the event that the base switch
+     * is the only switch involved in the PORT_DOWN event. It simply deletes
+     * flows concerning the downed port.
+     *
+     * @verify checks to see that a general clearFlowMods(Short outPort) is
+     *         called
+     * @throws Exception
+     */
+    @Test
+    public void testSingleSwitchPortDownReconciliation() throws Exception {
+        log.debug("Starting single switch port down reconciliation test");
+        // Load the switch map
+        switches = new HashMap<Long, IOFSwitch>();
+        switches.put(1L, sw1);
+        mockFloodlightProvider.setSwitches(switches);
+
+        // Reconcile flows with specified OFMatchReconcile
+        pdr.reconcileFlows(lofmr);
+        // Validate results
+        verify(sw1);
+
+        assertTrue(wc1.hasCaptured());
+
+        List<List<OFMessage>> msglist = wc1.getValues();
+
+        // Make sure the messages we captured are correct
+        for (List<OFMessage> m : msglist) {
+            if (m instanceof OFFlowMod) assertEquals(fmd, m);
+        }
+    }
+
+    /**
+     * This tests the port down reconciliation in the event that the base switch
+     * is connected to a chain of three switches. It discovers that it has 1
+     * neighbor, which recursively finds out that it has 1 neighbor until the
+     * final switch "sw4" is evaluated, which has no neighbors.
+     *
+     * @verify checks to see that a general clearFlowMods(Short outPort) is
+     *         called on the base switch while specific clearFlowMods(OFMatch
+     *         match, Short outPort) are called on the neighboring switches
+     * @throws Exception
+     */
+    @Test
+    public void testLinearLinkPortDownReconciliation() throws Exception {
+        log.debug("Starting linear link port down reconciliation test");
+
+        // Load the switch map
+        switches = new HashMap<Long, IOFSwitch>();
+        switches.put(1L, sw1);
+        switches.put(2L, sw2);
+        switches.put(3L, sw3);
+        switches.put(4L, sw4);
+        mockFloodlightProvider.setSwitches(switches);
+
+        // Create the links between the switches
+        // (Switch 4) --> (Switch 3) --> (Switch 2) --> (Switch 1)
+        Map<Link, LinkInfo> links = new HashMap<Link, LinkInfo>();
+        Link link = new Link(2L, (short) 3, 1L, (short) 1);
+        Link link2 = new Link(3L, (short) 3, 2L, (short) 1);
+        Link link3 = new Link(4L, (short) 3, 3L, (short) 1);
+        LinkInfo linkinfo = null;
+        links.put(link, linkinfo);
+        links.put(link2, linkinfo);
+        links.put(link3, linkinfo);
+
+        // Make sure that the link discovery service provides the links we made
+        expect(lds.getLinks()).andReturn(links).anyTimes();
+        replay(lds);
+
+        // Reconcile flows with specified OFMatchReconcile
+        pdr.reconcileFlows(lofmr);
+        // Validate results
+        verify(sw1, sw2, sw3, sw4);
+
+        // Make sure each capture is not null
+        assertTrue(wc2.hasCaptured());
+        assertTrue(wc3.hasCaptured());
+        assertTrue(wc4.hasCaptured());
+
+        // Make sure each capture has captured the proper Flow Mod Delete
+        // message
+        List<List<OFMessage>> msglist = wc2.getValues();
+        for (List<OFMessage> m : msglist) {
+            if (m instanceof OFFlowMod) assertEquals(fmd2, m);
+        }
+
+        msglist = wc3.getValues();
+        for (List<OFMessage> m : msglist) {
+            if (m instanceof OFFlowMod) assertEquals(fmd2, m);
+        }
+
+        msglist = wc4.getValues();
+        for (List<OFMessage> m : msglist) {
+            if (m instanceof OFFlowMod) assertEquals(fmd2, m);
+        }
+    }
+
+    /**
+     * This tests the port down reconciliation in the event that the base switch
+     * has three separate neighboring switches with invalid flows. It discovers
+     * that it has 3 neighbors and each of them deletes flows with the specific
+     * OFMatch and outPort.
+     *
+     * @verify checks to see that a general clearFlowMods(Short outPort) is
+     *         called on the base switch while specific clearFlowMods(OFMatch
+     *         match, Short outPort) are called on the neighboring switches
+     * @throws Exception
+     */
+    @Test
+    public void testMultipleLinkPortDownReconciliation() throws Exception {
+        log.debug("Starting multiple link port down reconciliation test");
+
+        // Load the switch map
+        switches = new HashMap<Long, IOFSwitch>();
+        switches.put(1L, sw1);
+        switches.put(2L, sw2);
+        switches.put(3L, sw3);
+        switches.put(4L, sw4);
+        mockFloodlightProvider.setSwitches(switches);
+
+        // Create the links between the switches
+        // (Switch 4 output port 3) --> (Switch 1 input port 1)
+        // (Switch 3 output port 3) --> (Switch 1 input port 1)
+        // (Switch 2 output port 3) --> (Switch 1 input port 1)
+        Map<Link, LinkInfo> links = new HashMap<Link, LinkInfo>();
+        Link link = new Link(2L, (short) 3, 1L, (short) 1);
+        Link link2 = new Link(3L, (short) 3, 1L, (short) 1);
+        Link link3 = new Link(4L, (short) 3, 1L, (short) 1);
+        LinkInfo linkinfo = null;
+        links.put(link, linkinfo);
+        links.put(link2, linkinfo);
+        links.put(link3, linkinfo);
+
+        // Make sure that the link discovery service provides the links we made
+        expect(lds.getLinks()).andReturn(links).anyTimes();
+        replay(lds);
+
+        // Reconcile flows with specified OFMatchReconcile
+        pdr.reconcileFlows(lofmr);
+        // Validate results
+        verify(sw1, sw2, sw3, sw4);
+
+        // Make sure each capture is not null
+        assertTrue(wc2.hasCaptured());
+        assertTrue(wc3.hasCaptured());
+        assertTrue(wc4.hasCaptured());
+
+        // Make sure each capture has captured the proper Flow Mod Delete
+        // message
+        List<List<OFMessage>> msglist = wc2.getValues();
+        for (List<OFMessage> m : msglist) {
+            if (m instanceof OFFlowMod) assertEquals(fmd2, m);
+        }
+
+        msglist = wc3.getValues();
+        for (List<OFMessage> m : msglist) {
+            if (m instanceof OFFlowMod) assertEquals(fmd2, m);
+        }
+
+        msglist = wc4.getValues();
+        for (List<OFMessage> m : msglist) {
+            if (m instanceof OFFlowMod) assertEquals(fmd2, m);
+        }
+    }
+}