diff --git a/build.xml b/build.xml index 012c0c34f4b8e97165c9ac655c7a1488c24a3174..0cfdb03f43fd5e51e85c154aaacada2068bdcaac 100644 --- a/build.xml +++ b/build.xml @@ -74,6 +74,7 @@ <include name="guava-13.0.1.jar" /> <include name="findbugs-annotations-2.0.1.jar" /> <include name="findbugs-jsr305-2.0.1.jar" /> + <include name="derby-10.9.1.0.jar"/> </patternset> <path id="classpath"> diff --git a/lib/derby-10.9.1.0.jar b/lib/derby-10.9.1.0.jar new file mode 100644 index 0000000000000000000000000000000000000000..26feece9e6b1568531ffdb38b173736c35f4d260 Binary files /dev/null and b/lib/derby-10.9.1.0.jar differ diff --git a/setup-eclipse.sh b/setup-eclipse.sh index cf95d767dafe8c2e79a295e2823c2b8b3defc533..3be048a516d4bad794267f68f8182d234bc8b5db 100755 --- a/setup-eclipse.sh +++ b/setup-eclipse.sh @@ -65,7 +65,7 @@ cat >"$d/SyncClient.launch" << EOF <listAttribute key="org.eclipse.debug.core.MAPPED_RESOURCE_TYPES"> <listEntry value="1"/> </listAttribute> -<stringAttribute key="org.eclipse.jdt.launching.MAIN_TYPE" value="com.bigswitch.bigsync.client.SyncClient"/> +<stringAttribute key="org.eclipse.jdt.launching.MAIN_TYPE" value="org.sdnplatform.sync.client.SyncClient"/> <stringAttribute key="org.eclipse.jdt.launching.PROGRAM_ARGUMENTS" value="--hostname localhost --port 6642"/> <stringAttribute key="org.eclipse.jdt.launching.PROJECT_ATTR" value="bigfloodlight"/> <stringAttribute key="org.eclipse.jdt.launching.VM_ARGUMENTS" value="-ea"/> diff --git a/src/main/java/org/sdnplatform/sync/IClosableIterator.java b/src/main/java/org/sdnplatform/sync/IClosableIterator.java new file mode 100644 index 0000000000000000000000000000000000000000..c2c2f7797c7e316c61d46a3a6befd6b1d0f86af6 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/IClosableIterator.java @@ -0,0 +1,35 @@ +/* + * Copyright 2008-2009 LinkedIn, Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.sdnplatform.sync; + +import java.io.Closeable; +import java.util.Iterator; + +/** + * An iterator that must be closed after use + * + * + * @param <T> The type being iterated over + */ +public interface IClosableIterator<T> extends Iterator<T>,Closeable { + + /** + * Close the iterator + */ + public void close(); + +} diff --git a/src/main/java/org/sdnplatform/sync/IInconsistencyResolver.java b/src/main/java/org/sdnplatform/sync/IInconsistencyResolver.java new file mode 100644 index 0000000000000000000000000000000000000000..67ed911a660eda236f20f17052c2e5b73c7954c9 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/IInconsistencyResolver.java @@ -0,0 +1,46 @@ +/* + * Copyright 2008-2009 LinkedIn, Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.sdnplatform.sync; + +import java.util.List; + +/** + * A method for resolving inconsistent object values into a single value. 
+ * Applications can implement this to provide a method for reconciling conflicts + * that cannot be resolved simply by the version information. + * + * + */ +public interface IInconsistencyResolver<T> { + + /** + * Take two different versions of an object and combine them into a single + * version of the object Implementations must maintain the contract that + * <ol> + * <li> + * {@code resolveConflict([null, null]) == null}</li> + * <li> + * if {@code t != null}, then + * + * {@code resolveConflict([null, t]) == resolveConflict([t, null]) == t}</li> + * + * @param items The items to be resolved + * @return The united object + */ + public List<T> resolveConflicts(List<T> items); + +} diff --git a/src/main/java/org/sdnplatform/sync/IStoreClient.java b/src/main/java/org/sdnplatform/sync/IStoreClient.java new file mode 100644 index 0000000000000000000000000000000000000000..bb2a64212e36c2dabba03ef1af4d90ad03b2a4a0 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/IStoreClient.java @@ -0,0 +1,178 @@ +/* + * Copyright 2008-2009 LinkedIn, Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.sdnplatform.sync; + +import java.util.Iterator; +import java.util.Map.Entry; + +import org.sdnplatform.sync.error.ObsoleteVersionException; +import org.sdnplatform.sync.error.SyncException; + + +/** + * The user-facing interface to a sync store. Gives basic put/get/delete + * plus helper functions. 
+ * + * @param <K> The type of the key being stored + * @param <V> The type of the value being stored + */ +public interface IStoreClient<K, V> { + + /** + * Get the value associated with the given key or null if there is no value + * associated with this key. This method strips off all version information + * and is only useful when no further storage operations will be done on + * this key. In general, you should prefer the get() method that returns + * version information unless. + * + * @param key The key + * @throws SyncException + */ + public V getValue(K key) throws SyncException; + + /** + * Get the value associated with the given key or defaultValue if there is + * no value associated with the key. This method strips off all version + * information and is only useful when no further storage operations will be + * done on this key.In general, you should prefer the get() method that returns + * version information unless. + * + * @param key The key for which to fetch the associated value + * @param defaultValue A value to return if there is no value associated + * with this key + * @return Either the value stored for the key or the default value. + * @throws SyncException + */ + public V getValue(K key, V defaultValue) throws SyncException; + + /** + * Get the versioned value associated with the given key. Note that while + * this function will never return null, the {@link Versioned} returned + * can have a null value (i.e. {@link Versioned#getValue() can be null} + * if the key is not present. + * + * @param key The key for which to fetch the value. + * @return The versioned value + * @throws SyncException + */ + public Versioned<V> get(K key) throws SyncException; + + /** + * Get the versioned value associated with the given key or the defaultValue + * if no value is associated with the key. + * + * @param key The key for which to fetch the value. + * @return The versioned value, or the defaultValue if no value is stored + * for this key. 
+ * @throws SyncException + */ + public Versioned<V> get(K key, Versioned<V> defaultValue) + throws SyncException; + + /** + * Get an iterator that will get all the entries in the store. Note + * that this has the potential to miss any values added while you're + * iterating through the collection, and it's possible that items will + * be deleted before you get to the end. + * + * Note that you *must* close the {@link IClosableIterator} when you are + * finished with it or there may be resource leaks. An example of how you + * should use this iterator to ensure that it is closed even if there are + * exceptions follows: + * <code> + * IClosableIterator iter = store.entries(); + * try { + * // do your iteration + * } finally { + * iter.close(); + * } + * </code> + * + * Another important caveat is that because {@link IClosableIterator} + * extends {@link Iterator}, there is no checked exception declared in + * {@link Iterator#next()}. Because of this, calling + * {@link Iterator#next()} on the iterator returned here may throw a + * SyncRuntimeException wrapping a SyncException such as might be + * returned by {@link IStoreClient#get(Object)} + * @return + * @throws SyncException + */ + public IClosableIterator<Entry<K, Versioned<V>>> entries() + throws SyncException; + + /** + * Associated the given value to the key, clobbering any existing values + * stored for the key. + * + * @param key The key + * @param value The value + * @return version The version of the object + * @throws SyncException + */ + public IVersion put(K key, V value) throws SyncException; + + /** + * Put the given Versioned value into the store for the given key if the + * version is greater to or concurrent with existing values. Throw an + * ObsoleteVersionException otherwise. 
+ * + * @param key The key + * @param versioned The value and its versioned + * @throws ObsoleteVersionException + * @throws SyncException + */ + public IVersion put(K key, Versioned<V> versioned) + throws SyncException; + + /** + * Put the versioned value to the key, ignoring any ObsoleteVersionException + * that may be thrown + * + * @param key The key + * @param versioned The versioned value + * @return true if the put succeeded + * @throws SyncException + */ + public boolean putIfNotObsolete(K key, Versioned<V> versioned) + throws SyncException; + + /** + * Delete the key by writing a null tombstone to the store + * + * @param key The key + * @throws SyncException + */ + public void delete(K key) throws SyncException; + + /** + * Delete the key by writing a null tombstone to the store using the + * provided {@link IVersion}. + * + * @param key The key to delete + * @param version The version of the key + * @throws SyncException + */ + public void delete(K key, IVersion version) throws SyncException; + + /** + * Add a listener that will be notified about changes to the given store. + * @param listener the {@link IStoreListener} that will receive the + * notifications + */ + public void addStoreListener(IStoreListener<K> listener); + +} diff --git a/src/main/java/org/sdnplatform/sync/IStoreListener.java b/src/main/java/org/sdnplatform/sync/IStoreListener.java new file mode 100644 index 0000000000000000000000000000000000000000..9bfe53dfd9b24960696a47d5afb4bf2b6182f05f --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/IStoreListener.java @@ -0,0 +1,10 @@ +package org.sdnplatform.sync; + +import java.util.Iterator; + +public interface IStoreListener<K> { + /** + * Called when keys in the store are modified or deleted. 
+ */ + public void keysModified(Iterator<K> keys); +} diff --git a/src/main/java/org/sdnplatform/sync/ISyncService.java b/src/main/java/org/sdnplatform/sync/ISyncService.java new file mode 100644 index 0000000000000000000000000000000000000000..6518420603b2bc5f47f75124334355ce2e4fa019 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/ISyncService.java @@ -0,0 +1,121 @@ +package org.sdnplatform.sync; + +import org.sdnplatform.sync.error.SyncException; +import org.sdnplatform.sync.error.UnknownStoreException; + +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.core.type.TypeReference; + + +import net.floodlightcontroller.core.module.IFloodlightService; + +/** + * The sync service provides a high-performance in-memory database for + * fault and partition-tolerant replication of state data. It provides + * eventually consistent semantics with versioning through vector clocks + * and allows custom handling of resolution of inconsistent writes. + * + * An important caveat to keep in mind is that keys should not be constructed + * using any hash tables, because the serialized version will contain a + * a potentially-inconsistent ordering of elements resulting in a failure + * for keys to match. Using a java bean or a {@link JsonNode} will avoid this + * problem. Using strings as keys also avoids this problem. + * @author readams + */ +public interface ISyncService extends IFloodlightService { + public enum Scope { + GLOBAL, + LOCAL + } + + /** + * Create a store with the given store name and scope + * @param storeName the name of the store + * @param scope the distribution scope for the data + * @throws SyncException + */ + public void registerStore(String storeName, Scope scope) + throws SyncException; + + /** + * Create a store with the given store name and scope that will be + * persistent across reboots. 
The performance will be dramatically slower + * @param storeName the name of the store + * @param scope the distribution scope for the data + */ + public void registerPersistentStore(String storeName, Scope scope) + throws SyncException; + + /** + * Get a store client for the given store. The store client will use + * a default inconsistency resolution strategy which will use the + * timestamps of any concurrent updates and choose the later update + * @param storeName the name of the store to retrieve + * @param keyClass the class for the underlying key needed for + * deserialization + * @param valueClass the class for the underlying value needed for + * deserialization + * @return the store client + * @throws UnknownStoreException + */ + public <K, V> IStoreClient<K, V> getStoreClient(String storeName, + Class<K> keyClass, + Class<V> valueClass) + throws UnknownStoreException; + + /** + * Get a store client that will use the provided inconsistency resolver + * to resolve concurrent updates. + * @param storeName the name of the store to retrieve + * @param keyClass the class for the underlying key needed for + * deserialization + * @param valueClass the class for the underlying value needed for + * deserialization + * @param resolver the inconsistency resolver to use for the store + * @return the store client + * @throws UnknownStoreException + */ + public <K, V> IStoreClient<K, V> + getStoreClient(String storeName, + Class<K> keyClass, + Class<V> valueClass, + IInconsistencyResolver<Versioned<V>> resolver) + throws UnknownStoreException; + + /** + * Get a store client for the given store. 
The store client will use + * a default inconsistency resolution strategy which will use the + * timestamps of any concurrent updates and choose the later update + * @param storeName the name of the store to retrieve + * @param keyType the type reference for the underlying key needed for + * deserialization + * @param valueType the type reference for the underlying value needed for + * deserialization + * @return the store client + * @throws UnknownStoreException + */ + public <K, V> IStoreClient<K, V> getStoreClient(String storeName, + TypeReference<K> keyType, + TypeReference<V> valueType) + throws UnknownStoreException; + + /** + * Get a store client that will use the provided inconsistency resolver + * to resolve concurrent updates. + * @param storeName the name of the store to retrieve + * @param keyType the type reference for the underlying key needed for + * deserialization + * @param valueType the type reference for the underlying value needed for + * deserialization + * @param resolver the inconsistency resolver to use for the store + * @return the store client + * @throws UnknownStoreException + */ + public <K, V> IStoreClient<K, V> + getStoreClient(String storeName, + TypeReference<K> keyType, + TypeReference<V> valueType, + IInconsistencyResolver<Versioned<V>> resolver) + throws UnknownStoreException; + +} diff --git a/src/main/java/org/sdnplatform/sync/IVersion.java b/src/main/java/org/sdnplatform/sync/IVersion.java new file mode 100644 index 0000000000000000000000000000000000000000..3af0e802271333361944fcf9f723bf918d2acf72 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/IVersion.java @@ -0,0 +1,50 @@ +/* + * Copyright 2008-2009 LinkedIn, Inc + * Copyright 2013 Big Switch Networks, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.sdnplatform.sync; + +/** + * An interface that allows us to determine if a given version happened before + * or after another version. + * + * This could have been done using the comparable interface but that is + * confusing, because the numeric codes are easily confused, and because + * concurrent versions are not necessarily "equal" in the normal sense. + * + * + */ + +public interface IVersion { + /** + * The result of comparing two times--either t1 is BEFORE t2, + * t1 is AFTER t2, or t1 happens CONCURRENTLY to t2. + */ + public enum Occurred { + BEFORE, + AFTER, + CONCURRENTLY + } + + /** + * Return whether or not the given version preceeded this one, succeeded it, + * or is concurrant with it + * + * @param v The other version + */ + public Occurred compare(IVersion v); + +} diff --git a/src/main/java/org/sdnplatform/sync/Versioned.java b/src/main/java/org/sdnplatform/sync/Versioned.java new file mode 100644 index 0000000000000000000000000000000000000000..73617a7e2c18f416baba3f89878f25579cb260fa --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/Versioned.java @@ -0,0 +1,183 @@ +/* + * Copyright 2008-2009 LinkedIn, Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.sdnplatform.sync; + +import java.io.Serializable; +import java.util.Arrays; +import java.util.Comparator; + +import org.sdnplatform.sync.IVersion.Occurred; +import org.sdnplatform.sync.internal.version.VectorClock; + +import com.google.common.base.Objects; + +/** + * A wrapper for an object that adds a Version. + * + * + */ +public class Versioned<T> implements Serializable { + + private static final long serialVersionUID = 1; + + private volatile VectorClock version; + private volatile T value; + + public Versioned(T object) { + this(object, new VectorClock()); + } + + public Versioned(T object, + IVersion version) { + this.version = version == null ? new VectorClock() : (VectorClock) version; + this.value = object; + } + + public IVersion getVersion() { + return version; + } + + public void increment(int nodeId, long time) { + this.version = version.incremented(nodeId, time); + } + + public T getValue() { + return value; + } + + public void setValue(T object) { + this.value = object; + } + + /** + * Determines if two objects are equal as determined by + * {@link Object#equals(Object)}, or "deeply equal" if both are arrays. + * <p> + * If both objects are null, true is returned; if both objects are array, + * the corresponding {@link Arrays#deepEquals(Object[], Object[])}, or + * {@link Arrays#equals(int[], int[])} or the like are called to determine + * equality. + * <p> + * Note that this method does not "deeply" compare the fields of the + * objects. 
+ */ + private static boolean deepEquals(Object o1, Object o2) { + if(o1 == o2) { + return true; + } + if(o1 == null || o2 == null) { + return false; + } + + Class<?> type1 = o1.getClass(); + Class<?> type2 = o2.getClass(); + if(!(type1.isArray() && type2.isArray())) { + return o1.equals(o2); + } + if(o1 instanceof Object[] && o2 instanceof Object[]) { + return Arrays.deepEquals((Object[]) o1, (Object[]) o2); + } + if(type1 != type2) { + return false; + } + if(o1 instanceof boolean[]) { + return Arrays.equals((boolean[]) o1, (boolean[]) o2); + } + if(o1 instanceof char[]) { + return Arrays.equals((char[]) o1, (char[]) o2); + } + if(o1 instanceof byte[]) { + return Arrays.equals((byte[]) o1, (byte[]) o2); + } + if(o1 instanceof short[]) { + return Arrays.equals((short[]) o1, (short[]) o2); + } + if(o1 instanceof int[]) { + return Arrays.equals((int[]) o1, (int[]) o2); + } + if(o1 instanceof long[]) { + return Arrays.equals((long[]) o1, (long[]) o2); + } + if(o1 instanceof float[]) { + return Arrays.equals((float[]) o1, (float[]) o2); + } + if(o1 instanceof double[]) { + return Arrays.equals((double[]) o1, (double[]) o2); + } + throw new AssertionError(); + } + + @Override + public boolean equals(Object o) { + if(o == this) + return true; + else if(!(o instanceof Versioned<?>)) + return false; + + Versioned<?> versioned = (Versioned<?>) o; + return Objects.equal(getVersion(), versioned.getVersion()) + && deepEquals(getValue(), versioned.getValue()); + } + + @Override + public int hashCode() { + int v = 31 + version.hashCode(); + if(value != null) { + v += 31 * value.hashCode(); + } + return v; + } + + @Override + public String toString() { + return "[" + value + ", " + version + "]"; + } + + /** + * Create a clone of this Versioned object such that the object pointed to + * is the same, but the VectorClock and Versioned wrapper is a shallow copy. 
+ */ + public Versioned<T> cloneVersioned() { + return new Versioned<T>(this.getValue(), this.version.clone()); + } + + public static <S> Versioned<S> value(S s) { + return new Versioned<S>(s, new VectorClock()); + } + + public static <S> Versioned<S> value(S s, IVersion v) { + return new Versioned<S>(s, v); + } + + public static <S> Versioned<S> emptyVersioned() { + return new Versioned<S>(null, new VectorClock(0)); + } + + public static final class HappenedBeforeComparator<S> implements Comparator<Versioned<S>> { + + public int compare(Versioned<S> v1, Versioned<S> v2) { + Occurred occurred = v1.getVersion().compare(v2.getVersion()); + if(occurred == Occurred.BEFORE) + return -1; + else if(occurred == Occurred.AFTER) + return 1; + else + return 0; + } + } + +} diff --git a/src/main/java/org/sdnplatform/sync/client/ShellCommand.java b/src/main/java/org/sdnplatform/sync/client/ShellCommand.java new file mode 100644 index 0000000000000000000000000000000000000000..7d3cfbf5d8041231862190f27e3ea6a33d3f161a --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/client/ShellCommand.java @@ -0,0 +1,68 @@ +package org.sdnplatform.sync.client; + +import java.io.IOException; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.MappingJsonFactory; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.SerializationFeature; + +/** + * A user command for the command line client + * @author readams + */ +public abstract class ShellCommand { + + protected static final ObjectMapper mapper = new ObjectMapper(); + protected static final MappingJsonFactory mjf = + new MappingJsonFactory(mapper); + + static { + mapper.configure(SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS, + true); + } + + /** + * Execute the command on the given tokens + * @param tokens the argument tokens. 
The first token will be the command + * @param line the whole command line + * @return whether to exit the shell after the command + */ + public abstract boolean execute(String[] tokens, + String line) throws Exception; + + /** + * Return syntax description + * @return the syntax string + */ + public abstract String syntaxString(); + + /** + * Parse a JSON object + * @param jp the JSON parse + * @return the JSON node + * @throws IOException + */ + protected JsonNode validateJson(JsonParser jp) throws IOException { + JsonNode parsed = null; + + try { + parsed = jp.readValueAsTree(); + } catch (JsonProcessingException e) { + System.err.println("Could not parse JSON: " + e.getMessage()); + return null; + } + return parsed; + } + + /** + * Serialize a JSON object as bytes + * @param value the object to serialize + * @return the serialized bytes + * @throws Exception + */ + protected byte[] serializeJson(JsonNode value) throws Exception { + return mapper.writeValueAsBytes(value); + } +} diff --git a/src/main/java/org/sdnplatform/sync/client/SyncClient.java b/src/main/java/org/sdnplatform/sync/client/SyncClient.java new file mode 100644 index 0000000000000000000000000000000000000000..dc4cca161fed53395e305a856dd4fea3bd580858 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/client/SyncClient.java @@ -0,0 +1,449 @@ +package org.sdnplatform.sync.client; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.PrintStream; +import java.io.StringReader; +import java.util.HashMap; +import java.util.Map.Entry; +import net.floodlightcontroller.core.module.FloodlightModuleContext; +import net.floodlightcontroller.threadpool.IThreadPoolService; +import net.floodlightcontroller.threadpool.ThreadPool; + +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectWriter; +import com.fasterxml.jackson.core.JsonParser; +import org.kohsuke.args4j.CmdLineException; +import 
org.kohsuke.args4j.CmdLineParser; +import org.kohsuke.args4j.Option; +import org.sdnplatform.sync.IStoreClient; +import org.sdnplatform.sync.ISyncService; +import org.sdnplatform.sync.Versioned; +import org.sdnplatform.sync.ISyncService.Scope; +import org.sdnplatform.sync.error.UnknownStoreException; +import org.sdnplatform.sync.internal.remote.RemoteSyncManager; + + +public class SyncClient { + RemoteSyncManager syncManager; + IStoreClient<JsonNode, JsonNode> storeClient; + + /** + * Shell commands + */ + protected HashMap<String, ShellCommand> commands; + + /** + * Command-line settings + */ + protected SyncClientSettings settings; + + /** + * Stream to use for output + */ + protected PrintStream out = System.out; + + /** + * Stream to use for errors + */ + protected PrintStream err = System.err; + + public SyncClient(SyncClientSettings settings) { + this.settings = settings; + + commands = new HashMap<String, ShellCommand>(); + commands.put("quit", new QuitCommand()); + commands.put("help", new HelpCommand()); + commands.put("put", new PutCommand()); + commands.put("delete", new DeleteCommand()); + commands.put("get", new GetCommand()); + commands.put("getfull", new GetFullCommand()); + commands.put("store", new StoreCommand()); + commands.put("register", new RegisterCommand()); + } + + protected boolean connect() + throws Exception { + FloodlightModuleContext fmc = new FloodlightModuleContext(); + ThreadPool tp = new ThreadPool(); + syncManager = new RemoteSyncManager(); + fmc.addService(IThreadPoolService.class, tp); + fmc.addService(ISyncService.class, syncManager); + fmc.addConfigParam(syncManager, "hostname", settings.hostname); + fmc.addConfigParam(syncManager, "port", + Integer.toString(settings.port)); + tp.init(fmc); + syncManager.init(fmc); + tp.startUp(fmc); + syncManager.startUp(fmc); + + if (settings.storeName != null) + getStoreClient(); + + out.println("Connected to " + + settings.hostname + ":" + settings.port); + return true; + } + + protected 
void getStoreClient() + throws UnknownStoreException { + storeClient = syncManager.getStoreClient(settings.storeName, + JsonNode.class, + JsonNode.class); + } + + protected boolean checkStoreSettings() { + if (settings.storeName == null) { + err.println("No store selected. Select using \"store\" command."); + return false; + } + return true; + } + + /** + * Quit command + * @author readams + * + */ + protected static class QuitCommand extends ShellCommand { + @Override + public boolean execute(String[] tokens, String line) { + return true; + } + + @Override + public String syntaxString() { + return "quit"; + } + } + + /** + * Help command + * @author readams + * + */ + protected class HelpCommand extends ShellCommand { + @Override + public boolean execute(String[] tokens, String line) { + out.println("Commands: "); + for (Entry<String, ShellCommand> entry : commands.entrySet()) { + out.println(entry.getValue().syntaxString()); + } + return false; + } + + @Override + public String syntaxString() { + return "help"; + } + } + + /** + * Get command + * @author readams + * + */ + protected class GetCommand extends ShellCommand { + ObjectWriter writer = mapper.writerWithDefaultPrettyPrinter(); + + @Override + public boolean execute(String[] tokens, String line) throws Exception { + if (tokens.length < 2) { + err.println("Usage: " + syntaxString()); + return false; + } + if (!checkStoreSettings()) return false; + + StringReader sr = new StringReader(line); + while (sr.read() != ' '); + JsonParser jp = mjf.createJsonParser(sr); + + JsonNode keyNode = validateJson(jp); + if (keyNode == null) return false; + + out.println("Getting Key:"); + out.println(writer.writeValueAsString(keyNode)); + out.println(""); + Versioned<JsonNode> value = storeClient.get(keyNode); + display(value); + + return false; + } + + protected void display(Versioned<JsonNode> value) throws Exception { + if (value.getValue() == null) { + out.println("Not found"); + } else { + out.println("Value:"); + 
out.println(writer.writeValueAsString(value.getValue())); + } + } + + @Override + public String syntaxString() { + return "get [key]"; + } + } + + protected class GetFullCommand extends GetCommand { + @Override + protected void display(Versioned<JsonNode> value) throws Exception { + if (value.getValue() == null) { + out.println("Not found"); + } else { + out.println("Version:"); + out.println(value.getVersion()); + out.println("Value:"); + out.println(writer.writeValueAsString(value.getValue())); + } + } + + @Override + public String syntaxString() { + return "getfull [key]"; + } + } + + /** + * Put command + * @author readams + * + */ + protected class PutCommand extends ShellCommand { + @Override + public boolean execute(String[] tokens, String line) throws Exception { + if (tokens.length < 3) { + err.println("Usage: " + syntaxString()); + return false; + + } + if (!checkStoreSettings()) return false; + + StringReader sr = new StringReader(line); + while (sr.read() != ' '); + JsonParser jp = mjf.createJsonParser(sr); + + JsonNode keyNode = validateJson(jp); + if (keyNode == null) return false; + JsonNode valueNode = validateJson(jp); + if (valueNode == null) return false; + + ObjectWriter writer = mapper.writerWithDefaultPrettyPrinter(); + out.println("Putting Key:"); + out.println(writer.writeValueAsString(keyNode)); + out.println("\nValue:"); + out.println(writer.writeValueAsString(valueNode)); + out.flush(); + + storeClient.put(keyNode, valueNode); + out.println("Success"); + + return false; + } + + @Override + public String syntaxString() { + return "put [key] [value]"; + } + } + + /** + * Delete command + * @author readams + * + */ + protected class DeleteCommand extends ShellCommand { + @Override + public boolean execute(String[] tokens, String line) throws Exception { + if (tokens.length < 2) { + err.println("Usage: " + syntaxString()); + return false; + } + if (!checkStoreSettings()) return false; + + StringReader sr = new StringReader(line); + while 
(sr.read() != ' '); + JsonParser jp = mjf.createJsonParser(sr); + + JsonNode keyNode = validateJson(jp); + if (keyNode == null) return false; + + ObjectWriter writer = mapper.writerWithDefaultPrettyPrinter(); + out.println("Deleting Key:"); + out.println(writer.writeValueAsString(keyNode)); + out.println(""); + + storeClient.delete(keyNode); + out.println("Success"); + + return false; + } + + @Override + public String syntaxString() { + return "delete [key]"; + } + } + + /** + * Choose the store + * @author readams + */ + protected class StoreCommand extends ShellCommand { + + @Override + public boolean execute(String[] tokens, String line) + throws Exception { + if (tokens.length < 2) { + err.println("Usage: " + syntaxString()); + return false; + } + + settings.storeName = tokens[1]; + getStoreClient(); + return false; + } + + @Override + public String syntaxString() { + return "store [storeName]"; + } + + } + + /** + * Register a new store + * @author readams + */ + protected class RegisterCommand extends ShellCommand { + + @Override + public boolean execute(String[] tokens, String line) + throws Exception { + if (tokens.length < 3) { + err.println("Usage: " + syntaxString()); + return false; + } + Scope scope = Scope.LOCAL; + if ("global".equals(tokens[2])) + scope = Scope.GLOBAL; + + settings.storeName = tokens[1]; + syncManager.registerStore(settings.storeName, scope); + getStoreClient(); + return false; + } + + @Override + public String syntaxString() { + return "register [storeName] [local|global]"; + } + + } + + protected void cleanup() throws InterruptedException { + syncManager.shutdown(); + } + + protected boolean executeCommandLine(String line) { + String[] tokens = line.split("\\s+"); + if (tokens.length > 0) { + ShellCommand command = commands.get(tokens[0]); + if (command != null) { + try { + if (command.execute(tokens, line)) + return true; + } catch (Exception e) { + err.println("Failed to execute command: " + + line); + if (settings.debug) + 
e.printStackTrace(err); + else + err.println(e.getClass().getSimpleName() + + ": " + e.getMessage()); + } + } else { + err.println("Unrecognized command: \"" + + tokens[0] + "\""); + } + } + return false; + } + + protected void startShell(SyncClientSettings settings) + throws InterruptedException { + BufferedReader br = + new BufferedReader(new InputStreamReader(System.in)); + String line; + try { + while (true) { + err.print("> "); + line = br.readLine(); + if (line == null) break; + if (executeCommandLine(line)) break; + } + } catch (IOException e) { + err.println("Could not read input: " + e.getMessage()); + } + } + + protected static class SyncClientSettings { + @Option(name="--help", + usage="Server hostname") + protected boolean help; + + @Option(name="--hostname", aliases="-h", + usage="Server hostname", required=true) + protected String hostname; + + @Option(name="--port", aliases="-p", usage="Server port", required=true) + protected int port; + + @Option(name="--store", aliases="-s", + usage="Store name to access") + protected String storeName; + + @Option(name="--command", aliases="-c", + usage="If set, execute a command") + protected String command; + + @Option(name="--debug", + usage="Show full error information") + protected boolean debug; + } + + /** + * @param args + * @throws InterruptedException + */ + public static void main(String[] args) throws Exception { + SyncClientSettings settings = new SyncClientSettings(); + CmdLineParser parser = new CmdLineParser(settings); + try { + parser.parseArgument(args); + } catch (CmdLineException e) { + System.err.println(e.getMessage()); + parser.printUsage(System.err); + System.exit(1); + } + if (settings.help) { + parser.printUsage(System.err); + System.exit(1); + } + + SyncClient client = new SyncClient(settings); + try { + if (false == client.connect()) { + return; + } + if (settings.command == null) { + client.startShell(settings); + } else { + client.executeCommandLine(settings.command); + } + } finally 
{ + client.cleanup(); + } + } +} diff --git a/src/main/java/org/sdnplatform/sync/error/HandshakeTimeoutException.java b/src/main/java/org/sdnplatform/sync/error/HandshakeTimeoutException.java new file mode 100644 index 0000000000000000000000000000000000000000..4a2d6b8ecb6cfdf98872d2d31e5174c7292a1aa3 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/error/HandshakeTimeoutException.java @@ -0,0 +1,16 @@ +/** +* Copyright 2011,2013, Big Switch Networks, Inc. +**/ + +package org.sdnplatform.sync.error; + +/** + * Exception is thrown when the handshake fails to complete + * before a specified time + * @author readams + */ +public class HandshakeTimeoutException extends SyncException { + + private static final long serialVersionUID = 6859880268940337312L; + +} diff --git a/src/main/java/org/sdnplatform/sync/error/InconsistentDataException.java b/src/main/java/org/sdnplatform/sync/error/InconsistentDataException.java new file mode 100644 index 0000000000000000000000000000000000000000..f3644b3bcacfdaa514b5eb66e806146da7c7118d --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/error/InconsistentDataException.java @@ -0,0 +1,45 @@ +/* + * Copyright 2008-2009 LinkedIn, Inc + * Copyright 2013 Big Switch Networks, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package org.sdnplatform.sync.error; + +import java.util.List; + +/** + * Thrown when the inconsistency resolver fails to resolve down to a single + * value + */ +public class InconsistentDataException extends SyncException { + + private static final long serialVersionUID = 1050277622160468516L; + List<?> unresolvedVersions; + + public InconsistentDataException(String message, List<?> versions) { + super(message); + this.unresolvedVersions = versions; + } + + public List<?> getUnresolvedVersions() { + return unresolvedVersions; + } + + @Override + public ErrorType getErrorCode() { + return ErrorType.INCONSISTENT_DATA; + } + +} diff --git a/src/main/java/org/sdnplatform/sync/error/ObsoleteVersionException.java b/src/main/java/org/sdnplatform/sync/error/ObsoleteVersionException.java new file mode 100644 index 0000000000000000000000000000000000000000..2ec532abed4f0344aa46adedaa5ab3aa429ee37c --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/error/ObsoleteVersionException.java @@ -0,0 +1,33 @@ +package org.sdnplatform.sync.error; + +/** + * This exception is thrown when attempting to write a value into a store + * that is older than the value already in the store. If you get + * this exception, you need to redo your read/modify/write operation. 
+ */ +public class ObsoleteVersionException extends SyncException { + + private static final long serialVersionUID = 7128132048300845832L; + + public ObsoleteVersionException() { + super(); + } + + public ObsoleteVersionException(String message, Throwable cause) { + super(message, cause); + } + + public ObsoleteVersionException(String message) { + super(message); + } + + public ObsoleteVersionException(Throwable cause) { + super(cause); + } + + @Override + public ErrorType getErrorCode() { + return ErrorType.OBSOLETE_VERSION; + } + +} diff --git a/src/main/java/org/sdnplatform/sync/error/PersistException.java b/src/main/java/org/sdnplatform/sync/error/PersistException.java new file mode 100644 index 0000000000000000000000000000000000000000..2d557e8fe29c108d6ea0da77d6d6454e70247a77 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/error/PersistException.java @@ -0,0 +1,31 @@ +package org.sdnplatform.sync.error; + +/** + * An error with a persistence layer + * @author readams + * + */ +public class PersistException extends SyncException { + private static final long serialVersionUID = -1374782534201553648L; + + public PersistException() { + super(); + } + + public PersistException(String message, Throwable cause) { + super(message, cause); + } + + public PersistException(String message) { + super(message); + } + + public PersistException(Throwable cause) { + super(cause); + } + + @Override + public ErrorType getErrorCode() { + return ErrorType.PERSIST; + } +} diff --git a/src/main/java/org/sdnplatform/sync/error/RemoteStoreException.java b/src/main/java/org/sdnplatform/sync/error/RemoteStoreException.java new file mode 100644 index 0000000000000000000000000000000000000000..b6d2353417b4bbfe71809c7fbe14bf5ca4c0f21b --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/error/RemoteStoreException.java @@ -0,0 +1,27 @@ +package org.sdnplatform.sync.error; + +/** + * An exception related to retrieving data from a remote store + * @author readams + */ +public class 
RemoteStoreException extends SyncException { + + private static final long serialVersionUID = -8098015934951853774L; + + public RemoteStoreException() { + super(); + } + + public RemoteStoreException(String message, Throwable cause) { + super(message, cause); + } + + public RemoteStoreException(String message) { + super(message); + } + + public RemoteStoreException(Throwable cause) { + super(cause); + } + +} diff --git a/src/main/java/org/sdnplatform/sync/error/SerializationException.java b/src/main/java/org/sdnplatform/sync/error/SerializationException.java new file mode 100644 index 0000000000000000000000000000000000000000..dcf0524cf929eeb11e489543a0b0fb66eabe2c93 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/error/SerializationException.java @@ -0,0 +1,31 @@ +package org.sdnplatform.sync.error; + +/** + * An error occurred while serializing or deserializing objects + * @author readams + */ +public class SerializationException extends SyncException { + + private static final long serialVersionUID = 6633759330354187L; + + public SerializationException() { + super(); + } + + public SerializationException(String message, Throwable cause) { + super(message, cause); + } + + public SerializationException(String message) { + super(message); + } + + public SerializationException(Throwable cause) { + super(cause); + } + + @Override + public ErrorType getErrorCode() { + return ErrorType.SERIALIZATION; + } +} diff --git a/src/main/java/org/sdnplatform/sync/error/SyncException.java b/src/main/java/org/sdnplatform/sync/error/SyncException.java new file mode 100644 index 0000000000000000000000000000000000000000..a364dae30d451a2ad111873e6792656ce0207232 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/error/SyncException.java @@ -0,0 +1,75 @@ +package org.sdnplatform.sync.error; + +/** + * Generic exception type for sync service exceptions + * @author readams + */ +public class SyncException extends Exception { + + private static final long serialVersionUID = 
-6150348258087759055L; + + public enum ErrorType { + SUCCESS(0), + GENERIC(1), + INCONSISTENT_DATA(2), + OBSOLETE_VERSION(3), + UNKNOWN_STORE(4), + SERIALIZATION(5), + PERSIST(6), + HANDSHAKE_TIMEOUT(7), + REMOTE_STORE(8); + + private final int value; + + ErrorType(int value) { + this.value = value; + } + + public int getValue() { + return value; + } + } + + public SyncException() { + super(); + } + + public SyncException(String message, Throwable cause) { + super(message, cause); + } + + public SyncException(String message) { + super(message); + } + + public SyncException(Throwable cause) { + super(cause); + } + + public ErrorType getErrorCode() { + return ErrorType.GENERIC; + } + + public static SyncException newInstance(ErrorType type, + String message, Throwable cause) { + switch (type) { + case INCONSISTENT_DATA: + return new InconsistentDataException(message, null); + case OBSOLETE_VERSION: + return new ObsoleteVersionException(message, cause); + case UNKNOWN_STORE: + return new UnknownStoreException(message, cause); + case SERIALIZATION: + return new SerializationException(message, cause); + case PERSIST: + return new PersistException(message, cause); + case HANDSHAKE_TIMEOUT: + return new HandshakeTimeoutException(); + case REMOTE_STORE: + return new RemoteStoreException(message, cause); + case GENERIC: + default: + return new SyncException(message, cause); + } + } +} diff --git a/src/main/java/org/sdnplatform/sync/error/SyncRuntimeException.java b/src/main/java/org/sdnplatform/sync/error/SyncRuntimeException.java new file mode 100644 index 0000000000000000000000000000000000000000..54f762b9c6de156ed8c06720a3c0c9318a82902b --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/error/SyncRuntimeException.java @@ -0,0 +1,19 @@ +package org.sdnplatform.sync.error; + +/** + * A runtime exception that wraps a SyncException. This is thrown from + * standard interfaces that don't support an appropriate exceptional type. 
+ * @author readams + */ +public class SyncRuntimeException extends RuntimeException { + + private static final long serialVersionUID = -5357245946596447913L; + + public SyncRuntimeException(String message, SyncException cause) { + super(message, cause); + } + + public SyncRuntimeException(SyncException cause) { + super(cause); + } +} diff --git a/src/main/java/org/sdnplatform/sync/error/UnknownStoreException.java b/src/main/java/org/sdnplatform/sync/error/UnknownStoreException.java new file mode 100644 index 0000000000000000000000000000000000000000..055715e798e47fd2b5db59efc35eb0e959d16517 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/error/UnknownStoreException.java @@ -0,0 +1,31 @@ +package org.sdnplatform.sync.error; + +/** + * Thrown when attempting to perform an operation on an unknown store + * @author readams + */ +public class UnknownStoreException extends SyncException { + + private static final long serialVersionUID = 6633759330354187L; + + public UnknownStoreException() { + super(); + } + + public UnknownStoreException(String message, Throwable cause) { + super(message, cause); + } + + public UnknownStoreException(String message) { + super(message); + } + + public UnknownStoreException(Throwable cause) { + super(cause); + } + + @Override + public ErrorType getErrorCode() { + return ErrorType.UNKNOWN_STORE; + } +} diff --git a/src/main/java/org/sdnplatform/sync/internal/AbstractStoreClient.java b/src/main/java/org/sdnplatform/sync/internal/AbstractStoreClient.java new file mode 100644 index 0000000000000000000000000000000000000000..3d767d66836b45d451acd0cef08863a69a989459 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/AbstractStoreClient.java @@ -0,0 +1,79 @@ +package org.sdnplatform.sync.internal; + +import java.util.List; + +import org.sdnplatform.sync.IStoreClient; +import org.sdnplatform.sync.IVersion; +import org.sdnplatform.sync.Versioned; +import org.sdnplatform.sync.error.ObsoleteVersionException; +import 
org.sdnplatform.sync.error.SyncException; +import org.sdnplatform.sync.internal.version.VectorClock; + + +public abstract class AbstractStoreClient<K,V> implements IStoreClient<K, V> { + + @Override + public V getValue(K key) throws SyncException { + return getValue(key, null); + } + + @Override + public V getValue(K key, V defaultValue) throws SyncException { + Versioned<V> val = get(key); + if (val == null || val.getValue() == null) return defaultValue; + return val.getValue(); + } + + /** + * Get the versions for a key + * @param key the key + * @return the versions + * @throws SyncException + */ + protected abstract List<IVersion> getVersions(K key) throws SyncException; + + @Override + public Versioned<V> get(K key) throws SyncException { + return get(key, null); + } + + @Override + public IVersion put(K key, V value) throws SyncException { + List<IVersion> versions = getVersions(key); + Versioned<V> versioned; + if(versions.isEmpty()) + versioned = Versioned.value(value, new VectorClock()); + else if(versions.size() == 1) + versioned = Versioned.value(value, versions.get(0)); + else { + versioned = get(key, null); + if(versioned == null) + versioned = Versioned.value(value, new VectorClock()); + else + versioned.setValue(value); + } + return put(key, versioned); + } + + @Override + public boolean putIfNotObsolete(K key, Versioned<V> versioned) + throws SyncException { + try { + put(key, versioned); + return true; + } catch (ObsoleteVersionException e) { + return false; + } + } + + @Override + public void delete(K key) throws SyncException { + put(key, (V)null); + } + + @Override + public void delete(K key, IVersion version) throws SyncException { + put(key, new Versioned<V>((V)null, version)); + } + +} diff --git a/src/main/java/org/sdnplatform/sync/internal/AbstractSyncManager.java b/src/main/java/org/sdnplatform/sync/internal/AbstractSyncManager.java new file mode 100644 index 
0000000000000000000000000000000000000000..5b20aeb1cfa7123d81ecec4a39da95d11e6ab17c --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/AbstractSyncManager.java @@ -0,0 +1,176 @@ +package org.sdnplatform.sync.internal; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; + +import org.sdnplatform.sync.IInconsistencyResolver; +import org.sdnplatform.sync.IStoreClient; +import org.sdnplatform.sync.ISyncService; +import org.sdnplatform.sync.Versioned; +import org.sdnplatform.sync.error.UnknownStoreException; +import org.sdnplatform.sync.internal.store.IStore; +import org.sdnplatform.sync.internal.store.JacksonStore; +import org.sdnplatform.sync.internal.store.MappingStoreListener; +import org.sdnplatform.sync.internal.util.ByteArray; + +import net.floodlightcontroller.core.module.IFloodlightModule; +import net.floodlightcontroller.core.module.IFloodlightService; +import com.fasterxml.jackson.core.type.TypeReference; + + +/** + * An abstract base class for modules providing {@link ISyncService} + * @author readams + */ +public abstract class AbstractSyncManager + implements ISyncService, IFloodlightModule { + + // ************ + // ISyncService + // ************ + + @Override + public <K, V> IStoreClient<K, V> + getStoreClient(String storeName, + Class<K> keyClass, + Class<V> valueClass) + throws UnknownStoreException { + return getStoreClient(storeName, keyClass, null, + valueClass, null, null); + } + + @Override + public <K, V>IStoreClient<K, V> + getStoreClient(String storeName, + TypeReference<K> keyType, + TypeReference<V> valueType) + throws UnknownStoreException { + return getStoreClient(storeName, null, keyType, + null, valueType, null); + } + + @Override + public <K, V> IStoreClient<K, V> + getStoreClient(String storeName, + TypeReference<K> keyType, + TypeReference<V> valueType, + IInconsistencyResolver<Versioned<V>> resolver) + throws UnknownStoreException { + return 
getStoreClient(storeName, null, keyType, + null, valueType, resolver); + } + + @Override + public <K, V> IStoreClient<K, V> + getStoreClient(String storeName, + Class<K> keyClass, + Class<V> valueClass, + IInconsistencyResolver<Versioned<V>> resolver) + throws UnknownStoreException { + return getStoreClient(storeName, keyClass, null, + valueClass, null, resolver); + } + + // ***************** + // IFloodlightModule + // ***************** + + @Override + public Collection<Class<? extends IFloodlightService>> + getModuleServices() { + Collection<Class<? extends IFloodlightService>> l = + new ArrayList<Class<? extends IFloodlightService>>(); + l.add(ISyncService.class); + return l; + } + + @Override + public Map<Class<? extends IFloodlightService>, IFloodlightService> + getServiceImpls() { + Map<Class<? extends IFloodlightService>, + IFloodlightService> m = + new HashMap<Class<? extends IFloodlightService>, + IFloodlightService>(); + // We are the class that implements the service + m.put(ISyncService.class, this); + return m; + } + + // ******************* + // AbstractSyncManager + // ******************* + + /** + * The "real" version of getStoreClient that will be called by all + * the others + * @param storeName the store name + * @param keyClass the key class + * @param keyType the key type + * @param valueClass the value class + * @param valueType the value type + * @param resolver the inconsistency resolver + * @return a {@link DefaultStoreClient} using the given parameters. 
+ * @throws UnknownStoreException + */ + public <K, V> IStoreClient<K, V> + getStoreClient(String storeName, + Class<K> keyClass, + TypeReference<K> keyType, + Class<V> valueClass, + TypeReference<V> valueType, + IInconsistencyResolver<Versioned<V>> resolver) + throws UnknownStoreException { + IStore<ByteArray,byte[]> store = getStore(storeName); + IStore<K, V> serializingStore; + if (valueType != null && keyType != null) { + serializingStore = + new JacksonStore<K, V>(store, keyType, valueType); + } else if (valueClass != null && keyClass != null) { + serializingStore = + new JacksonStore<K, V>(store, keyClass, valueClass); + } else { + throw new IllegalArgumentException("Must include type reference" + + " or value class"); + } + + DefaultStoreClient<K, V> storeClient = + new DefaultStoreClient<K, V>(serializingStore, + resolver, + this, + keyClass, + keyType); + return storeClient; + } + + /** + * Get a store object corresponding to the given store name + * @param storeName the store name + * @return the {@link IStore} + * @throws UnknownStoreException + */ + public abstract IStore<ByteArray,byte[]> getStore(String storeName) + throws UnknownStoreException; + + /** + * Get the local ID of the local node + * @return the node ID + */ + public abstract short getLocalNodeId(); + + /** + * Add a listener to the specified store + * @param storeName the name of the store + * @param listener the listener to add + * @throws UnknownStoreException + */ + public abstract void addListener(String storeName, + MappingStoreListener listener) + throws UnknownStoreException; + + /** + * Shut down the sync manager. 
Tear down any communicating threads + */ + public abstract void shutdown(); +} diff --git a/src/main/java/org/sdnplatform/sync/internal/Cursor.java b/src/main/java/org/sdnplatform/sync/internal/Cursor.java new file mode 100644 index 0000000000000000000000000000000000000000..a9fbc224bd75791a82dfb02ff787bdae29a4f1bb --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/Cursor.java @@ -0,0 +1,49 @@ +package org.sdnplatform.sync.internal; + +import java.util.List; +import java.util.Map.Entry; + +import org.sdnplatform.sync.IClosableIterator; +import org.sdnplatform.sync.Versioned; +import org.sdnplatform.sync.internal.util.ByteArray; + + +public class Cursor implements + IClosableIterator<Entry<ByteArray, List<Versioned<byte[]>>>> { + private final int cursorId; + private final + IClosableIterator<Entry<ByteArray, + List<Versioned<byte[]>>>> delegate; + + public Cursor(int cursorId, + IClosableIterator<Entry<ByteArray, + List<Versioned<byte[]>>>> delegate) { + super(); + this.cursorId = cursorId; + this.delegate = delegate; + } + + @Override + public boolean hasNext() { + return delegate.hasNext(); + } + + @Override + public Entry<ByteArray, List<Versioned<byte[]>>> next() { + return delegate.next(); + } + + @Override + public void remove() { + delegate.remove(); + } + + @Override + public void close() { + delegate.close(); + } + + public int getCursorId() { + return this.cursorId; + } +} \ No newline at end of file diff --git a/src/main/java/org/sdnplatform/sync/internal/DefaultStoreClient.java b/src/main/java/org/sdnplatform/sync/internal/DefaultStoreClient.java new file mode 100644 index 0000000000000000000000000000000000000000..56d000d4f74a6000de763ea3f8efa9a4e2f732b3 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/DefaultStoreClient.java @@ -0,0 +1,191 @@ +package org.sdnplatform.sync.internal; + +import java.util.List; +import java.util.Map.Entry; + +import com.fasterxml.jackson.core.type.TypeReference; + +import 
org.sdnplatform.sync.IClosableIterator; +import org.sdnplatform.sync.IInconsistencyResolver; +import org.sdnplatform.sync.IStoreListener; +import org.sdnplatform.sync.IVersion; +import org.sdnplatform.sync.Versioned; +import org.sdnplatform.sync.error.InconsistentDataException; +import org.sdnplatform.sync.error.SyncException; +import org.sdnplatform.sync.error.UnknownStoreException; +import org.sdnplatform.sync.internal.store.IStore; +import org.sdnplatform.sync.internal.store.MappingStoreListener; +import org.sdnplatform.sync.internal.util.Pair; +import org.sdnplatform.sync.internal.version.ChainedResolver; +import org.sdnplatform.sync.internal.version.TimeBasedInconsistencyResolver; +import org.sdnplatform.sync.internal.version.VectorClock; +import org.sdnplatform.sync.internal.version.VectorClockInconsistencyResolver; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +/** + * Default implementation of a store client used for accessing a store + * locally in process. + * @author readams + * + * @param <K> the key type + * @param <V> the value type + */ +public class DefaultStoreClient<K, V> extends AbstractStoreClient<K, V> { + protected static final Logger logger = + LoggerFactory.getLogger(DefaultStoreClient.class.getName()); + + private IStore<K, V> delegate; + private IInconsistencyResolver<Versioned<V>> resolver; + private AbstractSyncManager syncManager; + private Class<K> keyClass; + private TypeReference<K> keyType; + + @SuppressWarnings("unchecked") + public DefaultStoreClient(IStore<K, V> delegate, + IInconsistencyResolver<Versioned<V>> resolver, + AbstractSyncManager syncManager, + Class<K> keyClass, + TypeReference<K> keyType) { + super(); + this.delegate = delegate; + this.syncManager = syncManager; + this.keyClass = keyClass; + this.keyType = keyType; + + IInconsistencyResolver<Versioned<V>> vcir = + new VectorClockInconsistencyResolver<V>(); + IInconsistencyResolver<Versioned<V>> secondary = resolver; + if (secondary == null) + 
secondary = new TimeBasedInconsistencyResolver<V>(); + this.resolver = new ChainedResolver<Versioned<V>>(vcir, secondary); + } + + // ****************** + // IStoreClient<K,V> + // ****************** + + @Override + public Versioned<V> get(K key, Versioned<V> defaultValue) + throws SyncException { + List<Versioned<V>> raw = delegate.get(key); + return handleGet(key, defaultValue, raw); + } + + @Override + public IClosableIterator<Entry<K, Versioned<V>>> entries() throws SyncException { + return new StoreClientIterator(delegate.entries()); + } + + @Override + public IVersion put(K key, Versioned<V> versioned) + throws SyncException { + VectorClock vc = (VectorClock)versioned.getVersion(); + + vc = vc.incremented(syncManager.getLocalNodeId(), + System.currentTimeMillis()); + versioned = Versioned.value(versioned.getValue(), vc); + + delegate.put(key, versioned); + return versioned.getVersion(); + } + + @Override + public void addStoreListener(IStoreListener<K> listener) { + if (listener == null) + throw new IllegalArgumentException("Must include listener"); + MappingStoreListener msl = + new MappingStoreListener(keyType, keyClass, listener); + try { + syncManager.addListener(delegate.getName(), msl); + } catch (UnknownStoreException e) { + // this shouldn't happen since we already have a store client, + // unless the store has been deleted somehow + logger.error("Unexpected internal state: unknown store " + + "from store client. 
Could not register listener", e); + } + } + + // ************************ + // AbstractStoreClient<K,V> + // ************************ + + @Override + protected List<IVersion> getVersions(K key) throws SyncException { + return delegate.getVersions(key); + } + + // ********************* + // Private local methods + // ********************* + + protected Versioned<V> handleGet(K key, + Versioned<V> defaultValue, + List<Versioned<V>> raw) + throws InconsistentDataException { + if (raw == null) return defaultValue(defaultValue); + List<Versioned<V>> vs = resolver.resolveConflicts(raw); + return getItemOrThrow(key, defaultValue, vs); + } + + protected Versioned<V> defaultValue(Versioned<V> defaultValue) { + if (defaultValue == null) + return Versioned.emptyVersioned(); + return defaultValue; + } + + protected Versioned<V> getItemOrThrow(K key, + Versioned<V> defaultValue, + List<Versioned<V>> items) + throws InconsistentDataException { + if(items.size() == 0) + return defaultValue(defaultValue); + else if(items.size() == 1) + return items.get(0); + else + throw new InconsistentDataException("Resolver failed to resolve" + + " conflict: " + items.size() + " unresolved items", items); + } + + + protected class StoreClientIterator implements + IClosableIterator<Entry<K, Versioned<V>>> { + + IClosableIterator<Entry<K, List<Versioned<V>>>> delegate; + + public StoreClientIterator(IClosableIterator<Entry<K, + List<Versioned<V>>>> delegate) { + super(); + this.delegate = delegate; + } + + @Override + public boolean hasNext() { + return delegate.hasNext(); + } + + @Override + public Entry<K, Versioned<V>> next() { + Entry<K, List<Versioned<V>>> n = delegate.next(); + try { + return new Pair<K, Versioned<V>>(n.getKey(), + handleGet(n.getKey(), null, n.getValue())); + } catch (SyncException e) { + logger.error("Failed to construct next value", e); + return null; + } + } + + @Override + public void remove() { + delegate.remove(); + } + + @Override + public void close() { + 
delegate.close(); + } + + } +} diff --git a/src/main/java/org/sdnplatform/sync/internal/StoreRegistry.java b/src/main/java/org/sdnplatform/sync/internal/StoreRegistry.java new file mode 100644 index 0000000000000000000000000000000000000000..9c0dbdff0b1b2fbbcd10eb13f6f8367ee81eed9f --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/StoreRegistry.java @@ -0,0 +1,279 @@ +package org.sdnplatform.sync.internal; + +import java.util.ArrayDeque; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; + +import javax.sql.ConnectionPoolDataSource; + +import net.floodlightcontroller.core.annotations.LogMessageDoc; + +import org.sdnplatform.sync.Versioned; +import org.sdnplatform.sync.ISyncService.Scope; +import org.sdnplatform.sync.error.PersistException; +import org.sdnplatform.sync.error.SyncException; +import org.sdnplatform.sync.internal.store.IStorageEngine; +import org.sdnplatform.sync.internal.store.InMemoryStorageEngine; +import org.sdnplatform.sync.internal.store.JavaDBStorageEngine; +import org.sdnplatform.sync.internal.store.SynchronizingStorageEngine; +import org.sdnplatform.sync.internal.util.ByteArray; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +/** + * Manage registered stores and associated metadata + * @author readams + */ +public class StoreRegistry { + protected static final Logger logger = + LoggerFactory.getLogger(StoreRegistry.class); + + /** + * The associated syncManager + */ + private final SyncManager syncManager; + + /** + * A data source suitable for use in persistent stores + */ + private ConnectionPoolDataSource persistentDataSource = + JavaDBStorageEngine.getDataSource(false); + + /** + * The storage engines that contain the locally-stored data + */ + private HashMap<String,SynchronizingStorageEngine> localStores = + new HashMap<String, 
SynchronizingStorageEngine>(); + + /** + * Undelivered hints associated with the stores + */ + private InMemoryStorageEngine<HintKey,byte[]> hints; + + /** + * A queue containing pending hints. + */ + private ArrayDeque<HintKey> hintQueue = new ArrayDeque<HintKey>(); + private Lock hintLock = new ReentrantLock(); + private Condition hintsAvailable = hintLock.newCondition(); + + /** + * Construct a new {@link StoreRegistry} + * @param syncManager The associated syncManager + */ + public StoreRegistry(SyncManager syncManager) { + super(); + this.syncManager = syncManager; + hints = new InMemoryStorageEngine<HintKey, byte[]>("system-hints"); + } + + // ************** + // public methods + // ************** + + /** + * Get the store associated with the given name, or null if there is no + * such store + * @param storeName + * @return a {@link SynchronizingStorageEngine} + */ + public SynchronizingStorageEngine get(String storeName) { + return localStores.get(storeName); + } + + /** + * Register a new store with the given name, scope and persistence + * @param storeName the name of the store + * @param scope the scope for the store + * @param persistent whether the store should be persistent + * @return the newly-allocated store + * @throws PersistException + */ + public synchronized SynchronizingStorageEngine register(String storeName, + Scope scope, + boolean persistent) + throws PersistException { + SynchronizingStorageEngine store = + localStores.get(storeName); + if (store != null) { + return store; + } + + IStorageEngine<ByteArray, byte[]> dstore; + if (persistent) { + dstore = new JavaDBStorageEngine(storeName, persistentDataSource); + } else { + dstore = new InMemoryStorageEngine<ByteArray, byte[]>(storeName); + } + store = new SynchronizingStorageEngine(dstore, syncManager, + scope); + localStores.put(storeName, store); + return store; + } + + /** + * Get a collection containing all the currently-registered stores + * @return the {@link 
Collection<SynchronizingStorageEngine>} + */ + public Collection<SynchronizingStorageEngine> values() { + return localStores.values(); + } + + /** + * Add a key/value to the hint store for the given store + * @param storeName the name of the store for the keyed value + * @param key the key + * @param value the value + */ + @LogMessageDoc(level="ERROR", + message="Failed to queue hint for store {storeName}", + explanation="There was an error synchronizing data to " + + "remote nodes", + recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG) + public void queueHint(String storeName, + ByteArray key, Versioned<byte[]> value) { + try { + HintKey hk = new HintKey(storeName,key); + hintLock.lock(); + try { + boolean needed = !hints.containsKey(hk); + needed &= hints.doput(hk, value); + if (needed) { + hintQueue.add(hk); + hintsAvailable.signal(); + } + } finally { + hintLock.unlock(); + } + } catch (SyncException e) { + logger.error("Failed to queue hint for store " + storeName, e); + } + } + + /** + * Drain up to the given number of hints to the provided collection. 
+ * This method will block until at least one hint is available + * @param c the collection to which the hints should be copied + * @param maxElements the maximum number of hints to drain + * @throws InterruptedException + */ + public void takeHints(Collection<Hint> c, int maxElements) + throws InterruptedException { + int count = 0; + try { + while (count == 0) { + hintLock.lock(); + while (hintQueue.isEmpty()) { + hintsAvailable.await(); + } + while (count < maxElements && !hintQueue.isEmpty()) { + HintKey hintKey = hintQueue.pollFirst(); + if (hintKey != null) { + List<Versioned<byte[]>> values = hints.remove(hintKey); + if (values == null) { + continue; + } + c.add(new Hint(hintKey, values)); + count += 1; + } + } + } + } finally { + hintLock.unlock(); + } + } + + public void shutdown() { + hintQueue.clear(); + hints.close(); + } + + /** + * A key in the hint store + * @author readams + */ + public static class HintKey { + private final String storeName; + private final ByteArray key; + private final short nodeId; + + public HintKey(String storeName, + ByteArray key, + short nodeId) { + super(); + this.storeName = storeName; + this.key = key; + this.nodeId = nodeId; + } + + public HintKey(String storeName, + ByteArray key) { + super(); + this.storeName = storeName; + this.key = key; + this.nodeId = -1; + } + + public String getStoreName() { + return storeName; + } + public ByteArray getKey() { + return key; + } + public short getNodeId() { + return nodeId; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((key == null) ? 0 : key.hashCode()); + result = prime * result + nodeId; + result = prime * result + + ((storeName == null) ? 
0 : storeName.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + HintKey other = (HintKey) obj; + if (key == null) { + if (other.key != null) return false; + } else if (!key.equals(other.key)) return false; + if (nodeId != other.nodeId) return false; + if (storeName == null) { + if (other.storeName != null) return false; + } else if (!storeName.equals(other.storeName)) return false; + return true; + } + } + + /** + * A hint representing a hint key and a value + * @author readams + */ + public static class Hint { + private HintKey hintKey; + private List<Versioned<byte[]>> values; + public Hint(HintKey hintKey, List<Versioned<byte[]>> values) { + super(); + this.hintKey = hintKey; + this.values = values; + } + public HintKey getHintKey() { + return hintKey; + } + public List<Versioned<byte[]>> getValues() { + return values; + } + } +} diff --git a/src/main/java/org/sdnplatform/sync/internal/SyncManager.java b/src/main/java/org/sdnplatform/sync/internal/SyncManager.java new file mode 100644 index 0000000000000000000000000000000000000000..3d3bae6c7baf7855981ea22e508de61deba8d1a8 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/SyncManager.java @@ -0,0 +1,765 @@ +package org.sdnplatform.sync.internal; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Random; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.core.type.TypeReference; + +import 
org.sdnplatform.sync.IClosableIterator; +import org.sdnplatform.sync.ISyncService; +import org.sdnplatform.sync.Versioned; +import org.sdnplatform.sync.IVersion.Occurred; +import org.sdnplatform.sync.error.PersistException; +import org.sdnplatform.sync.error.SyncException; +import org.sdnplatform.sync.error.SyncRuntimeException; +import org.sdnplatform.sync.error.UnknownStoreException; +import org.sdnplatform.sync.internal.StoreRegistry.Hint; +import org.sdnplatform.sync.internal.config.ClusterConfig; +import org.sdnplatform.sync.internal.config.DelegatingCCProvider; +import org.sdnplatform.sync.internal.config.FallbackCCProvider; +import org.sdnplatform.sync.internal.config.IClusterConfigProvider; +import org.sdnplatform.sync.internal.config.Node; +import org.sdnplatform.sync.internal.config.PropertyCCProvider; +import org.sdnplatform.sync.internal.config.StorageCCProvider; +import org.sdnplatform.sync.internal.rpc.RPCService; +import org.sdnplatform.sync.internal.rpc.TProtocolUtil; +import org.sdnplatform.sync.internal.store.IStorageEngine; +import org.sdnplatform.sync.internal.store.IStore; +import org.sdnplatform.sync.internal.store.MappingStoreListener; +import org.sdnplatform.sync.internal.store.SynchronizingStorageEngine; +import org.sdnplatform.sync.internal.util.ByteArray; +import org.sdnplatform.sync.internal.version.VectorClock; +import org.sdnplatform.sync.thrift.SyncMessage; +import org.sdnplatform.sync.thrift.KeyedValues; +import org.sdnplatform.sync.thrift.KeyedVersions; +import org.sdnplatform.sync.thrift.SyncOfferMessage; +import org.sdnplatform.sync.thrift.SyncValueMessage; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import net.floodlightcontroller.core.annotations.LogMessageCategory; +import net.floodlightcontroller.core.annotations.LogMessageDoc; +import net.floodlightcontroller.core.annotations.LogMessageDocs; +import net.floodlightcontroller.core.module.FloodlightModuleContext; +import 
net.floodlightcontroller.core.module.FloodlightModuleException; +import net.floodlightcontroller.core.module.IFloodlightService; +import net.floodlightcontroller.core.util.SingletonTask; +import net.floodlightcontroller.storage.IStorageSourceService; +import net.floodlightcontroller.threadpool.IThreadPoolService; + + +/** + * Implementation for {@link ISyncService} that keeps local copies of the data + * and will synchronize it to other nodes in the cluster + * @author readams + * @see ISyncService + */ +@LogMessageCategory("State Synchronization") +public class SyncManager extends AbstractSyncManager { + protected static final Logger logger = + LoggerFactory.getLogger(SyncManager.class.getName()); + + private IThreadPoolService threadPool; + + /** + * The store registry holds the storage engines that provide + * access to the data + */ + private StoreRegistry storeRegistry = new StoreRegistry(this); + + private IClusterConfigProvider clusterConfigProvider; + private ClusterConfig clusterConfig = new ClusterConfig(); + + protected RPCService rpcService = null; + + /** + * Interval between cleanup tasks in seconds + */ + private static final int CLEANUP_INTERVAL = 60 * 60; + + /** + * Interval between antientropy tasks in seconds + */ + private static final int ANTIENTROPY_INTERVAL = 5 * 60; + + /** + * Interval between configuration rescans + */ + private static final int CONFIG_RESCAN_INTERVAL = 10; + + /** + * Task for performing periodic maintenance/cleanup on local stores + */ + private SingletonTask cleanupTask; + + /** + * Task for periodic antientropy between nodes + */ + private SingletonTask antientropyTask; + + /** + * Task to periodically rescan configuration + */ + private SingletonTask updateConfigTask; + + /** + * Number of {@link HintWorker} workers used to drain the queue of writes + * that need to be sent to the connected nodes + */ + private static final int SYNC_WORKER_POOL = 2; + + /** + * A thread pool for the {@link HintWorker} threads. 
+ */ + private ExecutorService hintThreadPool; + + /** + * Random number generator + */ + private Random random = new Random(); + + /** + * A map of the currently-allocated cursors + */ + private Map<Integer, Cursor> cursorMap = + new ConcurrentHashMap<Integer, Cursor>(); + + // ************ + // ISyncService + // ************ + + @Override + public void registerStore(String storeName, Scope scope) { + try { + storeRegistry.register(storeName, scope, false); + } catch (PersistException e) { + // not possible + throw new SyncRuntimeException(e); + } + } + + @Override + public void registerPersistentStore(String storeName, Scope scope) + throws PersistException { + storeRegistry.register(storeName, scope, true); + } + + // ************************** + // SyncManager public methods + // ************************** + + /** + * Get the cluster configuration object + * @return the {@link ClusterConfig} object + * @see ClusterConfig + */ + public ClusterConfig getClusterConfig() { + return clusterConfig; + } + + /** + * Perform periodic scheduled cleanup. 
Note that this will be called + * automatically and you shouldn't generally call it directly except for + * testing + * @throws SyncException + */ + public void cleanup() throws SyncException { + for (SynchronizingStorageEngine store : storeRegistry.values()) { + store.cleanupTask(); + } + } + + /** + * Perform a synchronization with the node specified + */ + @LogMessageDoc(level="INFO", + message="[{id}->{id}] Synchronizing local state to remote node", + explanation="Normal state resynchronization is occurring") + public void antientropy(Node node) { + if (!rpcService.isConnected(node.getNodeId())) return; + + logger.info("[{}->{}] Synchronizing local state to remote node", + getLocalNodeId(), node.getNodeId()); + + for (SynchronizingStorageEngine store : storeRegistry.values()) { + if (Scope.LOCAL.equals(store.getScope())) { + if (node.getDomainId() != + getClusterConfig().getNode().getDomainId()) + continue; + } + + IClosableIterator<Entry<ByteArray, + List<Versioned<byte[]>>>> entries = + store.entries(); + try { + SyncMessage bsm = + TProtocolUtil.getTSyncOfferMessage(store.getName(), + store.getScope(), + store.isPersistent()); + int count = 0; + while (entries.hasNext()) { + if (!rpcService.isConnected(node.getNodeId())) return; + + Entry<ByteArray, List<Versioned<byte[]>>> pair = + entries.next(); + KeyedVersions kv = + TProtocolUtil.getTKeyedVersions(pair.getKey(), + pair.getValue()); + bsm.getSyncOffer().addToVersions(kv); + count += 1; + if (count >= 50) { + sendSyncOffer(node.getNodeId(), bsm); + bsm.getSyncOffer().unsetVersions(); + count = 0; + } + } + sendSyncOffer(node.getNodeId(), bsm); + } catch (InterruptedException e) { + // This can't really happen + throw new RuntimeException(e); + } finally { + entries.close(); + } + } + } + + /** + * Communicate with a random node and do a full synchronization of the + * all the stores on each node that have the appropriate scope. 
+ */ + public void antientropy() { + ArrayList<Node> candidates = new ArrayList<Node>(); + for (Node n : clusterConfig.getNodes()) + if (rpcService.isConnected(n.getNodeId())) + candidates.add(n); + + int numNodes = candidates.size(); + if (numNodes == 0) return; + Node[] nodes = candidates.toArray(new Node[numNodes]); + int rn = random.nextInt(numNodes); + antientropy(nodes[rn]); + } + + /** + * Write a value synchronized from another node, bypassing some of the + * usual logic when a client writes data. If the store is not known, + * this will automatically register it + * @param storeName the store name + * @param scope the scope for the store + * @param persist TODO + * @param key the key to write + * @param values a list of versions for the key to write + * @throws PersistException + */ + public void writeSyncValue(String storeName, Scope scope, + boolean persist, + byte[] key, Iterable<Versioned<byte[]>> values) + throws PersistException { + SynchronizingStorageEngine store = storeRegistry.get(storeName); + if (store == null) { + store = storeRegistry.register(storeName, scope, persist); + } + store.writeSyncValue(new ByteArray(key), values); + } + + /** + * Check whether any of the specified versions for the key are not older + * than the versions we already have + * @param storeName the store to check + * @param key the key to check + * @param versions an iterable over the versions + * @return true if we'd like a copy of the data indicated + * @throws SyncException + */ + public boolean handleSyncOffer(String storeName, + byte[] key, + Iterable<VectorClock> versions) + throws SyncException { + SynchronizingStorageEngine store = storeRegistry.get(storeName); + if (store == null) return true; + + List<Versioned<byte[]>> values = store.get(new ByteArray(key)); + if (values == null || values.size() == 0) return true; + + // check whether any of the versions are not older than what we have + for (VectorClock vc : versions) { + for (Versioned<byte[]> value : 
values) { + VectorClock existingVc = (VectorClock)value.getVersion(); + if (!vc.compare(existingVc).equals(Occurred.BEFORE)) + return true; + } + } + + return false; + } + + /** + * Get access to the raw storage engine. This is useful for some + * on-the-wire communication + * @param storeName the store name to get + * @return the {@link IStorageEngine} + * @throws UnknownStoreException + */ + public IStorageEngine<ByteArray, byte[]> getRawStore(String storeName) + throws UnknownStoreException { + return getStoreInternal(storeName); + } + + /** + * Return the threadpool + * @return the {@link IThreadPoolService} + */ + public IThreadPoolService getThreadPool() { + return threadPool; + } + + /** + * Queue a synchronization of the specified {@link KeyedValues} to all nodes + * assocatiated with the storage engine specified + * @param e the storage engine for the values + * @param kv the values to synchronize + */ + @LogMessageDoc(level="WARN", + message="Sync task queue full and not emptying", + explanation="The synchronization service is overloaded", + recommendation=LogMessageDoc.CHECK_CONTROLLER) + public void queueSyncTask(SynchronizingStorageEngine e, + ByteArray key, Versioned<byte[]> value) { + storeRegistry.queueHint(e.getName(), key, value); + } + + @Override + public void addListener(String storeName, MappingStoreListener listener) + throws UnknownStoreException { + SynchronizingStorageEngine store = getStoreInternal(storeName); + store.addListener(listener); + } + + /** + * Update the node configuration to add or remove nodes + * @throws FloodlightModuleException + */ + @LogMessageDocs({ + @LogMessageDoc(level="INFO", + message="Updating sync configuration {config}", + explanation="The sync service cluster configuration has been updated"), + @LogMessageDoc(level="INFO", + message="Local node configuration changed; restarting sync" + + "service", + explanation="The sync service must be restarted to update its configuration") + }) + public void 
updateConfiguration() + throws FloodlightModuleException { + + try { + ClusterConfig oldConfig = clusterConfig; + clusterConfig = clusterConfigProvider.getConfig(); + if (clusterConfig.equals(oldConfig)) return; + + logger.info("Updating sync configuration {}", clusterConfig); + if (oldConfig.getNode() != null && + !clusterConfig.getNode().equals(oldConfig.getNode())) { + logger.info("Local node configuration changed; restarting sync" + + "service"); + shutdown(); + startUp(null); + } + + for (Node n : clusterConfig.getNodes()) { + Node existing = oldConfig.getNode(n.getNodeId()); + if (existing != null && !n.equals(existing)) { + // we already had this node's configuration, but it's + // changed. Disconnect from the node and let it + // reinitialize + logger.debug("[{}->{}] Configuration for node has changed", + getLocalNodeId(), n.getNodeId()); + rpcService.disconnectNode(n.getNodeId()); + } + } + for (Node n : oldConfig.getNodes()) { + Node nn = clusterConfig.getNode(n.getNodeId()); + if (nn == null) { + // n is a node that doesn't appear in the new config + logger.debug("[{}->{}] Disconnecting deconfigured node", + getLocalNodeId(), n.getNodeId()); + rpcService.disconnectNode(n.getNodeId()); + } + } + } catch (Exception e) { + throw new FloodlightModuleException("Could not update " + + "configuration", e); + } + } + + /** + * Retrieve the cursor, if any, for the given cursor ID + * @param cursorId the cursor ID + * @return the {@link Cursor} + */ + public Cursor getCursor(int cursorId) { + return cursorMap.get(Integer.valueOf(cursorId)); + } + + /** + * Allocate a new cursor for the given store name + * @param storeName the store name + * @return the {@link Cursor} + * @throws SyncException + */ + public Cursor newCursor(String storeName) throws UnknownStoreException { + IStore<ByteArray, byte[]> store = getStore(storeName); + int cursorId = rpcService.getTransactionId(); + Cursor cursor = new Cursor(cursorId, store.entries()); + 
cursorMap.put(Integer.valueOf(cursorId), cursor); + return cursor; + } + + /** + * Close the given cursor and remove it from the map + * @param cursor the cursor to close + */ + public void closeCursor(Cursor cursor) { + cursor.close(); + cursorMap.remove(Integer.valueOf(cursor.getCursorId())); + } + + // ******************* + // AbstractSyncManager + // ******************* + + @Override + public IStore<ByteArray,byte[]> getStore(String storeName) + throws UnknownStoreException { + return getRawStore(storeName); + } + + @Override + public short getLocalNodeId() { + return clusterConfig.getNode().getNodeId(); + } + + @Override + public void shutdown() { + logger.debug("Shutting down Sync Manager: {} {}", + clusterConfig.getNode().getHostname(), + clusterConfig.getNode().getPort()); + + if (rpcService != null) { + rpcService.shutdown(); + } + if (hintThreadPool != null) { + hintThreadPool.shutdown(); + } + if (storeRegistry != null) { + storeRegistry.shutdown(); + } + hintThreadPool = null; + rpcService = null; + } + + // ***************** + // IFloodlightModule + // ***************** + + @Override + public void init(FloodlightModuleContext context) + throws FloodlightModuleException { + threadPool = context.getServiceImpl(IThreadPoolService.class); + Map<String, String> config = context.getConfigParams(this); + + String[] configProviders = + {StorageCCProvider.class.getName(), + PropertyCCProvider.class.getName(), + FallbackCCProvider.class.getName()}; + try { + + if (config.containsKey("configProviders")) { + configProviders = config.get("configProviders").split(","); + } + DelegatingCCProvider dprovider = new DelegatingCCProvider(); + for (String configProvider : configProviders) { + Class<?> cClass = Class.forName(configProvider); + IClusterConfigProvider provider = + (IClusterConfigProvider) cClass.newInstance(); + dprovider.addProvider(provider); + } + dprovider.init(this, context); + clusterConfigProvider = dprovider; + } catch (Exception e) { + throw new 
FloodlightModuleException("Could not instantiate config" + + "providers " + Arrays.toString(configProviders), e); + } + + String manualStoreString = config.get("manualStores"); + if (manualStoreString != null) { + List<String> manualStores = null; + try { + manualStores = + (new ObjectMapper()).readValue(manualStoreString, + new TypeReference<List<String>>() {}); + } catch (Exception e) { + throw new FloodlightModuleException("Failed to parse sync " + + "manager manual stores: " + manualStoreString, e); + } + for (String s : manualStores) { + registerStore(s, Scope.GLOBAL); + } + } + } + + @Override + public void startUp(FloodlightModuleContext context) + throws FloodlightModuleException { + updateConfiguration(); + rpcService = new RPCService(this); + rpcService.run(); + + cleanupTask = new SingletonTask(threadPool.getScheduledExecutor(), + new CleanupTask()); + cleanupTask.reschedule(CLEANUP_INTERVAL + + random.nextInt(30), TimeUnit.SECONDS); + + antientropyTask = new SingletonTask(threadPool.getScheduledExecutor(), + new AntientropyTask()); + antientropyTask.reschedule(ANTIENTROPY_INTERVAL + + random.nextInt(30), TimeUnit.SECONDS); + + updateConfigTask = + new SingletonTask(threadPool.getScheduledExecutor(), + new UpdateConfigTask()); + updateConfigTask.reschedule(CONFIG_RESCAN_INTERVAL, TimeUnit.SECONDS); + + final ThreadGroup tg = new ThreadGroup("Hint Workers"); + tg.setMaxPriority(Thread.NORM_PRIORITY - 2); + ThreadFactory f = new ThreadFactory() { + AtomicInteger id = new AtomicInteger(); + + @Override + public Thread newThread(Runnable runnable) { + return new Thread(tg, runnable, + "HintWorker-" + id.getAndIncrement()); + } + }; + hintThreadPool = Executors.newCachedThreadPool(f); + for (int i = 0; i < SYNC_WORKER_POOL; i++) { + hintThreadPool.execute(new HintWorker()); + } + } + + @Override + public Collection<Class<? extends IFloodlightService>> + getModuleDependencies() { + Collection<Class<? extends IFloodlightService>> l = + new ArrayList<Class<? 
extends IFloodlightService>>(); + l.add(IThreadPoolService.class); + l.add(IStorageSourceService.class); + return l; + } + + // *************** + // Local methods + // *************** + + protected SynchronizingStorageEngine getStoreInternal(String storeName) + throws UnknownStoreException { + SynchronizingStorageEngine store = storeRegistry.get(storeName); + if (store == null) { + throw new UnknownStoreException("Store " + storeName + + " has not been registered"); + } + return store; + } + + private void sendSyncOffer(short nodeId, SyncMessage bsm) + throws InterruptedException { + SyncOfferMessage som = bsm.getSyncOffer(); + if (!som.isSetVersions()) return; + if (logger.isTraceEnabled()) { + logger.trace("[{}->{}] Sending SyncOffer with {} elements", + new Object[]{getLocalNodeId(), nodeId, + som.getVersionsSize()}); + } + + som.getHeader().setTransactionId(rpcService.getTransactionId()); + rpcService.writeToNode(nodeId, bsm); + } + + /** + * Periodically perform cleanup + * @author readams + */ + @LogMessageDoc(level="ERROR", + message="Cleanup task failed", + explanation="Failed to clean up deleted data in the store", + recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG) + protected class CleanupTask implements Runnable { + @Override + public void run() { + try { + if (rpcService != null) + cleanup(); + } catch (Exception e) { + logger.error("Cleanup task failed", e); + } + + if (rpcService != null) { + cleanupTask.reschedule(CLEANUP_INTERVAL + + random.nextInt(30), TimeUnit.SECONDS); + } + } + } + + /** + * Periodically perform antientropy + * @author readams + */ + @LogMessageDoc(level="ERROR", + message="Antientropy task failed", + explanation="Failed to synchronize state between two nodes", + recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG) + protected class AntientropyTask implements Runnable { + @Override + public void run() { + try { + if (rpcService != null) + antientropy(); + } catch (Exception e) { + logger.error("Antientropy task failed", e); + 
} + + if (rpcService != null) { + antientropyTask.reschedule(ANTIENTROPY_INTERVAL + + random.nextInt(30), + TimeUnit.SECONDS); + } + } + } + + /** + * Worker task to periodically rescan the configuration + * @author readams + */ + @LogMessageDoc(level="ERROR", + message="Failed to update configuration", + explanation="An error occured while updating sync service configuration", + recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG) + protected class UpdateConfigTask implements Runnable { + @Override + public void run() { + try { + if (rpcService != null) + updateConfiguration(); + } catch (Exception e) { + logger.error("Failed to update configuration", e); + } + if (rpcService != null) { + updateConfigTask.reschedule(CONFIG_RESCAN_INTERVAL, + TimeUnit.SECONDS); + } + } + } + + /** + * Worker thread that will drain the sync item queue and write the + * appropriate messages to the node I/O channels + * @author readams + */ + @LogMessageDoc(level="ERROR", + message="Error occured in synchronization worker", + explanation="Failed to synchronize state to remote node", + recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG) + protected class HintWorker implements Runnable { + ArrayList<Hint> tasks = new ArrayList<Hint>(50); + protected HashMap<String, SyncMessage> messages = + new HashMap<String, SyncMessage>(); + + @Override + public void run() { + while (rpcService != null) { + try { + // Batch up sync tasks so we use fewer, larger messages + // XXX - todo - handle hints targeted to specific nodes + storeRegistry.takeHints(tasks, 50); + for (Hint task : tasks) { + SynchronizingStorageEngine store = + storeRegistry.get(task.getHintKey(). + getStoreName()); + SyncMessage bsm = getMessage(store); + KeyedValues kv = + TProtocolUtil. 
+ getTKeyedValues(task.getHintKey().getKey(), + task.getValues()); + bsm.getSyncValue().addToValues(kv); + } + + Iterable<Node> nodes = getClusterConfig().getNodes(); + short localDomainId = + getClusterConfig().getNode().getDomainId(); + for (Node n : nodes) { + for (SyncMessage bsm : messages.values()) { + SyncValueMessage svm = bsm.getSyncValue(); + if (svm.getStore().getScope(). + equals(org.sdnplatform.sync.thrift. + Scope.LOCAL) && + n.getDomainId() != localDomainId) { + // This message is only for local domain + continue; + } + + svm.getHeader(). + setTransactionId(rpcService. + getTransactionId()); + rpcService.writeToNode(n.getNodeId(), bsm); + } + } + + tasks.clear(); + clearMessages(); + + } catch (Exception e) { + logger.error("Error occured in synchronization worker", e); + } + } + } + + /** + * Clear the current list of pending messages + */ + private void clearMessages() { + for (SyncMessage bsm : messages.values()) { + bsm.getSyncValue().unsetValues(); + } + } + + /** + * Allocate a partially-initialized {@link SyncMessage} object for + * the given store + * @param store the store + * @return the {@link SyncMessage} object + */ + private SyncMessage getMessage(SynchronizingStorageEngine store) { + String storeName = store.getName(); + SyncMessage bsm = messages.get(storeName); + if (bsm == null) { + bsm = TProtocolUtil.getTSyncValueMessage(storeName, + store.getScope(), + store.isPersistent()); + messages.put(storeName, bsm); + } + return bsm; + } + } +} diff --git a/src/main/java/org/sdnplatform/sync/internal/SyncTorture.java b/src/main/java/org/sdnplatform/sync/internal/SyncTorture.java new file mode 100644 index 0000000000000000000000000000000000000000..6836b82d6249cf74eb2f9cb58e1952e37b3c9a69 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/SyncTorture.java @@ -0,0 +1,185 @@ +package org.sdnplatform.sync.internal; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; + +import 
org.sdnplatform.sync.IStoreClient; +import org.sdnplatform.sync.ISyncService; +import org.sdnplatform.sync.Versioned; +import org.sdnplatform.sync.ISyncService.Scope; +import org.sdnplatform.sync.error.SyncException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +import net.floodlightcontroller.core.module.FloodlightModuleContext; +import net.floodlightcontroller.core.module.FloodlightModuleException; +import net.floodlightcontroller.core.module.IFloodlightModule; +import net.floodlightcontroller.core.module.IFloodlightService; + +/** + * A floodlight module that will start up and start doing horrible, + * horrible things to the sync service. + * @author readams + */ +public class SyncTorture implements IFloodlightModule { + protected static final Logger logger = + LoggerFactory.getLogger(SyncTorture.class); + + ISyncService syncService; + int numWorkers = 2; + int keysPerWorker = 1024*1024; + int iterations = 0; + int delay = 0; + + @Override + public Collection<Class<? extends IFloodlightService>> + getModuleServices() { + return null; + } + + @Override + public Map<Class<? extends IFloodlightService>, IFloodlightService> + getServiceImpls() { + return null; + } + + @Override + public Collection<Class<? extends IFloodlightService>> + getModuleDependencies() { + Collection<Class<? extends IFloodlightService>> l = + new ArrayList<Class<? 
extends IFloodlightService>>(); + l.add(ISyncService.class); + return l; + } + + @Override + public void init(FloodlightModuleContext context) + throws FloodlightModuleException { + syncService = context.getServiceImpl(ISyncService.class); + try { + syncService.registerStore("torture", Scope.GLOBAL); + } catch (SyncException e) { + throw new FloodlightModuleException(e); + } + + Map<String,String> config = context.getConfigParams(this); + if (config.containsKey("numWorkers")) { + numWorkers = Integer.parseInt(config.get("numWorkers")); + } + if (config.containsKey("keysPerWorker")) { + keysPerWorker = Integer.parseInt(config.get("keysPerWorker")); + } + if (config.containsKey("iterations")) { + iterations = Integer.parseInt(config.get("iterations")); + } + if (config.containsKey("delay")) { + delay = Integer.parseInt(config.get("delay")); + } + } + + @Override + public void startUp(FloodlightModuleContext context) + throws FloodlightModuleException { + try { + final IStoreClient<String, TortureValue> storeClient = + syncService.getStoreClient("torture", + String.class, + TortureValue.class); + for (int i = 0; i < numWorkers; i++) { + Thread thread = new Thread(new TortureWorker(storeClient, i), + "Torture-" + i); + thread.setPriority(Thread.MIN_PRIORITY); + thread.start(); + } + } catch (Exception e) { + throw new FloodlightModuleException(e); + } + } + + protected static class TortureValue { + private String string; + private int integer; + private boolean bool; + + public TortureValue() { + super(); + } + + public TortureValue(String string, int integer, boolean bool) { + super(); + this.string = string; + this.integer = integer; + this.bool = bool; + } + + public String getString() { + return string; + } + public void setString(String string) { + this.string = string; + } + public int getInteger() { + return integer; + } + public void setInteger(int integer) { + this.integer = integer; + } + public boolean isBool() { + return bool; + } + public void 
setBool(boolean bool) { + this.bool = bool; + } + } + + protected class TortureWorker implements Runnable { + final IStoreClient<String, TortureValue> storeClient; + final int workerId; + final List<TortureValue> values; + + public TortureWorker(IStoreClient<String, TortureValue> storeClient, + int workerId) { + super(); + this.storeClient = storeClient; + this.workerId = workerId; + values = new ArrayList<TortureValue>(); + for (int i = 0; i < keysPerWorker; i++) { + values.add(new TortureValue(workerId+":"+i, 0, true)); + } + } + + @Override + public void run() { + if (delay > 0) { + try { + Thread.sleep(delay); + } catch (InterruptedException e) { } + } + int i = 0; + while (iterations == 0 || i++ < iterations) { + long start = System.currentTimeMillis(); + try { + for (TortureValue v : values) { + Versioned<TortureValue> vv = + storeClient.get(v.getString()); + v.setInteger(v.getInteger() + 1); + v.setBool(!v.isBool()); + vv.setValue(v); + storeClient.put(v.getString(), vv); + } + } catch (Exception e) { + logger.error("Error in worker: ", e); + } + long iterend = System.currentTimeMillis(); + logger.info("Completed iteration of {} values in {}ms" + + " ({}/s)", + new Object[]{values.size(), (iterend-start), + 1000*values.size()/(iterend-start)}); + } + + } + } +} diff --git a/src/main/java/org/sdnplatform/sync/internal/config/ClusterConfig.java b/src/main/java/org/sdnplatform/sync/internal/config/ClusterConfig.java new file mode 100644 index 0000000000000000000000000000000000000000..23b64f8428eb69e27518d42590d5a48f226101c7 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/config/ClusterConfig.java @@ -0,0 +1,174 @@ +package org.sdnplatform.sync.internal.config; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; + +import org.sdnplatform.sync.error.SyncException; + +import com.fasterxml.jackson.databind.ObjectMapper; +import 
com.fasterxml.jackson.core.type.TypeReference; + + +/** + * Represent the configuration of a cluster in the sync manager + * @author readams + */ +public class ClusterConfig { + private HashMap<Short, Node> allNodes = + new HashMap<Short, Node>(); + private HashMap<Short, List<Node>> localDomains = + new HashMap<Short, List<Node>>(); + private Node thisNode; + + public ClusterConfig() { + super(); + } + + /** + * Initialize a cluster config object using a JSON string containing + * the nodes + * @param nodeConfig the JSON-formatted cluster configurations + * @param thisNodeId the node ID for the current node + * @throws SyncException + */ + public ClusterConfig(String nodeConfig, + short thisNodeId) throws SyncException { + super(); + ObjectMapper mapper = new ObjectMapper(); + List<Node> nodes; + try { + nodes = mapper.readValue(nodeConfig, + new TypeReference<List<Node>>() { }); + } catch (Exception e) { + throw new SyncException("Failed to initialize sync manager", e); + } + init(nodes, thisNodeId); + } + + /** + * Initialize a cluster config using a list of nodes + * @param nodes the nodes to use + * @param thisNodeId the node ID for the current node + * @throws SyncException + */ + public ClusterConfig(List<Node> nodes, short thisNodeId) + throws SyncException { + init(nodes, thisNodeId); + } + + /** + * Get a collection containing all configured nodes + * @return the collection of nodes + */ + public Collection<Node> getNodes() { + return Collections.unmodifiableCollection(allNodes.values()); + } + + /** + * A collection of the nodes in the local domain for the current node + * @return the list of nodes + */ + public Collection<Node> getDomainNodes() { + return getDomainNodes(thisNode.getDomainId()); + } + + /** + * A collection of the nodes in the local domain specified + * @param domainId the domain ID + * @return the list of nodes + */ + public Collection<Node> getDomainNodes(short domainId) { + List<Node> r = localDomains.get(domainId); + return 
Collections.unmodifiableCollection(r); + } + + /** + * Get the {@link Node} object for the current node + */ + public Node getNode() { + return thisNode; + } + + /** + * The a list of the nodes in the local domain specified + * @param nodeId the node ID to retrieve + * @return the node (or null if there is no such node + */ + public Node getNode(short nodeId) { + return allNodes.get(nodeId); + } + + /** + * Add a new node to the cluster + * @param node the {@link Node} to add + * @throws SyncException if the node already exists + */ + private void addNode(Node node) throws SyncException { + Short nodeId = node.getNodeId(); + if (allNodes.get(nodeId) != null) { + throw new SyncException("Error adding node " + node + + ": a node with that ID already exists"); + } + allNodes.put(nodeId, node); + + Short domainId = node.getDomainId(); + List<Node> localDomain = localDomains.get(domainId); + if (localDomain == null) { + localDomains.put(domainId, + localDomain = new ArrayList<Node>()); + } + localDomain.add(node); + } + + private void init(List<Node> nodes, short thisNodeId) + throws SyncException { + for (Node n : nodes) { + addNode(n); + } + thisNode = getNode(thisNodeId); + if (thisNode == null) { + throw new SyncException("Cannot set thisNode " + + "node: No node with ID " + thisNodeId); + } + } + + @Override + public String toString() { + return "ClusterConfig [allNodes=" + allNodes + ", thisNode=" + + thisNode.getNodeId() + "]"; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + + ((allNodes == null) ? 0 : allNodes.hashCode()); + result = prime * result + + ((localDomains == null) ? 0 : localDomains.hashCode()); + result = prime * result + + ((thisNode == null) ? 
0 : thisNode.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + ClusterConfig other = (ClusterConfig) obj; + if (allNodes == null) { + if (other.allNodes != null) return false; + } else if (!allNodes.equals(other.allNodes)) return false; + if (localDomains == null) { + if (other.localDomains != null) return false; + } else if (!localDomains.equals(other.localDomains)) return false; + if (thisNode == null) { + if (other.thisNode != null) return false; + } else if (!thisNode.equals(other.thisNode)) return false; + return true; + } +} diff --git a/src/main/java/org/sdnplatform/sync/internal/config/DelegatingCCProvider.java b/src/main/java/org/sdnplatform/sync/internal/config/DelegatingCCProvider.java new file mode 100644 index 0000000000000000000000000000000000000000..2e793b7f68194385b46694ee9cbedefdbe28a1e4 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/config/DelegatingCCProvider.java @@ -0,0 +1,49 @@ +package org.sdnplatform.sync.internal.config; + +import java.util.ArrayList; +import java.util.List; + +import org.sdnplatform.sync.error.SyncException; +import org.sdnplatform.sync.internal.SyncManager; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import net.floodlightcontroller.core.module.FloodlightModuleContext; + + +/** + * Delegate cluster configuration to a list of providers + * @author readams + */ +public class DelegatingCCProvider implements IClusterConfigProvider { + protected static final Logger logger = + LoggerFactory.getLogger(DelegatingCCProvider.class.getName()); + + List<IClusterConfigProvider> providers = + new ArrayList<IClusterConfigProvider>(); + + public void addProvider(IClusterConfigProvider provider) { + this.providers.add(provider); + } + + @Override + public void init(SyncManager syncManager, + FloodlightModuleContext context) { + for (IClusterConfigProvider 
provider : providers) + provider.init(syncManager, context); + } + + @Override + public ClusterConfig getConfig() throws SyncException { + for (IClusterConfigProvider provider : providers) { + try { + return provider.getConfig(); + } catch (Exception e) { + logger.debug("ClusterConfig provider {} failed: {}", + provider.getClass().getSimpleName(), + e.getMessage()); + } + } + throw new SyncException("All cluster config providers failed"); + } +} diff --git a/src/main/java/org/sdnplatform/sync/internal/config/FallbackCCProvider.java b/src/main/java/org/sdnplatform/sync/internal/config/FallbackCCProvider.java new file mode 100644 index 0000000000000000000000000000000000000000..4fc891902deaa95ceef5ba9390074eae66f25b17 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/config/FallbackCCProvider.java @@ -0,0 +1,43 @@ +package org.sdnplatform.sync.internal.config; + +import java.util.Collections; + +import net.floodlightcontroller.core.annotations.LogMessageCategory; +import net.floodlightcontroller.core.annotations.LogMessageDoc; + +import org.sdnplatform.sync.error.SyncException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +/** + * Provide a fallback local configuration + * @author readams + */ +@LogMessageCategory("State Synchronization") +public class FallbackCCProvider extends StaticCCProvider { + protected static final Logger logger = + LoggerFactory.getLogger(FallbackCCProvider.class.getName()); + protected volatile boolean warned = false; + + public FallbackCCProvider() throws SyncException { + super(new ClusterConfig(Collections. 
+ singletonList(new Node("localhost", + 6642, + Short.MAX_VALUE, + Short.MAX_VALUE)), + Short.MAX_VALUE)); + } + + @Override + @LogMessageDoc(level="WARN", + message="Using fallback local configuration", + explanation="No other nodes are known") + public ClusterConfig getConfig() throws SyncException { + if (!warned) { + logger.warn("Using fallback local configuration"); + warned = true; + } + return super.getConfig(); + } +} diff --git a/src/main/java/org/sdnplatform/sync/internal/config/IClusterConfigProvider.java b/src/main/java/org/sdnplatform/sync/internal/config/IClusterConfigProvider.java new file mode 100644 index 0000000000000000000000000000000000000000..355d3df31cd6e47c3b52dbb9cee0d24d7aafcde6 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/config/IClusterConfigProvider.java @@ -0,0 +1,27 @@ +package org.sdnplatform.sync.internal.config; + +import org.sdnplatform.sync.error.SyncException; +import org.sdnplatform.sync.internal.SyncManager; + +import net.floodlightcontroller.core.module.FloodlightModuleContext; + +/** + * Provides configuration for the sync service + * @author readams + */ +public interface IClusterConfigProvider { + /** + * Initialize the provider with the configuration parameters from the + * Floodlight module context. 
+ * @param config + */ + public void init(SyncManager syncManager, + FloodlightModuleContext context); + + /** + * Get the {@link ClusterConfig} that represents the current cluster + * @return the {@link ClusterConfig} object + * @throws SyncException + */ + public ClusterConfig getConfig() throws SyncException; +} diff --git a/src/main/java/org/sdnplatform/sync/internal/config/Node.java b/src/main/java/org/sdnplatform/sync/internal/config/Node.java new file mode 100644 index 0000000000000000000000000000000000000000..5893355ef17f54c03e433ad5b371f503e7063663 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/config/Node.java @@ -0,0 +1,95 @@ +package org.sdnplatform.sync.internal.config; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * Represent a node in the synchronization system + * @author readams + */ +public class Node { + /** + * The host name to use for contacting this node from the + * other nodes + */ + private String hostname; + + /** + * The TCP port to use for contacting this node from the + * other nodes + */ + private int port; + + /** + * The node ID for this node + */ + private short nodeId; + + /** + * The ID for the local cluster domain. Data with a local scope will + * be shared only among nodes that share the same domain ID. 
+ */ + private short domainId; + + @JsonCreator + public Node(@JsonProperty("hostname") String hostname, + @JsonProperty("port") int port, + @JsonProperty("nodeId") short nodeId, + @JsonProperty("domainId") short domainId) { + super(); + this.hostname = hostname; + this.port = port; + this.nodeId = nodeId; + this.domainId = domainId; + } + + public String getHostname() { + return hostname; + } + + public int getPort() { + return port; + } + + public short getNodeId() { + return nodeId; + } + + public short getDomainId() { + return domainId; + } + + @Override + public String toString() { + return "Node [hostname=" + hostname + ", port=" + port + ", nodeId=" + + nodeId + ", domainId=" + domainId + "]"; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + domainId; + result = + prime * result + + ((hostname == null) ? 0 : hostname.hashCode()); + result = prime * result + nodeId; + result = prime * result + port; + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + Node other = (Node) obj; + if (domainId != other.domainId) return false; + if (hostname == null) { + if (other.hostname != null) return false; + } else if (!hostname.equals(other.hostname)) return false; + if (nodeId != other.nodeId) return false; + if (port != other.port) return false; + return true; + } +} diff --git a/src/main/java/org/sdnplatform/sync/internal/config/PropertyCCProvider.java b/src/main/java/org/sdnplatform/sync/internal/config/PropertyCCProvider.java new file mode 100644 index 0000000000000000000000000000000000000000..1732a3b9468d9024e36655ff0e65b70a6e266eb0 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/config/PropertyCCProvider.java @@ -0,0 +1,44 @@ +package org.sdnplatform.sync.internal.config; + +import java.util.Map; + +import org.sdnplatform.sync.error.SyncException; 
+import org.sdnplatform.sync.internal.SyncManager; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import net.floodlightcontroller.core.module.FloodlightModuleContext; + +public class PropertyCCProvider implements IClusterConfigProvider { + protected static Logger logger = + LoggerFactory.getLogger(PropertyCCProvider.class.getName()); + + private Map<String, String> config; + + @Override + public ClusterConfig getConfig() throws SyncException { + if (!config.containsKey("nodes") || !config.containsKey("thisNode")) + throw new SyncException("Configuration properties nodes or " + + "thisNode not set"); + + Short thisNodeId; + try { + thisNodeId = Short.parseShort(config.get("thisNode")); + } catch (NumberFormatException e) { + throw new SyncException("Failed to parse thisNode " + + "node ID: " + config.get("thisNode"), e); + } + try { + return new ClusterConfig(config.get("nodes"), thisNodeId); + } catch (Exception e) { + throw new SyncException("Could not update " + + "configuration", e); + } + } + + @Override + public void init(SyncManager syncManager, + FloodlightModuleContext context) { + this.config = context.getConfigParams(syncManager); + } +} diff --git a/src/main/java/org/sdnplatform/sync/internal/config/StaticCCProvider.java b/src/main/java/org/sdnplatform/sync/internal/config/StaticCCProvider.java new file mode 100644 index 0000000000000000000000000000000000000000..0028d9e0339abb0fa9903bf720b6cfdac4118a63 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/config/StaticCCProvider.java @@ -0,0 +1,26 @@ +package org.sdnplatform.sync.internal.config; + +import org.sdnplatform.sync.error.SyncException; +import org.sdnplatform.sync.internal.SyncManager; + +import net.floodlightcontroller.core.module.FloodlightModuleContext; + +public class StaticCCProvider implements IClusterConfigProvider { + public final ClusterConfig config; + + public StaticCCProvider(ClusterConfig config) { + super(); + this.config = config; + } + + @Override + 
public void init(SyncManager syncManager, + FloodlightModuleContext context) { + + } + + @Override + public ClusterConfig getConfig() throws SyncException { + return config; + } +} diff --git a/src/main/java/org/sdnplatform/sync/internal/config/StorageCCProvider.java b/src/main/java/org/sdnplatform/sync/internal/config/StorageCCProvider.java new file mode 100644 index 0000000000000000000000000000000000000000..22be6e03c7260a29c1a3a48d4fc2e96b5d062adc --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/config/StorageCCProvider.java @@ -0,0 +1,163 @@ +package org.sdnplatform.sync.internal.config; + +import java.io.FileInputStream; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Properties; + +import org.sdnplatform.sync.error.SyncException; +import org.sdnplatform.sync.internal.SyncManager; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import net.floodlightcontroller.core.FloodlightProvider; +import net.floodlightcontroller.core.module.FloodlightModuleContext; +import net.floodlightcontroller.storage.IResultSet; +import net.floodlightcontroller.storage.IStorageSourceService; + +public class StorageCCProvider + implements IClusterConfigProvider { + protected static final Logger logger = + LoggerFactory.getLogger(StorageCCProvider.class.getName()); + + private IStorageSourceService storageSource; + + String thisControllerID; + + protected static final String CONTROLLER_TABLE_NAME = "controller_controller"; + protected static final String CONTROLLER_ID = "id"; + protected static final String CONTROLLER_SYNC_ID = "sync_id"; + protected static final String CONTROLLER_SYNC_DOMAIN_ID = "sync_domain_id"; + protected static final String CONTROLLER_SYNC_PORT = "sync_port"; + + protected static final String CONTROLLER_INTERFACE_TABLE_NAME = "controller_controllerinterface"; + protected static final String CONTROLLER_INTERFACE_CONTROLLER_ID = "controller_id"; + protected static 
final String CONTROLLER_INTERFACE_DISCOVERED_IP = "discovered_ip"; + protected static final String CONTROLLER_INTERFACE_TYPE = "type"; + protected static final String CONTROLLER_INTERFACE_NUMBER = "number"; + + protected static final String BOOT_CONFIG = + "/opt/bigswitch/run/boot-config"; + + // ********************** + // IClusterConfigProvider + // ********************** + + @Override + public void init(SyncManager syncManager, + FloodlightModuleContext context) { + storageSource = context.getServiceImpl(IStorageSourceService.class); + + // storageSource.addListener(CONTROLLER_TABLE_NAME, this); + + Map<String, String> config = + context.getConfigParams(FloodlightProvider.class); + thisControllerID = config.get("controllerid"); + } + + @Override + public ClusterConfig getConfig() throws SyncException { + if (thisControllerID == null) { + Properties bootConfig = new Properties(); + FileInputStream is = null; + try { + is = new FileInputStream(BOOT_CONFIG); + bootConfig.load(is); + thisControllerID = bootConfig.getProperty("controller-id"); + } catch (Exception e) { + throw new SyncException("No controller ID configured and " + + "could not read " + BOOT_CONFIG); + } finally { + if (is != null) try { + is.close(); + } catch (IOException e) { + throw new SyncException(e); + } + } + } + if (thisControllerID == null) { + throw new SyncException("No controller ID configured"); + } + logger.debug("Using controller ID: {}", thisControllerID); + + List<Node> nodes = new ArrayList<Node>(); + short thisNodeId = -1; + + String[] cols = {CONTROLLER_ID, + CONTROLLER_SYNC_ID, + CONTROLLER_SYNC_DOMAIN_ID, + CONTROLLER_SYNC_PORT}; + IResultSet res = null; + try { + res = storageSource.executeQuery(CONTROLLER_TABLE_NAME, + cols, null, null); + while (res.next()) { + String controllerId = res.getString(CONTROLLER_ID); + if (!res.containsColumn(CONTROLLER_SYNC_ID) || + !res.containsColumn(CONTROLLER_SYNC_DOMAIN_ID) || + !res.containsColumn(CONTROLLER_SYNC_PORT)) { + 
logger.debug("No sync data found for {}", controllerId); + continue; + } + + short nodeId = res.getShort(CONTROLLER_SYNC_ID); + short domainId = res.getShort(CONTROLLER_SYNC_DOMAIN_ID); + int port = res.getInt(CONTROLLER_SYNC_PORT); + String syncIp = getNodeIP(controllerId); + if (syncIp == null) { + logger.debug("No sync IP found for {}", controllerId); + continue; + } + Node node = new Node(syncIp, port, nodeId, domainId); + nodes.add(node); + + if (thisControllerID.equals(controllerId)) + thisNodeId = nodeId; + } + } finally { + if (res != null) res.close(); + } + + if (nodes.size() == 0) + throw new SyncException("No valid nodes found"); + if (thisNodeId < 0) + throw new SyncException("Could not find a node for the local node"); + + return new ClusterConfig(nodes, thisNodeId); + } + + // ************* + // Local methods + // ************* + + private String getNodeIP(String controllerID) { + + String[] cols = {CONTROLLER_INTERFACE_CONTROLLER_ID, + CONTROLLER_INTERFACE_TYPE, + CONTROLLER_INTERFACE_NUMBER, + CONTROLLER_INTERFACE_DISCOVERED_IP}; + IResultSet res = null; + try { + res = storageSource.executeQuery(CONTROLLER_INTERFACE_TABLE_NAME, + cols, null, null); + while (res.next()) { + logger.debug("{} {} {} {}", + new Object[] {res.getString(CONTROLLER_INTERFACE_CONTROLLER_ID), + res.getString(CONTROLLER_INTERFACE_TYPE), + res.getIntegerObject(CONTROLLER_INTERFACE_NUMBER), + res.getString(CONTROLLER_INTERFACE_DISCOVERED_IP)}); + if ("Ethernet".equals(res.getString(CONTROLLER_INTERFACE_TYPE)) && + Integer.valueOf(0).equals(res.getIntegerObject(CONTROLLER_INTERFACE_NUMBER)) && + controllerID.equals(res.getString(CONTROLLER_INTERFACE_CONTROLLER_ID))) + return res.getString(CONTROLLER_INTERFACE_DISCOVERED_IP); + } + return null; + + } finally { + if (res != null) res.close(); + } + + } +} diff --git a/src/main/java/org/sdnplatform/sync/internal/remote/RemoteStore.java b/src/main/java/org/sdnplatform/sync/internal/remote/RemoteStore.java new file mode 100644 index 
0000000000000000000000000000000000000000..ce19253f47118622019ee6bf7f0580329d48d942 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/remote/RemoteStore.java @@ -0,0 +1,234 @@ +package org.sdnplatform.sync.internal.remote; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Map.Entry; +import java.util.NoSuchElementException; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; + +import org.sdnplatform.sync.IClosableIterator; +import org.sdnplatform.sync.IVersion; +import org.sdnplatform.sync.Versioned; +import org.sdnplatform.sync.error.RemoteStoreException; +import org.sdnplatform.sync.error.SyncException; +import org.sdnplatform.sync.error.SyncRuntimeException; +import org.sdnplatform.sync.internal.rpc.TProtocolUtil; +import org.sdnplatform.sync.internal.store.IStore; +import org.sdnplatform.sync.internal.store.StoreUtils; +import org.sdnplatform.sync.internal.util.ByteArray; +import org.sdnplatform.sync.internal.util.Pair; +import org.sdnplatform.sync.thrift.AsyncMessageHeader; +import org.sdnplatform.sync.thrift.SyncMessage; +import org.sdnplatform.sync.thrift.CursorRequestMessage; +import org.sdnplatform.sync.thrift.GetRequestMessage; +import org.sdnplatform.sync.thrift.KeyedValues; +import org.sdnplatform.sync.thrift.MessageType; +import org.sdnplatform.sync.thrift.PutRequestMessage; + + +/** + * A store implementation that will connect to a remote sync instance + * @author readams + */ +public class RemoteStore implements IStore<ByteArray, byte[]> { + + private String storeName; + private RemoteSyncManager syncManager; + + public RemoteStore(String storeName, RemoteSyncManager syncManager) { + super(); + this.storeName = storeName; + this.syncManager = syncManager; + } + + // ************************* + // IStore<ByteArray, byte[]> + // ************************* + + @Override + public List<Versioned<byte[]>> get(ByteArray key) throws SyncException { + 
StoreUtils.assertValidKey(key); + GetRequestMessage grm = new GetRequestMessage(); + + AsyncMessageHeader header = new AsyncMessageHeader(); + header.setTransactionId(syncManager.getTransactionId()); + grm.setHeader(header); + + grm.setKey(key.get()); + grm.setStoreName(storeName); + + SyncMessage bsm = new SyncMessage(MessageType.GET_REQUEST); + bsm.setGetRequest(grm); + + SyncReply reply = getReply(header.getTransactionId(), bsm); + + return reply.getValues(); + } + + @Override + public IClosableIterator<Entry<ByteArray, List<Versioned<byte[]>>>> + entries() { + return new RemoteIterator(); + } + + @Override + public void put(ByteArray key, Versioned<byte[]> value) + throws SyncException { + StoreUtils.assertValidKey(key); + PutRequestMessage prm = new PutRequestMessage(); + + AsyncMessageHeader header = new AsyncMessageHeader(); + header.setTransactionId(syncManager.getTransactionId()); + prm.setHeader(header); + prm.setVersionedValue(TProtocolUtil.getTVersionedValue(value)); + prm.setKey(key.get()); + prm.setStoreName(storeName); + + SyncMessage bsm = new SyncMessage(MessageType.PUT_REQUEST); + bsm.setPutRequest(prm); + + getReply(header.getTransactionId(), bsm); + } + + @Override + public List<IVersion> getVersions(ByteArray key) throws SyncException { + List<Versioned<byte[]>> values = get(key); + ArrayList<IVersion> versions = new ArrayList<IVersion>(); + for (Versioned<byte[]> v : values) { + versions.add(v.getVersion()); + } + return versions; + } + + @Override + public String getName() { + return storeName; + } + + @Override + public void close() throws SyncException { + + } + + // ************* + // Local methods + // ************* + + private SyncReply getReply(int xid, + SyncMessage bsm) + throws SyncException { + SyncReply reply = null; + try { + Future<SyncReply> future = + syncManager.sendRequest(xid, bsm); + reply = future.get(5, TimeUnit.SECONDS); + + } catch (Exception e) { + throw new RemoteStoreException("Error while waiting for reply", e); + } 
+ + if (reply.getError() != null) + throw reply.getError(); + + return reply; + } + + private class RemoteIterator + implements IClosableIterator<Entry<ByteArray, + List<Versioned<byte[]>>>> { + + private final int cursorId; + Iterator<KeyedValues> currentChunk; + + public RemoteIterator() { + CursorRequestMessage crm = getCRM(); + crm.setStoreName(storeName); + SyncMessage bsm = new SyncMessage(MessageType.CURSOR_REQUEST); + bsm.setCursorRequest(crm); + SyncReply reply; + try { + reply = getReply(crm.getHeader().getTransactionId(), + bsm); + } catch (SyncException e) { + throw new SyncRuntimeException(e); + } + this.cursorId = reply.getIntValue(); + if (reply.getKeyedValues() != null) + currentChunk = reply.getKeyedValues().iterator(); + } + + @Override + public boolean hasNext() { + if (currentChunk != null) { + if (currentChunk.hasNext()) + return true; + } + Iterator<KeyedValues> nextChunk = getChunk(); + if (nextChunk != null) { + currentChunk = nextChunk; + return nextChunk.hasNext(); + } + return false; + } + + @Override + public Entry<ByteArray, List<Versioned<byte[]>>> next() { + if (!hasNext()) throw new NoSuchElementException(); + KeyedValues kv = currentChunk.next(); + + ByteArray k = new ByteArray(kv.getKey()); + List<Versioned<byte[]>> v = + TProtocolUtil.getVersionedList(kv.getValues()); + return new Pair<ByteArray, List<Versioned<byte[]>>>(k, v); + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + + @Override + public void close() { + CursorRequestMessage crm = getCRM(); + crm.setCursorId(cursorId); + crm.setClose(true); + SyncMessage bsm = new SyncMessage(MessageType.CURSOR_REQUEST); + bsm.setCursorRequest(crm); + try { + getReply(crm.getHeader().getTransactionId(), + bsm); + } catch (SyncException e) { + throw new SyncRuntimeException(e); + } + } + + private Iterator<KeyedValues> getChunk() { + CursorRequestMessage crm = getCRM(); + crm.setCursorId(cursorId); + SyncMessage bsm = new 
SyncMessage(MessageType.CURSOR_REQUEST); + bsm.setCursorRequest(crm); + + SyncReply reply; + try { + reply = getReply(crm.getHeader().getTransactionId(), + bsm); + } catch (SyncException e) { + throw new SyncRuntimeException(e); + } + if (reply.getKeyedValues() == null || + reply.getKeyedValues().size() == 0) return null; + + return reply.getKeyedValues().iterator(); + } + + private CursorRequestMessage getCRM() { + CursorRequestMessage crm = new CursorRequestMessage(); + AsyncMessageHeader header = new AsyncMessageHeader(); + header.setTransactionId(syncManager.getTransactionId()); + crm.setHeader(header); + return crm; + } + } +} diff --git a/src/main/java/org/sdnplatform/sync/internal/remote/RemoteSyncChannelHandler.java b/src/main/java/org/sdnplatform/sync/internal/remote/RemoteSyncChannelHandler.java new file mode 100644 index 0000000000000000000000000000000000000000..4c215f93a89d75a8c6efc54683b76e9489a8ea12 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/remote/RemoteSyncChannelHandler.java @@ -0,0 +1,149 @@ +package org.sdnplatform.sync.internal.remote; + +import java.util.List; + +import org.jboss.netty.channel.Channel; +import org.jboss.netty.channel.ChannelHandlerContext; +import org.jboss.netty.channel.ChannelStateEvent; +import org.sdnplatform.sync.Versioned; +import org.sdnplatform.sync.error.SyncException; +import org.sdnplatform.sync.error.SyncException.ErrorType; +import org.sdnplatform.sync.internal.rpc.AbstractRPCChannelHandler; +import org.sdnplatform.sync.internal.rpc.TProtocolUtil; +import org.sdnplatform.sync.thrift.CursorResponseMessage; +import org.sdnplatform.sync.thrift.DeleteResponseMessage; +import org.sdnplatform.sync.thrift.ErrorMessage; +import org.sdnplatform.sync.thrift.GetResponseMessage; +import org.sdnplatform.sync.thrift.HelloMessage; +import org.sdnplatform.sync.thrift.PutResponseMessage; +import org.sdnplatform.sync.thrift.RegisterResponseMessage; + + +/** + * Implement the client side of the RPC service for 
the + * {@link RemoteSyncManager} + * @see RemoteSyncManager + * @author readams + */ +public class RemoteSyncChannelHandler extends AbstractRPCChannelHandler { + + RemoteSyncManager syncManager; + + public RemoteSyncChannelHandler(RemoteSyncManager syncManager) { + super(); + this.syncManager = syncManager; + } + + // **************************** + // IdleStateAwareChannelHandler + // **************************** + + @Override + public void channelOpen(ChannelHandlerContext ctx, + ChannelStateEvent e) throws Exception { + syncManager.cg.add(ctx.getChannel()); + } + + @Override + public void channelDisconnected(ChannelHandlerContext ctx, + ChannelStateEvent e) throws Exception { + this.syncManager.channel = null; + } + + // ****************************************** + // AbstractRPCChannelHandler message handlers + // ****************************************** + + @Override + protected void handleHello(HelloMessage hello, Channel channel) { + syncManager.remoteNodeId = hello.getNodeId(); + } + + @Override + protected void handleGetResponse(GetResponseMessage response, + Channel channel) { + List<Versioned<byte[]>> values = + TProtocolUtil.getVersionedList(response.getValues()); + SyncReply reply = new SyncReply(values, null, true, null, 0); + syncManager.dispatchReply(response.getHeader().getTransactionId(), + reply); + } + + @Override + protected void handlePutResponse(PutResponseMessage response, + Channel channel) { + SyncReply reply = new SyncReply(null, null, true, null, 0); + syncManager.dispatchReply(response.getHeader().getTransactionId(), + reply); + + } + + @Override + protected void handleDeleteResponse(DeleteResponseMessage response, + Channel channel) { + SyncReply reply = new SyncReply(null, null, + response.isDeleted(), null, 0); + syncManager.dispatchReply(response.getHeader().getTransactionId(), + reply); + } + + @Override + protected void handleCursorResponse(CursorResponseMessage response, + Channel channel) { + SyncReply reply = new 
SyncReply(null, response.getValues(), true, + null, response.getCursorId()); + syncManager.dispatchReply(response.getHeader().getTransactionId(), + reply); + } + + @Override + protected void handleRegisterResponse(RegisterResponseMessage response, + Channel channel) { + SyncReply reply = new SyncReply(null, null, + true, null, 0); + syncManager.dispatchReply(response.getHeader().getTransactionId(), + reply); + } + + @Override + protected void handleError(ErrorMessage error, Channel channel) { + ErrorType errType = ErrorType.GENERIC; + for (ErrorType e : ErrorType.values()) { + if (e.getValue() == error.getError().getErrorCode()) { + errType = e; + break; + } + } + SyncException ex = + SyncException.newInstance(errType, + error.getError().getMessage(), + null); + SyncReply reply = new SyncReply(null, null, false, ex, 0); + syncManager.dispatchReply(error.getHeader().getTransactionId(), + reply); + } + + // ************************* + // AbstractRPCChannelHandler + // ************************* + + @Override + protected Short getRemoteNodeId() { + return syncManager.remoteNodeId; + } + + @Override + protected Short getLocalNodeId() { + return null; + } + + @Override + protected String getLocalNodeIdString() { + return "client"; + } + + @Override + protected int getTransactionId() { + return syncManager.getTransactionId(); + } +} diff --git a/src/main/java/org/sdnplatform/sync/internal/remote/RemoteSyncFuture.java b/src/main/java/org/sdnplatform/sync/internal/remote/RemoteSyncFuture.java new file mode 100644 index 0000000000000000000000000000000000000000..6e2088d093b4cb805b9990a4a142d7a7469655b1 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/remote/RemoteSyncFuture.java @@ -0,0 +1,84 @@ +package org.sdnplatform.sync.internal.remote; + +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class RemoteSyncFuture implements 
Future<SyncReply> { + + private final int xid; + private volatile SyncReply reply = null; + private Object notify = new Object(); + + public RemoteSyncFuture(int xid) { + super(); + this.xid = xid; + } + + // ********************** + // Future<SyncReply> + // ********************** + + @Override + public boolean cancel(boolean mayInterruptIfRunning) { + return false; + } + + @Override + public SyncReply get() throws InterruptedException, + ExecutionException { + if (reply != null) return reply; + synchronized (notify) { + while (reply == null) + notify.wait(); + } + return reply; + } + + @Override + public SyncReply + get(long timeout, TimeUnit unit) throws InterruptedException, + ExecutionException, + TimeoutException { + if (reply != null) return reply; + synchronized (notify) { + notify.wait(TimeUnit.MILLISECONDS.convert(timeout, unit)); + } + if (reply == null) throw new TimeoutException(); + return reply; + } + + @Override + public boolean isCancelled() { + return false; + } + + @Override + public boolean isDone() { + return (reply != null); + } + + // **************** + // RemoteSyncFuture + // **************** + + /** + * Get the xid for this message + * @return + */ + public int getXid() { + return xid; + } + + /** + * Set the reply message + * @param reply + */ + public void setReply(SyncReply reply) { + synchronized (notify) { + this.reply = reply; + notify.notifyAll(); + } + } +} diff --git a/src/main/java/org/sdnplatform/sync/internal/remote/RemoteSyncManager.java b/src/main/java/org/sdnplatform/sync/internal/remote/RemoteSyncManager.java new file mode 100644 index 0000000000000000000000000000000000000000..bae0019d7d0a13f1da91eab551fc9ed3cc971701 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/remote/RemoteSyncManager.java @@ -0,0 +1,316 @@ +package org.sdnplatform.sync.internal.remote; + +import java.net.InetSocketAddress; +import java.net.SocketAddress; +import java.util.Collection; +import java.util.Map; +import 
java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.Executor; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import org.jboss.netty.bootstrap.ClientBootstrap; +import org.jboss.netty.channel.Channel; +import org.jboss.netty.channel.ChannelFuture; +import org.jboss.netty.channel.group.ChannelGroup; +import org.jboss.netty.channel.group.DefaultChannelGroup; +import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory; +import org.sdnplatform.sync.error.RemoteStoreException; +import org.sdnplatform.sync.error.SyncException; +import org.sdnplatform.sync.error.SyncRuntimeException; +import org.sdnplatform.sync.error.UnknownStoreException; +import org.sdnplatform.sync.internal.AbstractSyncManager; +import org.sdnplatform.sync.internal.rpc.RPCService; +import org.sdnplatform.sync.internal.rpc.TProtocolUtil; +import org.sdnplatform.sync.internal.store.IStore; +import org.sdnplatform.sync.internal.store.MappingStoreListener; +import org.sdnplatform.sync.internal.util.ByteArray; +import org.sdnplatform.sync.thrift.AsyncMessageHeader; +import org.sdnplatform.sync.thrift.SyncMessage; +import org.sdnplatform.sync.thrift.MessageType; +import org.sdnplatform.sync.thrift.RegisterRequestMessage; +import org.sdnplatform.sync.thrift.Store; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import net.floodlightcontroller.core.annotations.LogMessageCategory; +import net.floodlightcontroller.core.annotations.LogMessageDoc; +import net.floodlightcontroller.core.module.FloodlightModuleContext; +import net.floodlightcontroller.core.module.FloodlightModuleException; +import net.floodlightcontroller.core.module.IFloodlightService; + +/** + * Implementation of a sync service that passes its functionality off to a + * remote sync manager over a TCP connection + * @author readams + */ +@LogMessageCategory("State Synchronization") 
+public class RemoteSyncManager extends AbstractSyncManager { + protected static final Logger logger = + LoggerFactory.getLogger(RemoteSyncManager.class.getName()); + + /** + * Channel group that will hold all our channels + */ + final ChannelGroup cg = new DefaultChannelGroup("Internal RPC"); + + /** + * Active connection to server + */ + protected volatile Channel channel; + + /** + * The remote node ID of the node we're connected to + */ + protected Short remoteNodeId; + + /** + * Client bootstrap + */ + protected ClientBootstrap clientBootstrap; + + /** + * Transaction ID used in message headers in the RPC protocol + */ + private AtomicInteger transactionId = new AtomicInteger(); + + /** + * The hostname of the server to connect to + */ + protected String hostname = "localhost"; + + /** + * Port to connect to + */ + protected int port = 6642; + + private ConcurrentHashMap<Integer, RemoteSyncFuture> futureMap = + new ConcurrentHashMap<Integer, RemoteSyncFuture>(); + private Object futureNotify = new Object(); + private static int MAX_PENDING_REQUESTS = 1000; + + // ************ + // ISyncService + // ************ + + public RemoteSyncManager() { + } + + @Override + public void registerStore(String storeName, Scope scope) + throws SyncException { + doRegisterStore(storeName, scope, false); + } + + @Override + public void registerPersistentStore(String storeName, Scope scope) + throws SyncException { + doRegisterStore(storeName, scope, true); + } + + // ******************* + // AbstractSyncManager + // ******************* + + @Override + public void addListener(String storeName, + MappingStoreListener listener) + throws UnknownStoreException { + ensureConnected(); + } + + @Override + public IStore<ByteArray, byte[]> + getStore(String storeName) throws UnknownStoreException { + ensureConnected(); + return new RemoteStore(storeName, this); + } + + @Override + public short getLocalNodeId() { + ensureConnected(); + return remoteNodeId; + } + + @Override + public void 
shutdown() { + logger.debug("Shutting down Remote Sync Manager"); + try { + if (!cg.close().await(5, TimeUnit.SECONDS)) { + logger.debug("Failed to cleanly shut down remote sync"); + } + clientBootstrap.releaseExternalResources(); + } catch (InterruptedException e) { + logger.debug("Interrupted while shutting down remote sync"); + } + } + + // ***************** + // IFloodlightModule + // ***************** + + @Override + public void init(FloodlightModuleContext context) + throws FloodlightModuleException { + Map<String, String> config = context.getConfigParams(this); + if (null != config.get("hostname")) + hostname = config.get("hostname"); + if (null != config.get("port")) + port = Integer.parseInt(config.get("port")); + } + + @Override + public void startUp(FloodlightModuleContext context) + throws FloodlightModuleException { + Executor bossExecutor = Executors.newCachedThreadPool(); + Executor workerExecutor = Executors.newCachedThreadPool(); + + final ClientBootstrap bootstrap = + new ClientBootstrap( + new NioClientSocketChannelFactory(bossExecutor, + workerExecutor)); + bootstrap.setOption("child.reuseAddr", true); + bootstrap.setOption("child.keepAlive", true); + bootstrap.setOption("child.tcpNoDelay", true); + bootstrap.setOption("child.sendBufferSize", + RPCService.SEND_BUFFER_SIZE); + bootstrap.setOption("child.receiveBufferSize", + RPCService.SEND_BUFFER_SIZE); + bootstrap.setOption("child.connectTimeoutMillis", + RPCService.CONNECT_TIMEOUT); + bootstrap.setPipelineFactory(new RemoteSyncPipelineFactory(this)); + clientBootstrap = bootstrap; + } + + @Override + public Collection<Class<? 
extends IFloodlightService>> + getModuleDependencies() { + return null; + } + + // ***************** + // RemoteSyncManager + // ***************** + + /** + * Get a suitable transaction ID for sending a message + * @return the unique transaction iD + */ + public int getTransactionId() { + return transactionId.getAndIncrement(); + } + + /** + * Send a request to the server and generate a future for the + * eventual reply. Note that this call can block if there is no active + * connection while a new connection is re-established or if the maximum + * number of requests is already pending + * @param xid the transaction ID for the request + * @param request the actual request to send + * @return A {@link Future} for the reply message + * @throws InterruptedException + */ + public Future<SyncReply> sendRequest(int xid, + SyncMessage request) + throws RemoteStoreException { + ensureConnected(); + RemoteSyncFuture future = new RemoteSyncFuture(xid); + futureMap.put(Integer.valueOf(xid), future); + + if (futureMap.size() > MAX_PENDING_REQUESTS) { + synchronized (futureNotify) { + while (futureMap.size() > MAX_PENDING_REQUESTS) { + try { + futureNotify.wait(); + } catch (InterruptedException e) { + throw new RemoteStoreException("Could not send request", + e); + } + } + } + } + channel.write(request); + return future; + } + + @LogMessageDoc(level="WARN", + message="Unexpected sync message reply type={type} id={id}", + explanation="An error occurred in the sync protocol", + recommendation=LogMessageDoc.REPORT_CONTROLLER_BUG) + public void dispatchReply(int xid, + SyncReply reply) { + RemoteSyncFuture future = futureMap.get(Integer.valueOf(xid)); + if (future == null) { + logger.warn("Unexpected sync message replyid={}", xid); + return; + } + futureMap.remove(Integer.valueOf(xid)); + future.setReply(reply); + synchronized (futureNotify) { + futureNotify.notify(); + } + } + + // *************** + // Local methods + // *************** + + protected void ensureConnected() { + if 
(channel == null || !channel.isConnected()) { + for (int i = 0; i < 25; i++) { + synchronized (this) { + if (connect(hostname, port)) + return; + } + try { + Thread.sleep(1000); + } catch (Exception e) {} + } + if (channel == null) + throw new SyncRuntimeException(new SyncException("Failed to establish connection")); + } + } + + protected boolean connect(String hostname, int port) { + SocketAddress sa = + new InetSocketAddress(hostname, port); + ChannelFuture future = clientBootstrap.connect(sa); + future.awaitUninterruptibly(); + if (!future.isSuccess()) { + logger.error("Could not connect to " + hostname + + ":" + port, future.getCause()); + return false; + } + channel = future.getChannel(); + logger.debug("Connected to " + hostname + ":" + port); + return true; + } + + private void doRegisterStore(String storeName, Scope scope, boolean b) + throws SyncException{ + + ensureConnected(); + RegisterRequestMessage rrm = new RegisterRequestMessage(); + AsyncMessageHeader header = new AsyncMessageHeader(); + header.setTransactionId(getTransactionId()); + rrm.setHeader(header); + + Store store = new Store(storeName); + store.setScope(TProtocolUtil.getTScope(scope)); + store.setPersist(false); + rrm.setStore(store); + + SyncMessage bsm = new SyncMessage(MessageType.REGISTER_REQUEST); + bsm.setRegisterRequest(rrm); + Future<SyncReply> future = + sendRequest(header.getTransactionId(), bsm); + try { + future.get(2, TimeUnit.SECONDS); + } catch (Exception e) { + throw new RemoteStoreException("Error while waiting for reply", e); + } + } + +} diff --git a/src/main/java/org/sdnplatform/sync/internal/remote/RemoteSyncPipelineFactory.java b/src/main/java/org/sdnplatform/sync/internal/remote/RemoteSyncPipelineFactory.java new file mode 100644 index 0000000000000000000000000000000000000000..812781f52c85a51fbd75fc69e2fb04e7c851d412 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/remote/RemoteSyncPipelineFactory.java @@ -0,0 +1,38 @@ +package 
org.sdnplatform.sync.internal.remote; + +import org.jboss.netty.channel.ChannelPipeline; +import org.jboss.netty.channel.ChannelPipelineFactory; +import org.jboss.netty.channel.Channels; +import org.sdnplatform.sync.internal.rpc.ThriftFrameDecoder; +import org.sdnplatform.sync.internal.rpc.ThriftFrameEncoder; + +/** + * Pipeline factory for the remote sync service + * @author readams + */ +public class RemoteSyncPipelineFactory implements ChannelPipelineFactory { + + protected RemoteSyncManager syncManager; + + private static final int maxFrameSize = 1024 * 1024 * 10; + + public RemoteSyncPipelineFactory(RemoteSyncManager syncManager) { + super(); + this.syncManager = syncManager; + } + + @Override + public ChannelPipeline getPipeline() throws Exception { + RemoteSyncChannelHandler channelHandler = + new RemoteSyncChannelHandler(syncManager); + ChannelPipeline pipeline = Channels.pipeline(); + + pipeline.addLast("frameDecoder", + new ThriftFrameDecoder(maxFrameSize)); + pipeline.addLast("frameEncoder", + new ThriftFrameEncoder()); + + pipeline.addLast("handler", channelHandler); + return pipeline; + } +} diff --git a/src/main/java/org/sdnplatform/sync/internal/remote/SyncReply.java b/src/main/java/org/sdnplatform/sync/internal/remote/SyncReply.java new file mode 100644 index 0000000000000000000000000000000000000000..1c114a3e6ab48040c5905f32f2e279ec2647a51b --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/remote/SyncReply.java @@ -0,0 +1,46 @@ +package org.sdnplatform.sync.internal.remote; + +import java.util.List; + +import org.sdnplatform.sync.Versioned; +import org.sdnplatform.sync.error.SyncException; +import org.sdnplatform.sync.thrift.KeyedValues; + + +/* + * Represent a reply to a remote message + */ +public class SyncReply { + private List<KeyedValues> keyedValues; + private List<Versioned<byte[]>> values; + private boolean success; + private SyncException error; + private int intValue; + + public SyncReply(List<Versioned<byte[]>> values, + 
List<KeyedValues> keyedValues, + boolean success, SyncException error, int intValue) { + super(); + this.values = values; + this.keyedValues = keyedValues; + this.success = success; + this.error = error; + this.intValue = intValue; + } + + public int getIntValue() { + return intValue; + } + public List<KeyedValues> getKeyedValues() { + return keyedValues; + } + public List<Versioned<byte[]>> getValues() { + return values; + } + public SyncException getError() { + return error; + } + public boolean isSuccess() { + return success; + } +} diff --git a/src/main/java/org/sdnplatform/sync/internal/rpc/AbstractRPCChannelHandler.java b/src/main/java/org/sdnplatform/sync/internal/rpc/AbstractRPCChannelHandler.java new file mode 100644 index 0000000000000000000000000000000000000000..23aeab647a76e6690c6cd261d71371010bd034fa --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/rpc/AbstractRPCChannelHandler.java @@ -0,0 +1,439 @@ +package org.sdnplatform.sync.internal.rpc; + +import java.io.IOException; +import java.net.ConnectException; +import java.util.List; + +import org.jboss.netty.channel.Channel; +import org.jboss.netty.channel.ChannelHandlerContext; +import org.jboss.netty.channel.ChannelStateEvent; +import org.jboss.netty.channel.Channels; +import org.jboss.netty.channel.ExceptionEvent; +import org.jboss.netty.channel.MessageEvent; +import org.jboss.netty.handler.timeout.IdleStateAwareChannelHandler; +import org.jboss.netty.handler.timeout.IdleStateEvent; +import org.jboss.netty.handler.timeout.ReadTimeoutException; +import org.sdnplatform.sync.error.HandshakeTimeoutException; +import org.sdnplatform.sync.error.SyncException; +import org.sdnplatform.sync.thrift.AsyncMessageHeader; +import org.sdnplatform.sync.thrift.SyncError; +import org.sdnplatform.sync.thrift.SyncMessage; +import org.sdnplatform.sync.thrift.CursorRequestMessage; +import org.sdnplatform.sync.thrift.CursorResponseMessage; +import org.sdnplatform.sync.thrift.DeleteRequestMessage; +import 
org.sdnplatform.sync.thrift.DeleteResponseMessage; +import org.sdnplatform.sync.thrift.EchoReplyMessage; +import org.sdnplatform.sync.thrift.EchoRequestMessage; +import org.sdnplatform.sync.thrift.ErrorMessage; +import org.sdnplatform.sync.thrift.FullSyncRequestMessage; +import org.sdnplatform.sync.thrift.GetRequestMessage; +import org.sdnplatform.sync.thrift.GetResponseMessage; +import org.sdnplatform.sync.thrift.HelloMessage; +import org.sdnplatform.sync.thrift.MessageType; +import org.sdnplatform.sync.thrift.PutRequestMessage; +import org.sdnplatform.sync.thrift.PutResponseMessage; +import org.sdnplatform.sync.thrift.RegisterRequestMessage; +import org.sdnplatform.sync.thrift.RegisterResponseMessage; +import org.sdnplatform.sync.thrift.SyncOfferMessage; +import org.sdnplatform.sync.thrift.SyncRequestMessage; +import org.sdnplatform.sync.thrift.SyncValueMessage; +import org.sdnplatform.sync.thrift.SyncValueResponseMessage; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +/** + * Abstract base class for implementing the RPC protocol. The protocol is + * defined by a thrift specification; all protocol messages are delivered in + * a {@link SyncMessage} which will provide specific type information. 
+ * @author readams + */ +public abstract class AbstractRPCChannelHandler + extends IdleStateAwareChannelHandler { + protected static final Logger logger = + LoggerFactory.getLogger(AbstractRPCChannelHandler.class); + + public AbstractRPCChannelHandler() { + super(); + } + + // **************************** + // IdleStateAwareChannelHandler + // **************************** + + @Override + public void channelConnected(ChannelHandlerContext ctx, + ChannelStateEvent e) throws Exception { + HelloMessage m = new HelloMessage(); + if (getLocalNodeId() != null) + m.setNodeId(getLocalNodeId()); + AsyncMessageHeader header = new AsyncMessageHeader(); + header.setTransactionId(getTransactionId()); + m.setHeader(header); + SyncMessage bsm = new SyncMessage(MessageType.HELLO); + bsm.setHello(m); + ctx.getChannel().write(bsm); + } + + @Override + public void channelIdle(ChannelHandlerContext ctx, + IdleStateEvent e) throws Exception { + // send an echo request + EchoRequestMessage m = new EchoRequestMessage(); + AsyncMessageHeader header = new AsyncMessageHeader(); + header.setTransactionId(getTransactionId()); + m.setHeader(header); + SyncMessage bsm = new SyncMessage(MessageType.ECHO_REQUEST); + bsm.setEchoRequest(m); + ctx.getChannel().write(bsm); + } + + @Override + public void exceptionCaught(ChannelHandlerContext ctx, + ExceptionEvent e) throws Exception { + if (e.getCause() instanceof ReadTimeoutException) { + // read timeout + logger.error("[{}->{}] Disconnecting RPC node due to read timeout", + getLocalNodeIdString(), getRemoteNodeIdString()); + ctx.getChannel().close(); + } else if (e.getCause() instanceof HandshakeTimeoutException) { + // read timeout + logger.error("[{}->{}] Disconnecting RPC node due to " + + "handshake timeout", + getLocalNodeIdString(), getRemoteNodeIdString()); + ctx.getChannel().close(); + } else if (e.getCause() instanceof ConnectException || + e.getCause() instanceof IOException) { + logger.debug("[{}->{}] {}: {}", + new Object[] 
{getLocalNodeIdString(), + getRemoteNodeIdString(), + e.getCause().getClass().getName(), + e.getCause().getMessage()}); + } else { + logger.error("[{}->{}] An error occurred on RPC channel", + new Object[]{getLocalNodeIdString(), + getRemoteNodeIdString(), + e.getCause()}); + ctx.getChannel().close(); + } + } + + @Override + public void messageReceived(ChannelHandlerContext ctx, + MessageEvent e) throws Exception { + Object message = e.getMessage(); + if (message instanceof SyncMessage) { + handleSyncMessage((SyncMessage)message, ctx.getChannel()); + } else if (message instanceof List) { + for (Object i : (List<?>)message) { + if (i instanceof SyncMessage) { + try { + handleSyncMessage((SyncMessage)i, + ctx.getChannel()); + } catch (Exception ex) { + logger.error("Error processing message", ex); + Channels.fireExceptionCaught(ctx, ex); + } + } + } + } else { + handleUnknownMessage(ctx, message); + } + } + + // **************** + // Message Handlers + // **************** + + /** + * A handler for messages on the channel that are not of type + * {@link SyncMessage} + * @param ctx the context + * @param message the message object + */ + protected void handleUnknownMessage(ChannelHandlerContext ctx, + Object message) { + logger.warn("[{}->{}] Unhandled message: {}", + new Object[]{getLocalNodeIdString(), + getRemoteNodeIdString(), + message.getClass().getCanonicalName()}); + } + + /** + * Handle a generic {@link SyncMessage} and dispatch to an appropriate + * handler + * @param bsm the message + * @param channel the channel on which the message arrived + */ + protected void handleSyncMessage(SyncMessage bsm, Channel channel) { + switch (bsm.getType()) { + case HELLO: + handleHello(bsm.getHello(), channel); + break; + case ECHO_REQUEST: + handleEchoRequest(bsm.getEchoRequest(), channel); + break; + case GET_REQUEST: + handleGetRequest(bsm.getGetRequest(), channel); + break; + case GET_RESPONSE: + handleGetResponse(bsm.getGetResponse(), channel); + break; + case 
PUT_REQUEST: + handlePutRequest(bsm.getPutRequest(), channel); + break; + case PUT_RESPONSE: + handlePutResponse(bsm.getPutResponse(), channel); + break; + case DELETE_REQUEST: + handleDeleteRequest(bsm.getDeleteRequest(), channel); + break; + case DELETE_RESPONSE: + handleDeleteResponse(bsm.getDeleteResponse(), channel); + break; + case SYNC_VALUE_RESPONSE: + handleSyncValueResponse(bsm.getSyncValueResponse(), channel); + break; + case SYNC_VALUE: + handleSyncValue(bsm.getSyncValue(), channel); + break; + case SYNC_OFFER: + handleSyncOffer(bsm.getSyncOffer(), channel); + break; + case FULL_SYNC_REQUEST: + handleFullSyncRequest(bsm.getFullSyncRequest(), channel); + break; + case SYNC_REQUEST: + handleSyncRequest(bsm.getSyncRequest(), channel); + break; + case CURSOR_REQUEST: + handleCursorRequest(bsm.getCursorRequest(), channel); + break; + case CURSOR_RESPONSE: + handleCursorResponse(bsm.getCursorResponse(), channel); + break; + case REGISTER_REQUEST: + handleRegisterRequest(bsm.getRegisterRequest(), channel); + break; + case REGISTER_RESPONSE: + handleRegisterResponse(bsm.getRegisterResponse(), channel); + break; + case ERROR: + handleError(bsm.getError(), channel); + break; + case ECHO_REPLY: + // do nothing; just the read will have reset our read timeout + // handler + break; + default: + logger.warn("[{}->{}] Unhandled message: {}", + new Object[]{getLocalNodeIdString(), + getRemoteNodeIdString(), + bsm.getType()}); + break; + } + + } + + protected void handleHello(HelloMessage request, Channel channel) { + unexpectedMessage(request.getHeader().getTransactionId(), + MessageType.HELLO, channel); + } + + protected void handleEchoRequest(EchoRequestMessage request, + Channel channel) { + EchoReplyMessage m = new EchoReplyMessage(); + AsyncMessageHeader header = new AsyncMessageHeader(); + header.setTransactionId(request.getHeader().getTransactionId()); + m.setHeader(header); + SyncMessage bsm = new SyncMessage(MessageType.ECHO_REPLY); + bsm.setEchoReply(m); + 
channel.write(bsm); + } + + protected void handleGetRequest(GetRequestMessage request, + Channel channel) { + unexpectedMessage(request.getHeader().getTransactionId(), + MessageType.GET_REQUEST, channel); + } + + protected void handleGetResponse(GetResponseMessage response, + Channel channel) { + unexpectedMessage(response.getHeader().getTransactionId(), + MessageType.GET_RESPONSE, channel); + } + + protected void handlePutRequest(PutRequestMessage request, + Channel channel) { + unexpectedMessage(request.getHeader().getTransactionId(), + MessageType.PUT_REQUEST, channel); + } + + protected void handlePutResponse(PutResponseMessage response, + Channel channel) { + unexpectedMessage(response.getHeader().getTransactionId(), + MessageType.PUT_RESPONSE, channel); + } + + protected void handleDeleteRequest(DeleteRequestMessage request, + Channel channel) { + unexpectedMessage(request.getHeader().getTransactionId(), + MessageType.DELETE_REQUEST, channel); + } + + protected void handleDeleteResponse(DeleteResponseMessage response, + Channel channel) { + unexpectedMessage(response.getHeader().getTransactionId(), + MessageType.PUT_RESPONSE, channel); + } + + protected void handleSyncValue(SyncValueMessage message, + Channel channel) { + unexpectedMessage(message.getHeader().getTransactionId(), + MessageType.SYNC_VALUE, channel); + } + + protected void handleSyncValueResponse(SyncValueResponseMessage message, + Channel channel) { + unexpectedMessage(message.getHeader().getTransactionId(), + MessageType.SYNC_VALUE_RESPONSE, channel); + } + + protected void handleSyncOffer(SyncOfferMessage message, + Channel channel) { + unexpectedMessage(message.getHeader().getTransactionId(), + MessageType.SYNC_OFFER, channel); + } + + protected void handleSyncRequest(SyncRequestMessage request, + Channel channel) { + unexpectedMessage(request.getHeader().getTransactionId(), + MessageType.SYNC_REQUEST, channel); + } + + protected void handleFullSyncRequest(FullSyncRequestMessage request, + 
Channel channel) { + unexpectedMessage(request.getHeader().getTransactionId(), + MessageType.FULL_SYNC_REQUEST, channel); + } + + protected void handleCursorRequest(CursorRequestMessage request, + Channel channel) { + unexpectedMessage(request.getHeader().getTransactionId(), + MessageType.CURSOR_REQUEST, channel); + } + + protected void handleCursorResponse(CursorResponseMessage response, + Channel channel) { + unexpectedMessage(response.getHeader().getTransactionId(), + MessageType.CURSOR_RESPONSE, channel); + } + + protected void handleRegisterRequest(RegisterRequestMessage request, + Channel channel) { + unexpectedMessage(request.getHeader().getTransactionId(), + MessageType.REGISTER_REQUEST, channel); + } + + protected void handleRegisterResponse(RegisterResponseMessage response, + Channel channel) { + unexpectedMessage(response.getHeader().getTransactionId(), + MessageType.REGISTER_RESPONSE, channel); + } + + protected void handleError(ErrorMessage error, Channel channel) { + logger.error("[{}->{}] Error for message {}: {}", + new Object[]{getLocalNodeIdString(), + getRemoteNodeIdString(), + error.getHeader().getTransactionId(), + error.getError().getMessage()}); + } + + // ***************** + // Utility functions + // ***************** + + /** + * Generate an error message from the provided transaction ID and + * exception + * @param transactionId the transaction Id + * @param error the exception + * @param type the type of the message that generated the error + * @return the {@link SyncError} message + */ + protected SyncMessage getError(int transactionId, Exception error, + MessageType type) { + int ec = SyncException.ErrorType.GENERIC.getValue(); + if (error instanceof SyncException) { + ec = ((SyncException)error).getErrorCode().getValue(); + } + SyncError m = new SyncError(); + m.setErrorCode(ec); + m.setMessage(error.getMessage()); + ErrorMessage em = new ErrorMessage(); + em.setError(m); + em.setType(type); + AsyncMessageHeader header = new 
AsyncMessageHeader(); + header.setTransactionId(transactionId); + em.setHeader(header); + SyncMessage bsm = new SyncMessage(MessageType.ERROR); + bsm.setError(em); + return bsm; + } + + /** + * Send an error to the channel indicating that we got an unexpected + * message for this type of RPC client + * @param transactionId the transaction ID for the message that generated + * the error + * @param type The type of the message that generated the error + * @param channel the channel to write the error + */ + protected void unexpectedMessage(int transactionId, + MessageType type, + Channel channel) { + String message = "Received unexpected message: " + type; + logger.warn("[{}->{}] {}", + new Object[]{getLocalNodeIdString(), + getRemoteNodeIdString(), + message}); + channel.write(getError(transactionId, + new SyncException(message), type)); + } + + /** + * Get a transaction ID suitable for sending an async message + * @return the unique transaction ID + */ + protected abstract int getTransactionId(); + + /** + * Get the node ID for the remote node if its connected + * @return the node ID + */ + protected abstract Short getRemoteNodeId(); + + /** + * Get the node ID for the remote node if its connected as a string + * for use output + * @return the node ID + */ + protected String getRemoteNodeIdString() { + return ""+getRemoteNodeId(); + } + + /** + * Get the node ID for the local node if appropriate + * @return the node ID. 
Null if this is a client + */ + protected abstract Short getLocalNodeId(); + + /** + * Get the node ID for the local node as a string for use output + * @return the node ID + */ + protected String getLocalNodeIdString() { + return ""+getLocalNodeId(); + } + +} diff --git a/src/main/java/org/sdnplatform/sync/internal/rpc/HandshakeTimeoutHandler.java b/src/main/java/org/sdnplatform/sync/internal/rpc/HandshakeTimeoutHandler.java new file mode 100644 index 0000000000000000000000000000000000000000..2ba0e4bb9be944b6729ee23cf0bf31b70040e4d0 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/rpc/HandshakeTimeoutHandler.java @@ -0,0 +1,105 @@ +/** +* Copyright 2011, Big Switch Networks, Inc. +* Originally created by David Erickson, Stanford University +* +* Licensed under the Apache License, Version 2.0 (the "License"); you may +* not use this file except in compliance with the License. You may obtain +* a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +* License for the specific language governing permissions and limitations +* under the License. 
+**/ + +package org.sdnplatform.sync.internal.rpc; + +import java.util.concurrent.TimeUnit; + +import org.jboss.netty.channel.ChannelHandlerContext; +import org.jboss.netty.channel.ChannelStateEvent; +import org.jboss.netty.channel.Channels; +import org.jboss.netty.channel.SimpleChannelUpstreamHandler; +import org.jboss.netty.util.ExternalResourceReleasable; +import org.jboss.netty.util.Timeout; +import org.jboss.netty.util.Timer; +import org.jboss.netty.util.TimerTask; +import org.sdnplatform.sync.error.HandshakeTimeoutException; + + +/** + * Trigger a timeout if a switch fails to complete handshake soon enough + */ +public class HandshakeTimeoutHandler + extends SimpleChannelUpstreamHandler + implements ExternalResourceReleasable { + static final HandshakeTimeoutException EXCEPTION = + new HandshakeTimeoutException(); + + final RPCChannelHandler handler; + final Timer timer; + final long timeoutNanos; + volatile Timeout timeout; + + public HandshakeTimeoutHandler(RPCChannelHandler handler, + Timer timer, + long timeoutSeconds) { + super(); + this.handler = handler; + this.timer = timer; + this.timeoutNanos = TimeUnit.SECONDS.toNanos(timeoutSeconds); + + } + + @Override + public void channelOpen(ChannelHandlerContext ctx, ChannelStateEvent e) + throws Exception { + if (timeoutNanos > 0) { + timeout = timer.newTimeout(new HandshakeTimeoutTask(ctx), + timeoutNanos, TimeUnit.NANOSECONDS); + } + ctx.sendUpstream(e); + } + + @Override + public void channelClosed(ChannelHandlerContext ctx, ChannelStateEvent e) + throws Exception { + if (timeout != null) { + timeout.cancel(); + timeout = null; + } + } + + @Override + public void releaseExternalResources() { + timer.stop(); + } + + private final class HandshakeTimeoutTask implements TimerTask { + + private final ChannelHandlerContext ctx; + + HandshakeTimeoutTask(ChannelHandlerContext ctx) { + this.ctx = ctx; + } + + @Override + public void run(Timeout timeout) throws Exception { + if (timeout.isCancelled()) { + return; + 
} + + if (!ctx.getChannel().isOpen()) { + return; + } + if (!handler.isClientConnection && + ((handler.remoteNode == null || + !handler.rpcService.isConnected(handler.remoteNode. + getNodeId())))) + Channels.fireExceptionCaught(ctx, EXCEPTION); + } + } +} diff --git a/src/main/java/org/sdnplatform/sync/internal/rpc/RPCChannelHandler.java b/src/main/java/org/sdnplatform/sync/internal/rpc/RPCChannelHandler.java new file mode 100644 index 0000000000000000000000000000000000000000..eb33ec44bbf882f0b4fb17e882ab8c44d56eb3e7 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/rpc/RPCChannelHandler.java @@ -0,0 +1,541 @@ +package org.sdnplatform.sync.internal.rpc; + +import java.nio.ByteBuffer; +import java.util.Iterator; +import java.util.List; +import java.util.Map.Entry; + +import net.floodlightcontroller.core.annotations.LogMessageCategory; +import net.floodlightcontroller.core.annotations.LogMessageDoc; + +import org.jboss.netty.channel.Channel; +import org.jboss.netty.channel.ChannelHandlerContext; +import org.jboss.netty.channel.ChannelStateEvent; +import org.sdnplatform.sync.IVersion; +import org.sdnplatform.sync.Versioned; +import org.sdnplatform.sync.ISyncService.Scope; +import org.sdnplatform.sync.error.SyncException; +import org.sdnplatform.sync.internal.Cursor; +import org.sdnplatform.sync.internal.SyncManager; +import org.sdnplatform.sync.internal.config.Node; +import org.sdnplatform.sync.internal.rpc.RPCService.NodeMessage; +import org.sdnplatform.sync.internal.store.IStorageEngine; +import org.sdnplatform.sync.internal.util.ByteArray; +import org.sdnplatform.sync.internal.version.VectorClock; +import org.sdnplatform.sync.thrift.*; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +/** + * Channel handler for the RPC service + * @author readams + */ +@LogMessageCategory("State Synchronization") +public class RPCChannelHandler extends AbstractRPCChannelHandler { + protected static final Logger logger = + 
LoggerFactory.getLogger(RPCChannelHandler.class); + + protected SyncManager syncManager; + protected RPCService rpcService; + protected Node remoteNode; + protected boolean isClientConnection = false; + + public RPCChannelHandler(SyncManager syncManager, + RPCService rpcService) { + super(); + this.syncManager = syncManager; + this.rpcService = rpcService; + } + + // **************************** + // IdleStateAwareChannelHandler + // **************************** + + @Override + public void channelOpen(ChannelHandlerContext ctx, + ChannelStateEvent e) throws Exception { + rpcService.cg.add(ctx.getChannel()); + } + + @Override + public void channelDisconnected(ChannelHandlerContext ctx, + ChannelStateEvent e) throws Exception { + if (remoteNode != null) { + rpcService.disconnectNode(remoteNode.getNodeId()); + } + } + + // ****************************************** + // AbstractRPCChannelHandler message handlers + // ****************************************** + + @Override + @LogMessageDoc(level="ERROR", + message="[{id}->{id}] Attempted connection from unrecognized " + + "floodlight node {id}; disconnecting", + explanation="A unknown node connected. This can happen " + + "transiently if new nodes join the cluster.", + recommendation="If the problem persists, verify your cluster" + + "configuration and that you don't have unauthorized agents " + + "in your network.") + protected void handleHello(HelloMessage hello, Channel channel) { + if (!hello.isSetNodeId()) { + // this is a client connection. 
Don't set this up as a node + // connection + isClientConnection = true; + return; + } + remoteNode = syncManager.getClusterConfig().getNode(hello.getNodeId()); + if (remoteNode == null) { + logger.error("[{}->{}] Attempted connection from unrecognized " + + "floodlight node {}; disconnecting", + new Object[]{getLocalNodeIdString(), + getRemoteNodeIdString(), + hello.getNodeId()}); + channel.close(); + return; + } + rpcService.nodeConnected(remoteNode.getNodeId(), channel); + + FullSyncRequestMessage srm = new FullSyncRequestMessage(); + AsyncMessageHeader header = new AsyncMessageHeader(); + header.setTransactionId(getTransactionId()); + srm.setHeader(header); + SyncMessage bsm = new SyncMessage(MessageType.FULL_SYNC_REQUEST); + channel.write(bsm); + + // XXX - TODO - if last connection was longer ago than the tombstone + // timeout, then we need to do a complete flush and reload of our + // state. This is complex though since this applies across entire + // partitions and not just single nodes. We'd need to identify the + // partition and nuke the smaller half (or lower priority in the case + // of an even split). Downstream listeners would need to be able to + // handle a state nuke as well. A simple way to nuke would be to ensure + // floodlight is restarted in the smaller partition. 
+ } + + @Override + protected void handleGetRequest(GetRequestMessage request, + Channel channel) { + String storeName = request.getStoreName(); + try { + IStorageEngine<ByteArray, byte[]> store = + syncManager.getRawStore(storeName); + + GetResponseMessage m = new GetResponseMessage(); + AsyncMessageHeader header = new AsyncMessageHeader(); + header.setTransactionId(request.getHeader().getTransactionId()); + m.setHeader(header); + + List<Versioned<byte[]>> values = + store.get(new ByteArray(request.getKey())); + for (Versioned<byte[]> value : values) { + m.addToValues(TProtocolUtil.getTVersionedValue(value)); + } + + SyncMessage bsm = new SyncMessage(MessageType.GET_RESPONSE); + bsm.setGetResponse(m); + channel.write(bsm); + } catch (Exception e) { + channel.write(getError(request.getHeader().getTransactionId(), e, + MessageType.GET_REQUEST)); + } + } + + @Override + protected void handlePutRequest(PutRequestMessage request, + Channel channel) { + String storeName = request.getStoreName(); + try { + IStorageEngine<ByteArray, byte[]> store = + syncManager.getRawStore(storeName); + + ByteArray key = new ByteArray(request.getKey()); + Versioned<byte[]> value = null; + if (request.isSetVersionedValue()) { + value = TProtocolUtil. 
+ getVersionedValued(request.getVersionedValue()); + value.increment(syncManager.getLocalNodeId(), + System.currentTimeMillis()); + } else if (request.isSetValue()) { + byte[] rvalue = request.getValue(); + List<IVersion> versions = store.getVersions(key); + VectorClock newclock = new VectorClock(); + for (IVersion v : versions) { + newclock = newclock.merge((VectorClock)v); + } + newclock = newclock.incremented(syncManager.getLocalNodeId(), + System.currentTimeMillis()); + value = Versioned.value(rvalue, newclock); + } else { + throw new SyncException("No value specified for put"); + } + + store.put(key, value); + + PutResponseMessage m = new PutResponseMessage(); + AsyncMessageHeader header = new AsyncMessageHeader(); + header.setTransactionId(request.getHeader().getTransactionId()); + m.setHeader(header); + + SyncMessage bsm = new SyncMessage(MessageType.PUT_RESPONSE); + bsm.setPutResponse(m); + channel.write(bsm); + } catch (Exception e) { + channel.write(getError(request.getHeader().getTransactionId(), e, + MessageType.PUT_REQUEST)); + } + } + + @Override + protected void handleDeleteRequest(DeleteRequestMessage request, + Channel channel) { + try { + String storeName = request.getStoreName(); + IStorageEngine<ByteArray, byte[]> store = + syncManager.getRawStore(storeName); + ByteArray key = new ByteArray(request.getKey()); + VectorClock newclock; + if (request.isSetVersion()) { + newclock = TProtocolUtil.getVersion(request.getVersion()); + } else { + newclock = new VectorClock(); + List<IVersion> versions = store.getVersions(key); + for (IVersion v : versions) { + newclock = newclock.merge((VectorClock)v); + } + } + newclock = + newclock.incremented(rpcService.syncManager.getLocalNodeId(), + System.currentTimeMillis()); + Versioned<byte[]> value = Versioned.value(null, newclock); + store.put(key, value); + + DeleteResponseMessage m = new DeleteResponseMessage(); + AsyncMessageHeader header = new AsyncMessageHeader(); + 
header.setTransactionId(request.getHeader().getTransactionId()); + m.setHeader(header); + + SyncMessage bsm = + new SyncMessage(MessageType.DELETE_RESPONSE); + bsm.setDeleteResponse(m); + channel.write(bsm); + } catch (Exception e) { + channel.write(getError(request.getHeader().getTransactionId(), e, + MessageType.DELETE_REQUEST)); + } + } + + @Override + protected void handleSyncValue(SyncValueMessage request, + Channel channel) { + if (request.isSetResponseTo()) + rpcService.messageAcked(MessageType.SYNC_REQUEST, + getRemoteNodeId()); + try { + if (logger.isTraceEnabled()) { + logger.trace("[{}->{}] Got syncvalue {}", + new Object[]{getLocalNodeIdString(), + getRemoteNodeIdString(), + request}); + } + + Scope scope = TProtocolUtil.getScope(request.getStore().getScope()); + for (KeyedValues kv : request.getValues()) { + Iterable<VersionedValue> tvvi = kv.getValues(); + Iterable<Versioned<byte[]>> vs = new TVersionedValueIterable(tvvi); + syncManager.writeSyncValue(request.getStore().getStoreName(), + scope, + request.getStore().isPersist(), + kv.getKey(), vs); + } + + SyncValueResponseMessage m = new SyncValueResponseMessage(); + m.setCount(request.getValuesSize()); + AsyncMessageHeader header = new AsyncMessageHeader(); + header.setTransactionId(request.getHeader().getTransactionId()); + m.setHeader(header); + SyncMessage bsm = + new SyncMessage(MessageType.SYNC_VALUE_RESPONSE); + bsm.setSyncValueResponse(m); + channel.write(bsm); + } catch (Exception e) { + + channel.write(getError(request.getHeader().getTransactionId(), e, + MessageType.SYNC_VALUE)); + } + } + + protected void handleSyncValueResponse(SyncValueResponseMessage message, + Channel channel) { + rpcService.messageAcked(MessageType.SYNC_VALUE, getRemoteNodeId()); + } + + @Override + protected void handleSyncOffer(SyncOfferMessage request, + Channel channel) { + try { + String storeName = request.getStore().getStoreName(); + + SyncRequestMessage srm = new SyncRequestMessage(); + AsyncMessageHeader 
header = new AsyncMessageHeader(); + header.setTransactionId(request.getHeader().getTransactionId()); + srm.setHeader(header); + srm.setStore(request.getStore()); + + for (KeyedVersions kv : request.getVersions()) { + Iterable<org.sdnplatform.sync.thrift.VectorClock> tvci = + kv.getVersions(); + Iterable<VectorClock> vci = new TVersionIterable(tvci); + + boolean wantKey = syncManager.handleSyncOffer(storeName, + kv.getKey(), vci); + if (wantKey) + srm.addToKeys(kv.bufferForKey()); + } + + SyncMessage bsm = + new SyncMessage(MessageType.SYNC_REQUEST); + bsm.setSyncRequest(srm); + if (logger.isTraceEnabled()) { + logger.trace("[{}->{}] Sending SyncRequest with {} elements", + new Object[]{getLocalNodeIdString(), + getRemoteNodeIdString(), + srm.getKeysSize()}); + } + channel.write(bsm); + + } catch (Exception e) { + channel.write(getError(request.getHeader().getTransactionId(), + e, MessageType.SYNC_OFFER)); + } + } + + @Override + protected void handleSyncRequest(SyncRequestMessage request, + Channel channel) { + rpcService.messageAcked(MessageType.SYNC_OFFER, getRemoteNodeId()); + if (!request.isSetKeys()) return; + + String storeName = request.getStore().getStoreName(); + try { + IStorageEngine<ByteArray, byte[]> store = + syncManager.getRawStore(storeName); + + SyncMessage bsm = + TProtocolUtil.getTSyncValueMessage(request.getStore()); + SyncValueMessage svm = bsm.getSyncValue(); + svm.setResponseTo(request.getHeader().getTransactionId()); + svm.getHeader().setTransactionId(rpcService.getTransactionId()); + + for (ByteBuffer key : request.getKeys()) { + ByteArray keyArray = new ByteArray(key.array()); + List<Versioned<byte[]>> values = + store.get(keyArray); + if (values == null || values.size() == 0) continue; + KeyedValues kv = + TProtocolUtil.getTKeyedValues(keyArray, values); + svm.addToValues(kv); + } + + if (svm.isSetValues()) + rpcService.syncQueue.add(new NodeMessage(getRemoteNodeId(), + bsm)); + } catch (Exception e) { + 
channel.write(getError(request.getHeader().getTransactionId(), e, + MessageType.SYNC_REQUEST)); + } + } + + @Override + protected void handleFullSyncRequest(FullSyncRequestMessage request, + Channel channel) { + startAntientropy(); + } + + protected void handleCursorRequest(CursorRequestMessage request, + Channel channel) { + try { + Cursor c = null; + if (request.isSetCursorId()) { + c = syncManager.getCursor(request.getCursorId()); + } else { + c = syncManager.newCursor(request.getStoreName()); + } + if (c == null) { + throw new SyncException("Unrecognized cursor"); + } + + CursorResponseMessage m = new CursorResponseMessage(); + AsyncMessageHeader header = new AsyncMessageHeader(); + header.setTransactionId(request.getHeader().getTransactionId()); + m.setHeader(header); + m.setCursorId(c.getCursorId()); + + if (request.isClose()) { + syncManager.closeCursor(c); + } else { + int i = 0; + while (i < 50 && c.hasNext()) { + Entry<ByteArray, List<Versioned<byte[]>>> e = c.next(); + + m.addToValues(TProtocolUtil.getTKeyedValues(e.getKey(), + e.getValue())); + i += 1; + } + } + + SyncMessage bsm = + new SyncMessage(MessageType.CURSOR_RESPONSE); + bsm.setCursorResponse(m); + channel.write(bsm); + } catch (Exception e) { + channel.write(getError(request.getHeader().getTransactionId(), + e, MessageType.CURSOR_REQUEST)); + } + } + + @Override + protected void handleRegisterRequest(RegisterRequestMessage request, + Channel channel) { + try { + Scope scope = TProtocolUtil.getScope(request.store.getScope()); + if (request.store.isPersist()) + syncManager.registerPersistentStore(request.store.storeName, + scope); + else + syncManager.registerStore(request.store.storeName, scope); + RegisterResponseMessage m = new RegisterResponseMessage(); + AsyncMessageHeader header = new AsyncMessageHeader(); + header.setTransactionId(request.getHeader().getTransactionId()); + m.setHeader(header); + SyncMessage bsm = + new SyncMessage(MessageType.REGISTER_RESPONSE); + 
bsm.setRegisterResponse(m); + channel.write(bsm); + } catch (Exception e) { + channel.write(getError(request.getHeader().getTransactionId(), e, + MessageType.REGISTER_REQUEST)); + } + } + + @Override + protected void handleError(ErrorMessage error, Channel channel) { + rpcService.messageAcked(error.getType(), getRemoteNodeId()); + super.handleError(error, channel); + } + + // ************************* + // AbstractRPCChannelHandler + // ************************* + + @Override + protected Short getLocalNodeId() { + return syncManager.getLocalNodeId(); + } + + @Override + protected Short getRemoteNodeId() { + if (remoteNode != null) + return remoteNode.getNodeId(); + return null; + } + + @Override + protected String getLocalNodeIdString() { + return ""+getLocalNodeId(); + } + + @Override + protected String getRemoteNodeIdString() { + return ""+getRemoteNodeId(); + } + + @Override + protected int getTransactionId() { + return rpcService.getTransactionId(); + } + + // ***************** + // Utility functions + // ***************** + + protected void startAntientropy() { + // Run antientropy in a background task so we don't use up an I/O + // thread. 
Note that this task will result in lots of traffic + // that will use I/O threads but each of those will be in manageable + // chunks + Runnable arTask = new Runnable() { + @Override + public void run() { + syncManager.antientropy(remoteNode); + } + }; + syncManager.getThreadPool().getScheduledExecutor().execute(arTask); + } + + + protected static class TVersionedValueIterable + implements Iterable<Versioned<byte[]>> { + final Iterable<VersionedValue> tvvi; + + public TVersionedValueIterable(Iterable<VersionedValue> tvvi) { + this.tvvi = tvvi; + } + + @Override + public Iterator<Versioned<byte[]>> iterator() { + final Iterator<VersionedValue> vs = tvvi.iterator(); + return new Iterator<Versioned<byte[]>>() { + + @Override + public boolean hasNext() { + return vs.hasNext(); + } + + @Override + public Versioned<byte[]> next() { + return TProtocolUtil.getVersionedValued(vs.next()); + } + + @Override + public void remove() { + vs.remove(); + } + }; + } + } + + protected static class TVersionIterable + implements Iterable<VectorClock> { + final Iterable<org.sdnplatform.sync.thrift.VectorClock> tcvi; + + public TVersionIterable(Iterable<org.sdnplatform.sync.thrift.VectorClock> tcvi) { + this.tcvi = tcvi; + } + + @Override + public Iterator<VectorClock> iterator() { + final Iterator<org.sdnplatform.sync.thrift.VectorClock> tcs = + tcvi.iterator(); + return new Iterator<VectorClock>() { + + @Override + public boolean hasNext() { + return tcs.hasNext(); + } + + @Override + public VectorClock next() { + return TProtocolUtil.getVersion(tcs.next()); + } + + @Override + public void remove() { + tcs.remove(); + } + }; + } + } +} \ No newline at end of file diff --git a/src/main/java/org/sdnplatform/sync/internal/rpc/RPCPipelineFactory.java b/src/main/java/org/sdnplatform/sync/internal/rpc/RPCPipelineFactory.java new file mode 100644 index 0000000000000000000000000000000000000000..888314ddf3ec845f1da2d1c14e4a87efecc3c78f --- /dev/null +++ 
b/src/main/java/org/sdnplatform/sync/internal/rpc/RPCPipelineFactory.java @@ -0,0 +1,60 @@ +package org.sdnplatform.sync.internal.rpc; + +import org.jboss.netty.channel.ChannelPipeline; +import org.jboss.netty.channel.ChannelPipelineFactory; +import org.jboss.netty.channel.Channels; +import org.jboss.netty.handler.timeout.IdleStateHandler; +import org.jboss.netty.handler.timeout.ReadTimeoutHandler; +import org.jboss.netty.util.HashedWheelTimer; +import org.jboss.netty.util.Timer; +import org.sdnplatform.sync.internal.SyncManager; + + +/** + * Pipeline factory for the sync service. + * @see SyncManager + * @author readams + */ +public class RPCPipelineFactory implements ChannelPipelineFactory { + + protected SyncManager syncManager; + protected RPCService rpcService; + protected Timer timer; + + private static final int maxFrameSize = 512 * 1024; + + public RPCPipelineFactory(SyncManager syncManager, + RPCService rpcService) { + super(); + this.syncManager = syncManager; + this.rpcService = rpcService; + + this.timer = new HashedWheelTimer(); + } + + @Override + public ChannelPipeline getPipeline() throws Exception { + RPCChannelHandler channelHandler = + new RPCChannelHandler(syncManager, rpcService); + + IdleStateHandler idleHandler = + new IdleStateHandler(timer, 5, 10, 0); + ReadTimeoutHandler readTimeoutHandler = + new ReadTimeoutHandler(timer, 30); + + ChannelPipeline pipeline = Channels.pipeline(); + pipeline.addLast("idle", idleHandler); + pipeline.addLast("timeout", readTimeoutHandler); + pipeline.addLast("handshaketimeout", + new HandshakeTimeoutHandler(channelHandler, timer, 10)); + + pipeline.addLast("frameDecoder", + new ThriftFrameDecoder(maxFrameSize)); + pipeline.addLast("frameEncoder", + new ThriftFrameEncoder()); + + pipeline.addLast("handler", channelHandler); + return pipeline; + } + +} diff --git a/src/main/java/org/sdnplatform/sync/internal/rpc/RPCService.java b/src/main/java/org/sdnplatform/sync/internal/rpc/RPCService.java new file mode 
100644 index 0000000000000000000000000000000000000000..45fb44bad098b35bc8811a29fb9c7b76d225716b --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/rpc/RPCService.java @@ -0,0 +1,655 @@ +package org.sdnplatform.sync.internal.rpc; + +import java.net.InetSocketAddress; +import java.net.SocketAddress; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.Executor; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; + +import net.floodlightcontroller.core.annotations.LogMessageCategory; +import net.floodlightcontroller.core.annotations.LogMessageDoc; +import net.floodlightcontroller.core.annotations.LogMessageDocs; +import net.floodlightcontroller.core.util.SingletonTask; +import org.jboss.netty.bootstrap.ClientBootstrap; +import org.jboss.netty.bootstrap.ServerBootstrap; +import org.jboss.netty.channel.Channel; +import org.jboss.netty.channel.ChannelFuture; +import org.jboss.netty.channel.ChannelFutureListener; +import org.jboss.netty.channel.ChannelPipelineFactory; +import org.jboss.netty.channel.group.ChannelGroup; +import org.jboss.netty.channel.group.DefaultChannelGroup; +import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory; +import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory; +import org.jboss.netty.util.internal.LinkedTransferQueue; +import org.sdnplatform.sync.internal.SyncManager; +import org.sdnplatform.sync.internal.config.Node; +import org.sdnplatform.sync.internal.util.Pair; +import org.sdnplatform.sync.thrift.SyncMessage; +import org.sdnplatform.sync.thrift.MessageType; 
+import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + + +/** + * A lightweight RPC mechanism built on netty. + * @author readams + */ +@LogMessageCategory("State Synchronization") +public class RPCService { + protected static final Logger logger = + LoggerFactory.getLogger(RPCService.class); + + /** + * Sync manager associated with this RPC service + */ + protected SyncManager syncManager; + + /** + * Channel group that will hold all our channels + */ + final ChannelGroup cg = new DefaultChannelGroup("Internal RPC"); + + /** + * {@link Executor} used for netty boss threads + */ + protected Executor bossExecutor; + + /** + * {@link Executor} used for netty worker threads + */ + protected Executor workerExecutor; + + /** + * Netty {@link ClientBootstrap} used for creating client connections + */ + protected ClientBootstrap clientBootstrap; + + /** + * Netty {@link ServerBootstrap} used for creating server connections + */ + protected ServerBootstrap serverBootstrap; + + /** + * Node connections + */ + protected HashMap<Short, NodeConnection> connections = + new HashMap<Short, NodeConnection>(); + + /** + * Transaction ID used in message headers in the RPC protocol + */ + protected AtomicInteger transactionId = new AtomicInteger(); + + /** + * Buffer size for sockets + */ + public static final int SEND_BUFFER_SIZE = 4 * 1024 * 1024; + + /** + * Connect timeout for client connections + */ + public static final int CONNECT_TIMEOUT = 500; + + /** + * True after the {@link RPCService#run()} method is called + */ + protected boolean started = false; + + /** + * true after the {@link RPCService#shutdown()} method + * is called. + */ + protected volatile boolean shutDown = false; + + /** + * Task to periodically ensure that connections are active + */ + protected SingletonTask reconnectTask; + + /** + * If we want to rate-limit certain types of messages, we can do + * so by limiting the overall number of outstanding messages. 
+ * The number of such messages will be stored in the + * {@link MessageWindow} + */ + protected ConcurrentHashMap<Short, MessageWindow> messageWindows; + protected static final EnumSet<MessageType> windowedTypes = + EnumSet.of(MessageType.SYNC_VALUE, + MessageType.SYNC_OFFER); + + /** + * A thread pool for handling sync messages. These messages require + * a separate pool since writing to the node can be a blocking operation + * while waiting for window capacity, and blocking the I/O threads could + * lead to deadlock + * @see SyncMessageWorker + */ + protected ExecutorService syncExecutor; + + /** + * A queue for holding sync messages that are awaiting being written + * to the channel. + * @see SyncMessageWorker + */ + protected LinkedTransferQueue<NodeMessage> syncQueue = + new LinkedTransferQueue<NodeMessage>(); + + /** + * Number of workers in the sync message thread pool + */ + protected static final int SYNC_MESSAGE_POOL = 2; + + /** + * The maximum number of outstanding pending messages for messages + * that use message windows + */ + protected static final int MAX_PENDING_MESSAGES = 500; + + public RPCService(SyncManager syncManager) { + super(); + this.syncManager = syncManager; + + messageWindows = new ConcurrentHashMap<Short, MessageWindow>(); + } + + // ************* + // public methods + // ************* + + /** + * Start the RPC service + */ + public void run() { + started = true; + + final ThreadGroup tg1 = new ThreadGroup("Sync Message Handlers"); + tg1.setMaxPriority(Thread.NORM_PRIORITY - 3); + ThreadFactory f1 = new ThreadFactory() { + AtomicInteger id = new AtomicInteger(); + + @Override + public Thread newThread(Runnable runnable) { + return new Thread(tg1, runnable, + "SyncMessage-" + id.getAndIncrement()); + } + }; + syncExecutor = Executors.newCachedThreadPool(f1); + for (int i = 0; i < SYNC_MESSAGE_POOL; i++) { + syncExecutor.execute(new SyncMessageWorker()); + } + + final ThreadGroup tg2 = new ThreadGroup("Sync I/O Threads"); + 
tg2.setMaxPriority(Thread.NORM_PRIORITY - 1); + ThreadFactory f2 = new ThreadFactory() { + @Override + public Thread newThread(Runnable runnable) { + return new Thread(tg2, runnable); + } + }; + + bossExecutor = Executors.newCachedThreadPool(f2); + workerExecutor = Executors.newCachedThreadPool(f2); + + ChannelPipelineFactory pipelineFactory = + new RPCPipelineFactory(syncManager, this); + + startServer(pipelineFactory); + startClients(pipelineFactory); + } + + /** + * Stop the RPC service + */ + @LogMessageDocs({ + @LogMessageDoc(level="WARN", + message="Failed to cleanly shut down RPC server", + explanation="Could not close all open sockets cleanly"), + @LogMessageDoc(level="WARN", + message="Interrupted while shutting down RPC server", + explanation="Could not close all open sockets cleanly") + }) + public void shutdown() { + shutDown = true; + try { + if (!cg.close().await(5, TimeUnit.SECONDS)) { + logger.warn("Failed to cleanly shut down RPC server"); + } + clientBootstrap.releaseExternalResources(); + serverBootstrap.releaseExternalResources(); + } catch (InterruptedException e) { + logger.warn("Interrupted while shutting down RPC server"); + } + logger.debug("Internal floodlight RPC shut down"); + } + + /** + * Get a suitable transaction ID for sending a message + * @return the unique transaction iD + */ + public int getTransactionId() { + return transactionId.getAndIncrement(); + } + + /** + * Write a message to the node specified + * @param nodeId the node ID + * @param bsm the message to write + * @return <code>true</code> if the message was actually written to + * the channel. Note this is not the same as having been sent to the + * other node. 
+ * @throws InterruptedException + */ + public boolean writeToNode(Short nodeId, SyncMessage bsm) + throws InterruptedException { + if (nodeId == null) return false; + NodeConnection nc = connections.get(nodeId); + if (nc != null && nc.state == NodeConnectionState.CONNECTED) { + waitForMessageWindow(bsm.getType(), nodeId, 0); + nc.nodeChannel.write(bsm); + return true; + } + return false; + } + + /** + * Remove the connection from the connection registry and clean up + * any remaining shrapnel + * @param nodeId + */ + public void disconnectNode(short nodeId) { + synchronized (connections) { + Short n = Short.valueOf(nodeId); + MessageWindow mw = messageWindows.get(n); + if (mw != null) { + mw.lock.lock(); + mw.disconnected = true; + try { + mw.full.signalAll(); + messageWindows.remove(n); + } finally { + mw.lock.unlock(); + } + } + + NodeConnection nc = connections.get(nodeId); + if (nc != null) { + nc.nuke(); + } + connections.remove(nodeId); + } + } + + /** + * Check whether all links are established + * @return + */ + public boolean isFullyConnected() { + for (Node n : syncManager.getClusterConfig().getNodes()) { + if (n.getNodeId() != syncManager.getLocalNodeId() && + !isConnected(n.getNodeId())) { + if (logger.isTraceEnabled()) { + logger.trace("[{}->{}] missing connection", + syncManager.getLocalNodeId(), + n.getNodeId()); + } + return false; + } + } + return true; + } + + /** + * Find out if a particular node is connected + * @param nodeId + * @return true if the node is connected + */ + public boolean isConnected(short nodeId) { + NodeConnection nc = connections.get(nodeId); + return (nc != null && nc.state == NodeConnectionState.CONNECTED); + } + + /** + * Called when a message is acknowledged by a remote node + * @param type the message type + * @param nodeId the remote node + */ + public void messageAcked(MessageType type, Short nodeId) { + if (nodeId == null) return; + if (!windowedTypes.contains(type)) return; + + MessageWindow mw = 
messageWindows.get(nodeId); + if (mw == null) return; + + int pending = mw.pending.decrementAndGet(); + if (pending < MAX_PENDING_MESSAGES) { + mw.lock.lock(); + try { + mw.full.signalAll(); + } finally { + mw.lock.unlock(); + } + } + } + + // ************* + // Local methods + // ************* + + /** + * Get the appropriate {@link MessageWindow} object for the given node. + * @param nodeId the remote node + * @return a {@link MessageWindow} object + */ + private MessageWindow getMW(short nodeId) { + + if (!isConnected(nodeId)) return null; + + Short n = Short.valueOf(nodeId); + MessageWindow mw = messageWindows.get(n); + if (mw == null) { + mw = new MessageWindow(); + MessageWindow old = messageWindows.putIfAbsent(n, mw); + if (old != null) mw = old; + } + + return mw; + } + + /** + * Wait for a message window slow to be available for the given node and + * message type + * @param type the type of the message + * @param nodeId the node Id + * @param maxWait the maximum time to wait in milliseconds + * @throws InterruptedException + * @return <code>true</code> if the message can be safely written + */ + private boolean waitForMessageWindow(MessageType type, short nodeId, + long maxWait) + throws InterruptedException { + if (!windowedTypes.contains(type)) return true; + + long start = System.nanoTime(); + + // note that this can allow slightly more than the maximum number + // of messages. This is fine. 
+ MessageWindow mw = getMW(nodeId); + if (!mw.disconnected && + mw.pending.get() >= MAX_PENDING_MESSAGES) { + mw.lock.lock(); + try { + while (!mw.disconnected && + mw.pending.get() >= MAX_PENDING_MESSAGES) { + long now = System.nanoTime(); + if (maxWait > 0 && + (now - start) > maxWait * 1000) return false; + mw.full.awaitNanos(now - start); + } + } finally { + mw.lock.unlock(); + } + } + mw = getMW(nodeId); + if (mw != null) + mw.pending.getAndIncrement(); + + return true; + } + + /** + * Start listening sockets + */ + @LogMessageDoc(level="INFO", + message="Listening for internal floodlight RPC on {port}", + explanation="The internal RPC service is ready for connections") + protected void startServer(ChannelPipelineFactory pipelineFactory) { + final ServerBootstrap bootstrap = + new ServerBootstrap( + new NioServerSocketChannelFactory(bossExecutor, + workerExecutor)); + bootstrap.setOption("reuseAddr", true); + bootstrap.setOption("child.keepAlive", true); + bootstrap.setOption("child.tcpNoDelay", true); + bootstrap.setOption("child.sendBufferSize", SEND_BUFFER_SIZE); + bootstrap.setOption("child.receiveBufferSize", SEND_BUFFER_SIZE); + + bootstrap.setPipelineFactory(pipelineFactory); + serverBootstrap = bootstrap; + + int port = syncManager.getClusterConfig().getNode().getPort(); + InetSocketAddress sa = new InetSocketAddress(port); + cg.add(bootstrap.bind(sa)); + + logger.info("Listening for internal floodlight RPC on {}", sa); + } + + /** + * Wait for the client connection + * @author readams + */ + protected class ConnectCFListener implements ChannelFutureListener { + protected Node node; + + public ConnectCFListener(Node node) { + super(); + this.node = node; + } + + @Override + public void operationComplete(ChannelFuture cf) throws Exception { + if (!cf.isSuccess()) { + synchronized (connections) { + NodeConnection c = connections.remove(node.getNodeId()); + if (c != null) c.nuke(); + cf.getChannel().close(); + } + + String message = "[unknown error]"; + 
if (cf.isCancelled()) message = "Timed out on connect"; + if (cf.getCause() != null) message = cf.getCause().getMessage(); + logger.debug("[{}->{}] Could not connect to RPC " + + "node: {}", + new Object[]{syncManager.getLocalNodeId(), + node.getNodeId(), + message}); + } else { + logger.trace("[{}->{}] Channel future successful", + syncManager.getLocalNodeId(), + node.getNodeId()); + } + } + } + + /** + * Add the node connection to the node connection map + * @param nodeId the node ID for the channel + * @param channel the new channel + */ + protected void nodeConnected(short nodeId, Channel channel) { + logger.debug("[{}->{}] Connection established", + syncManager.getLocalNodeId(), + nodeId); + synchronized (connections) { + NodeConnection c = connections.get(nodeId); + if (c == null) { + connections.put(nodeId, c = new NodeConnection()); + } + c.nodeChannel = channel; + c.state = NodeConnectionState.CONNECTED; + } + } + + /** + * Connect to remote servers. We'll initiate the connection to + * any nodes with a lower ID so that there will be a single connection + * between each pair of nodes which we'll use symmetrically + */ + protected void startClients(ChannelPipelineFactory pipelineFactory) { + final ClientBootstrap bootstrap = + new ClientBootstrap( + new NioClientSocketChannelFactory(bossExecutor, + workerExecutor)); + bootstrap.setOption("child.reuseAddr", true); + bootstrap.setOption("child.keepAlive", true); + bootstrap.setOption("child.tcpNoDelay", true); + bootstrap.setOption("child.sendBufferSize", SEND_BUFFER_SIZE); + bootstrap.setOption("child.connectTimeoutMillis", CONNECT_TIMEOUT); + bootstrap.setPipelineFactory(pipelineFactory); + clientBootstrap = bootstrap; + + ScheduledExecutorService ses = + syncManager.getThreadPool().getScheduledExecutor(); + reconnectTask = new SingletonTask(ses, new ConnectTask()); + reconnectTask.reschedule(0, TimeUnit.SECONDS); + } + + /** + * Connect to a remote node if appropriate + * @param bootstrap the client 
bootstrap object + * @param n the node to connect to + */ + protected void doNodeConnect(Node n) { + if (!shutDown && n.getNodeId() < syncManager.getLocalNodeId()) { + Short nodeId = n.getNodeId(); + + synchronized (connections) { + NodeConnection c = connections.get(n.getNodeId()); + if (c == null) { + connections.put(nodeId, c = new NodeConnection()); + } + + if (logger.isTraceEnabled()) { + logger.trace("[{}->{}] Connection state: {}", + new Object[]{syncManager.getLocalNodeId(), + nodeId, c.state}); + } + if (c.state.equals(NodeConnectionState.NONE)) { + if (logger.isDebugEnabled()) { + logger.debug("[{}->{}] Attempting connection {} {}", + new Object[]{syncManager.getLocalNodeId(), + nodeId, + n.getHostname(), + n.getPort()}); + } + SocketAddress sa = + new InetSocketAddress(n.getHostname(), n.getPort()); + c.pendingFuture = clientBootstrap.connect(sa); + c.pendingFuture.addListener(new ConnectCFListener(n)); + c.state = NodeConnectionState.PENDING; + } + } + } + } + + /** + * Ensure that all client connections are active + */ + protected void startClientConnections() { + for (Node n : syncManager.getClusterConfig().getNodes()) { + doNodeConnect(n); + } + } + + /** + * Periodically ensure that all the node connections are alive + * @author readams + */ + protected class ConnectTask implements Runnable { + @Override + public void run() { + try { + if (!shutDown) + startClientConnections(); + } catch (Exception e) { + logger.error("Error in reconnect task", e); + } + if (!shutDown) { + reconnectTask.reschedule(500, TimeUnit.MILLISECONDS); + } + } + } + + /** + * Various states for connections + * @author readams + */ + protected enum NodeConnectionState { + NONE, + PENDING, + CONNECTED + } + + /** + * Connection state wrapper for node connections + * @author readams + */ + protected static class NodeConnection { + volatile NodeConnectionState state = NodeConnectionState.NONE; + protected ChannelFuture pendingFuture; + protected Channel nodeChannel; + + protected 
void nuke() { + state = NodeConnectionState.NONE; + if (pendingFuture != null) pendingFuture.cancel(); + if (nodeChannel != null) nodeChannel.close(); + pendingFuture = null; + nodeChannel = null; + } + } + + /** + * Maintain state for the pending message window for a given message type + * @author readams + */ + protected static class MessageWindow { + AtomicInteger pending = new AtomicInteger(); + volatile boolean disconnected = false; + Lock lock = new ReentrantLock(); + Condition full = lock.newCondition(); + } + + /** + * A pending message to be sent to a particular mode. + * @author readams + */ + protected static class NodeMessage extends Pair<Short,SyncMessage> { + private static final long serialVersionUID = -3443080461324647922L; + + public NodeMessage(Short first, SyncMessage second) { + super(first, second); + } + } + + /** + * A worker thread responsible for reading sync messages off the queue + * and writing them to the appropriate node's channel. Because calls + * {@link RPCService#writeToNode(Short, SyncMessage)} can block while + * waiting for available slots in the message window, we do this in a + * separate thread. 
+ * @author readams + */ + protected class SyncMessageWorker implements Runnable { + @Override + public void run() { + while (true) { + try { + NodeMessage m = syncQueue.take(); + writeToNode(m.getFirst(), m.getSecond()); + } catch (Exception e) { + logger.error("Error while dispatching message", e); + } + } + } + } +} diff --git a/src/main/java/org/sdnplatform/sync/internal/rpc/TProtocolUtil.java b/src/main/java/org/sdnplatform/sync/internal/rpc/TProtocolUtil.java new file mode 100644 index 0000000000000000000000000000000000000000..184482d794cc2b3aebb2d9ba7b1199c8de5b3506 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/rpc/TProtocolUtil.java @@ -0,0 +1,293 @@ +package org.sdnplatform.sync.internal.rpc; + +import java.util.ArrayList; +import java.util.List; + +import org.sdnplatform.sync.Versioned; +import org.sdnplatform.sync.ISyncService.Scope; +import org.sdnplatform.sync.internal.util.ByteArray; +import org.sdnplatform.sync.internal.version.ClockEntry; +import org.sdnplatform.sync.internal.version.VectorClock; +import org.sdnplatform.sync.thrift.AsyncMessageHeader; +import org.sdnplatform.sync.thrift.SyncMessage; +import org.sdnplatform.sync.thrift.KeyedValues; +import org.sdnplatform.sync.thrift.KeyedVersions; +import org.sdnplatform.sync.thrift.MessageType; +import org.sdnplatform.sync.thrift.Store; +import org.sdnplatform.sync.thrift.SyncOfferMessage; +import org.sdnplatform.sync.thrift.SyncValueMessage; +import org.sdnplatform.sync.thrift.VersionedValue; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +/** + * Some utility methods for constructing Thrift messages + * @author readams + */ +public class TProtocolUtil { + protected static Logger logger = + LoggerFactory.getLogger(TProtocolUtil.class.getName()); + + /** + * Convert a {@link VectorClock} into a + * {@link org.sdnplatform.sync.thrift.VectorClock} + * @param vc the input clock + * @return the output thrift object + */ + public static 
org.sdnplatform.sync.thrift.VectorClock + getTVectorClock(VectorClock vc) { + org.sdnplatform.sync.thrift.VectorClock tvc = + new org.sdnplatform.sync.thrift.VectorClock(); + tvc.setTimestamp(vc.getTimestamp()); + for (ClockEntry ce : vc.getEntries()) { + org.sdnplatform.sync.thrift.ClockEntry tce = + new org.sdnplatform.sync.thrift.ClockEntry(); + tce.setNodeId(ce.getNodeId()); + tce.setVersion(ce.getVersion()); + tvc.addToVersions(tce); + } + + return tvc; + } + + /** + * Allocate a thrift {@link org.sdnplatform.sync.thrift.VersionedValue} + * object wrapping a {@link Versioned} object + * @param value the value to wrap + * @return the thrift object + */ + public static org.sdnplatform.sync.thrift.VersionedValue + getTVersionedValue(Versioned<byte[]> value) { + + org.sdnplatform.sync.thrift.VersionedValue tvv = + new org.sdnplatform.sync.thrift.VersionedValue(); + org.sdnplatform.sync.thrift.VectorClock tvc = + getTVectorClock((VectorClock)value.getVersion()); + + tvv.setVersion(tvc); + tvv.setValue(value.getValue()); + + return tvv; + } + + /** + * Construct a thrift {@link org.sdnplatform.sync.thrift.KeyedValues} + * @param key the key + * @param value the versioned values + * @return the thrift object + */ + public static KeyedValues getTKeyedValues(ByteArray key, + Versioned<byte[]>... 
value) { + KeyedValues kv = new KeyedValues(); + kv.setKey(key.get()); + for (Versioned<byte[]> v : value) { + kv.addToValues(getTVersionedValue(v)); + } + return kv; + } + + /** + * Construct a thrift {@link org.sdnplatform.sync.thrift.KeyedValues} + * @param key the key + * @param values the versioned values + * @return the thrift object + */ + public static KeyedValues + getTKeyedValues(ByteArray key, + Iterable<Versioned<byte[]>> values) { + KeyedValues kv = new KeyedValues(); + kv.setKey(key.get()); + for (Versioned<byte[]> v : values) { + kv.addToValues(getTVersionedValue(v)); + } + return kv; + } + + /** + * Construct a thrift {@link org.sdnplatform.sync.thrift.KeyedValues} + * @param key the key + * @param value the versioned values + * @return the thrift object + */ + public static KeyedVersions + getTKeyedVersions(ByteArray key, List<Versioned<byte[]>> values) { + KeyedVersions kv = new KeyedVersions(); + kv.setKey(key.get()); + for (Versioned<byte[]> v : values) { + kv.addToVersions(getTVectorClock((VectorClock)v.getVersion())); + } + return kv; + } + + /** + * Allocate a thrift {@link org.sdnplatform.sync.thrift.Store} object + * for the current store + * @param storeName the name of the store + * @param scope the scope of the store + * @param persist whether the store is persistent + * @return the object + */ + public static org.sdnplatform.sync.thrift.Store getTStore(String storeName, + Scope scope, + boolean persist) { + return getTStore(storeName, getTScope(scope), persist); + } + + /** + * Allocate a thrift {@link org.sdnplatform.sync.thrift.Store} object + * for the current store + * @param storeName the name of the store + * @param scope the scope of the store + * @param persist whether the store is persistent + * @return the object + */ + public static org.sdnplatform.sync.thrift.Store + getTStore(String storeName, + org.sdnplatform.sync.thrift.Scope scope, + boolean persist) { + org.sdnplatform.sync.thrift.Store store = + new 
org.sdnplatform.sync.thrift.Store(); + store.setScope(scope); + store.setStoreName(storeName); + store.setPersist(persist); + return store; + } + + /** + * Convert a {@link org.sdnplatform.sync.thrift.Scope} into a + * {@link Scope} + * @param tScope the {@link org.sdnplatform.sync.thrift.Scope} to convert + * @return the resulting {@link Scope} + */ + public static Scope getScope(org.sdnplatform.sync.thrift.Scope tScope) { + switch (tScope) { + case LOCAL: + return Scope.LOCAL; + case GLOBAL: + default: + return Scope.GLOBAL; + } + } + + /** + * Convert a {@link Scope} into a + * {@link org.sdnplatform.sync.thrift.Scope} + * @param tScope the {@link Scope} to convert + * @return the resulting {@link org.sdnplatform.sync.thrift.Scope} + */ + public static org.sdnplatform.sync.thrift.Scope getTScope(Scope Scope) { + switch (Scope) { + case LOCAL: + return org.sdnplatform.sync.thrift.Scope.LOCAL; + case GLOBAL: + default: + return org.sdnplatform.sync.thrift.Scope.GLOBAL; + } + } + + /** + * Get a partially-initialized {@link SyncValueMessage} wrapped with a + * {@link SyncMessage}. The values will not be set in the + * {@link SyncValueMessage}, and the transaction ID will not be set in + * the {@link AsyncMessageHeader}. + * @param storeName the store name + * @param scope the scope + * @param persist whether the store is persistent + * @return the {@link SyncMessage} + */ + public static SyncMessage getTSyncValueMessage(String storeName, + Scope scope, + boolean persist) { + return getTSyncValueMessage(getTStore(storeName, scope, persist)); + } + + /** + * Get a partially-initialized {@link SyncValueMessage} wrapped with a + * {@link SyncMessage}. The values will not be set in the + * {@link SyncValueMessage}, and the transaction ID will not be set in + * the {@link AsyncMessageHeader}. 
+ * @param store the {@link Store} associated with the message + * @return the {@link SyncMessage} + */ + public static SyncMessage getTSyncValueMessage(Store store) { + SyncMessage bsm = + new SyncMessage(MessageType.SYNC_VALUE); + AsyncMessageHeader header = new AsyncMessageHeader(); + SyncValueMessage svm = new SyncValueMessage(); + svm.setHeader(header); + svm.setStore(store); + + bsm.setSyncValue(svm); + return bsm; + } + + /** + * Get a partially-initialized {@link SyncOfferMessage} wrapped with a + * {@link SyncMessage}. + * @param storeName the name of the store associated with the message + * @param scope the {@link Scope} for the store + * @param persist whether the store is persistent + * @return the {@link SyncMessage} + */ + public static SyncMessage getTSyncOfferMessage(String storeName, + Scope scope, + boolean persist) { + SyncMessage bsm = new SyncMessage(MessageType.SYNC_OFFER); + AsyncMessageHeader header = new AsyncMessageHeader(); + SyncOfferMessage som = new SyncOfferMessage(); + som.setHeader(header); + som.setStore(getTStore(storeName, scope, persist)); + + bsm.setSyncOffer(som); + return bsm; + } + + /** + * Convert a thrift {@link org.sdnplatform.sync.thrift.VectorClock} into + * a {@link VectorClock}. + * @param tvc the {@link org.sdnplatform.sync.thrift.VectorClock} + * @return the {@link VectorClock} + */ + public static VectorClock getVersion(org.sdnplatform.sync.thrift.VectorClock tvc) { + ArrayList<ClockEntry> entries = + new ArrayList<ClockEntry>(); + if (tvc.getVersions() != null) { + for (org.sdnplatform.sync.thrift.ClockEntry ce : + tvc.getVersions()) { + entries.add(new ClockEntry(ce.getNodeId(), ce.getVersion())); + } + } + return new VectorClock(entries, tvc.getTimestamp()); + } + + /** + * Convert a thrift {@link VersionedValue} into a {@link Versioned}. 
+ * @param tvv the {@link VersionedValue} + * @return the {@link Versioned} + */ + public static Versioned<byte[]> + getVersionedValued(VersionedValue tvv) { + Versioned<byte[]> vv = + new Versioned<byte[]>(tvv.getValue(), + getVersion(tvv.getVersion())); + return vv; + } + + /** + * Convert from a list of {@link VersionedValue} to a list + * of {@link Versioned<byte[]>} + * @param tvv the list of versioned values + * @return the list of versioned + */ + public static List<Versioned<byte[]>> getVersionedList(List<VersionedValue> tvv) { + ArrayList<Versioned<byte[]>> values = + new ArrayList<Versioned<byte[]>>(); + if (tvv != null) { + for (VersionedValue v : tvv) { + values.add(TProtocolUtil.getVersionedValued(v)); + } + } + return values; + } +} diff --git a/src/main/java/org/sdnplatform/sync/internal/rpc/ThriftFrameDecoder.java b/src/main/java/org/sdnplatform/sync/internal/rpc/ThriftFrameDecoder.java new file mode 100644 index 0000000000000000000000000000000000000000..50ec9d0f81f6613ba33555ed7366634baac2f759 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/rpc/ThriftFrameDecoder.java @@ -0,0 +1,49 @@ +package org.sdnplatform.sync.internal.rpc; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.thrift.protocol.TCompactProtocol; +import org.apache.thrift.transport.TIOStreamTransport; +import org.jboss.netty.buffer.ChannelBuffer; +import org.jboss.netty.buffer.ChannelBufferInputStream; +import org.jboss.netty.channel.Channel; +import org.jboss.netty.channel.ChannelHandlerContext; +import org.jboss.netty.handler.codec.frame.LengthFieldBasedFrameDecoder; +import org.sdnplatform.sync.thrift.SyncMessage; + +/** + * Decode a {@link SyncMessage} from the channel + * @author readams + */ +public class ThriftFrameDecoder extends LengthFieldBasedFrameDecoder { + + public ThriftFrameDecoder(int maxSize) { + super(maxSize, 0, 4, 0, 4); + } + + @Override + protected Object decode(ChannelHandlerContext ctx, + Channel channel, + 
ChannelBuffer buffer) throws Exception { + List<SyncMessage> ms = null; + ChannelBuffer frame = null; + while (null != (frame = (ChannelBuffer) super.decode(ctx, channel, + buffer))) { + if (ms == null) ms = new ArrayList<SyncMessage>(); + ChannelBufferInputStream is = new ChannelBufferInputStream(frame); + TCompactProtocol thriftProtocol = + new TCompactProtocol(new TIOStreamTransport(is)); + SyncMessage bsm = new SyncMessage(); + bsm.read(thriftProtocol); + ms.add(bsm); + } + return ms; + } + + @Override + protected ChannelBuffer extractFrame(ChannelBuffer buffer, + int index, int length) { + return buffer.slice(index, length); + } +} diff --git a/src/main/java/org/sdnplatform/sync/internal/rpc/ThriftFrameEncoder.java b/src/main/java/org/sdnplatform/sync/internal/rpc/ThriftFrameEncoder.java new file mode 100644 index 0000000000000000000000000000000000000000..d71e8d2de8e51dd0285fa55b85c3d6e6c75bcc4e --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/rpc/ThriftFrameEncoder.java @@ -0,0 +1,39 @@ +package org.sdnplatform.sync.internal.rpc; + +import org.apache.thrift.protocol.TCompactProtocol; +import org.apache.thrift.transport.TIOStreamTransport; +import org.jboss.netty.buffer.ChannelBuffer; +import org.jboss.netty.buffer.ChannelBufferOutputStream; +import org.jboss.netty.buffer.ChannelBuffers; +import org.jboss.netty.buffer.DynamicChannelBuffer; +import org.jboss.netty.channel.Channel; +import org.jboss.netty.channel.ChannelHandlerContext; +import org.jboss.netty.handler.codec.oneone.OneToOneEncoder; +import org.sdnplatform.sync.thrift.SyncMessage; + + +/** + * Encode a {@link SyncMessage} into the channel + * @author readams + * + */ +public class ThriftFrameEncoder extends OneToOneEncoder { + + @Override + protected Object encode(ChannelHandlerContext ctx, Channel channel, + Object message) throws Exception { + if (message instanceof SyncMessage) { + ChannelBuffer buf = new DynamicChannelBuffer(512); + ChannelBufferOutputStream os = new 
ChannelBufferOutputStream(buf); + TCompactProtocol thriftProtocol = + new TCompactProtocol(new TIOStreamTransport(os)); + ((SyncMessage) message).write(thriftProtocol); + + ChannelBuffer len = ChannelBuffers.buffer(4); + len.writeInt(buf.readableBytes()); + return ChannelBuffers.wrappedBuffer(len, buf); + } + return message; + } + +} diff --git a/src/main/java/org/sdnplatform/sync/internal/store/DerbySlf4jBridge.java b/src/main/java/org/sdnplatform/sync/internal/store/DerbySlf4jBridge.java new file mode 100644 index 0000000000000000000000000000000000000000..43484289fe6eb664294d250c595bb64a18d105e8 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/store/DerbySlf4jBridge.java @@ -0,0 +1,60 @@ +package org.sdnplatform.sync.internal.store; + +import java.io.Writer; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Funnels Derby log outputs into an SLF4J logger. + */ +public final class DerbySlf4jBridge +{ + + private static final Logger logger = + LoggerFactory.getLogger(DerbySlf4jBridge.class); + + private DerbySlf4jBridge() + { + } + + /** + * A basic adapter that funnels Derby's logs through an SLF4J logger. + */ + public static final class LoggingWriter extends Writer + { + @Override + public void write(final char[] cbuf, final int off, final int len) + { + if (!logger.isDebugEnabled()) return; + + // Don't bother with empty lines. + if (len > 1) + { + logger.debug(new String(cbuf, off, len)); + } + } + + @Override + public void flush() + { + // noop. + } + + @Override + public void close() + { + // noop. 
+ } + } + + public static String getBridgeMethod() { + return DerbySlf4jBridge.class.getCanonicalName() + + ".bridge"; + } + + public static Writer bridge() + { + return new LoggingWriter(); + } +} \ No newline at end of file diff --git a/src/main/java/org/sdnplatform/sync/internal/store/IStorageEngine.java b/src/main/java/org/sdnplatform/sync/internal/store/IStorageEngine.java new file mode 100644 index 0000000000000000000000000000000000000000..4e1515f9164697449fe4b308519fe2bcc70afa10 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/store/IStorageEngine.java @@ -0,0 +1,108 @@ +/* + * Copyright 2008-2009 LinkedIn, Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.sdnplatform.sync.internal.store; + +import java.util.List; +import java.util.Map.Entry; + +import org.sdnplatform.sync.IClosableIterator; +import org.sdnplatform.sync.Versioned; +import org.sdnplatform.sync.error.SyncException; + + +/** + * A base storage class which is actually responsible for data persistence. 
This + * interface implies all the usual responsibilities of a Store implementation, + * and in addition + * <ol> + * <li>The implementation MUST throw an ObsoleteVersionException if the user + * attempts to put a version which is strictly before an existing version + * (concurrent is okay)</li> + * <li>The implementation MUST increment this version number when the value is + * stored.</li> + * <li>The implementation MUST contain an ID identifying it as part of the + * cluster</li> + * </ol> + * + * A hash value can be produced for known subtrees of a StorageEngine + * + * + * @param <K> The type of the key being stored + * @param <V> The type of the value being stored + * @param <T> The type of the transforms + * + */ +public interface IStorageEngine<K, V> extends IStore<K, V> { + + /** + * Get an iterator over pairs of entries in the store. The key is the first + * element in the pair and the versioned value is the second element. + * + * Note that the iterator need not be threadsafe, and that it must be + * manually closed after use. + * + * @return An iterator over the entries in this StorageEngine. + */ + public IClosableIterator<Entry<K,List<Versioned<V>>>> entries(); + + /** + * Get an iterator over keys in the store. + * + * Note that the iterator need not be threadsafe, and that it must be + * manually closed after use. + * + * @return An iterator over the keys in this StorageEngine. + */ + public IClosableIterator<K> keys(); + + /** + * Truncate all entries in the store. Note that this is a purely local + * operation and all the data will sync back over once it's connected + * @throws SyncException + */ + public void truncate() throws SyncException; + + /** + * Write the given versioned values into the given key. 
+ * @param key the key + * @param values the list of versions for that key + * @return true if any of the values were new and not obsolete + * @throws SyncException + */ + public boolean writeSyncValue(K key, Iterable<Versioned<V>> values); + + /** + * Perform any periodic cleanup tasks that might need to be performed. + * This method will be called periodically by the sync manager + * @throws SyncException + */ + public void cleanupTask() throws SyncException; + + /** + * Returns true if the underlying data store is persistent + * @return whether the store is persistent + */ + public boolean isPersistent(); + + /** + * Set the interval after which tombstones will be cleaned up. This + * imposes an upper bound on the amount of time that two partitions can + * be separate before reaching consistency for any given key. + * @param interval the interval in milliseconds + */ + void setTombstoneInterval(int interval); +} diff --git a/src/main/java/org/sdnplatform/sync/internal/store/IStore.java b/src/main/java/org/sdnplatform/sync/internal/store/IStore.java new file mode 100644 index 0000000000000000000000000000000000000000..8c97966252028736416f10f246c68683167dfc88 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/store/IStore.java @@ -0,0 +1,89 @@ +/* + * Copyright 2008-2009 LinkedIn, Inc + * Copyright 2013 Big Switch Networks, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package org.sdnplatform.sync.internal.store; + +import java.util.List; +import java.util.Map.Entry; + +import org.sdnplatform.sync.IClosableIterator; +import org.sdnplatform.sync.IVersion; +import org.sdnplatform.sync.Versioned; +import org.sdnplatform.sync.error.SyncException; + + +/** + * The basic interface used for storage and storage decorators. Allows the usual + * crud operations. + * + * Note that certain operations rely on the correct implementation of equals and + * hashCode for the key. As such, arrays as keys should be avoided. + * + * + */ +public interface IStore<K, V> { + + /** + * Get the value associated with the given key + * + * @param key The key to check for + * @return The value associated with the key or an empty list if no values + * are found. + * @throws SyncException + */ + public List<Versioned<V>> get(K key) throws SyncException; + + /** + * Get an iterator over pairs of entries in the store. The key is the first + * element in the pair and the versioned value is the second element. + * + * Note that the iterator need not be threadsafe, and that it must be + * manually closed after use. + * + * @return An iterator over the entries in this StorageEngine. + */ + public IClosableIterator<Entry<K,List<Versioned<V>>>> entries(); + + /** + * Associate the value with the key and version in this store + * + * @param key The key to use + * @param value The value to store and its version. + */ + public void put(K key, Versioned<V> value) + throws SyncException; + + /** + * Get a list of the versions associated with the given key + * @param key the key + * @return the list of {@link IVersion} objects + * @throws SyncException + */ + public List<IVersion> getVersions(K key) throws SyncException; + + /** + * @return The name of the store. + */ + public String getName(); + + /** + * Close the store. + * + * @throws SyncException If closing fails. 
+ */ + public void close() throws SyncException; +} diff --git a/src/main/java/org/sdnplatform/sync/internal/store/InMemoryStorageEngine.java b/src/main/java/org/sdnplatform/sync/internal/store/InMemoryStorageEngine.java new file mode 100644 index 0000000000000000000000000000000000000000..fc04ac5686c1fbe5c16e3241e64eff21c3c61ea0 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/store/InMemoryStorageEngine.java @@ -0,0 +1,289 @@ +/* + * Copyright 2008-2009 LinkedIn, Inc + * Copyright 2013 Big Switch Networks, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.sdnplatform.sync.internal.store; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Map.Entry; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +import org.sdnplatform.sync.IClosableIterator; +import org.sdnplatform.sync.IVersion; +import org.sdnplatform.sync.Versioned; +import org.sdnplatform.sync.IVersion.Occurred; +import org.sdnplatform.sync.error.ObsoleteVersionException; +import org.sdnplatform.sync.error.SyncException; +import org.sdnplatform.sync.internal.util.Pair; + + +/** + * A simple non-persistent, in-memory store. + */ +public class InMemoryStorageEngine<K, V> implements IStorageEngine<K, V> { + + private final ConcurrentMap<K, List<Versioned<V>>> map; + private final String name; + + /** + * Interval in milliseconds before tombstones will be cleared. 
+ */ + protected int tombstoneDeletion = 24 * 60 * 60 * 1000; + + public InMemoryStorageEngine(String name) { + this.name = name; + this.map = new ConcurrentHashMap<K, List<Versioned<V>>>(); + } + + public InMemoryStorageEngine(String name, + ConcurrentMap<K, List<Versioned<V>>> map) { + this.name = name; + this.map = map; + } + + // ****************** + // StorageEngine<K,V> + // ****************** + + @Override + public void close() {} + + @Override + public List<IVersion> getVersions(K key) throws SyncException { + return StoreUtils.getVersions(get(key)); + } + + @Override + public List<Versioned<V>> get(K key) throws SyncException { + StoreUtils.assertValidKey(key); + List<Versioned<V>> results = map.get(key); + if(results == null) { + return new ArrayList<Versioned<V>>(0); + } + synchronized(results) { + return new ArrayList<Versioned<V>>(results); + } + } + + @Override + public void put(K key, Versioned<V> value) throws SyncException { + if (!doput(key, value)) + throw new ObsoleteVersionException(); + } + + public boolean doput(K key, Versioned<V> value) throws SyncException { + StoreUtils.assertValidKey(key); + + IVersion version = value.getVersion(); + + while(true) { + List<Versioned<V>> items = map.get(key); + // If we have no value, optimistically try to add one + if(items == null) { + items = new ArrayList<Versioned<V>>(); + items.add(new Versioned<V>(value.getValue(), version)); + if (map.putIfAbsent(key, items) != null) + continue; + return true; + } else { + synchronized(items) { + // if this check fails, items has been removed from the map + // by delete, so we try again. 
+ if(map.get(key) != items) + continue; + + // Check for existing versions - remember which items to + // remove in case of success + List<Versioned<V>> itemsToRemove = new ArrayList<Versioned<V>>(items.size()); + for(Versioned<V> versioned: items) { + Occurred occurred = value.getVersion().compare(versioned.getVersion()); + if(occurred == Occurred.BEFORE) { + return false; + } else if(occurred == Occurred.AFTER) { + itemsToRemove.add(versioned); + } + } + items.removeAll(itemsToRemove); + items.add(value); + } + return true; + } + } + } + + @Override + public IClosableIterator<Entry<K,List<Versioned<V>>>> entries() { + return new InMemoryIterator<K, V>(map); + } + + @Override + public IClosableIterator<K> keys() { + // TODO Implement more efficient version. + return StoreUtils.keys(entries()); + } + + @Override + public void truncate() { + map.clear(); + } + + @Override + public String getName() { + return name; + } + + @Override + public boolean writeSyncValue(K key, Iterable<Versioned<V>> values) { + boolean success = false; + for (Versioned<V> value : values) { + try { + put (key, value); + success = true; + } catch (SyncException e) { + // ignore + } + } + return success; + } + + @Override + public void cleanupTask() { + // Remove tombstones that are older than the tombstone deletion + // threshold. 
If a value is deleted and the tombstone has been + // cleaned up before the cluster is fully synchronized, then there + // is a chance that deleted values could be resurrected + Iterator<Entry<K, List<Versioned<V>>>> iter = map.entrySet().iterator(); + while (iter.hasNext()) { + Entry<K, List<Versioned<V>>> e = iter.next(); + List<Versioned<V>> items = e.getValue(); + + synchronized (items) { + if (StoreUtils.canDelete(items, tombstoneDeletion)) + iter.remove(); + } + } + } + + @Override + public boolean isPersistent() { + return false; + } + + @Override + public void setTombstoneInterval(int interval) { + this.tombstoneDeletion = interval; + } + + // ********************* + // InMemoryStorageEngine + // ********************* + + /** + * Get the number of keys currently in the store + * @return + */ + public int size() { + return map.size(); + } + + /** + * Atomically remove the key and return the value that was mapped to it, + * if any + * @param key the key to remove + * @return the mapped values + */ + public List<Versioned<V>> remove(K key) { + while (true) { + List<Versioned<V>> items = map.get(key); + synchronized (items) { + if (map.remove(key, items)) + return items; + } + } + } + + /** + * Check whether the given key is present in the store + * @param key the key + * @return <code>true</code> if the key is present + */ + public boolean containsKey(K key) { + return map.containsKey(key); + } + + // ****** + // Object + // ****** + + @Override + public String toString() { + return toString(15); + } + + // ************* + // Local methods + // ************* + + protected String toString(int size) { + StringBuilder builder = new StringBuilder(); + builder.append("{"); + int count = 0; + for(Entry<K, List<Versioned<V>>> entry: map.entrySet()) { + if(count > size) { + builder.append("..."); + break; + } + builder.append(entry.getKey()); + builder.append(':'); + builder.append(entry.getValue()); + builder.append(','); + } + builder.append('}'); + return 
builder.toString(); + } + + private static class InMemoryIterator<K, V> implements + IClosableIterator<Entry<K, List<Versioned<V>>>> { + + private final Iterator<Entry<K, List<Versioned<V>>>> iterator; + + public InMemoryIterator(ConcurrentMap<K, List<Versioned<V>>> map) { + this.iterator = map.entrySet().iterator(); + } + + public boolean hasNext() { + return iterator.hasNext(); + } + + public Pair<K, List<Versioned<V>>> next() { + Entry<K, List<Versioned<V>>> entry = iterator.next(); + return new Pair<K, List<Versioned<V>>>(entry.getKey(), + entry.getValue()); + } + + public void remove() { + throw new UnsupportedOperationException("No removal y'all."); + } + + @Override + public void close() { + // nothing to do + } + } +} diff --git a/src/main/java/org/sdnplatform/sync/internal/store/JacksonStore.java b/src/main/java/org/sdnplatform/sync/internal/store/JacksonStore.java new file mode 100644 index 0000000000000000000000000000000000000000..a4cd6e251d7d0b41bae9d93db28bd640e90ccea7 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/store/JacksonStore.java @@ -0,0 +1,240 @@ +package org.sdnplatform.sync.internal.store; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map.Entry; + +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.ObjectReader; +import com.fasterxml.jackson.databind.ObjectWriter; +import com.fasterxml.jackson.databind.SerializationFeature; +import com.fasterxml.jackson.dataformat.smile.SmileFactory; + +import com.fasterxml.jackson.core.type.TypeReference; + +import org.sdnplatform.sync.IClosableIterator; +import org.sdnplatform.sync.IVersion; +import org.sdnplatform.sync.Versioned; +import org.sdnplatform.sync.error.SerializationException; +import org.sdnplatform.sync.error.SyncException; +import org.sdnplatform.sync.error.SyncRuntimeException; +import org.sdnplatform.sync.internal.util.ByteArray; +import 
org.sdnplatform.sync.internal.util.Pair; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +/** + * A store that will serialize and deserialize objects to JSON using Jackson + */ +public class JacksonStore<K, V> implements IStore<K, V> { + protected static Logger logger = + LoggerFactory.getLogger(JacksonStore.class); + + protected static final ObjectMapper mapper = + new ObjectMapper(new SmileFactory()); + static { + mapper.configure(SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS, + true); + } + + private final IStore<ByteArray, byte[]> delegate; + + private final ObjectWriter keyWriter; + private final ObjectWriter valueWriter; + private final ObjectReader keyReader; + private final ObjectReader valueReader; + + private final boolean keyAsTree; + private final boolean valueAsTree; + + public JacksonStore(IStore<ByteArray, byte[]> delegate, + Class<K> keyClass, + Class<V> valueClass) { + super(); + this.delegate = delegate; + if (keyClass.isAssignableFrom(JsonNode.class)) { + keyAsTree = true; + this.keyWriter = null; + this.keyReader = null; + } else { + keyAsTree = false; + this.keyWriter = mapper.writerWithType(keyClass); + this.keyReader = mapper.reader(keyClass); + } + if (valueClass.isAssignableFrom(JsonNode.class)) { + valueAsTree = true; + this.valueWriter = null; + this.valueReader = null; + } else { + valueAsTree = false; + this.valueWriter = mapper.writerWithType(valueClass); + this.valueReader = mapper.reader(valueClass); + } + } + + public JacksonStore(IStore<ByteArray, byte[]> delegate, + TypeReference<K> keyType, + TypeReference<V> valueType) { + super(); + this.delegate = delegate; + keyAsTree = false; + valueAsTree = false; + this.keyWriter = mapper.writerWithType(keyType); + this.keyReader = mapper.reader(keyType); + this.valueWriter = mapper.writerWithType(valueType); + this.valueReader = mapper.reader(valueType); + } + + // ************ + // Store<K,V,T> + // ************ + @Override + public List<Versioned<V>> get(K key) throws 
SyncException { + ByteArray keybytes = getKeyBytes(key); + List<Versioned<byte[]>> values = delegate.get(keybytes); + return convertValues(values); + } + + @Override + public IClosableIterator<Entry<K, List<Versioned<V>>>> entries() { + return new JacksonIterator(delegate.entries()); + } + + @Override + public void put(K key, Versioned<V> value) + throws SyncException { + ByteArray keybytes = getKeyBytes(key); + byte[] valuebytes = value.getValue() != null + ? getValueBytes(value.getValue()) + : null; + delegate.put(keybytes, + new Versioned<byte[]>(valuebytes, value.getVersion())); + } + + @Override + public String getName() { + return delegate.getName(); + } + + @Override + public void close() throws SyncException { + delegate.close(); + } + + @Override + public List<IVersion> getVersions(K key) throws SyncException { + ByteArray keybytes = getKeyBytes(key); + return delegate.getVersions(keybytes); + } + + // ************* + // Local methods + // ************* + + private ByteArray getKeyBytes(K key) + throws SyncException { + if (key == null) + throw new IllegalArgumentException("Cannot get null key"); + + try { + if (keyAsTree) + return new ByteArray(mapper.writeValueAsBytes(key)); + else + return new ByteArray(keyWriter.writeValueAsBytes(key)); + } catch (Exception e) { + throw new SerializationException(e); + } + } + + private byte[] getValueBytes(V value) throws SyncException { + try { + if (valueAsTree) + return mapper.writeValueAsBytes(value); + else + return valueWriter.writeValueAsBytes(value); + } catch (Exception e) { + throw new SerializationException(e); + } + } + + @SuppressWarnings("unchecked") + private V getValueObject(byte[] value) throws SyncException { + try { + if (value == null) return null; + if (valueAsTree) + return (V)mapper.readTree(value); + else + return valueReader.readValue(value); + } catch (Exception e) { + throw new SerializationException(e); + } + } + + @SuppressWarnings("unchecked") + private K getKeyObject(ByteArray key) 
throws SyncException { + try { + if (keyAsTree) + return (K)mapper.readTree(key.get()); + else + return keyReader.readValue(key.get()); + } catch (Exception e) { + throw new SerializationException(e); + } + } + + private List<Versioned<V>> convertValues(List<Versioned<byte[]>> values) + throws SyncException { + if (values != null) { + List<Versioned<V>> objectvalues = + new ArrayList<Versioned<V>>(values.size()); + for (Versioned<byte[]> vb : values) { + objectvalues.add(new Versioned<V>(getValueObject(vb.getValue()), + vb.getVersion())); + } + return objectvalues; + } + return null; + } + + private class JacksonIterator implements + IClosableIterator<Entry<K, List<Versioned<V>>>> { + + IClosableIterator<Entry<ByteArray, List<Versioned<byte[]>>>> delegate; + + public JacksonIterator(IClosableIterator<Entry<ByteArray, + List<Versioned<byte[]>>>> delegate) { + super(); + this.delegate = delegate; + } + + @Override + public boolean hasNext() { + return delegate.hasNext(); + } + + @Override + public Entry<K, List<Versioned<V>>> next() { + Entry<ByteArray, List<Versioned<byte[]>>> n = delegate.next(); + try { + return new Pair<K, List<Versioned<V>>>(getKeyObject(n.getKey()), + convertValues(n.getValue())); + } catch (SyncException e) { + throw new SyncRuntimeException("Failed to construct next value", + e); + } + } + + @Override + public void remove() { + delegate.remove(); + } + + @Override + public void close() { + delegate.close(); + } + + } +} diff --git a/src/main/java/org/sdnplatform/sync/internal/store/JavaDBStorageEngine.java b/src/main/java/org/sdnplatform/sync/internal/store/JavaDBStorageEngine.java new file mode 100644 index 0000000000000000000000000000000000000000..d05393406aebcb106854c863200a99966011397e --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/store/JavaDBStorageEngine.java @@ -0,0 +1,505 @@ +package org.sdnplatform.sync.internal.store; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; 
+import java.io.UnsupportedEncodingException; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.List; +import java.util.Map.Entry; +import java.util.NoSuchElementException; + +import javax.sql.ConnectionPoolDataSource; +import org.apache.derby.jdbc.EmbeddedConnectionPoolDataSource40; + +import org.sdnplatform.sync.IClosableIterator; +import org.sdnplatform.sync.IVersion; +import org.sdnplatform.sync.Versioned; +import org.sdnplatform.sync.IVersion.Occurred; +import org.sdnplatform.sync.error.ObsoleteVersionException; +import org.sdnplatform.sync.error.PersistException; +import org.sdnplatform.sync.error.SyncException; +import org.sdnplatform.sync.error.SyncRuntimeException; +import org.sdnplatform.sync.internal.util.ByteArray; +import org.sdnplatform.sync.internal.util.EmptyClosableIterator; +import org.sdnplatform.sync.internal.util.Pair; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.fasterxml.jackson.core.JsonParseException; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.JsonMappingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.dataformat.smile.SmileFactory; + +/** + * Persistent storage engine that keeps its data in a JDB database + * @author readams + */ +public class JavaDBStorageEngine implements IStorageEngine<ByteArray, byte[]> { + protected static final Logger logger = + LoggerFactory.getLogger(JavaDBStorageEngine.class.getName()); + + private static String CREATE_DATA_TABLE = + " (datakey varchar(4096) primary key," + + "datavalue blob)"; + private static String SELECT_ALL = + "select * from <tbl>"; + private static String SELECT_KEY = + "select * from <tbl> where datakey = ?"; + private static String INSERT_KEY = + "insert into <tbl> values (?, ?)"; + private static String UPDATE_KEY = + 
"update <tbl> set datavalue = ? where datakey = ?"; + private static String DELETE_KEY = + "delete from <tbl> where datakey = ?"; + private static String TRUNCATE = + "delete from <tbl>"; + + private String name; + + private ConnectionPoolDataSource dataSource; + + /** + * Interval in milliseconds before tombstones will be cleared. + */ + private int tombstoneDeletion = 24 * 60 * 60 * 1000; + + private static final ObjectMapper mapper = + new ObjectMapper(new SmileFactory()); + { + System.setProperty("derby.stream.error.method", + DerbySlf4jBridge.getBridgeMethod()); + } + + /** + * Construct a new storage engine that will use the provided engine + * as a delegate and provide persistence for its data. Note that + * the delegate engine must be empty when this object is constructed + * @param delegate the delegate engine to persist + * @throws SyncException + */ + public JavaDBStorageEngine(String name, + ConnectionPoolDataSource dataSource) + throws PersistException { + super(); + + this.name = name; + this.dataSource = dataSource; + + try { + initTable(); + } catch (SQLException sqle) { + throw new PersistException("Could not initialize persistent storage", + sqle); + } + } + + // ******************************* + // StorageEngine<ByteArray,byte[]> + // ******************************* + + @Override + public List<Versioned<byte[]>> get(ByteArray key) throws SyncException { + StoreUtils.assertValidKey(key); + Connection dbConnection = null; + PreparedStatement stmt = null; + try { + dbConnection = getConnection(); + stmt = dbConnection.prepareStatement(getSql(SELECT_KEY)); + return doSelect(stmt, getKeyAsString(key)); + + } catch (Exception e) { + throw new PersistException("Could not retrieve key" + + " from database", + e); + } finally { + cleanupSQL(dbConnection, stmt); + } + } + + @Override + public IClosableIterator<Entry<ByteArray, List<Versioned<byte[]>>>> + entries() { + PreparedStatement stmt = null; + Connection dbConnection = null; + try { + // we never 
close this connection unless there's an error; + // it must be closed by the DbIterator + dbConnection = getConnection(); + stmt = dbConnection.prepareStatement(getSql(SELECT_ALL)); + ResultSet rs = stmt.executeQuery(); + return new DbIterator(dbConnection, stmt, rs); + } catch (Exception e) { + logger.error("Could not create iterator on data", e); + try { + cleanupSQL(dbConnection, stmt); + } catch (Exception e2) { + logger.error("Failed to clean up after error", e2); + } + return new EmptyClosableIterator<Entry<ByteArray,List<Versioned<byte[]>>>>(); + } + } + + @Override + public void put(ByteArray key, Versioned<byte[]> value) + throws SyncException { + StoreUtils.assertValidKey(key); + Connection dbConnection = null; + try { + PreparedStatement stmt = null; + PreparedStatement update = null; + try { + String keyStr = getKeyAsString(key); + dbConnection = getConnection(); + dbConnection.setAutoCommit(false); + stmt = dbConnection.prepareStatement(getSql(SELECT_KEY)); + List<Versioned<byte[]>> values = doSelect(stmt, keyStr); + + int vindex; + if (values.size() > 0) { + update = dbConnection.prepareStatement(getSql(UPDATE_KEY)); + update.setString(2, keyStr); + vindex = 1; + } else { + update = dbConnection.prepareStatement(getSql(INSERT_KEY)); + update.setString(1, keyStr); + vindex = 2; + } + + List<Versioned<byte[]>> itemsToRemove = + new ArrayList<Versioned<byte[]>>(values.size()); + for(Versioned<byte[]> versioned: values) { + Occurred occurred = value.getVersion().compare(versioned.getVersion()); + if(occurred == Occurred.BEFORE) { + throw new ObsoleteVersionException("Obsolete version for key '" + key + + "': " + value.getVersion()); + } else if(occurred == Occurred.AFTER) { + itemsToRemove.add(versioned); + } + } + values.removeAll(itemsToRemove); + values.add(value); + + ByteArrayInputStream is = + new ByteArrayInputStream(mapper.writeValueAsBytes(values)); + update.setBinaryStream(vindex, is); + update.execute(); + dbConnection.commit(); + } catch 
(SyncException e) { + dbConnection.rollback(); + throw e; + } catch (Exception e) { + dbConnection.rollback(); + throw new PersistException("Could not retrieve key from database", + e); + } finally { + cleanupSQL(dbConnection, stmt, update); + } + } catch (SQLException e) { + cleanupSQL(dbConnection); + throw new PersistException("Could not clean up", e); + } + } + + @Override + public IClosableIterator<ByteArray> keys() { + return StoreUtils.keys(entries()); + } + + @Override + public void truncate() throws SyncException { + Connection dbConnection = null; + PreparedStatement update = null; + try { + dbConnection = getConnection(); + update = dbConnection.prepareStatement(getSql(TRUNCATE)); + update.execute(); + } catch (Exception e) { + logger.error("Failed to truncate store " + getName(), e); + } finally { + cleanupSQL(dbConnection, update); + } + } + + @Override + public String getName() { + return name; + } + + @Override + public void close() throws SyncException { + + } + + @Override + public boolean writeSyncValue(ByteArray key, + Iterable<Versioned<byte[]>> values) { + boolean success = false; + for (Versioned<byte[]> value : values) { + try { + put (key, value); + success = true; + } catch (PersistException e) { + logger.error("Failed to sync value because of " + + "persistence exception", e); + } catch (SyncException e) { + // ignore obsolete version exception + } + } + return success; + } + + @Override + public List<IVersion> getVersions(ByteArray key) throws SyncException { + return StoreUtils.getVersions(get(key)); + } + + @Override + public void cleanupTask() throws SyncException { + Connection dbConnection = null; + PreparedStatement stmt = null; + try { + dbConnection = getConnection(); + dbConnection.setAutoCommit(true); + stmt = dbConnection.prepareStatement(getSql(SELECT_ALL)); + ResultSet rs = stmt.executeQuery(); + while (rs.next()) { + List<Versioned<byte[]>> items = getVersionedList(rs); + if (StoreUtils.canDelete(items, tombstoneDeletion)) { 
+ doClearTombstone(rs.getString("datakey")); + } + } + } catch (Exception e) { + logger.error("Failed to delete key", e); + } finally { + cleanupSQL(dbConnection, stmt); + } + } + + @Override + public boolean isPersistent() { + return true; + } + + @Override + public void setTombstoneInterval(int interval) { + this.tombstoneDeletion = interval; + } + + // ******************* + // JavaDBStorageEngine + // ******************* + + /** + * Get a connection pool data source for use by Java DB storage engines + * @param memory whether to actually use a memory database + * @return the {@link ConnectionPoolDataSource} + */ + public static ConnectionPoolDataSource getDataSource(boolean memory) { + + EmbeddedConnectionPoolDataSource40 ds = + new EmbeddedConnectionPoolDataSource40(); + if (memory) { + ds.setDatabaseName("memory:SyncDB"); + } else { + ds.setDatabaseName("SyncDB"); + } + ds.setCreateDatabase("create"); + ds.setUser("floodlight"); + ds.setPassword("floodlight"); + return ds; + } + + // ************* + // Local methods + // ************* + + private static void cleanupSQL(Connection dbConnection) + throws SyncException { + cleanupSQL(dbConnection, (PreparedStatement[])null); + } + + private static void cleanupSQL(Connection dbConnection, + PreparedStatement... stmts) + throws SyncException { + try { + if (stmts != null) { + for (PreparedStatement stmt : stmts) { + if (stmt != null) + stmt.close(); + } + } + } catch (SQLException e) { + throw new PersistException("Could not close statement", e); + } finally { + try { + if (dbConnection != null && !dbConnection.isClosed()) + dbConnection.close(); + } catch (SQLException e) { + throw new PersistException("Could not close connection", e); + } + } + } + + private Connection getConnection() throws SQLException { + Connection conn = dataSource.getPooledConnection().getConnection(); + conn.setTransactionIsolation(Connection. 
+ TRANSACTION_READ_COMMITTED); + return conn; + } + + private void initTable() throws SQLException { + Connection dbConnection = getConnection(); + Statement statement = null; + statement = dbConnection.createStatement(); + try { + statement.execute("CREATE TABLE " + getName() + + CREATE_DATA_TABLE); + } catch (SQLException e) { + // eat table already exists exception + if (!"X0Y32".equals(e.getSQLState())) + throw e; + } finally { + if (statement != null) statement.close(); + dbConnection.close(); + } + } + + private String getKeyAsString(ByteArray key) + throws UnsupportedEncodingException { + return new String(key.get(), "UTF8"); + } + + private static ByteArray getStringAsKey(String keyStr) + throws UnsupportedEncodingException { + return new ByteArray(keyStr.getBytes("UTF8")); + } + + private String getSql(String sql) { + return sql.replace("<tbl>", getName()); + } + + private static List<Versioned<byte[]>> getVersionedList(ResultSet rs) + throws SQLException, JsonParseException, + JsonMappingException, IOException { + InputStream is = rs.getBinaryStream("datavalue"); + return mapper.readValue(is, + new TypeReference<List<VCVersioned<byte[]>>>() {}); + } + + private List<Versioned<byte[]>> doSelect(PreparedStatement stmt, + String key) + throws SQLException, JsonParseException, + JsonMappingException, IOException { + stmt.setString(1, key); + ResultSet rs = stmt.executeQuery(); + + if (rs.next()) { + return getVersionedList(rs); + } else { + return new ArrayList<Versioned<byte[]>>(0); + } + } + + private void doClearTombstone(String keyStr) throws SyncException { + Connection dbConnection = null; + try { + PreparedStatement stmt = null; + PreparedStatement update = null; + try { + dbConnection = getConnection(); + dbConnection.setAutoCommit(false); + stmt = dbConnection.prepareStatement(getSql(SELECT_KEY)); + List<Versioned<byte[]>> items = doSelect(stmt, keyStr); + if (StoreUtils.canDelete(items, tombstoneDeletion)) { + update = 
dbConnection.prepareStatement(getSql(DELETE_KEY)); + update.setString(1, keyStr); + update.execute(); + } + dbConnection.commit(); + + } catch (Exception e) { + if (dbConnection != null) + dbConnection.rollback(); + logger.error("Failed to delete key", e); + } finally { + cleanupSQL(dbConnection, stmt, update); + } + } catch (SQLException e) { + logger.error("Failed to clean up after error", e); + cleanupSQL(dbConnection); + } + } + + private static class DbIterator implements + IClosableIterator<Entry<ByteArray,List<Versioned<byte[]>>>> { + + private final Connection dbConnection; + private final PreparedStatement stmt; + private final ResultSet rs; + private boolean hasNext = false; + private boolean hasNextSet = false; + + public DbIterator(Connection dbConnection, + PreparedStatement stmt, + ResultSet rs) { + super(); + this.dbConnection = dbConnection; + this.stmt = stmt; + this.rs = rs; + } + + @Override + public boolean hasNext() { + try { + if (hasNextSet) return hasNext; + hasNextSet = true; + hasNext = rs.next(); + } catch (Exception e) { + logger.error("Error in DB Iterator", e); + hasNextSet = true; + hasNext = false; + } + return hasNext; + } + + @Override + public Pair<ByteArray, List<Versioned<byte[]>>> next() { + if (hasNext()) { + try { + ByteArray key = getStringAsKey(rs.getString("datakey")); + List<Versioned<byte[]>> vlist = getVersionedList(rs); + hasNextSet = false; + return new Pair<ByteArray, + List<Versioned<byte[]>>>(key, vlist); + } catch (Exception e) { + throw new SyncRuntimeException("Error in DB Iterator", + new PersistException(e)); + } + } else { + throw new NoSuchElementException(); + } + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + + @Override + public void close() { + try { + cleanupSQL(dbConnection, stmt); + } catch (SyncException e) { + logger.error("Could not close DB iterator", e); + } + } + + } +} diff --git 
a/src/main/java/org/sdnplatform/sync/internal/store/ListenerStorageEngine.java b/src/main/java/org/sdnplatform/sync/internal/store/ListenerStorageEngine.java new file mode 100644 index 0000000000000000000000000000000000000000..645f496e669b21401b4604a7b250c4126b7cd203 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/store/ListenerStorageEngine.java @@ -0,0 +1,127 @@ +package org.sdnplatform.sync.internal.store; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Map.Entry; + +import org.sdnplatform.sync.IClosableIterator; +import org.sdnplatform.sync.IVersion; +import org.sdnplatform.sync.Versioned; +import org.sdnplatform.sync.error.SyncException; +import org.sdnplatform.sync.internal.util.ByteArray; + + +/** + * A storage engine that proxies to another storage engine and notifies + * registered listeners of changes + * @author readams + */ +public class ListenerStorageEngine + implements IStorageEngine<ByteArray, byte[]> { + + /** + * Listeners for this store + */ + protected List<MappingStoreListener> listeners = + new ArrayList<MappingStoreListener>(); + + /** + * The local storage for this storage engine + */ + protected IStorageEngine<ByteArray, byte[]> localStorage; + + + public ListenerStorageEngine(IStorageEngine<ByteArray, + byte[]> localStorage) { + this.localStorage = localStorage; + } + + // ************************* + // StorageEngine<Key,byte[]> + // ************************* + + @Override + public List<Versioned<byte[]>> get(ByteArray key) throws SyncException { + return localStorage.get(key); + } + + @Override + public IClosableIterator<Entry<ByteArray,List<Versioned<byte[]>>>> entries() { + return localStorage.entries(); + } + + @Override + public void put(ByteArray key, Versioned<byte[]> value) + throws SyncException { + localStorage.put(key, value); + notifyListeners(key); + } + + @Override + public IClosableIterator<ByteArray> keys() { + return 
localStorage.keys(); + } + + @Override + public void truncate() throws SyncException { + localStorage.truncate(); + } + + @Override + public String getName() { + return localStorage.getName(); + } + + @Override + public void close() throws SyncException { + localStorage.close(); + } + + @Override + public List<IVersion> getVersions(ByteArray key) throws SyncException { + return localStorage.getVersions(key); + } + + @Override + public boolean writeSyncValue(ByteArray key, + Iterable<Versioned<byte[]>> values) { + boolean r = localStorage.writeSyncValue(key, values); + if (r) notifyListeners(key); + return r; + } + + @Override + public void cleanupTask() throws SyncException { + localStorage.cleanupTask(); + } + + @Override + public boolean isPersistent() { + return localStorage.isPersistent(); + } + + @Override + public void setTombstoneInterval(int interval) { + localStorage.setTombstoneInterval(interval); + } + + // ********************* + // ListenerStorageEngine + // ********************* + + public void addListener(MappingStoreListener listener) { + listeners.add(listener); + } + + protected void notifyListeners(ByteArray key) { + notifyListeners(Collections.singleton(key).iterator()); + } + + protected void notifyListeners(Iterator<ByteArray> keys) { + for (MappingStoreListener msl : listeners) { + msl.notify(keys); + } + } +} diff --git a/src/main/java/org/sdnplatform/sync/internal/store/MappingStoreListener.java b/src/main/java/org/sdnplatform/sync/internal/store/MappingStoreListener.java new file mode 100644 index 0000000000000000000000000000000000000000..3af70ccd7480a6e00e7a3e9f7374d1700e86f4eb --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/store/MappingStoreListener.java @@ -0,0 +1,89 @@ +package org.sdnplatform.sync.internal.store; + +import java.util.Iterator; +import java.util.NoSuchElementException; + +import org.sdnplatform.sync.IStoreListener; +import org.sdnplatform.sync.internal.util.ByteArray; +import org.slf4j.Logger; +import 
org.slf4j.LoggerFactory; + +import com.fasterxml.jackson.core.type.TypeReference; + +/** + * A class that will map from the raw serialized keys to the appropriate key + * type for a store listener + * @author readams + */ +@SuppressWarnings({"rawtypes", "unchecked"}) +public class MappingStoreListener { + protected static Logger logger = + LoggerFactory.getLogger(MappingStoreListener.class); + + TypeReference typeRef; + Class keyClass; + IStoreListener listener; + + public MappingStoreListener(TypeReference typeRef, Class keyClass, + IStoreListener listener) { + super(); + this.typeRef = typeRef; + this.keyClass = keyClass; + this.listener = listener; + } + + public void notify(Iterator<ByteArray> keys) { + listener.keysModified(new MappingIterator(keys)); + } + + class MappingIterator implements Iterator { + Iterator<ByteArray> keys; + protected Object next; + + public MappingIterator(Iterator<ByteArray> keys) { + super(); + this.keys = keys; + } + + private Object map() { + try { + ByteArray ka = keys.next(); + Object key = null; + if (typeRef != null) + key = JacksonStore.mapper.readValue(ka.get(), typeRef); + else if (keyClass != null) + key = JacksonStore.mapper.readValue(ka.get(), keyClass); + + return key; + } catch (Exception e) { + return null; + } + } + + @Override + public boolean hasNext() { + if (next != null) return true; + while (keys.hasNext()) { + next = map(); + if (next != null) return true; + } + return false; + } + + @Override + public Object next() { + if (hasNext()) { + Object cur = next; + next = null; + return cur; + } + throw new NoSuchElementException(); + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + + } +} diff --git a/src/main/java/org/sdnplatform/sync/internal/store/StoreUtils.java b/src/main/java/org/sdnplatform/sync/internal/store/StoreUtils.java new file mode 100644 index 0000000000000000000000000000000000000000..7b92dade96666ff6ae0b145eaf41d5b36d34c043 --- /dev/null +++ 
b/src/main/java/org/sdnplatform/sync/internal/store/StoreUtils.java @@ -0,0 +1,177 @@ +/* + * Copyright 2008-2009 LinkedIn, Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.sdnplatform.sync.internal.store; + +import java.io.Closeable; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; + +import org.sdnplatform.sync.IClosableIterator; +import org.sdnplatform.sync.IVersion; +import org.sdnplatform.sync.Versioned; +import org.sdnplatform.sync.IVersion.Occurred; +import org.sdnplatform.sync.error.SyncException; +import org.sdnplatform.sync.internal.version.VectorClock; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; + +/** + * Group of store utilities + * + */ +public class StoreUtils { + protected static final Logger logger = + LoggerFactory.getLogger(StoreUtils.class); + + public static void assertValidKeys(Iterable<?> keys) { + if(keys == null) + throw new IllegalArgumentException("Keys cannot be null."); + for(Object key: keys) + assertValidKey(key); + } + + public static <K> void assertValidKey(K key) { + if(key == null) + throw new IllegalArgumentException("Key cannot be null."); + } + + /** + * Implements getAll by delegating to get. 
+ * @throws SyncException + */ + public static <K, V> Map<K, List<Versioned<V>>> + getAll(IStore<K, V> storageEngine, + Iterable<K> keys) throws SyncException { + Map<K, List<Versioned<V>>> result = newEmptyHashMap(keys); + for(K key: keys) { + List<Versioned<V>> value = + storageEngine.get(key); + if(!value.isEmpty()) + result.put(key, value); + } + return result; + } + + /** + * Returns an empty map with expected size matching the iterable size if + * it's of type Collection. Otherwise, an empty map with the default size is + * returned. + */ + public static <K, V> HashMap<K, V> newEmptyHashMap(Iterable<?> iterable) { + if(iterable instanceof Collection<?>) + return Maps.newHashMapWithExpectedSize(((Collection<?>) iterable).size()); + return Maps.newHashMap(); + } + + /** + * Closes a Closeable and logs a potential error instead of re-throwing the + * exception. If {@code null} is passed, this method is a no-op. + * + * This is typically used in finally blocks to prevent an exception thrown + * during close from hiding an exception thrown inside the try. + * + * @param c The Closeable to close, may be null. 
+ */ + public static void close(Closeable c) { + if(c != null) { + try { + c.close(); + } catch(IOException e) { + logger.error("Error closing stream", e); + } + } + } + + + public static <V> List<IVersion> getVersions(List<Versioned<V>> versioneds) { + List<IVersion> versions = Lists.newArrayListWithCapacity(versioneds.size()); + for(Versioned<?> versioned: versioneds) + versions.add(versioned.getVersion()); + return versions; + } + + public static <K, V> IClosableIterator<K> + keys(final IClosableIterator<Entry<K, V>> values) { + return new IClosableIterator<K>() { + + public void close() { + values.close(); + } + + public boolean hasNext() { + return values.hasNext(); + } + + public K next() { + Entry<K, V> value = values.next(); + if(value == null) + return null; + return value.getKey(); + } + + public void remove() { + values.remove(); + } + + }; + } + + public static <V> boolean canDelete(List<Versioned<V>> items, + long tombstoneDeletion) { + List<VectorClock> tombstones = new ArrayList<VectorClock>(); + long now = System.currentTimeMillis(); + // make two passes; first we find tombstones that are old enough. + for (Versioned<V> v : items) { + if (v.getValue() == null) { + VectorClock vc = (VectorClock)v.getVersion(); + if ((vc.getTimestamp() + tombstoneDeletion) < now) + tombstones.add(vc); + } + } + + // second, if we find a tombstone which is later than every + // non-tombstone value, then we can delete the key. + for (VectorClock vc : tombstones) { + boolean later = true; + for (Versioned<V> v : items) { + if (v.getValue() != null) { + VectorClock curvc = (VectorClock)v.getVersion(); + if (!Occurred.AFTER.equals(vc.compare(curvc))) { + later = false; + break; + } + } + } + if (later) { + // we found a tombstone that's old enough and + // logically later than all non-tombstones. We can + // remove the value from the map. 
+ return true; + } + } + + return false; + } +} diff --git a/src/main/java/org/sdnplatform/sync/internal/store/SynchronizingStorageEngine.java b/src/main/java/org/sdnplatform/sync/internal/store/SynchronizingStorageEngine.java new file mode 100644 index 0000000000000000000000000000000000000000..7cab6ff1c32f7bdaf6ac77c2f445158480e89622 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/store/SynchronizingStorageEngine.java @@ -0,0 +1,71 @@ +package org.sdnplatform.sync.internal.store; + +import org.sdnplatform.sync.Versioned; +import org.sdnplatform.sync.ISyncService.Scope; +import org.sdnplatform.sync.error.SyncException; +import org.sdnplatform.sync.internal.SyncManager; +import org.sdnplatform.sync.internal.util.ByteArray; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +/** + * This storage engine will asynchronously replicate its data to the other + * nodes in the cluster based on the scope of the s + */ +public class SynchronizingStorageEngine extends ListenerStorageEngine { + + protected static Logger logger = + LoggerFactory.getLogger(SynchronizingStorageEngine.class); + + /** + * The synchronization manager + */ + protected SyncManager syncManager; + + /** + * The scope of distribution for data in this store + */ + protected Scope scope; + + /** + * Allocate a synchronizing storage engine + * @param localStorage the local storage + * @param syncManager the sync manager + * @param scope the scope for this store + * @param rpcService the RPC service + * @param storeName the name of the store + */ + public SynchronizingStorageEngine(IStorageEngine<ByteArray, + byte[]> localStorage, + SyncManager syncManager, + Scope scope) { + super(localStorage); + this.localStorage = localStorage; + this.syncManager = syncManager; + this.scope = scope; + } + + // ************************* + // StorageEngine<Key,byte[]> + // ************************* + + @Override + public void put(ByteArray key, Versioned<byte[]> value) + throws SyncException { + 
super.put(key, value); + syncManager.queueSyncTask(this, key, value); + } + + // ************** + // Public methods + // ************** + + /** + * Get the scope for this store + * @return + */ + public Scope getScope() { + return scope; + } +} diff --git a/src/main/java/org/sdnplatform/sync/internal/store/VCVersioned.java b/src/main/java/org/sdnplatform/sync/internal/store/VCVersioned.java new file mode 100644 index 0000000000000000000000000000000000000000..4861892bc7004416a5384eca71f74fc8dbc05e43 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/store/VCVersioned.java @@ -0,0 +1,23 @@ +package org.sdnplatform.sync.internal.store; + +import org.sdnplatform.sync.Versioned; +import org.sdnplatform.sync.internal.version.VectorClock; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; + + +public final class VCVersioned<T> extends Versioned<T> { + + private static final long serialVersionUID = 8038484251323965062L; + + public VCVersioned(T object) { + super(object); + } + + @JsonCreator + public VCVersioned(@JsonProperty("object") T object, + @JsonProperty("version") VectorClock version) { + super(object, version); + } +} diff --git a/src/main/java/org/sdnplatform/sync/internal/util/ByteArray.java b/src/main/java/org/sdnplatform/sync/internal/util/ByteArray.java new file mode 100644 index 0000000000000000000000000000000000000000..5a9473562e4d226fb7e8f5f22e805074bcd90129 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/util/ByteArray.java @@ -0,0 +1,65 @@ +package org.sdnplatform.sync.internal.util; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.Arrays; + +import org.openflow.util.HexString; + +/** + * A byte array container that provides an equals and hashCode pair based on the + * contents of the byte array. This is useful as a key for Maps. 
+ */ +public final class ByteArray implements Serializable { + + private static final long serialVersionUID = 1L; + + public static final ByteArray EMPTY = new ByteArray(); + + private final byte[] underlying; + + public ByteArray(byte... underlying) { + this.underlying = underlying; + } + + public byte[] get() { + return underlying; + } + + @Override + public int hashCode() { + return Arrays.hashCode(underlying); + } + + @Override + public boolean equals(Object obj) { + if(this == obj) + return true; + if(!(obj instanceof ByteArray)) + return false; + ByteArray other = (ByteArray) obj; + return Arrays.equals(underlying, other.underlying); + } + + @Override + public String toString() { + return Arrays.toString(underlying); + } + + /** + * Translate the each ByteArray in an iterable into a hexidecimal string + * + * @param arrays The array of bytes to translate + * @return An iterable of converted strings + */ + public static Iterable<String> toHexStrings(Iterable<ByteArray> arrays) { + ArrayList<String> ret = new ArrayList<String>(); + for(ByteArray array: arrays) + ret.add(HexString.toHexString(array.get())); + return ret; + } + + public int length() { + return underlying.length; + } +} diff --git a/src/main/java/org/sdnplatform/sync/internal/util/EmptyClosableIterator.java b/src/main/java/org/sdnplatform/sync/internal/util/EmptyClosableIterator.java new file mode 100644 index 0000000000000000000000000000000000000000..55554db9297d60bb3aec00bedd9b86218cce6bcd --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/util/EmptyClosableIterator.java @@ -0,0 +1,26 @@ +package org.sdnplatform.sync.internal.util; + +import java.util.NoSuchElementException; + +import org.sdnplatform.sync.IClosableIterator; + + +public class EmptyClosableIterator<T> implements IClosableIterator<T> { + + public boolean hasNext() { + return false; + } + + public T next() { + throw new NoSuchElementException(); + } + + public void remove() { + throw new NoSuchElementException(); + } + 
+ @Override + public void close() { + // no-op + } +} diff --git a/src/main/java/org/sdnplatform/sync/internal/util/Pair.java b/src/main/java/org/sdnplatform/sync/internal/util/Pair.java new file mode 100644 index 0000000000000000000000000000000000000000..db78f6dc4812b98715a864102d14760c54b63dde --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/util/Pair.java @@ -0,0 +1,113 @@ +package org.sdnplatform.sync.internal.util; + +import java.io.Serializable; +import java.util.Map.Entry; + +import com.google.common.base.Function; +import com.google.common.base.Objects; + +/** + * Represents a pair of items. + */ +public class Pair<F, S> implements Serializable, Function<F, S>, + Entry<F, S> { + + private static final long serialVersionUID = 1L; + + private final F first; + + private final S second; + + /** + * Static factory method that, unlike the constructor, performs generic + * inference saving some typing. Use in the following way (for a pair of + * Strings): + * + * <p> + * <code> + * Pair<String, String> pair = Pair.create("first", "second"); + * </code> + * </p> + * + * @param <F> The type of the first thing. + * @param <S> The type of the second thing + * @param first The first thing + * @param second The second thing + * @return The pair (first,second) + */ + public static final <F, S> Pair<F, S> create(F first, S second) { + return new Pair<F, S>(first, second); + } + + /** + * Use the static factory method {@link #create(Object, Object)} instead of + * this where possible. + * + * @param first + * @param second + */ + public Pair(F first, S second) { + this.first = first; + this.second = second; + } + + public S apply(F from) { + if(from == null ? 
first == null : from.equals(first)) + return second; + return null; + } + + public final F getFirst() { + return first; + } + + public final S getSecond() { + return second; + } + + @Override + public final int hashCode() { + final int PRIME = 31; + int result = 1; + result = PRIME * result + ((first == null) ? 0 : first.hashCode()); + result = PRIME * result + ((second == null) ? 0 : second.hashCode()); + return result; + } + + @Override + public final boolean equals(Object obj) { + if(this == obj) + return true; + if(!(obj instanceof Pair<?, ?>)) + return false; + + final Pair<?, ?> other = (Pair<?, ?>) (obj); + return Objects.equal(first, other.first) && Objects.equal(second, other.second); + } + + @Override + public final String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("[ " + first + ", " + second + " ]"); + return builder.toString(); + } + + // ***** + // Entry + // ***** + + @Override + public F getKey() { + return getFirst(); + } + + @Override + public S getValue() { + return getSecond(); + } + + @Override + public S setValue(S value) { + throw new UnsupportedOperationException(); + } +} diff --git a/src/main/java/org/sdnplatform/sync/internal/version/ChainedResolver.java b/src/main/java/org/sdnplatform/sync/internal/version/ChainedResolver.java new file mode 100644 index 0000000000000000000000000000000000000000..a3b98d93fe197f842a6988ccd09497eac9d9df2f --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/version/ChainedResolver.java @@ -0,0 +1,73 @@ +/* + * Copyright 2008-2009 LinkedIn, Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.sdnplatform.sync.internal.version; + +import java.util.ArrayList; +import java.util.List; + +import org.sdnplatform.sync.IInconsistencyResolver; + + +/** + * Apply the given inconsistency resolvers in order until there are 1 or fewer + * items left. + * + * + */ +public class ChainedResolver<T> implements IInconsistencyResolver<T> { + + private List<IInconsistencyResolver<T>> resolvers; + + public ChainedResolver(IInconsistencyResolver<T>... resolvers) { + this.resolvers = new ArrayList<IInconsistencyResolver<T>>(resolvers.length); + for(IInconsistencyResolver<T> resolver: resolvers) + this.resolvers.add(resolver); + } + + public List<T> resolveConflicts(List<T> items) { + for(IInconsistencyResolver<T> resolver: resolvers) { + if(items.size() <= 1) + return items; + else + items = resolver.resolveConflicts(items); + } + + return items; + } + + @Override + public boolean equals(Object o) { + if(this == o) + return true; + if(o == null || getClass() != o.getClass()) + return false; + + ChainedResolver<?> that = (ChainedResolver<?>) o; + + if(resolvers != null + ? !resolvers.equals(that.resolvers) + : that.resolvers != null) + return false; + + return true; + } + + @Override + public int hashCode() { + return resolvers != null ? 
resolvers.hashCode() : 0; + } +} diff --git a/src/main/java/org/sdnplatform/sync/internal/version/ClockEntry.java b/src/main/java/org/sdnplatform/sync/internal/version/ClockEntry.java new file mode 100644 index 0000000000000000000000000000000000000000..058a1cacd7658aa26f0f9ee486188790e1205c64 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/version/ClockEntry.java @@ -0,0 +1,105 @@ +/* + * Copyright 2008-2009 LinkedIn, Inc + * Copyright 2013 Big Switch Networks, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.sdnplatform.sync.internal.version; + +import java.io.Serializable; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * An entry element for a vector clock versioning scheme This assigns the + * version from a specific machine, the VectorClock keeps track of the complete + * system version, which will consist of many individual Version objects. 
+ * + * + */ +public final class ClockEntry + implements Cloneable, Serializable { + + private static final long serialVersionUID = -759862327985468981L; + + private final short nodeId; + private final long version; + + /** + * Create a new Version from constituate parts + * + * @param nodeId The node id + * @param version The current version + */ + @JsonCreator + public ClockEntry(@JsonProperty("nodeId") short nodeId, + @JsonProperty("version") long version) { + if(nodeId < 0) + throw new IllegalArgumentException("Node id " + nodeId + " is not in the range (0, " + + Short.MAX_VALUE + ")."); + if(version < 1) + throw new IllegalArgumentException("Version " + version + " is not in the range (1, " + + Short.MAX_VALUE + ")."); + this.nodeId = nodeId; + this.version = version; + } + + @Override + public ClockEntry clone() { + try { + return (ClockEntry) super.clone(); + } catch(CloneNotSupportedException e) { + throw new RuntimeException(e); + } + } + + public short getNodeId() { + return nodeId; + } + + public long getVersion() { + return version; + } + + public ClockEntry incremented() { + return new ClockEntry(nodeId, version + 1); + } + + @Override + public int hashCode() { + return nodeId + (((int) version) << 16); + } + + @Override + public boolean equals(Object o) { + if(this == o) + return true; + + if(o == null) + return false; + + if(o.getClass().equals(ClockEntry.class)) { + ClockEntry v = (ClockEntry) o; + return v.getNodeId() == getNodeId() && v.getVersion() == getVersion(); + } else { + return false; + } + } + + @Override + public String toString() { + return nodeId + ":" + version; + } +} diff --git a/src/main/java/org/sdnplatform/sync/internal/version/TimeBasedInconsistencyResolver.java b/src/main/java/org/sdnplatform/sync/internal/version/TimeBasedInconsistencyResolver.java new file mode 100644 index 0000000000000000000000000000000000000000..75b0c80530dc8d242eb3c76dba7fab9bf39391f3 --- /dev/null +++ 
b/src/main/java/org/sdnplatform/sync/internal/version/TimeBasedInconsistencyResolver.java @@ -0,0 +1,68 @@ +/* + * Copyright 2008-2009 LinkedIn, Inc + * Copyright 2013 Big Switch Networks, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.sdnplatform.sync.internal.version; + +import java.util.Collections; +import java.util.List; + +import org.sdnplatform.sync.IInconsistencyResolver; +import org.sdnplatform.sync.Versioned; + + +/** + * Resolve inconsistencies based on timestamp in the vector clock + * @param <T> The type f the versioned object + */ +public class TimeBasedInconsistencyResolver<T> + implements IInconsistencyResolver<Versioned<T>> { + + public List<Versioned<T>> resolveConflicts(List<Versioned<T>> items) { + if(items.size() <= 1) { + return items; + } else { + Versioned<T> max = items.get(0); + long maxTime = + ((VectorClock) items.get(0).getVersion()).getTimestamp(); + VectorClock maxClock = ((VectorClock) items.get(0).getVersion()); + for(Versioned<T> versioned: items) { + VectorClock clock = (VectorClock) versioned.getVersion(); + if(clock.getTimestamp() > maxTime) { + max = versioned; + maxTime = ((VectorClock) versioned.getVersion()). 
+ getTimestamp(); + } + maxClock = maxClock.merge(clock); + } + Versioned<T> maxTimeClockVersioned = + new Versioned<T>(max.getValue(), maxClock); + return Collections.singletonList(maxTimeClockVersioned); + } + } + + @Override + public boolean equals(Object o) { + if(this == o) + return true; + return (o != null && getClass() == o.getClass()); + } + + @Override + public int hashCode() { + return getClass().hashCode(); + } +} diff --git a/src/main/java/org/sdnplatform/sync/internal/version/VectorClock.java b/src/main/java/org/sdnplatform/sync/internal/version/VectorClock.java new file mode 100644 index 0000000000000000000000000000000000000000..7ad1b5bf3dff01fb03d8f75d9fe6a2373185fcf2 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/version/VectorClock.java @@ -0,0 +1,278 @@ +/* + * Copyright 2008-2009 LinkedIn, Inc + * Copyright 2013 Big Switch Networks, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.sdnplatform.sync.internal.version; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import org.sdnplatform.sync.IVersion; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonProperty; + +import com.google.common.collect.Lists; + +/** + * A vector of the number of writes mastered by each node. 
The vector is stored + * sparely, since, in general, writes will be mastered by only one node. This + * means implicitly all the versions are at zero, but we only actually store + * those greater than zero. + */ +public class VectorClock implements IVersion, Serializable, Cloneable { + + private static final long serialVersionUID = 7663945747147638702L; + + private static final int MAX_NUMBER_OF_VERSIONS = Short.MAX_VALUE; + + /* A sorted list of live versions ordered from least to greatest */ + private final List<ClockEntry> versions; + + /* + * The time of the last update on the server on which the update was + * performed + */ + private final long timestamp; + + /** + * Construct an empty VectorClock + */ + public VectorClock() { + this(new ArrayList<ClockEntry>(0), System.currentTimeMillis()); + } + + public VectorClock(long timestamp) { + this(new ArrayList<ClockEntry>(0), timestamp); + } + + /** + * Create a VectorClock with the given version and timestamp + * + * @param versions The version to prepopulate + * @param timestamp The timestamp to prepopulate + */ + @JsonCreator + public VectorClock(@JsonProperty("entries") List<ClockEntry> versions, + @JsonProperty("timestamp") long timestamp) { + this.versions = versions; + this.timestamp = timestamp; + } + + /** + * Get new vector clock based on this clock but incremented on index nodeId + * + * @param nodeId The id of the node to increment + * @return A vector clock equal on each element execept that indexed by + * nodeId + */ + public VectorClock incremented(int nodeId, long time) { + if(nodeId < 0 || nodeId > Short.MAX_VALUE) + throw new IllegalArgumentException(nodeId + + " is outside the acceptable range of node ids."); + + // stop on the index greater or equal to the node + List<ClockEntry> newversions = Lists.newArrayList(versions); + boolean found = false; + int index = 0; + for(; index < newversions.size(); index++) { + if(newversions.get(index).getNodeId() == nodeId) { + found = true; + break; + } 
else if(newversions.get(index).getNodeId() > nodeId) { + found = false; + break; + } + } + + if(found) { + newversions.set(index, newversions.get(index).incremented()); + } else if(index < newversions.size() - 1) { + newversions.add(index, new ClockEntry((short) nodeId, 1)); + } else { + // we don't already have a version for this, so add it + if(newversions.size() > MAX_NUMBER_OF_VERSIONS) + throw new IllegalStateException("Vector clock is full!"); + newversions.add(index, new ClockEntry((short) nodeId, 1)); + } + + return new VectorClock(newversions, time); + } + + @Override + public VectorClock clone() { + return new VectorClock(Lists.newArrayList(versions), this.timestamp); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + (int) (timestamp ^ (timestamp >>> 32)); + result = prime * result + + ((versions == null) ? 0 : versions.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + VectorClock other = (VectorClock) obj; + if (timestamp != other.timestamp) return false; + if (versions == null) { + if (other.versions != null) return false; + } else if (!versions.equals(other.versions)) return false; + return true; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("version("); + if(this.versions.size() > 0) { + for(int i = 0; i < this.versions.size() - 1; i++) { + builder.append(this.versions.get(i)); + builder.append(", "); + } + builder.append(this.versions.get(this.versions.size() - 1)); + } + builder.append(")"); + builder.append(" ts:" + timestamp); + return builder.toString(); + } + + @JsonIgnore + public long getMaxVersion() { + long max = -1; + for(ClockEntry entry: versions) + max = Math.max(entry.getVersion(), max); + return max; + } + + public VectorClock merge(VectorClock clock) { + 
VectorClock newClock = new VectorClock(); + int i = 0; + int j = 0; + while(i < this.versions.size() && j < clock.versions.size()) { + ClockEntry v1 = this.versions.get(i); + ClockEntry v2 = clock.versions.get(j); + if(v1.getNodeId() == v2.getNodeId()) { + newClock.versions.add(new ClockEntry(v1.getNodeId(), Math.max(v1.getVersion(), + v2.getVersion()))); + i++; + j++; + } else if(v1.getNodeId() < v2.getNodeId()) { + newClock.versions.add(v1.clone()); + i++; + } else { + newClock.versions.add(v2.clone()); + j++; + } + } + + // Okay now there may be leftovers on one or the other list remaining + for(int k = i; k < this.versions.size(); k++) + newClock.versions.add(this.versions.get(k).clone()); + for(int k = j; k < clock.versions.size(); k++) + newClock.versions.add(clock.versions.get(k).clone()); + + return newClock; + } + + @Override + public Occurred compare(IVersion v) { + if(!(v instanceof VectorClock)) + throw new IllegalArgumentException("Cannot compare Versions of different types."); + + return compare(this, (VectorClock) v); + } + + /** + * Is this Reflexive, AntiSymetic, and Transitive? Compare two VectorClocks, + * the outcomes will be one of the following: -- Clock 1 is BEFORE clock 2 + * if there exists an i such that c1(i) <= c(2) and there does not exist a j + * such that c1(j) > c2(j). 
-- Clock 1 is CONCURRENT to clock 2 if there + * exists an i, j such that c1(i) < c2(i) and c1(j) > c2(j) -- Clock 1 is + * AFTER clock 2 otherwise + * + * @param v1 The first VectorClock + * @param v2 The second VectorClock + */ + public static Occurred compare(VectorClock v1, VectorClock v2) { + if(v1 == null || v2 == null) + throw new IllegalArgumentException("Can't compare null vector clocks!"); + // We do two checks: v1 <= v2 and v2 <= v1 if both are true then + boolean v1Bigger = false; + boolean v2Bigger = false; + int p1 = 0; + int p2 = 0; + + while(p1 < v1.versions.size() && p2 < v2.versions.size()) { + ClockEntry ver1 = v1.versions.get(p1); + ClockEntry ver2 = v2.versions.get(p2); + if(ver1.getNodeId() == ver2.getNodeId()) { + if(ver1.getVersion() > ver2.getVersion()) + v1Bigger = true; + else if(ver2.getVersion() > ver1.getVersion()) + v2Bigger = true; + p1++; + p2++; + } else if(ver1.getNodeId() > ver2.getNodeId()) { + // since ver1 is bigger that means it is missing a version that + // ver2 has + v2Bigger = true; + p2++; + } else { + // this means ver2 is bigger which means it is missing a version + // ver1 has + v1Bigger = true; + p1++; + } + } + + /* Okay, now check for left overs */ + if(p1 < v1.versions.size()) + v1Bigger = true; + else if(p2 < v2.versions.size()) + v2Bigger = true; + + /* This is the case where they are equal, return BEFORE arbitrarily */ + if(!v1Bigger && !v2Bigger) + return Occurred.BEFORE; + /* This is the case where v1 is a successor clock to v2 */ + else if(v1Bigger && !v2Bigger) + return Occurred.AFTER; + /* This is the case where v2 is a successor clock to v1 */ + else if(!v1Bigger && v2Bigger) + return Occurred.BEFORE; + /* This is the case where both clocks are parallel to one another */ + else + return Occurred.CONCURRENTLY; + } + + public long getTimestamp() { + return this.timestamp; + } + + public List<ClockEntry> getEntries() { + return Collections.unmodifiableList(this.versions); + } +} diff --git 
a/src/main/java/org/sdnplatform/sync/internal/version/VectorClockInconsistencyResolver.java b/src/main/java/org/sdnplatform/sync/internal/version/VectorClockInconsistencyResolver.java new file mode 100644 index 0000000000000000000000000000000000000000..a50779bb27e78e2346a8bdb916412acfc139b979 --- /dev/null +++ b/src/main/java/org/sdnplatform/sync/internal/version/VectorClockInconsistencyResolver.java @@ -0,0 +1,74 @@ +/* + * Copyright 2008-2009 LinkedIn, Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.sdnplatform.sync.internal.version; + +import java.util.List; +import java.util.ListIterator; + +import org.sdnplatform.sync.IInconsistencyResolver; +import org.sdnplatform.sync.Versioned; +import org.sdnplatform.sync.IVersion.Occurred; + +import com.google.common.collect.Lists; + +/** + * An inconsistency resolver that uses the object VectorClocks leaving only a + * set of concurrent versions remaining. 
+ * + * + */ +public class VectorClockInconsistencyResolver<T> + implements IInconsistencyResolver<Versioned<T>> { + + public List<Versioned<T>> resolveConflicts(List<Versioned<T>> items) { + int size = items.size(); + if(size <= 1) + return items; + + List<Versioned<T>> newItems = Lists.newArrayList(); + for(Versioned<T> v1: items) { + boolean found = false; + for(ListIterator<Versioned<T>> it2 = + newItems.listIterator(); it2.hasNext();) { + Versioned<T> v2 = it2.next(); + Occurred compare = v1.getVersion().compare(v2.getVersion()); + if(compare == Occurred.AFTER) { + if(found) + it2.remove(); + else + it2.set(v1); + } + if(compare != Occurred.CONCURRENTLY) + found = true; + } + if(!found) + newItems.add(v1); + } + return newItems; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + return (o != null && getClass() == o.getClass()); + } + + @Override + public int hashCode() { + return getClass().hashCode(); + } +} diff --git a/src/main/resources/META-INF/services/net.floodlightcontroller.core.module.IFloodlightModule b/src/main/resources/META-INF/services/net.floodlightcontroller.core.module.IFloodlightModule index dc38c05b97eed015c76e1f6ea56ae3b1099130be..4f2f68440b700de96a3c5690f857a13dc560b6a4 100644 --- a/src/main/resources/META-INF/services/net.floodlightcontroller.core.module.IFloodlightModule +++ b/src/main/resources/META-INF/services/net.floodlightcontroller.core.module.IFloodlightModule @@ -26,4 +26,6 @@ net.floodlightcontroller.devicemanager.test.MockDeviceManager net.floodlightcontroller.core.test.MockFloodlightProvider net.floodlightcontroller.core.test.MockThreadPoolService net.floodlightcontroller.firewall.Firewall -net.floodlightcontroller.loadbalancer.LoadBalancer \ No newline at end of file +net.floodlightcontroller.loadbalancer.LoadBalancer +org.sdnplatform.sync.internal.SyncManager +org.sdnplatform.sync.internal.SyncTorture diff --git a/src/main/resources/floodlightdefault.properties 
b/src/main/resources/floodlightdefault.properties index 2d384d3a1b6e39accfcd56839588cbfc1fb9bdbe..620471ab9fb0491700e9980e7f98675942dcd00d 100644 --- a/src/main/resources/floodlightdefault.properties +++ b/src/main/resources/floodlightdefault.properties @@ -15,7 +15,8 @@ net.floodlightcontroller.counter.CounterStore,\ net.floodlightcontroller.debugcounter.DebugCounter,\ net.floodlightcontroller.perfmon.PktInProcessingTime,\ net.floodlightcontroller.ui.web.StaticWebRoutable,\ -net.floodlightcontroller.loadbalancer.LoadBalancer +net.floodlightcontroller.loadbalancer.LoadBalancer,\ +org.sdnplatform.sync.internal.SyncManager net.floodlightcontroller.restserver.RestApiServer.port = 8080 net.floodlightcontroller.core.FloodlightProvider.openflowport = 6633 net.floodlightcontroller.jython.JythonDebugInterface.port = 6655 diff --git a/src/test/java/org/sdnplatform/sync/VersionedTest.java b/src/test/java/org/sdnplatform/sync/VersionedTest.java new file mode 100644 index 0000000000000000000000000000000000000000..7c2c4433db1330a03af7363ec52beef714bddeb4 --- /dev/null +++ b/src/test/java/org/sdnplatform/sync/VersionedTest.java @@ -0,0 +1,71 @@ +/* + * Copyright 2008-2009 LinkedIn, Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package org.sdnplatform.sync; + +import static org.junit.Assert.*; + +import org.junit.Test; +import org.sdnplatform.sync.Versioned; +import org.sdnplatform.sync.internal.TUtils; + + +public class VersionedTest { + + private Versioned<Integer> getVersioned(Integer value, int... versionIncrements) { + return new Versioned<Integer>(value, TUtils.getClock(versionIncrements)); + } + + public void mustHaveVersion() { + try { + new Versioned<Integer>(1, null); + fail("Successfully created Versioned with null version."); + } catch(NullPointerException e) { + // this is good + } + } + + @Test + public void testEquals() { + assertEquals("Null versioneds not equal.", getVersioned(null), getVersioned(null)); + assertEquals("equal versioneds not equal.", getVersioned(1), getVersioned(1)); + assertEquals("equal versioneds not equal.", getVersioned(1, 1, 2), getVersioned(1, 1, 2)); + + assertTrue("Equals values with different version are equal!", + !getVersioned(1, 1, 2).equals(getVersioned(1, 1, 2, 2))); + assertTrue("Different values with same version are equal!", + !getVersioned(1, 1, 2).equals(getVersioned(2, 1, 2))); + assertTrue("Different values with different version are equal!", + !getVersioned(1, 1, 2).equals(getVersioned(2, 1, 1, 2))); + + // Should work for array types too! 
+ assertEquals("Equal arrays are not equal!", + new Versioned<byte[]>(new byte[] { 1 }), + new Versioned<byte[]>(new byte[] { 1 })); + } + + @Test + public void testClone() { + Versioned<Integer> v1 = getVersioned(2, 1, 2, 3); + Versioned<Integer> v2 = v1.cloneVersioned(); + assertEquals(v1, v2); + assertTrue(v1 != v2); + assertTrue(v1.getVersion() != v2.getVersion()); + v2.increment(1, System.currentTimeMillis()); + assertTrue(!v1.equals(v2)); + } + +} diff --git a/src/test/java/org/sdnplatform/sync/client/ClientTest.java b/src/test/java/org/sdnplatform/sync/client/ClientTest.java new file mode 100644 index 0000000000000000000000000000000000000000..2ecfe3f3c18964e359024451cf48f293f23021fe --- /dev/null +++ b/src/test/java/org/sdnplatform/sync/client/ClientTest.java @@ -0,0 +1,149 @@ +package org.sdnplatform.sync.client; + +import static org.junit.Assert.*; + +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import java.util.ArrayList; + +import net.floodlightcontroller.core.module.FloodlightModuleContext; +import net.floodlightcontroller.threadpool.IThreadPoolService; +import net.floodlightcontroller.threadpool.ThreadPool; + +import com.fasterxml.jackson.databind.ObjectMapper; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.sdnplatform.sync.ISyncService.Scope; +import org.sdnplatform.sync.client.SyncClient; +import org.sdnplatform.sync.client.SyncClient.SyncClientSettings; +import org.sdnplatform.sync.internal.SyncManager; +import org.sdnplatform.sync.internal.config.Node; + + +public class ClientTest { + protected SyncManager syncManager; + protected final static ObjectMapper mapper = new ObjectMapper(); + protected String nodeString; + ArrayList<Node> nodes; + ThreadPool tp; + + @Before + public void setUp() throws Exception { + nodes = new ArrayList<Node>(); + nodes.add(new Node("localhost", 40101, (short)1, (short)1)); + nodeString = mapper.writeValueAsString(nodes); + + tp = new ThreadPool(); + 
syncManager = new SyncManager(); + + FloodlightModuleContext fmc = new FloodlightModuleContext(); + fmc.addService(IThreadPoolService.class, tp); + + fmc.addConfigParam(syncManager, "nodes", nodeString); + fmc.addConfigParam(syncManager, "thisNode", ""+1); + syncManager.registerStore("global", Scope.GLOBAL); + tp.init(fmc); + syncManager.init(fmc); + tp.startUp(fmc); + syncManager.startUp(fmc); + } + + @After + public void tearDown() { + if (null != tp) + tp.getScheduledExecutor().shutdownNow(); + tp = null; + + if (null != syncManager) + syncManager.shutdown(); + syncManager = null; + } + + @Test + public void testClientBasic() throws Exception { + SyncClientSettings scs = new SyncClientSettings(); + scs.hostname = "localhost"; + scs.port = 40101; + scs.storeName = "global"; + scs.debug = true; + SyncClient client = new SyncClient(scs); + ByteArrayOutputStream out = new ByteArrayOutputStream(); + client.out = new PrintStream(out); + ByteArrayOutputStream err = new ByteArrayOutputStream(); + client.err = new PrintStream(err); + client.connect(); + client.executeCommandLine("get \"key\""); + assertEquals("", err.toString()); + assertEquals("Connected to localhost:40101\n" + + "Getting Key:\n" + + "\"key\"\n\n" + + "Not found\n", + out.toString()); + + out = new ByteArrayOutputStream(); + err = new ByteArrayOutputStream(); + client.out = new PrintStream(out); + client.err = new PrintStream(err); + client.executeCommandLine("put \"key\" {\"field1\": \"value1\", \"field2\": \"value2\"}"); + assertEquals("", err.toString()); + assertEquals("Putting Key:\n" + + "\"key\"\n\n" + + "Value:\n" + + "{\n" + + " \"field1\" : \"value1\",\n" + + " \"field2\" : \"value2\"\n" + + "}\n" + + "Success\n", + out.toString()); + + out = new ByteArrayOutputStream(); + err = new ByteArrayOutputStream(); + client.out = new PrintStream(out); + client.err = new PrintStream(err); + client.executeCommandLine("get \"key\""); + assertEquals("", err.toString()); + assertEquals("Getting Key:\n" + + 
"\"key\"\n\n" + + "Value:\n" + + "{\n" + + " \"field1\" : \"value1\",\n" + + " \"field2\" : \"value2\"\n" + + "}\n", + out.toString()); + + out = new ByteArrayOutputStream(); + err = new ByteArrayOutputStream(); + client.out = new PrintStream(out); + client.err = new PrintStream(err); + client.executeCommandLine("delete \"key\""); + assertEquals("", err.toString()); + assertEquals("Deleting Key:\n" + + "\"key\"\n\n" + + "Success\n", + out.toString()); + + out = new ByteArrayOutputStream(); + err = new ByteArrayOutputStream(); + client.out = new PrintStream(out); + client.err = new PrintStream(err); + client.executeCommandLine("get \"key\""); + assertEquals("", err.toString()); + assertEquals("Getting Key:\n" + + "\"key\"\n\n" + + "Not found\n", + out.toString()); + + out = new ByteArrayOutputStream(); + err = new ByteArrayOutputStream(); + client.out = new PrintStream(out); + client.err = new PrintStream(err); + client.executeCommandLine("quit"); + assertEquals("", err.toString()); + assertEquals("", + out.toString()); + + client.executeCommandLine("help"); + assert(!"".equals(out.toString())); + } +} diff --git a/src/test/java/org/sdnplatform/sync/internal/SyncManagerTest.java b/src/test/java/org/sdnplatform/sync/internal/SyncManagerTest.java new file mode 100644 index 0000000000000000000000000000000000000000..2004356a345af7b28ca16262de56a364b16acf6f --- /dev/null +++ b/src/test/java/org/sdnplatform/sync/internal/SyncManagerTest.java @@ -0,0 +1,652 @@ +package org.sdnplatform.sync.internal; + +import static org.junit.Assert.*; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map.Entry; + +import net.floodlightcontroller.core.module.FloodlightModuleContext; +import net.floodlightcontroller.core.module.FloodlightModuleException; +import net.floodlightcontroller.threadpool.IThreadPoolService; +import 
net.floodlightcontroller.threadpool.ThreadPool; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.junit.After; +import org.junit.Before; +import org.junit.Ignore; +import org.junit.Test; +import org.sdnplatform.sync.IClosableIterator; +import org.sdnplatform.sync.IInconsistencyResolver; +import org.sdnplatform.sync.IStoreClient; +import org.sdnplatform.sync.IStoreListener; +import org.sdnplatform.sync.ISyncService; +import org.sdnplatform.sync.Versioned; +import org.sdnplatform.sync.ISyncService.Scope; +import org.sdnplatform.sync.error.ObsoleteVersionException; +import org.sdnplatform.sync.internal.AbstractSyncManager; +import org.sdnplatform.sync.internal.SyncManager; +import org.sdnplatform.sync.internal.SyncTorture; +import org.sdnplatform.sync.internal.config.Node; +import org.sdnplatform.sync.internal.config.PropertyCCProvider; +import org.sdnplatform.sync.internal.store.Key; +import org.sdnplatform.sync.internal.store.TBean; +import org.sdnplatform.sync.internal.version.VectorClock; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +public class SyncManagerTest { + protected static Logger logger = + LoggerFactory.getLogger(SyncManagerTest.class); + + protected FloodlightModuleContext[] moduleContexts; + protected SyncManager[] syncManagers; + protected final static ObjectMapper mapper = new ObjectMapper(); + protected String nodeString; + ArrayList<Node> nodes; + + ThreadPool tp; + + protected void setupSyncManager(FloodlightModuleContext fmc, + SyncManager syncManager, Node thisNode) + throws FloodlightModuleException { + fmc.addService(IThreadPoolService.class, tp); + fmc.addConfigParam(syncManager, "configProviders", + PropertyCCProvider.class.getName()); + fmc.addConfigParam(syncManager, "nodes", nodeString); + fmc.addConfigParam(syncManager, "thisNode", ""+thisNode.getNodeId()); + syncManager.registerStore("global", Scope.GLOBAL); + syncManager.registerStore("local", 
Scope.LOCAL); + tp.init(fmc); + syncManager.init(fmc); + tp.startUp(fmc); + syncManager.startUp(fmc); + } + + @Before + public void setUp() throws Exception { + tp = new ThreadPool(); + + syncManagers = new SyncManager[4]; + moduleContexts = new FloodlightModuleContext[4]; + + nodes = new ArrayList<Node>(); + nodes.add(new Node("localhost", 40101, (short)1, (short)1)); + nodes.add(new Node("localhost", 40102, (short)2, (short)2)); + nodes.add(new Node("localhost", 40103, (short)3, (short)1)); + nodes.add(new Node("localhost", 40104, (short)4, (short)2)); + nodeString = mapper.writeValueAsString(nodes); + + for(int i = 0; i < 4; i++) { + moduleContexts[i] = new FloodlightModuleContext(); + syncManagers[i] = new SyncManager(); + setupSyncManager(moduleContexts[i], syncManagers[i], nodes.get(i)); + } + } + + @After + public void tearDown() { + tp.getScheduledExecutor().shutdownNow(); + tp = null; + + if (syncManagers != null) { + for(int i = 0; i < syncManagers.length; i++) { + if (null != syncManagers[i]) + syncManagers[i].shutdown(); + } + } + syncManagers = null; + } + + @Test + public void testBasicOneNode() throws Exception { + AbstractSyncManager sync = syncManagers[0]; + IStoreClient<Key, TBean> testClient = + sync.getStoreClient("global", Key.class, TBean.class); + Key k = new Key("com.bigswitch.bigsync.internal", "test"); + TBean tb = new TBean("hello", 42); + TBean tb2 = new TBean("hello", 84); + TBean tb3 = new TBean("hello", 126); + + assertNotNull(testClient.get(k)); + assertNull(testClient.get(k).getValue()); + + testClient.put(k, tb); + Versioned<TBean> result = testClient.get(k); + assertEquals(result.getValue(), tb); + + result.setValue(tb2); + testClient.put(k, result); + + try { + result.setValue(tb3); + testClient.put(k, result); + fail("Should get ObsoleteVersionException"); + } catch (ObsoleteVersionException e) { + // happy town + } + + result = testClient.get(k); + assertEquals(tb2, result.getValue()); + + } + + @Test + public void 
testIterator() throws Exception { + AbstractSyncManager sync = syncManagers[0]; + IStoreClient<Key, TBean> testClient = + sync.getStoreClient("local", Key.class, TBean.class); + + HashMap<Key, TBean> testMap = new HashMap<Key, TBean>(); + for (int i = 0; i < 100; i++) { + Key k = new Key("com.bigswitch.bigsync.internal", "test" + i); + TBean tb = new TBean("value", i); + testMap.put(k, tb); + testClient.put(k, tb); + } + + IClosableIterator<Entry<Key, Versioned<TBean>>> iter = + testClient.entries(); + int size = 0; + try { + while (iter.hasNext()) { + Entry<Key, Versioned<TBean>> e = iter.next(); + assertEquals(testMap.get(e.getKey()), e.getValue().getValue()); + size += 1; + } + } finally { + iter.close(); + } + assertEquals(testMap.size(), size); + } + + private <K, V> Versioned<V> waitForValue(IStoreClient<K, V> client, + K key, V value, + int maxTime, + String clientName) + throws Exception { + Versioned<V> v = null; + long then = System.currentTimeMillis(); + while (true) { + v = client.get(key); + if (value != null) { + if (v.getValue() != null && v.getValue().equals(value)) break; + } else { + if (v.getValue() != null) break; + } + if (v.getValue() != null) + logger.info("{}: Value for key {} not yet right: " + + "expected: {}; actual: {}", + new Object[]{clientName, key, value, v.getValue()}); + else + logger.info("{}: Value for key {} is null: expected {}", + new Object[]{clientName, key, value}); + + + Thread.sleep(100); + assertTrue(then + maxTime > System.currentTimeMillis()); + } + return v; + } + + private void waitForFullMesh(int maxTime) throws Exception { + long then = System.currentTimeMillis(); + + while (true) { + boolean full = true; + for(int i = 0; i < syncManagers.length; i++) { + if (!syncManagers[i].rpcService.isFullyConnected()) + full = false; + } + if (full) break; + Thread.sleep(100); + assertTrue(then + maxTime > System.currentTimeMillis()); + } + } + + private void waitForConnection(SyncManager sm, + short nodeId, + boolean 
connected, + int maxTime) throws Exception { + long then = System.currentTimeMillis(); + + while (true) { + if (connected == sm.rpcService.isConnected(nodeId)) break; + Thread.sleep(100); + assertTrue(then + maxTime > System.currentTimeMillis()); + } + } + + @Test + public void testBasicGlobalSync() throws Exception { + waitForFullMesh(2000); + + ArrayList<IStoreClient<String, String>> clients = + new ArrayList<IStoreClient<String, String>>(syncManagers.length); + // write one value to each node's local interface + for (int i = 0; i < syncManagers.length; i++) { + IStoreClient<String, String> client = + syncManagers[i].getStoreClient("global", + String.class, String.class); + clients.add(client); + client.put("key" + i, ""+i); + } + + // verify that we see all the values everywhere + for (int j = 0; j < clients.size(); j++) { + for (int i = 0; i < syncManagers.length; i++) { + waitForValue(clients.get(j), "key" + i, ""+i, 2000, "client"+j); + } + } + } + + @Test + public void testBasicLocalSync() throws Exception { + waitForFullMesh(2000); + + ArrayList<IStoreClient<String, String>> clients = + new ArrayList<IStoreClient<String, String>>(syncManagers.length); + // write one value to each node's local interface + for (int i = 0; i < syncManagers.length; i++) { + IStoreClient<String, String> client = + syncManagers[i].getStoreClient("local", + String.class, String.class); + clients.add(client); + client.put("key" + i, ""+i); + } + + // verify that we see all the values from each local group at all the + // nodes of that local group + for (int j = 0; j < clients.size(); j++) { + IStoreClient<String, String> client = clients.get(j); + for (int i = 0; i < syncManagers.length; i++) { + if (i % 2 == j % 2) + waitForValue(client, "key" + i, ""+i, 2000, "client"+j); + else { + Versioned<String> v = client.get("key" + i); + if (v.getValue() != null) { + fail("Node " + j + " reading key" + i + + ": " + v.getValue()); + } + } + } + } + } + + @Test + public void 
testConcurrentWrite() throws Exception { + waitForFullMesh(2000); + + // Here we generate concurrent writes and then resolve them using + // a custom inconsistency resolver + IInconsistencyResolver<Versioned<List<String>>> ir = + new IInconsistencyResolver<Versioned<List<String>>>() { + @Override + public List<Versioned<List<String>>> + resolveConflicts(List<Versioned<List<String>>> items) { + VectorClock vc = null; + List<String> strings = new ArrayList<String>(); + for (Versioned<List<String>> item : items) { + if (vc == null) + vc = (VectorClock)item.getVersion(); + else + vc = vc.merge((VectorClock)item.getVersion()); + + strings.addAll(item.getValue()); + } + Versioned<List<String>> v = + new Versioned<List<String>>(strings, vc); + return Collections.singletonList(v); + } + }; + + TypeReference<List<String>> tr = new TypeReference<List<String>>() {}; + TypeReference<String> ktr = new TypeReference<String>() {}; + IStoreClient<String, List<String>> client0 = + syncManagers[0].getStoreClient("local", ktr, tr, ir); + IStoreClient<String, List<String>> client2 = + syncManagers[2].getStoreClient("local", ktr, tr, ir); + + client0.put("key", Collections.singletonList("value")); + Versioned<List<String>> v = client0.get("key"); + assertNotNull(v); + + // now we generate two writes that are concurrent to each other + // but are both locally after the first write. The result should be + // two non-obsolete lists each containing a single element. + // The inconsistency resolver above will resolve these by merging + // the lists + List<String> comp = new ArrayList<String>(); + v.setValue(Collections.singletonList("newvalue0")); + comp.add("newvalue0"); + client0.put("key", v); + v.setValue(Collections.singletonList("newvalue1")); + comp.add("newvalue1"); + client2.put("key", v); + + v = waitForValue(client0, "key", comp, 1000, "client0"); + + // add one more value to the array. 
Now there will be exactly one + // non-obsolete value + List<String> newlist = new ArrayList<String>(v.getValue()); + assertEquals(2, newlist.size()); + newlist.add("finalvalue"); + v.setValue(newlist); + client0.put("key", v); + + v = waitForValue(client2, "key", newlist, 2000, "client2"); + assertEquals(3, newlist.size()); + + } + + @Test + public void testReconnect() throws Exception { + IStoreClient<String, String> client0 = + syncManagers[0].getStoreClient("global", + String.class, + String.class); + IStoreClient<String, String> client1 = + syncManagers[1].getStoreClient("global", + String.class, String.class); + IStoreClient<String, String> client2 = + syncManagers[2].getStoreClient("global", + String.class, String.class); + + client0.put("key0", "value0"); + waitForValue(client2, "key0", "value0", 1000, "client0"); + + logger.info("Shutting down server ID 1"); + syncManagers[0].shutdown(); + + client1.put("newkey1", "newvalue1"); + client2.put("newkey2", "newvalue2"); + client1.put("key0", "newvalue0"); + client2.put("key2", "newvalue2"); + + for (int i = 0; i < 500; i++) { + client2.put("largetest" + i, "largetestvalue"); + } + + logger.info("Initializing server ID 1"); + syncManagers[0] = new SyncManager(); + setupSyncManager(moduleContexts[0], syncManagers[0], nodes.get(0)); + + waitForFullMesh(2000); + + client0 = syncManagers[0].getStoreClient("global", + String.class, String.class); + waitForValue(client0, "newkey1", "newvalue1", 1000, "client0"); + waitForValue(client0, "newkey2", "newvalue2", 1000, "client0"); + waitForValue(client0, "key0", "newvalue0", 1000, "client0"); + waitForValue(client0, "key2", "newvalue2", 1000, "client0"); + + for (int i = 0; i < 500; i++) { + waitForValue(client0, "largetest" + i, + "largetestvalue", 1000, "client0"); + } + } + + protected class TestListener implements IStoreListener<String> { + HashSet<String> notified = new HashSet<String>(); + + @Override + public void keysModified(Iterator<String> keys) { + while 
(keys.hasNext()) + notified.add(keys.next()); + } + + } + + @SuppressWarnings("rawtypes") + private void waitForNotify(TestListener tl, + HashSet comp, + int maxTime) throws Exception { + long then = System.currentTimeMillis(); + + while (true) { + if (tl.notified.containsAll(comp)) break; + Thread.sleep(100); + assertTrue(then + maxTime > System.currentTimeMillis()); + } + } + + @Test + public void testNotify() throws Exception { + IStoreClient<String, String> client0 = + syncManagers[0].getStoreClient("local", + String.class, String.class); + IStoreClient<String, String> client2 = + syncManagers[2].getStoreClient("local", + new TypeReference<String>() {}, + new TypeReference<String>() {}); + + TestListener t0 = new TestListener(); + TestListener t2 = new TestListener(); + client0.addStoreListener(t0); + client2.addStoreListener(t2); + + client0.put("test0", "value"); + client2.put("test2", "value"); + + HashSet<String> c = new HashSet<String>(); + c.add("test0"); + c.add("test2"); + + waitForNotify(t0, c, 2000); + waitForNotify(t2, c, 2000); + assertEquals(2, t0.notified.size()); + assertEquals(2, t2.notified.size()); + + Versioned<String> v0 = client0.get("test0"); + v0.setValue("newvalue"); + client0.put("test0", v0); + + Versioned<String> v2 = client0.get("test2"); + v2.setValue("newvalue"); + client2.put("test2", v2); + + waitForNotify(t0, c, 2000); + waitForNotify(t2, c, 2000); + assertEquals(2, t0.notified.size()); + assertEquals(2, t2.notified.size()); + + t0.notified.clear(); + t2.notified.clear(); + + client0.delete("test0"); + client2.delete("test2"); + + waitForNotify(t0, c, 2000); + waitForNotify(t2, c, 2000); + assertEquals(2, t0.notified.size()); + assertEquals(2, t2.notified.size()); + } + + @Test + public void testAddNode() throws Exception { + waitForFullMesh(2000); + IStoreClient<String, String> client0 = + syncManagers[0].getStoreClient("global", + String.class, String.class); + IStoreClient<String, String> client1 = + 
syncManagers[1].getStoreClient("global", + String.class, String.class); + client0.put("key", "value"); + waitForValue(client1, "key", "value", 2000, "client1"); + + nodes.add(new Node("localhost", 40105, (short)5, (short)5)); + SyncManager[] sms = Arrays.copyOf(syncManagers, + syncManagers.length + 1); + FloodlightModuleContext[] fmcs = + Arrays.copyOf(moduleContexts, + moduleContexts.length + 1); + sms[syncManagers.length] = new SyncManager(); + fmcs[moduleContexts.length] = new FloodlightModuleContext(); + nodeString = mapper.writeValueAsString(nodes); + + setupSyncManager(fmcs[moduleContexts.length], + sms[syncManagers.length], + nodes.get(syncManagers.length)); + syncManagers = sms; + moduleContexts = fmcs; + + for(int i = 0; i < 4; i++) { + moduleContexts[i].addConfigParam(syncManagers[i], + "nodes", nodeString); + syncManagers[i].updateConfiguration(); + } + waitForFullMesh(2000); + + IStoreClient<String, String> client4 = + syncManagers[4].getStoreClient("global", + String.class, String.class); + client4.put("newkey", "newvalue"); + waitForValue(client4, "key", "value", 2000, "client4"); + waitForValue(client0, "newkey", "newvalue", 2000, "client0"); + } + + @Test + public void testRemoveNode() throws Exception { + waitForFullMesh(2000); + IStoreClient<String, String> client0 = + syncManagers[0].getStoreClient("global", + String.class, String.class); + IStoreClient<String, String> client1 = + syncManagers[1].getStoreClient("global", + String.class, String.class); + IStoreClient<String, String> client2 = + syncManagers[2].getStoreClient("global", + String.class, String.class); + + client0.put("key", "value"); + waitForValue(client1, "key", "value", 2000, "client1"); + + nodes.remove(0); + nodeString = mapper.writeValueAsString(nodes); + + SyncManager oldNode = syncManagers[0]; + syncManagers = Arrays.copyOfRange(syncManagers, 1, 4); + moduleContexts = Arrays.copyOfRange(moduleContexts, 1, 4); + + try { + for(int i = 0; i < syncManagers.length; i++) { + 
moduleContexts[i].addConfigParam(syncManagers[i], + "nodes", nodeString); + syncManagers[i].updateConfiguration(); + waitForConnection(syncManagers[i], (short)1, false, 2000); + } + } finally { + oldNode.shutdown(); + } + waitForFullMesh(2000); + + client1.put("newkey", "newvalue"); + waitForValue(client2, "key", "value", 2000, "client4"); + waitForValue(client2, "newkey", "newvalue", 2000, "client0"); + } + + @Test + public void testChangeNode() throws Exception { + waitForFullMesh(2000); + IStoreClient<String, String> client0 = + syncManagers[0].getStoreClient("global", + String.class, String.class); + IStoreClient<String, String> client2 = + syncManagers[2].getStoreClient("global", + String.class, String.class); + client0.put("key", "value"); + waitForValue(client2, "key", "value", 2000, "client2"); + + nodes.set(2, new Node("localhost", 50103, (short)3, (short)1)); + nodeString = mapper.writeValueAsString(nodes); + + for(int i = 0; i < syncManagers.length; i++) { + moduleContexts[i].addConfigParam(syncManagers[i], + "nodes", nodeString); + syncManagers[i].updateConfiguration(); + } + waitForFullMesh(2000); + + waitForValue(client2, "key", "value", 2000, "client2"); + client2 = syncManagers[2].getStoreClient("global", + String.class, String.class); + client0.put("key", "newvalue"); + waitForValue(client2, "key", "newvalue", 2000, "client2"); + } + + /** + * Do a brain-dead performance test with one thread writing and waiting + * for the values on the other node. 
The result get printed to the log + */ + public void testSimpleWritePerformance(String store) throws Exception { + waitForFullMesh(5000); + + final int count = 1000000; + + IStoreClient<String, String> client0 = + syncManagers[0].getStoreClient(store, + String.class, String.class); + IStoreClient<String, String> client2 = + syncManagers[2].getStoreClient(store, + String.class, String.class); + + long then = System.currentTimeMillis(); + + for (int i = 1; i <= count; i++) { + client0.put(""+i, ""+i); + } + + long donewriting = System.currentTimeMillis(); + + waitForValue(client2, ""+count, null, count, "client2"); + + long now = System.currentTimeMillis(); + + logger.info("Simple write ({}): {} values in {}+/-100 " + + "millis ({} synced writes/s) ({} local writes/s)", + new Object[]{store, count, (now-then), + 1000.0*count/(now-then), + 1000.0*count/(donewriting-then)}); + + } + + @Test + @Ignore // ignored just to speed up routine tests + public void testPerfSimpleWriteLocal() throws Exception { + testSimpleWritePerformance("local"); + } + + @Test + @Ignore // ignored just to speed up routine tests + public void testPerfSimpleWriteGlobal() throws Exception { + testSimpleWritePerformance("global"); + } + + @Test + @Ignore + public void testPerfOneNode() throws Exception { + tearDown(); + tp = new ThreadPool(); + tp.init(null); + tp.startUp(null); + nodes = new ArrayList<Node>(); + nodes.add(new Node("localhost", 40101, (short)1, (short)1)); + nodeString = mapper.writeValueAsString(nodes); + SyncManager sm = new SyncManager(); + FloodlightModuleContext fmc = new FloodlightModuleContext(); + setupSyncManager(fmc, sm, nodes.get(0)); + fmc.addService(ISyncService.class, sm); + SyncTorture st = new SyncTorture(); + //fmc.addConfigParam(st, "iterations", "1"); + st.init(fmc); + st.startUp(fmc); + Thread.sleep(10000); + } +} diff --git a/src/test/java/org/sdnplatform/sync/internal/TUtils.java b/src/test/java/org/sdnplatform/sync/internal/TUtils.java new file mode 100644 
index 0000000000000000000000000000000000000000..6b3fc84b6a608692dbe8b3f6668a20603a2e0eb5 --- /dev/null +++ b/src/test/java/org/sdnplatform/sync/internal/TUtils.java @@ -0,0 +1,301 @@ +/* + * Copyright 2008-2009 LinkedIn, Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.sdnplatform.sync.internal; + +import java.io.File; +import java.io.UnsupportedEncodingException; +import java.lang.reflect.Field; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Calendar; +import java.util.Collections; +import java.util.GregorianCalendar; +import java.util.List; +import java.util.Random; + +import org.sdnplatform.sync.internal.util.ByteArray; +import org.sdnplatform.sync.internal.version.VectorClock; + + +/** + * Helper utilities for tests + * + * + */ +public class TUtils { + + public static final String DIGITS = "0123456789"; + public static final String LETTERS = "qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM"; + public static final String CHARACTERS = LETTERS + DIGITS + "~!@#$%^&*()____+-=[];',,,./>?:{}"; + public static final Random SEEDED_RANDOM = new Random(19873482374L); + public static final Random UNSEEDED_RANDOM = new Random(); + + /** + * Get a vector clock with events on the sequence of nodes given So + * getClock(1,1,2,2,2) means a clock that has two writes on node 1 and 3 + * writes on node 2. 
+ * + * @param nodes The sequence of nodes + * @return A VectorClock initialized with the given sequence of events + */ + public static VectorClock getClock(int... nodes) { + VectorClock clock = new VectorClock(); + return increment(clock, nodes); + } + + /** + * Record events for the given sequence of nodes + * + * @param clock The VectorClock to record the events on + * @param nodes The sequences of node events + */ + public static VectorClock increment(VectorClock clock, int... nodes) { + for(int n: nodes) + clock = clock.incremented((short) n, System.currentTimeMillis()); + return clock; + } + + /** + * Test two byte arrays for (deep) equality. I think this exists in java 6 + * but not java 5 + * + * @param a1 Array 1 + * @param a2 Array 2 + * @return True iff a1.length == a2.length and a1[i] == a2[i] for 0 <= i < + * a1.length + */ + public static boolean bytesEqual(byte[] a1, byte[] a2) { + if(a1 == a2) { + return true; + } else if(a1 == null || a2 == null) { + return false; + } else if(a1.length != a2.length) { + return false; + } else { + for(int i = 0; i < a1.length; i++) + if(a1[i] != a2[i]) + return false; + } + + return true; + } + + /** + * Create a string with some random letters + * + * @param length The length of the string to create + * @return The string + */ + public static String randomLetters(int length) { + return randomString(LETTERS, length); + } + + /** + * Create a string that is a random sample (with replacement) from the given + * string + * + * @param sampler The string to sample from + * @param length The length of the string to create + * @return The created string + */ + public static String randomString(String sampler, int length) { + StringBuilder builder = new StringBuilder(length); + for(int i = 0; i < length; i++) + builder.append(sampler.charAt(SEEDED_RANDOM.nextInt(sampler.length()))); + return builder.toString(); + } + + /** + * Generate an array of random bytes + * + * @param length + * @return + */ + public static byte[] 
randomBytes(int length) { + byte[] bytes = new byte[length]; + SEEDED_RANDOM.nextBytes(bytes); + return bytes; + } + + /** + * Return an array of length count containing random integers in the range + * (0, max) generated off the test rng. + * + * @param max The bound on the random number size + * @param count The number of integers to generate + * @return The array of integers + */ + public static int[] randomInts(int max, int count) { + int[] vals = new int[count]; + for(int i = 0; i < count; i++) + vals[i] = SEEDED_RANDOM.nextInt(max); + return vals; + } + + /** + * Weirdly java doesn't seem to have Arrays.shuffle(), this terrible hack + * does that. + * + * @return A shuffled copy of the input + */ + public static int[] shuffle(int[] input) { + List<Integer> vals = new ArrayList<Integer>(input.length); + for(int i = 0; i < input.length; i++) + vals.add(input[i]); + Collections.shuffle(vals, SEEDED_RANDOM); + int[] copy = new int[input.length]; + for(int i = 0; i < input.length; i++) + copy[i] = vals.get(i); + return copy; + } + + /** + * Compute the requested quantile of the given array + * + * @param values The array of values + * @param quantile The quantile requested (must be between 0.0 and 1.0 + * inclusive) + * @return The quantile + */ + public static long quantile(long[] values, double quantile) { + if(values == null) + throw new IllegalArgumentException("Values cannot be null."); + if(quantile < 0.0 || quantile > 1.0) + throw new IllegalArgumentException("Quantile must be between 0.0 and 1.0"); + + long[] copy = new long[values.length]; + System.arraycopy(values, 0, copy, 0, copy.length); + Arrays.sort(copy); + int index = (int) (copy.length * quantile); + return copy[index]; + } + + /** + * Compute the mean of the given values + * + * @param values The values + * @return The mean + */ + public static double mean(long[] values) { + double total = 0.0; + for(int i = 0; i < values.length; i++) + total += values[i]; + return total / values.length; + } + + 
/**
+     * Create a temporary directory in the directory given by java.io.tmpdir
+     *
+     * @return The directory created.
+     */
+    public static File createTempDir() {
+        return createTempDir(new File(System.getProperty("java.io.tmpdir")));
+    }
+
+    /**
+     * Create a temporary directory that is a child of the given directory
+     *
+     * @param parent The parent directory
+     * @return The temporary directory
+     */
+    public static File createTempDir(File parent) {
+        File temp = new File(parent,
+                Integer.toString(UNSEEDED_RANDOM.nextInt(1000000))); // FIX: was Math.abs(nextInt()) % 1000000, which is negative when nextInt() == Integer.MIN_VALUE; nextInt(bound) is always in [0, bound)
+        temp.delete();
+        temp.mkdir();
+        temp.deleteOnExit();
+        return temp;
+    }
+
+    /**
+     * Wrap the given string in quotation marks. This is slightly more readable
+     * then the java inline quotes that require escaping.
+     *
+     * @param s The string to wrap in quotes
+     * @return The string
+     */
+    public static String quote(String s) {
+        return "\"" + s + "\"";
+    }
+
+    /**
+     * Always uses UTF-8.
+     */
+    public static ByteArray toByteArray(String s) {
+        try {
+            return new ByteArray(s.getBytes("UTF-8"));
+        } catch(UnsupportedEncodingException e) {
+            /* Should not happen */
+            throw new IllegalStateException(e);
+        }
+    }
+/*
+    public static void assertWithBackoff(long timeout, Attempt attempt) throws Exception {
+        assertWithBackoff(30, timeout, attempt);
+    }
+
+    public static void assertWithBackoff(long initialDelay, long timeout, Attempt attempt)
+            throws Exception {
+        long delay = initialDelay;
+        long finishBy = System.currentTimeMillis() + timeout;
+
+        while(true) {
+            try {
+                attempt.checkCondition();
+                return;
+            } catch(AssertionError e) {
+                if(System.currentTimeMillis() < finishBy) {
+                    Thread.sleep(delay);
+                    delay *= 2;
+                } else {
+                    throw e;
+                }
+            }
+        }
+    }
+*/
+    /**
+     * Because java.beans.ReflectionUtils isn't public...
+ */ + + @SuppressWarnings("unchecked") + public static <T> T getPrivateValue(Object instance, String fieldName) throws Exception { + Field eventDataQueueField = instance.getClass().getDeclaredField(fieldName); + eventDataQueueField.setAccessible(true); + return (T) eventDataQueueField.get(instance); + } + + /** + * Constructs a calendar object representing the given time + */ + public static GregorianCalendar getCalendar(int year, + int month, + int day, + int hour, + int mins, + int secs) { + GregorianCalendar cal = new GregorianCalendar(); + cal.set(Calendar.YEAR, year); + cal.set(Calendar.MONTH, month); + cal.set(Calendar.DATE, day); + cal.set(Calendar.HOUR_OF_DAY, hour); + cal.set(Calendar.MINUTE, mins); + cal.set(Calendar.SECOND, secs); + cal.set(Calendar.MILLISECOND, 0); + return cal; + } +} diff --git a/src/test/java/org/sdnplatform/sync/internal/store/AbstractByteArrayStoreT.java b/src/test/java/org/sdnplatform/sync/internal/store/AbstractByteArrayStoreT.java new file mode 100644 index 0000000000000000000000000000000000000000..2c7ce1ee00fb500941f87633f986ee7c162875f5 --- /dev/null +++ b/src/test/java/org/sdnplatform/sync/internal/store/AbstractByteArrayStoreT.java @@ -0,0 +1,65 @@ +/* + * Copyright 2008-2009 LinkedIn, Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package org.sdnplatform.sync.internal.store; + +import static org.junit.Assert.*; + +import java.util.List; + +import org.junit.Test; +import org.sdnplatform.sync.Versioned; +import org.sdnplatform.sync.internal.TUtils; +import org.sdnplatform.sync.internal.store.IStore; +import org.sdnplatform.sync.internal.util.ByteArray; + +import com.google.common.collect.Lists; + +/** + * + */ +public abstract class AbstractByteArrayStoreT extends + AbstractStoreT<ByteArray, byte[]> { + + @Override + public List<ByteArray> getKeys(int numValues) { + List<ByteArray> keys = Lists.newArrayList(); + for(byte[] array: this.getByteValues(numValues, 8)) + keys.add(new ByteArray(array)); + return keys; + } + + @Override + public List<byte[]> getValues(int numValues) { + return this.getByteValues(numValues, 10); + } + + @Override + protected boolean valuesEqual(byte[] t1, byte[] t2) { + return TUtils.bytesEqual(t1, t2); + } + + @Test + public void testEmptyByteArray() throws Exception { + IStore<ByteArray, byte[]> store = getStore(); + Versioned<byte[]> bytes = new Versioned<byte[]>(new byte[0]); + store.put(new ByteArray(new byte[0]), bytes); + List<Versioned<byte[]>> found = store.get(new ByteArray(new byte[0])); + assertEquals("Incorrect number of results.", 1, found.size()); + bassertEquals("Get doesn't equal put.", bytes, found.get(0)); + } + +} diff --git a/src/test/java/org/sdnplatform/sync/internal/store/AbstractStorageEngineT.java b/src/test/java/org/sdnplatform/sync/internal/store/AbstractStorageEngineT.java new file mode 100644 index 0000000000000000000000000000000000000000..aaae98011f36478c4f4596580bde59fee4d41cc4 --- /dev/null +++ b/src/test/java/org/sdnplatform/sync/internal/store/AbstractStorageEngineT.java @@ -0,0 +1,191 @@ +/* + * Copyright 2008-2009 LinkedIn, Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.sdnplatform.sync.internal.store; + +import java.util.Iterator; +import java.util.List; +import java.util.Map.Entry; + +import org.junit.Test; +import org.sdnplatform.sync.IClosableIterator; +import org.sdnplatform.sync.Versioned; +import org.sdnplatform.sync.error.SyncException; +import org.sdnplatform.sync.internal.TUtils; +import org.sdnplatform.sync.internal.store.IStorageEngine; +import org.sdnplatform.sync.internal.store.IStore; +import org.sdnplatform.sync.internal.util.ByteArray; + +import static org.junit.Assert.*; + + +public abstract class AbstractStorageEngineT extends AbstractByteArrayStoreT { + + @Override + public IStore<ByteArray, byte[]> getStore() { + return getStorageEngine(); + } + + public abstract IStorageEngine<ByteArray, byte[]> getStorageEngine(); + + @Test public void testGetNoEntries() { + IClosableIterator<Entry<ByteArray, List<Versioned<byte[]>>>> it = null; + try { + IStorageEngine<ByteArray, byte[]> engine = getStorageEngine(); + it = engine.entries(); + while(it.hasNext()) + fail("There shouldn't be any entries in this store."); + } finally { + if(it != null) + it.close(); + } + } + + @Test + public void testGetNoKeys() { + IClosableIterator<ByteArray> it = null; + try { + IStorageEngine<ByteArray, byte[]> engine = getStorageEngine(); + it = engine.keys(); + while(it.hasNext()) + fail("There shouldn't be any entries in this store."); + } finally { + if(it != null) + it.close(); + } + } + + @Test + public void testPruneOnWrite() throws SyncException { + IStorageEngine<ByteArray, byte[]> engine =
getStorageEngine(); + Versioned<byte[]> v1 = new Versioned<byte[]>(new byte[] { 1 }, TUtils.getClock(1)); + Versioned<byte[]> v2 = new Versioned<byte[]>(new byte[] { 2 }, TUtils.getClock(2)); + Versioned<byte[]> v3 = new Versioned<byte[]>(new byte[] { 3 }, TUtils.getClock(1, 2)); + ByteArray key = new ByteArray((byte) 3); + engine.put(key, v1); + engine.put(key, v2); + assertEquals(2, engine.get(key).size()); + engine.put(key, v3); + assertEquals(1, engine.get(key).size()); + } + + @Test + public void testTruncate() throws Exception { + IStorageEngine<ByteArray, byte[]> engine = getStorageEngine(); + Versioned<byte[]> v1 = new Versioned<byte[]>(new byte[] { 1 }); + Versioned<byte[]> v2 = new Versioned<byte[]>(new byte[] { 2 }); + Versioned<byte[]> v3 = new Versioned<byte[]>(new byte[] { 3 }); + ByteArray key1 = new ByteArray((byte) 3); + ByteArray key2 = new ByteArray((byte) 4); + ByteArray key3 = new ByteArray((byte) 5); + + engine.put(key1, v1); + engine.put(key2, v2); + engine.put(key3, v3); + engine.truncate(); + + IClosableIterator<Entry<ByteArray, List<Versioned<byte[]>>>> it = null; + try { + it = engine.entries(); + while(it.hasNext()) { + fail("There shouldn't be any entries in this store."); + } + } finally { + if(it != null) { + it.close(); + } + } + } + + @Test + public void testCleanupTask() throws Exception { + IStorageEngine<ByteArray, byte[]> engine = getStorageEngine(); + engine.setTombstoneInterval(500); + + Versioned<byte[]> v1_1 = new Versioned<byte[]>(new byte[] { 1 }, TUtils.getClock(1)); + Versioned<byte[]> v1_2 = new Versioned<byte[]>(null, TUtils.getClock(1, 1)); + + // add, update, delete + Versioned<byte[]> v2_1 = new Versioned<byte[]>(new byte[] { 1 }, TUtils.getClock(1)); + Versioned<byte[]> v2_2 = new Versioned<byte[]>(new byte[] { 2 }, TUtils.getClock(1, 2)); + Versioned<byte[]> v2_3 = new Versioned<byte[]>(null, TUtils.getClock(1, 2, 1)); + + // delete then add again + Versioned<byte[]> v3_1 = new Versioned<byte[]>(new byte[] { 1 }, 
TUtils.getClock(1)); + Versioned<byte[]> v3_2 = new Versioned<byte[]>(null, TUtils.getClock(1, 2)); + Versioned<byte[]> v3_3 = new Versioned<byte[]>(new byte[] { 2 }, TUtils.getClock(1, 2, 1)); + + // delete concurrent to update + Versioned<byte[]> v4_1 = new Versioned<byte[]>(new byte[] { 1 }, TUtils.getClock(1)); + Versioned<byte[]> v4_2 = new Versioned<byte[]>(new byte[] { 2 }, TUtils.getClock(1, 2)); + Versioned<byte[]> v4_3 = new Versioned<byte[]>(null, TUtils.getClock(1, 1)); + + ByteArray key1 = new ByteArray((byte) 3); + ByteArray key2 = new ByteArray((byte) 4); + ByteArray key3 = new ByteArray((byte) 5); + ByteArray key4 = new ByteArray((byte) 6); + + engine.put(key1, v1_1); + assertEquals(1, engine.get(key1).size()); + + engine.put(key1, v1_2); + List<Versioned<byte[]>> r = engine.get(key1); + assertEquals(1, r.size()); + assertNull(r.get(0).getValue()); + + engine.put(key2, v2_1); + engine.put(key2, v2_2); + engine.put(key2, v2_3); + engine.put(key3, v3_1); + engine.put(key3, v3_2); + engine.put(key4, v4_1); + engine.put(key4, v4_2); + engine.put(key4, v4_3); + + engine.cleanupTask(); + r = engine.get(key1); + assertEquals(1, r.size()); + assertNull(r.get(0).getValue()); + + engine.put(key3, v3_3); + + Thread.sleep(501); + engine.cleanupTask(); + r = engine.get(key1); + assertEquals(0, r.size()); + r = engine.get(key2); + assertEquals(0, r.size()); + r = engine.get(key3); + assertEquals(1, r.size()); + r = engine.get(key4); + assertEquals(2, r.size()); + + } + + @SuppressWarnings("unused") + private boolean remove(List<byte[]> list, byte[] item) { + Iterator<byte[]> it = list.iterator(); + boolean removedSomething = false; + while(it.hasNext()) { + if(TUtils.bytesEqual(item, it.next())) { + it.remove(); + removedSomething = true; + } + } + return removedSomething; + } + +} diff --git a/src/test/java/org/sdnplatform/sync/internal/store/AbstractStoreT.java b/src/test/java/org/sdnplatform/sync/internal/store/AbstractStoreT.java new file mode 100644 index 
0000000000000000000000000000000000000000..9f8f22edec20172399954a919d3d3276dc51fcda --- /dev/null +++ b/src/test/java/org/sdnplatform/sync/internal/store/AbstractStoreT.java @@ -0,0 +1,286 @@ +/* + * Copyright 2008-2009 LinkedIn, Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.sdnplatform.sync.internal.store; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map.Entry; + +import org.junit.Test; +import org.sdnplatform.sync.IClosableIterator; +import org.sdnplatform.sync.IVersion; +import org.sdnplatform.sync.Versioned; +import org.sdnplatform.sync.error.ObsoleteVersionException; +import org.sdnplatform.sync.error.SyncException; +import org.sdnplatform.sync.internal.TUtils; +import org.sdnplatform.sync.internal.store.IStore; +import org.sdnplatform.sync.internal.util.ByteArray; +import org.sdnplatform.sync.internal.version.VectorClock; + + +import static org.junit.Assert.*; +import static org.sdnplatform.sync.internal.TUtils.*; + +import com.google.common.base.Objects; + +public abstract class AbstractStoreT<K, V> { + + public abstract IStore<K, V> getStore() throws Exception; + + public abstract List<V> getValues(int numValues); + + public abstract List<K> getKeys(int numKeys); + + public List<String> getStrings(int numKeys, int size) { + List<String> ts = new ArrayList<String>(numKeys); + for(int i = 0; i < numKeys; i++) + 
ts.add(randomLetters(size)); + return ts; + } + + public List<byte[]> getByteValues(int numValues, int size) { + List<byte[]> values = new ArrayList<byte[]>(); + for(int i = 0; i < numValues; i++) + values.add(TUtils.randomBytes(size)); + return values; + } + + public List<ByteArray> getByteArrayValues(int numValues, int size) { + List<ByteArray> values = new ArrayList<ByteArray>(); + for(int i = 0; i < numValues; i++) + values.add(new ByteArray(TUtils.randomBytes(size))); + return values; + } + + public K getKey() { + return getKeys(1).get(0); + } + + public V getValue() { + return getValues(1).get(0); + } + + public IVersion getExpectedVersionAfterPut(IVersion version) { + return version; + } + + protected boolean valuesEqual(V t1, V t2) { + if (t1 instanceof byte[]) return Arrays.equals((byte[])t1, (byte[])t2); + return Objects.equal(t1, t2); + } + + protected void bassertEquals(String message, Versioned<V> v1, Versioned<V> v2) { + String assertTrueMessage = v1 + " != " + v2 + "."; + if(message != null) + assertTrueMessage += message; + assertTrue(assertTrueMessage, valuesEqual(v1.getValue(), v2.getValue())); + assertEquals(message, v1.getVersion(), v2.getVersion()); + } + + protected void bassertEquals(Versioned<V> v1, Versioned<V> v2) { + bassertEquals(null, v1, v2); + } + + public void assertContains(Collection<Versioned<V>> collection, Versioned<V> value) { + boolean found = false; + for(Versioned<V> t: collection) + if(valuesEqual(t.getValue(), value.getValue())) + found = true; + assertTrue(collection + " does not contain " + value + ".", found); + } + + @Test + public void testNullKeys() throws Exception { + IStore<K, V> store = getStore(); + try { + store.put(null, new Versioned<V>(getValue())); + fail("Store should not put null keys!"); + } catch(IllegalArgumentException e) { + // this is good + } + try { + store.get(null); + fail("Store should not get null keys!"); + } catch(IllegalArgumentException e) { + // this is good + } + } + + @Test + public 
void testPutNullValue() throws Exception { + IStore<K,V> store = getStore(); + K key = getKey(); + store.put(key, new Versioned<V>(null)); + List<Versioned<V>> found = store.get(key); + assertEquals("Wrong number of values.", 1, found.size()); + assertEquals("Returned non-null value.", null, + found.get(0).getValue()); + } + + @Test + public void testGetAndDeleteNonExistentKey() throws Exception { + K key = getKey(); + IStore<K, V> store = getStore(); + List<Versioned<V>> found = store.get(key); + assertEquals("Found non-existent key: " + found, 0, found.size()); + } + + private void testObsoletePutFails(String message, + IStore<K, V> store, + K key, + Versioned<V> versioned) throws SyncException { + VectorClock clock = (VectorClock) versioned.getVersion(); + clock = clock.clone(); + try { + store.put(key, versioned); + fail(message); + } catch(ObsoleteVersionException e) { + // this is good, but check that we didn't fuck with the version + assertEquals(clock, versioned.getVersion()); + } + } + + @Test + public void testFetchedEqualsPut() throws Exception { + K key = getKey(); + IStore<K, V> store = getStore(); + VectorClock clock = getClock(1, 1, 2, 3, 3, 4); + V value = getValue(); + assertEquals("Store not empty at start!", 0, store.get(key).size()); + Versioned<V> versioned = new Versioned<V>(value, clock); + store.put(key, versioned); + List<Versioned<V>> found = store.get(key); + assertEquals("Should only be one version stored.", 1, found.size()); + assertTrue("Values not equal!", valuesEqual(versioned.getValue(), found.get(0).getValue())); + } + + @Test + public void testVersionedPut() throws Exception { + K key = getKey(); + IStore<K, V> store = getStore(); + VectorClock clock = getClock(1, 1); + VectorClock clockCopy = clock.clone(); + V value = getValue(); + assertEquals("Store not empty at start!", 0, store.get(key).size()); + Versioned<V> versioned = new Versioned<V>(value, clock); + + // put initial version + store.put(key, versioned); + 
assertContains(store.get(key), versioned); + + // test that putting obsolete versions fails + testObsoletePutFails("Put of identical version/value succeeded.", + store, + key, + new Versioned<V>(value, clockCopy)); + testObsoletePutFails("Put of identical version succeeded.", + store, + key, + new Versioned<V>(getValue(), clockCopy)); + testObsoletePutFails("Put of obsolete version succeeded.", + store, + key, + new Versioned<V>(getValue(), getClock(1))); + assertEquals("Should still only be one version in store.", store.get(key).size(), 1); + assertContains(store.get(key), versioned); + + // test that putting a concurrent version succeeds + if(allowConcurrentOperations()) { + store.put(key, new Versioned<V>(getValue(), getClock(1, 2))); + assertEquals(2, store.get(key).size()); + } else { + try { + store.put(key, new Versioned<V>(getValue(), getClock(1, 2))); + fail(); + } catch(ObsoleteVersionException e) { + // expected + } + } + + // test that putting an incremented version succeeds + Versioned<V> newest = new Versioned<V>(getValue(), getClock(1, 1, 2, 2)); + store.put(key, newest); + assertContains(store.get(key), newest); + } + + @Test + public void testGetVersions() throws Exception { + List<K> keys = getKeys(2); + K key = keys.get(0); + V value = getValue(); + IStore<K, V> store = getStore(); + store.put(key, Versioned.value(value)); + List<Versioned<V>> versioneds = store.get(key); + List<IVersion> versions = store.getVersions(key); + assertEquals(1, versioneds.size()); + assertTrue(versions.size() > 0); + for(int i = 0; i < versions.size(); i++) + assertEquals(versioneds.get(0).getVersion(), versions.get(i)); + + assertEquals(0, store.getVersions(keys.get(1)).size()); + } + + @Test + public void testCloseIsIdempotent() throws Exception { + IStore<K, V> store = getStore(); + store.close(); + // second close is okay, should not throw an exception + store.close(); + } + + @Test + public void testEntries() throws Exception { + IStore<K, V> store = getStore(); 
+ int putCount = 537; + List<K> keys = getKeys(putCount); + List<V> values = getValues(putCount); + assertEquals(putCount, values.size()); + for(int i = 0; i < putCount; i++) + store.put(keys.get(i), new Versioned<V>(values.get(i))); + + HashMap<K, V> map = new HashMap<K, V>(); + for (int i = 0; i < keys.size(); i++) { + map.put(keys.get(i), values.get(i)); + } + + IClosableIterator<Entry<K, List<Versioned<V>>>> iter = store.entries(); + int size = 0; + try { + while (iter.hasNext()) { + Entry<K, List<Versioned<V>>> e = iter.next(); + size += 1; + assertGetAllValues(map.get(e.getKey()), e.getValue()); + + } + } finally { + iter.close(); + } + assertEquals("Number of entries", keys.size(), size); + } + + protected void assertGetAllValues(V expectedValue, List<Versioned<V>> versioneds) { + assertEquals(1, versioneds.size()); + assertTrue(valuesEqual(expectedValue, versioneds.get(0).getValue())); + } + + protected boolean allowConcurrentOperations() { + return true; + } +} diff --git a/src/test/java/org/sdnplatform/sync/internal/store/InMemoryStorageEngineTest.java b/src/test/java/org/sdnplatform/sync/internal/store/InMemoryStorageEngineTest.java new file mode 100644 index 0000000000000000000000000000000000000000..84bf376136d0cf3141abf7216ce6eb53b8f77047 --- /dev/null +++ b/src/test/java/org/sdnplatform/sync/internal/store/InMemoryStorageEngineTest.java @@ -0,0 +1,51 @@ +/* + * Copyright 2008-2009 LinkedIn, Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License.
+ */ + +package org.sdnplatform.sync.internal.store; + +import java.util.ArrayList; +import java.util.List; + +import org.junit.Before; +import org.sdnplatform.sync.internal.TUtils; +import org.sdnplatform.sync.internal.store.IStorageEngine; +import org.sdnplatform.sync.internal.store.InMemoryStorageEngine; +import org.sdnplatform.sync.internal.util.ByteArray; + + +public class InMemoryStorageEngineTest extends AbstractStorageEngineT { + + private IStorageEngine<ByteArray, byte[]> store; + + @Override + public IStorageEngine<ByteArray, byte[]> getStorageEngine() { + return store; + } + + @Before + public void setUp() throws Exception { + this.store = new InMemoryStorageEngine<ByteArray, byte[]>("test"); + } + + @Override + public List<ByteArray> getKeys(int numKeys) { + List<ByteArray> keys = new ArrayList<ByteArray>(numKeys); + for(int i = 0; i < numKeys; i++) + keys.add(new ByteArray(TUtils.randomBytes(10))); + return keys; + } + +} diff --git a/src/test/java/org/sdnplatform/sync/internal/store/JacksonStoreTest.java b/src/test/java/org/sdnplatform/sync/internal/store/JacksonStoreTest.java new file mode 100644 index 0000000000000000000000000000000000000000..0846cf40c73bf5e1af7842a316fd3b9929b3b8d0 --- /dev/null +++ b/src/test/java/org/sdnplatform/sync/internal/store/JacksonStoreTest.java @@ -0,0 +1,46 @@ +package org.sdnplatform.sync.internal.store; + +import java.util.ArrayList; +import java.util.List; + +import org.sdnplatform.sync.internal.store.IStore; +import org.sdnplatform.sync.internal.store.InMemoryStorageEngine; +import org.sdnplatform.sync.internal.store.JacksonStore; +import org.sdnplatform.sync.internal.util.ByteArray; + + +public class JacksonStoreTest extends AbstractStoreT<Key, TBean> { + + @Override + public IStore<Key, TBean> getStore() throws Exception { + IStore<ByteArray,byte[]> ims = + new InMemoryStorageEngine<ByteArray,byte[]>("test"); + IStore<Key,TBean> js = + new JacksonStore<Key, TBean>(ims, Key.class, TBean.class); + return js; + } + + 
@Override + public List<TBean> getValues(int numValues) { + List<TBean> v = new ArrayList<TBean>(numValues); + for (int i = 0; i < numValues; i++) { + TBean tb = new TBean(); + tb.setI(i); + tb.setS("" + i); + v.add(tb); + } + return v; + } + + @Override + public List<Key> getKeys(int numKeys) { + List<Key> k = new ArrayList<Key>(numKeys); + for (int i = 0; i < numKeys; i++) { + Key tk = new Key("com.bigswitch.bigsync.internal.store", "" + i); + k.add(tk); + } + return k; + } + + +} diff --git a/src/test/java/org/sdnplatform/sync/internal/store/JavaDBStorageEngineTest.java b/src/test/java/org/sdnplatform/sync/internal/store/JavaDBStorageEngineTest.java new file mode 100644 index 0000000000000000000000000000000000000000..6dd5300c026ac01718feb651f467bf4764bd1224 --- /dev/null +++ b/src/test/java/org/sdnplatform/sync/internal/store/JavaDBStorageEngineTest.java @@ -0,0 +1,64 @@ +package org.sdnplatform.sync.internal.store; + +import java.util.ArrayList; +import java.util.List; + +import javax.sql.ConnectionPoolDataSource; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.core.type.TypeReference; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.sdnplatform.sync.internal.TUtils; +import org.sdnplatform.sync.internal.store.IStorageEngine; +import org.sdnplatform.sync.internal.store.JavaDBStorageEngine; +import org.sdnplatform.sync.internal.util.ByteArray; +import org.sdnplatform.sync.internal.version.VectorClock; + +import static org.junit.Assert.*; +import static org.sdnplatform.sync.internal.TUtils.getClock; + + +public class JavaDBStorageEngineTest extends AbstractStorageEngineT { + + private IStorageEngine<ByteArray, byte[]> store; + + @Before + public void setUp() throws Exception { + ConnectionPoolDataSource dataSource = + JavaDBStorageEngine.getDataSource(true); + this.store = new JavaDBStorageEngine("test", dataSource); + } + + @After + public void tearDown() throws Exception { + 
this.store.truncate(); + this.store.close(); + this.store = null; + } + + @Override + public IStorageEngine<ByteArray, byte[]> getStorageEngine() { + return store; + } + + @Override + public List<ByteArray> getKeys(int numKeys) { + List<ByteArray> keys = new ArrayList<ByteArray>(numKeys); + for(int i = 0; i < numKeys; i++) + keys.add(new ByteArray(TUtils.randomBytes(10))); + return keys; + } + + @Test + public void testSerialization() throws Exception { + ObjectMapper mapper = new ObjectMapper(); + VectorClock clock = getClock(1,2); + String cs = mapper.writeValueAsString(clock); + VectorClock reconstructed = + mapper.readValue(cs, new TypeReference<VectorClock>() {}); + assertEquals(clock, reconstructed); + } + +} diff --git a/src/test/java/org/sdnplatform/sync/internal/store/Key.java b/src/test/java/org/sdnplatform/sync/internal/store/Key.java new file mode 100644 index 0000000000000000000000000000000000000000..4f290fb377244d11e07f31ad74c168acb45bc853 --- /dev/null +++ b/src/test/java/org/sdnplatform/sync/internal/store/Key.java @@ -0,0 +1,67 @@ +package org.sdnplatform.sync.internal.store; + +import java.io.Serializable; + +/** + * Represent a key in the sync system. Keys consist of a namespace and a + * key name. 
Namespaces should be dot-separated such as "com.bigswitch.device" + * @author readams + * + */ +public class Key implements Serializable { + + private static final long serialVersionUID = -3998115385199627376L; + + private String namespace; + private String key; + + public Key() { + super(); + } + + public Key(String namespace, String key) { + super(); + this.namespace = namespace; + this.key = key; + } + + public String getNamespace() { + return namespace; + } + public void setNamespace(String namespace) { + this.namespace = namespace; + } + public String getKey() { + return key; + } + public void setKey(String key) { + this.key = key; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((key == null) ? 0 : key.hashCode()); + result = + prime * result + + ((namespace == null) ? 0 : namespace.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + Key other = (Key) obj; + if (key == null) { + if (other.key != null) return false; + } else if (!key.equals(other.key)) return false; + if (namespace == null) { + if (other.namespace != null) return false; + } else if (!namespace.equals(other.namespace)) return false; + return true; + } + +} diff --git a/src/test/java/org/sdnplatform/sync/internal/store/RemoteStoreTest.java b/src/test/java/org/sdnplatform/sync/internal/store/RemoteStoreTest.java new file mode 100644 index 0000000000000000000000000000000000000000..95a1aeafa924b9317ec37b8af1ff1dd939bc9277 --- /dev/null +++ b/src/test/java/org/sdnplatform/sync/internal/store/RemoteStoreTest.java @@ -0,0 +1,76 @@ +package org.sdnplatform.sync.internal.store; + +import java.util.ArrayList; +import java.util.List; + +import net.floodlightcontroller.core.module.FloodlightModuleContext; +import net.floodlightcontroller.threadpool.IThreadPoolService; +import 
net.floodlightcontroller.threadpool.ThreadPool; + +import com.fasterxml.jackson.databind.ObjectMapper; +import org.junit.After; +import org.junit.Before; +import org.sdnplatform.sync.ISyncService.Scope; +import org.sdnplatform.sync.internal.SyncManager; +import org.sdnplatform.sync.internal.remote.RemoteSyncManager; +import org.sdnplatform.sync.internal.store.IStore; +import org.sdnplatform.sync.internal.util.ByteArray; + + +public class RemoteStoreTest extends AbstractStoreT<ByteArray,byte[]> { + ThreadPool tp; + SyncManager syncManager; + protected final static ObjectMapper mapper = new ObjectMapper(); + + RemoteSyncManager remoteSyncManager; + + @Before + public void setUp() throws Exception { + FloodlightModuleContext fmc = new FloodlightModuleContext(); + tp = new ThreadPool(); + + fmc.addService(IThreadPoolService.class, tp); + syncManager = new SyncManager(); + syncManager.registerStore("local", Scope.LOCAL); + + remoteSyncManager = new RemoteSyncManager(); + + tp.init(fmc); + syncManager.init(fmc); + remoteSyncManager.init(fmc); + tp.startUp(fmc); + syncManager.startUp(fmc); + remoteSyncManager.startUp(fmc); + } + + @After + public void tearDown() { + tp.getScheduledExecutor().shutdownNow(); + tp = null; + syncManager.shutdown(); + remoteSyncManager.shutdown(); + } + + @Override + public IStore<ByteArray, byte[]> getStore() throws Exception { + return remoteSyncManager.getStore("local"); + } + + @Override + public List<byte[]> getValues(int numValues) { + ArrayList<byte[]> r = new ArrayList<byte[]>(); + for (int i = 0; i < numValues; i++) { + r.add(Integer.toString(i).getBytes()); + } + return r; + } + + @Override + public List<ByteArray> getKeys(int numKeys) { + ArrayList<ByteArray> r = new ArrayList<ByteArray>(); + for (int i = 0; i < numKeys; i++) { + r.add(new ByteArray(Integer.toString(i).getBytes())); + } + return r; + } +} diff --git a/src/test/java/org/sdnplatform/sync/internal/store/TBean.java 
b/src/test/java/org/sdnplatform/sync/internal/store/TBean.java new file mode 100644 index 0000000000000000000000000000000000000000..5a720438335d420a466f32c4392a3096e515c9dc --- /dev/null +++ b/src/test/java/org/sdnplatform/sync/internal/store/TBean.java @@ -0,0 +1,51 @@ +package org.sdnplatform.sync.internal.store; + +public class TBean { + String s; + int i; + + public TBean(String s, int i) { + super(); + this.s = s; + this.i = i; + } + public TBean() { + super(); + } + public String getS() { + return s; + } + public void setS(String s) { + this.s = s; + } + public int getI() { + return i; + } + public void setI(int i) { + this.i = i; + } + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + i; + result = prime * result + ((s == null) ? 0 : s.hashCode()); + return result; + } + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + TBean other = (TBean) obj; + if (i != other.i) return false; + if (s == null) { + if (other.s != null) return false; + } else if (!s.equals(other.s)) return false; + return true; + } + @Override + public String toString() { + return "TestBean [s=" + s + ", i=" + i + "]"; + } +} diff --git a/src/test/java/org/sdnplatform/sync/internal/version/ClockEntryTest.java b/src/test/java/org/sdnplatform/sync/internal/version/ClockEntryTest.java new file mode 100644 index 0000000000000000000000000000000000000000..46ae2cddd49e6a1ed2a1096a7d2445c1050d8c46 --- /dev/null +++ b/src/test/java/org/sdnplatform/sync/internal/version/ClockEntryTest.java @@ -0,0 +1,57 @@ +/* + * Copyright 2008-2009 LinkedIn, Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.sdnplatform.sync.internal.version; + +import static org.junit.Assert.*; + +import org.junit.Test; +import org.sdnplatform.sync.internal.version.ClockEntry; + + +/** + * + */ +public class ClockEntryTest { + + @Test + public void testEquality() { + ClockEntry v1 = new ClockEntry((short) 0, 1); + ClockEntry v2 = new ClockEntry((short) 0, 1); + assertTrue(v1.equals(v1)); + assertTrue(!v1.equals(null)); + assertEquals(v1, v2); + + v1 = new ClockEntry((short) 0, 1); + v2 = new ClockEntry((short) 0, 2); + assertTrue(!v1.equals(v2)); + + v1 = new ClockEntry(Short.MAX_VALUE, 256); + v2 = new ClockEntry(Short.MAX_VALUE, 256); + assertEquals(v1, v2); + } + + @Test + public void testIncrement() { + ClockEntry v = new ClockEntry((short) 0, 1); + assertEquals(v.getNodeId(), 0); + assertEquals(v.getVersion(), 1); + ClockEntry v2 = v.incremented(); + assertEquals(v.getVersion(), 1); + assertEquals(v2.getVersion(), 2); + } + +} diff --git a/src/test/java/org/sdnplatform/sync/internal/version/VectorClockInconsistencyResolverTest.java b/src/test/java/org/sdnplatform/sync/internal/version/VectorClockInconsistencyResolverTest.java new file mode 100644 index 0000000000000000000000000000000000000000..1ad45ec9cd15400658c48085d8003a82415ce0fd --- /dev/null +++ b/src/test/java/org/sdnplatform/sync/internal/version/VectorClockInconsistencyResolverTest.java @@ -0,0 +1,112 @@ +/* + * Copyright 2008-2009 LinkedIn, Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.sdnplatform.sync.internal.version; + +import static org.junit.Assert.*; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import org.junit.Before; +import org.junit.Test; +import org.sdnplatform.sync.IInconsistencyResolver; +import org.sdnplatform.sync.Versioned; +import org.sdnplatform.sync.internal.TUtils; +import org.sdnplatform.sync.internal.version.VectorClockInconsistencyResolver; + + +public class VectorClockInconsistencyResolverTest { + + private IInconsistencyResolver<Versioned<String>> resolver; + private Versioned<String> later; + private Versioned<String> prior; + private Versioned<String> current; + private Versioned<String> concurrent; + private Versioned<String> concurrent2; + + @Before + public void setUp() { + resolver = new VectorClockInconsistencyResolver<String>(); + current = getVersioned(1, 1, 2, 3); + prior = getVersioned(1, 2, 3); + concurrent = getVersioned(1, 2, 3, 3); + concurrent2 = getVersioned(1, 2, 3, 4); + later = getVersioned(1, 1, 2, 2, 3); + } + + private Versioned<String> getVersioned(int... 
nodes) { + return new Versioned<String>("my-value", TUtils.getClock(nodes)); + } + + @Test + public void testEmptyList() { + assertEquals(0, resolver.resolveConflicts(new ArrayList<Versioned<String>>()).size()); + } + + @SuppressWarnings("unchecked") + @Test + public void testDuplicatesResolve() { + assertEquals(2, resolver.resolveConflicts(Arrays.asList(concurrent, + current, + current, + concurrent, + current)).size()); + } + + @SuppressWarnings("unchecked") + @Test + public void testResolveNormal() { + assertEquals(later, resolver.resolveConflicts(Arrays.asList(current, prior, later)).get(0)); + assertEquals(later, resolver.resolveConflicts(Arrays.asList(prior, current, later)).get(0)); + assertEquals(later, resolver.resolveConflicts(Arrays.asList(later, current, prior)).get(0)); + } + + @SuppressWarnings("unchecked") + @Test + public void testResolveConcurrent() { + List<Versioned<String>> resolved = resolver.resolveConflicts(Arrays.asList(current, + concurrent, + prior)); + assertEquals(2, resolved.size()); + assertTrue("Version not found", resolved.contains(current)); + assertTrue("Version not found", resolved.contains(concurrent)); + } + + @SuppressWarnings("unchecked") + @Test + public void testResolveLargerConcurrent() { + assertEquals(3, resolver.resolveConflicts(Arrays.asList(concurrent, + concurrent2, + current, + concurrent2, + current, + concurrent, + current)).size()); + } + + @SuppressWarnings("unchecked") + @Test + public void testResolveConcurrentPairWithLater() { + Versioned<String> later2 = getVersioned(1, 2, 3, 3, 4, 4); + List<Versioned<String>> resolved = resolver.resolveConflicts(Arrays.asList(concurrent, + concurrent2, + later2)); + assertEquals(1, resolved.size()); + assertEquals(later2, resolved.get(0)); + } +} diff --git a/src/test/java/org/sdnplatform/sync/internal/version/VectorClockTest.java b/src/test/java/org/sdnplatform/sync/internal/version/VectorClockTest.java new file mode 100644 index 
0000000000000000000000000000000000000000..7fc22f8569317a114d66800982525461d224723b --- /dev/null +++ b/src/test/java/org/sdnplatform/sync/internal/version/VectorClockTest.java @@ -0,0 +1,128 @@ +/* + * Copyright 2008-2009 LinkedIn, Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.sdnplatform.sync.internal.version; + +import static org.junit.Assert.*; +import static org.sdnplatform.sync.internal.TUtils.getClock; + +import org.junit.Test; +import org.sdnplatform.sync.IVersion.Occurred; +import org.sdnplatform.sync.internal.TUtils; +import org.sdnplatform.sync.internal.version.ClockEntry; +import org.sdnplatform.sync.internal.version.VectorClock; + +import com.google.common.collect.Lists; + +/** + * VectorClock tests + * + * + */ + +public class VectorClockTest { + @Test + public void testEqualsAndHashcode() { + VectorClock one = getClock(1, 2); + VectorClock other = getClock(1, 2); + assertEquals(one, other); + assertEquals(one.hashCode(), other.hashCode()); + } + + @Test + public void testComparisons() { + assertTrue("The empty clock should not happen before itself.", + getClock().compare(getClock()) != Occurred.CONCURRENTLY); + assertTrue("A clock should not happen before an identical clock.", + getClock(1, 1, 2).compare(getClock(1, 1, 2)) != Occurred.CONCURRENTLY); + assertTrue(" A clock should happen before an identical clock with a single additional event.", + getClock(1, 1, 2).compare(getClock(1, 1, 2, 3)) == Occurred.BEFORE); + 
assertTrue("Clocks with different events should be concurrent.", + getClock(1).compare(getClock(2)) == Occurred.CONCURRENTLY); + assertTrue("Clocks with different events should be concurrent.", + getClock(1, 1, 2).compare(getClock(1, 1, 3)) == Occurred.CONCURRENTLY); + assertTrue(getClock(2, 2).compare(getClock(1, 2, 2, 3)) == Occurred.BEFORE + && getClock(1, 2, 2, 3).compare(getClock(2, 2)) == Occurred.AFTER); + } + + @Test + public void testMerge() { + // merging two clocks should create a clock contain the element-wise + // maximums + assertEquals("Two empty clocks merge to an empty clock.", + getClock().merge(getClock()), + getClock()); + assertEquals("Merge of a clock with itself does nothing", + getClock(1).merge(getClock(1)), + getClock(1)); + assertEquals(getClock(1).merge(getClock(2)), getClock(1, 2)); + assertEquals(getClock(1).merge(getClock(1, 2)), getClock(1, 2)); + assertEquals(getClock(1, 2).merge(getClock(1)), getClock(1, 2)); + assertEquals("Two-way merge fails.", + getClock(1, 1, 1, 2, 3, 5).merge(getClock(1, 2, 2, 4)), + getClock(1, 1, 1, 2, 2, 3, 4, 5)); + assertEquals(getClock(2, 3, 5).merge(getClock(1, 2, 2, 4, 7)), + getClock(1, 2, 2, 3, 4, 5, 7)); + } + + /** + * See gihub issue #25: Incorrect coersion of version to short before + * passing to ClockEntry constructor + */ + @Test + public void testMergeWithLargeVersion() { + VectorClock clock1 = getClock(1); + VectorClock clock2 = new VectorClock(Lists.newArrayList(new ClockEntry((short) 1, + Short.MAX_VALUE + 1)), + System.currentTimeMillis()); + VectorClock mergedClock = clock1.merge(clock2); + assertEquals(mergedClock.getMaxVersion(), Short.MAX_VALUE + 1); + } + + @Test + public void testIncrementOrderDoesntMatter() { + // Clocks should have the property that no matter what order the + // increment operations are done in the resulting clocks are equal + int numTests = 10; + int numNodes = 10; + int numValues = 100; + VectorClock[] clocks = new VectorClock[numNodes]; + for(int t = 0; t < 
numTests; t++) { + int[] test = TUtils.randomInts(numNodes, numValues); + for(int n = 0; n < numNodes; n++) + clocks[n] = getClock(TUtils.shuffle(test)); + // test all are equal + for(int n = 0; n < numNodes - 1; n++) + assertEquals("Clock " + n + " and " + (n + 1) + " are not equal.", + clocks[n].getEntries(), + clocks[n + 1].getEntries()); + } + } +/* + public void testIncrementAndSerialize() { + int node = 1; + VectorClock vc = getClock(node); + assertEquals(node, vc.getMaxVersion()); + int increments = 3000; + for(int i = 0; i < increments; i++) { + vc.incrementVersion(node, 45); + // serialize + vc = new VectorClock(vc.toBytes()); + } + assertEquals(increments + 1, vc.getMaxVersion()); + } */ + +} diff --git a/src/test/java/org/sdnplatform/sync/test/MockSyncService.java b/src/test/java/org/sdnplatform/sync/test/MockSyncService.java new file mode 100644 index 0000000000000000000000000000000000000000..eacd9f64ab89a525396da4acb4a61beb17648cec --- /dev/null +++ b/src/test/java/org/sdnplatform/sync/test/MockSyncService.java @@ -0,0 +1,122 @@ +package org.sdnplatform.sync.test; + +import java.util.Collection; +import java.util.HashMap; + +import org.sdnplatform.sync.ISyncService; +import org.sdnplatform.sync.error.SyncException; +import org.sdnplatform.sync.error.UnknownStoreException; +import org.sdnplatform.sync.internal.AbstractSyncManager; +import org.sdnplatform.sync.internal.store.IStorageEngine; +import org.sdnplatform.sync.internal.store.IStore; +import org.sdnplatform.sync.internal.store.InMemoryStorageEngine; +import org.sdnplatform.sync.internal.store.ListenerStorageEngine; +import org.sdnplatform.sync.internal.store.MappingStoreListener; +import org.sdnplatform.sync.internal.util.ByteArray; + +import net.floodlightcontroller.core.module.FloodlightModuleContext; +import net.floodlightcontroller.core.module.FloodlightModuleException; +import net.floodlightcontroller.core.module.IFloodlightService; + + +/** + * Mock sync service useful for testing + * 
@author readams + */ +public class MockSyncService extends AbstractSyncManager { + /** + * The storage engines that contain the locally-stored data + */ + private HashMap<String,ListenerStorageEngine> localStores = + new HashMap<String, ListenerStorageEngine>(); + + + // ************ + // ISyncService + // ************ + + @Override + public void registerStore(String storeName, Scope scope) + throws SyncException { + ListenerStorageEngine store = localStores.get(storeName); + if (store != null) return; + IStorageEngine<ByteArray, byte[]> memstore = + new InMemoryStorageEngine<ByteArray, byte[]>(storeName); + store = new ListenerStorageEngine(memstore); + localStores.put(storeName, store); + } + + /** + * Persistent stores are not actually persistent in the mock sync service + * @see ISyncService#registerPersistentStore(String, + * org.sdnplatform.sync.ISyncService.Scope) + */ + @Override + public void registerPersistentStore(String storeName, Scope scope) + throws SyncException { + registerStore(storeName, scope); + } + + // ***************** + // IFloodlightModule + // ***************** + + @Override + public void init(FloodlightModuleContext context) + throws FloodlightModuleException { + + } + + @Override + public void startUp(FloodlightModuleContext context) + throws FloodlightModuleException { + + } + + @Override + public Collection<Class<? 
extends IFloodlightService>> + getModuleDependencies() { + return null; + } + + // ******************* + // AbstractSyncManager + // ******************* + + @Override + public IStore<ByteArray, byte[]> + getStore(String storeName) throws UnknownStoreException { + return localStores.get(storeName); + } + + @Override + public short getLocalNodeId() { + return Short.MAX_VALUE; + } + + @Override + public void addListener(String storeName, MappingStoreListener listener) + throws UnknownStoreException { + ListenerStorageEngine store = localStores.get(storeName); + if (store == null) + throw new UnknownStoreException("Store " + storeName + + " has not been registered"); + store.addListener(listener); + } + + @Override + public void shutdown() { + + } + + // *************** + // MockSyncService + // *************** + + /** + * Reset to pristine condition + */ + public void reset() { + localStores = new HashMap<String, ListenerStorageEngine>(); + } +}