diff --git a/Analysis/Caida/Caida23MAggVerification.xlsx b/Analysis/Caida/Caida23MAggVerification.xlsx new file mode 100644 index 0000000..73a21b6 Binary files /dev/null and b/Analysis/Caida/Caida23MAggVerification.xlsx differ diff --git a/Analysis/Caida/Experiments/Caida1MBaselineTopKComparisonWithFormula.xlsx b/Analysis/Caida/Experiments/Caida1MBaselineTopKComparisonWithFormula.xlsx new file mode 100644 index 0000000..4c4eaf9 Binary files /dev/null and b/Analysis/Caida/Experiments/Caida1MBaselineTopKComparisonWithFormula.xlsx differ diff --git a/Analysis/Caida/Experiments/Comparison Between Aggregate And Non-aggregateMin (1M Caida).xlsx b/Analysis/Caida/Experiments/Comparison Between Aggregate And Non-aggregateMin (1M Caida).xlsx new file mode 100644 index 0000000..5964ffa Binary files /dev/null and b/Analysis/Caida/Experiments/Comparison Between Aggregate And Non-aggregateMin (1M Caida).xlsx differ diff --git a/code/AggregateModelVerifier.java b/code/AggregateModelVerifier.java index e56365a..2ba63e4 100644 --- a/code/AggregateModelVerifier.java +++ b/code/AggregateModelVerifier.java @@ -76,35 +76,45 @@ public static void runExperiment(SummaryStructureType type, ArrayList observedHH = new HashMap(); + // index at which k for comparing number of heavy hitters is at + int comp_index = 0; + for (int t = 0; t < numberOfTrials; t++){ Collections.shuffle(inputStream); - if (t < 2){ + /*if (t < 2){ for (int i = 0; i < 15; i++) System.err.println(inputStream.get(i).flowid + "," + inputStream.get(i).count); System.err.println("new trial"); - } + }*/ lostFlowHashTable = new DLeftHashTable(tableSize, type, inputStream.size(), D); + // given input, so ideal order of heavy hitters + FlowWithCount[] inputStreamArray = new FlowWithCount[inputStream.size()]; + inputStreamArray = inputStream.toArray(inputStreamArray); + Arrays.sort(inputStreamArray); + + // first k in inputStream are expected hh - fix which heavy hitters you look at for cdf of competitors + expectedHH = new HashSet(); + for (int i = 0; i < k[comp_index]; i++){ + expectedHH.add(inputStreamArray[i].flowid); + } + int count = 0; for (FlowWithCount f : inputStream){ - lostFlowHashTable.processAggData(f.flowid, count++, f.count); + lostFlowHashTable.processAggData(f.flowid, count++, f.count, nonHHCompetitors, expectedHH); } // observed flows in sorted order so that we can pick the hh as the top k FlowWithCount[] outputFlowBuckets = Arrays.copyOf(lostFlowHashTable.getBuckets(), lostFlowHashTable.getBuckets().length); Arrays.sort(outputFlowBuckets); - - // given input, so ideal order of heavy hitters - FlowWithCount[] inputStreamArray = new FlowWithCount[inputStream.size()]; - inputStreamArray = inputStream.toArray(inputStreamArray); - Arrays.sort(inputStreamArray); cumDroppedPacketInfoCount += lostFlowHashTable.getDroppedPacketInfoCount(); @@ -165,6 +175,11 @@ public static void runExperiment(SummaryStructureType type, ArrayList expectedHH){ // hardcoded values for the hash functions given that the number of flows is 100 final int P = 5171; final int hashA[] = { 421, 199, 79, 83, 89, 97, 101, 103, 107, 109, 113, @@ -546,6 +546,8 @@ public void processAggData(long key, int keynum, long value){ int k = 0; int firstLocation = 0; // how to track this in hardware + int currentCompetitors = 0; // variable to track the current number of heavy hitter competitors the current packet has seen + if (key == 0) { System.out.print("invalid Key"); @@ -572,6 +574,9 @@ public void processAggData(long key, int keynum, long value){ break; } + if 
(expectedHH.contains(buckets[index].flowid)) + currentCompetitors++; + // track min - first time explicitly set the value if (buckets[index].count < minValue || k == 0){ minValue = buckets[index].count; @@ -580,6 +585,8 @@ public void processAggData(long key, int keynum, long value){ } } + nonHHCompetitors[currentCompetitors] += 1; + boolean isAggregateData = true; // none of the D locations were free if (k == D) { @@ -633,7 +640,7 @@ public void basicHeuristic(int minIndex, long key, boolean isAggregateData, long else { droppedPacketInfoCount = droppedPacketInfoCount + (int) buckets[minIndex].count; buckets[minIndex].flowid = key; - buckets[minIndex].count = 1; + buckets[minIndex].count = 1; // replace with min+1 } } diff --git a/code/LossyFlowIdentifier.java b/code/LossyFlowIdentifier.java index 34d94ee..e81a469 100644 --- a/code/LossyFlowIdentifier.java +++ b/code/LossyFlowIdentifier.java @@ -13,6 +13,8 @@ public class LossyFlowIdentifier{ private static HashSet expectedHH; private static HashMap flowSizes; private static ArrayList flowsToBeLost; + + private static double accuracy = 0.99; /*public static PriorityQueue HeapOfLossyFlows; private class BucketMatrixIndex{ int hashfunctionIndex; @@ -67,6 +69,8 @@ public static void runLossIdentificationTrials(SummaryStructureType type, Sketch else lostFlowHashTable = new DLeftHashTable(tableSize, type, lostPacketStream.size(), D); + Collections.shuffle(lostPacketStream); // randomizing the order + int count = 0; for (Packet p : lostPacketStream){ //lostPacketSketch.updateCountInSketch(p); @@ -427,7 +431,7 @@ public static void runSizeDifferenceMeasurementOnSketch(SummaryStructureType typ } } - public static void runTrialsOnSketch(SummaryStructureType type, ArrayList lostPacketStream, double[] threshold, int totalMemory, int D, long thr_totalPackets){ + public static void runTrialsPerThreshold(SummaryStructureType type, ArrayList lostPacketStream, double[] threshold, int totalMemory, int D, long thr_totalPackets){ int numberOfTrials = 1000; int observedSize[] = new int[threshold.length]; int expectedSize[] = new int[threshold.length]; @@ -461,10 +465,14 @@ public static void runTrialsOnSketch(SummaryStructureType type, ArrayList observedHH; HashMap observedHHfromDump; for (int t = 0; t < numberOfTrials; t++){ + + Collections.shuffle(lostPacketStream); + for (int thr_index = 0; thr_index < threshold.length; thr_index++){ // find the expected HH in the idealistic 100% accuracy case expectedHH = new HashSet(); observedHHfromDump = new HashMap(); + observedHH = new HashMap(); for (String f : flowsToBeLost){ if (flowSizes.get(FlowDataParser.convertAddressToLong(f)) > (int) (threshold[thr_index] * lostPacketStream.size())){ expectedHH.add(FlowDataParser.convertAddressToLong(f)); @@ -475,10 +483,21 @@ public static void runTrialsOnSketch(SummaryStructureType type, ArrayList threshold[thr_index]*lostPacketStream.size()){ - observedHHfromDump.put(f.flowid, f.count); + observedHHfromDump.put(f.flowid, f.count); } } } observedSizeFromDump[thr_index] = observedHHfromDump.size(); - - //get the heavy hitters and clean them up - observedHH = cmsketch.getHeavyHitters();/* new HashMap();*/ - //System.out.print("Before cleaning:" + cmsketch.getHeavyHitters().size()); - ArrayList flowsToRemove = new ArrayList(); - for (long flowid : cmsketch.getHeavyHitters().keySet()) { - if (type == SummaryStructureType.CountMinCacheNoKeys && cmsketch.getHeavyHitters().get(flowid) > threshold[thr_index]*lostPacketStream.size()) - observedHH.put(flowid, 
cmsketch.getHeavyHitters().get(flowid)); - if (type == SummaryStructureType.CountMinCacheWithKeys && observedHH.get(flowid) <= threshold[thr_index]*lostPacketStream.size()){ - // check if the cache has a mre updated value that would account for this particular flowid being a hh - // you would technically hash on this flowid and look up that index -- eliminated that part - if (!observedHHfromDump.containsKey(flowid)) - flowsToRemove.add(flowid); - else if (observedHHfromDump.get(flowid) <= threshold[thr_index]*lostPacketStream.size()) - flowsToRemove.add(flowid); + // get the heavy hitters from the sample and hold flow memory + if (type == SummaryStructureType.SampleAndHold){ + cacheSize[thr_index] = flowMemoryFromSampling.getBuckets().size(); + for (Long f : flowMemoryFromSampling.getBuckets().keySet()){ + if (flowMemoryFromSampling.getBuckets().get(f) > threshold[thr_index]*lostPacketStream.size()) + observedHH.put(f, flowMemoryFromSampling.getBuckets().get(f)); + } + } + else { + //get the heavy hitters and clean them up + observedHH = cmsketch.getHeavyHitters(); + ArrayList flowsToRemove = new ArrayList(); + for (long flowid : cmsketch.getHeavyHitters().keySet()) { + if (type == SummaryStructureType.CountMinCacheNoKeys && cmsketch.getHeavyHitters().get(flowid) > threshold[thr_index]*lostPacketStream.size()) + observedHH.put(flowid, cmsketch.getHeavyHitters().get(flowid)); + if (type == SummaryStructureType.CountMinCacheWithKeys && observedHH.get(flowid) <= threshold[thr_index]*lostPacketStream.size()){ + // check if the cache has a mre updated value that would account for this particular flowid being a hh + // you would technically hash on this flowid and look up that index -- eliminated that part + if (!observedHHfromDump.containsKey(flowid)) + flowsToRemove.add(flowid); + else if (observedHHfromDump.get(flowid) <= threshold[thr_index]*lostPacketStream.size()) + flowsToRemove.add(flowid); + } } + for (long flowid : flowsToRemove) + observedHH.remove(flowid); + //System.out.println("after cleaning: " + observedHH.size()); } - for (long flowid : flowsToRemove) - observedHH.remove(flowid); - //System.out.println("after cleaning: " + observedHH.size()); observedSize[thr_index] = observedHH.size(); - occupancy[thr_index] += (float) cmsketch.getSketch().getOccupancy(); - controllerReportCount[thr_index] += (float) cmsketch.getControllerReports(); + if (type != SummaryStructureType.SampleAndHold) { + occupancy[thr_index] += (float) cmsketch.getSketch().getOccupancy(); + controllerReportCount[thr_index] += (float) cmsketch.getControllerReports(); + } int bigLoserPacketsLost = 0; int flag = 0; @@ -717,14 +746,23 @@ else if (args[3].contains("coalesce")) //} } } - else if (args[2].equals("countMin")){ + else if (args[2].equals("PerThreshold")){ System.out.print("totalMemory," + "cacheSize," + "threshold," + "D," + "FalsePositive %," + "False Negative %," + "expected number, reported number, hhReportedFraction, deviation, table occupancy, thr_totalPackets, Controlleer Report Count"); if (args[3].contains("Keys")) System.out.print("FalsePositiveinDump %," + "False Negativ in Dump %," + "expected number, reported number in dump, hhReportedFraction in dump, deviation in dump,"); System.out.println(); + int tempCount = 0; for (int tableSize_index = 0; tableSize_index < tableSize.length; tableSize_index++) { //for (long thr_totalPackets = 100000; thr_totalPackets <= 500000; thr_totalPackets += 100000) + if (tempCount != 0) + continue; + + if (args[3].contains("SampleAndHold")) { + 
runTrialsPerThreshold(SummaryStructureType.SampleAndHold, lostPacketStream, threshold, tableSize[tableSize_index], 0, 0); + tempCount++; + continue; + } for (long thr_totalPackets = 0; thr_totalPackets <= 0; thr_totalPackets += 100000){ for (int D = 3; D <= 3; D++){ //System.out.println(expectedHH.size() + " " + totalPacketsLost); @@ -732,11 +770,11 @@ else if (args[2].equals("countMin")){ // run the loss identification trials for the appropriate heuristic if (args[3].contains("NoKeyNoRepBit")) - runTrialsOnSketch(SummaryStructureType.CountMinCacheNoKeys, lostPacketStream, threshold, tableSize[tableSize_index]*9, D, thr_totalPackets); + runTrialsPerThreshold(SummaryStructureType.CountMinCacheNoKeys, lostPacketStream, threshold, tableSize[tableSize_index]*9, D, thr_totalPackets); else if (args[3].contains("NoKeyRepBit")) - runTrialsOnSketch(SummaryStructureType.CountMinCacheNoKeysReportedBit, lostPacketStream, threshold, tableSize[tableSize_index]*9, D, thr_totalPackets); + runTrialsPerThreshold(SummaryStructureType.CountMinCacheNoKeysReportedBit, lostPacketStream, threshold, tableSize[tableSize_index]*9, D, thr_totalPackets); else if (args[3].contains("Keys")) - runTrialsOnSketch(SummaryStructureType.CountMinCacheWithKeys, lostPacketStream, threshold, tableSize[tableSize_index]*9, D, thr_totalPackets); + runTrialsPerThreshold(SummaryStructureType.CountMinCacheWithKeys, lostPacketStream, threshold, tableSize[tableSize_index]*9, D, thr_totalPackets); } } } diff --git a/code/Packet.java b/code/Packet.java index a2fba2e..a4a8ba5 100644 --- a/code/Packet.java +++ b/code/Packet.java @@ -4,26 +4,26 @@ the key fields from the packet*/ public class Packet{ private long srcip; - private long dstip; - private String srcPort; - private String dstPort; - private String protocol; + //private long dstip; + //private String srcPort; + //private String dstPort; + //private String protocol; public Packet(long srcip, long dstip, String srcPort, String dstPort, String protocol){ this.srcip = srcip; - this.dstip = dstip; - this.srcPort = new String(srcPort); - this.dstPort = new String(dstPort); - this.protocol = new String(protocol); + //this.dstip = dstip; + //this.srcPort = new String(srcPort); + //this.dstPort = new String(dstPort); + //this.protocol = new String(protocol); } public long getSrcIp(){ return srcip; } - public long getDstIp(){ + /*public long getDstIp(){ return dstip; - } + }*/ public String fivetuple(){ return Long.toString(srcip); /* + long.toString(dstip) + srcPort + dstPort + protocol;*/ diff --git a/code/SampleAndHold.java b/code/SampleAndHold.java new file mode 100644 index 0000000..30d73fc --- /dev/null +++ b/code/SampleAndHold.java @@ -0,0 +1,79 @@ +import java.util.*; + +/*+---------------------------------------------------------------------- + || + || Class SampleAndHold + || + || Author: Vibhaa Sivaraman + || + || Purpose: To simulate the sample and hold method proposed by + || Estan and Varghese where packets are sampled with a certain + || probability, but once a packet is sampled, all subsequent + || packets of that flow are counted + || + || Inherits From: None + || + || Interfaces: None + || + |+----------------------------------------------------------------------- + || + || Class Methods: processData that does the sampleAndHold procedure + || on a given packet that belongs to a given new flow + || + ++-----------------------------------------------------------------------*/ + +public class SampleAndHold{ + private int tableSize; + private int droppedPacketInfoCount; + 
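For reference, a minimal self-contained sketch of the sample-and-hold update rule described in the class header above (a packet of an untracked flow is admitted to the flow memory with probability p; once a flow is held, every later packet of that flow is counted). The class name SampleAndHoldSketch and the use of java.util.Random are illustrative and not part of this patch:

import java.util.HashMap;
import java.util.Map;
import java.util.Random;

// Illustrative, self-contained sketch of the sample-and-hold rule (Estan and
// Varghese): a packet of an untracked flow is admitted to the flow memory with
// probability p; once a flow is held, every subsequent packet is counted.
public class SampleAndHoldSketch {
    private final Map<Long, Long> flowMemory = new HashMap<>();
    private final double samplingProb;
    private final Random rng = new Random();

    public SampleAndHoldSketch(double samplingProb) {
        this.samplingProb = samplingProb;
    }

    public void processPacket(long flowId) {
        Long count = flowMemory.get(flowId);
        if (count != null) {
            flowMemory.put(flowId, count + 1);       // flow already held: always count
        } else if (rng.nextDouble() <= samplingProb) {
            flowMemory.put(flowId, 1L);              // flow sampled for the first time
        }
    }

    public Map<Long, Long> getFlowMemory() {
        return flowMemory;
    }

    public static void main(String[] args) {
        SampleAndHoldSketch sketch = new SampleAndHoldSketch(0.5);
        long[] sampleStream = {42L, 42L, 7L, 42L, 7L, 99L};
        for (long flowId : sampleStream) sketch.processPacket(flowId);
        System.out.println(sketch.getFlowMemory());  // held flows and their counts
    }
}

Unlike the patch's SampleAndHold class, this sketch omits the hardcoded hash-function constants, the table-size field, and the SummaryStructureType tag, none of which are needed for the core sampling rule.
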
private int cumDroppedPacketInfoCount; + private int totalNumberOfPackets; + private double samplingProb; + + private HashMap flowMemory; + private HashMap heavyhitterList; + + private SummaryStructureType type; + + public SampleAndHold(int totalMemory, SummaryStructureType type, int numberOfFlows, double samplingProb){ + this.tableSize = totalMemory; + // keys take twice as much space as counters and 2 sets of counters + + droppedPacketInfoCount = 0; + cumDroppedPacketInfoCount = 0; + totalNumberOfPackets = 0; + + this.type = type; + this.samplingProb = samplingProb; + + flowMemory = new HashMap(); + /*for (int j = 0; j < tableSize; j++){ + flowMemory[j] = new FlowWithCount(0, 0); + }*/ + + heavyhitterList = new HashMap(); + } + + public void processData(long key){ + // hardcoded values for the hash functions given that the number of flows is 100 + final int P = 5171; + final int hashA[] = { 421, 149, 311, 701, 557, 1667, 773, 2017, 1783, 883, 307, 199, 2719, 2851, 1453}; + final int hashB[] = { 73, 109, 233, 31, 151, 3359, 643, 1103, 2927, 3061, 409, 3079, 2341, 179, 1213}; + + totalNumberOfPackets++; + + int curKeyIndex = (int) ((hashA[0]*key + hashB[0]) % P) % (tableSize); + + if (flowMemory.containsKey(key)) + flowMemory.put(key, flowMemory.get(key) + 1); + else if (Math.random() <= samplingProb) + flowMemory.put(key, (long) 1); + } + + public int getDroppedPacketInfoCount(){ + return droppedPacketInfoCount; + } + + public HashMap getBuckets(){ + return flowMemory; + } +} \ No newline at end of file diff --git a/code/SummaryStructureType.java b/code/SummaryStructureType.java index aae5063..8b3e7b5 100644 --- a/code/SummaryStructureType.java +++ b/code/SummaryStructureType.java @@ -111,5 +111,8 @@ public enum SummaryStructureType{ // Claim - if there is a key that hashes to this location more than 50% of the time // that packets hash here, that will be left in this location at the end of this // process - GroupCounters + GroupCounters, + + // data structure that emulates Sample and HOld algorithm from Estan and Varghese's paper + SampleAndHold }; \ No newline at end of file diff --git a/code/TopKIdentifier.java b/code/TopKIdentifier.java new file mode 100644 index 0000000..762c92f --- /dev/null +++ b/code/TopKIdentifier.java @@ -0,0 +1,268 @@ +import java.util.*; +import java.io.File; +import java.io.FileNotFoundException; + +/* high level procedure that uses packet info from a csv file, parses it, + induces loss on it, and produces a sketch for the lost packet on which + the big loser identification process is performed + + initially written for the sketches to all be reversible so that the + reversibility procedure would identify the lossy buckets - unused + code in the context of the hash table approach*/ +public class TopKIdentifier{ + private static HashSet expectedHH; + private static HashMap flowSizes; + private static ArrayList flowAggWithSizes; + + private static double accuracy = 0.99; + + public static void runTopKIdentificationTrials(SummaryStructureType type, ArrayList inputStream, int[] k, int tableSize, int D){ + int numberOfTrials = 1000; + int cumDroppedPacketInfoCount = 0; + int observedSize[] = new int[k.length]; + int expectedSize[] = new int[k.length]; + int numberOfFalsePositives[] = new int[k.length]; + int numberOfFalseNegatives[] = new int[k.length]; + int missingFromTable[] = new int[k.length]; + long bigLoserPacketReported[] = new long[k.length]; + long bigLoserPacketCount[] = new long[k.length]; + float occupiedSlots[] = new float[k.length]; + float 
duplicates[] = new float[k.length]; + + // track the unique lost flows + DLeftHashTable lostFlowHashTable = null; + GroupCounters gcHashTable = null; + + double cumDeviation[] = new double[k.length]; + HashMap observedHH = new HashMap(); + ArrayList outputFlowsList = new ArrayList(); + for (int t = 0; t < numberOfTrials; t++){ + + // given input, so ideal order of heavy hitters + FlowWithCount[] inputFlowArray = new FlowWithCount[flowAggWithSizes.size()]; + inputFlowArray = flowAggWithSizes.toArray(inputFlowArray); + Arrays.sort(inputFlowArray); + + if (type == SummaryStructureType.GroupCounters) + gcHashTable = new GroupCounters(tableSize, type, inputStream.size(), D); + else + lostFlowHashTable = new DLeftHashTable(tableSize, type, inputStream.size(), D); + + Collections.shuffle(inputStream); // randomizing the order + + int count = 0; + for (Packet p : inputStream){ + //lostPacketSketch.updateCountInSketch(p); + //System.out.println(p.getSrcIp()); + if (type == SummaryStructureType.GroupCounters) + gcHashTable.processData(p.getSrcIp()); + else + lostFlowHashTable.processData(p.getSrcIp(), count++); + + } + + if (type == SummaryStructureType.GroupCounters) + cumDroppedPacketInfoCount += gcHashTable.getDroppedPacketInfoCount(); + else + cumDroppedPacketInfoCount += lostFlowHashTable.getDroppedPacketInfoCount(); + + + for (int k_index = 0; k_index < k.length; k_index++){ + outputFlowsList = new ArrayList(); + + + if (type == SummaryStructureType.EvictionWithoutCount){ + Sketch lossEstimateSketch = lostFlowHashTable.getSketch(); + for (Long f : lostFlowHashTable.getFlowIdBuckets()){ + if (f != 0) + occupiedSlots[k_index]++; + + outputFlowsList.add(new FlowWithCount(f, lossEstimateSketch.estimateCount(f))); + //System.out.println(f.flowid); + } + } + else if (type == SummaryStructureType.RollingMinSingleLookup){ + HashMap currentFlows = new HashMap(); + for (FlowWithCount f : lostFlowHashTable.getBuckets()){ + if (f.flowid != 0) + occupiedSlots[k_index]++; + + //System.out.println(f.flowid + " " + f.count); + //if (/*f.flowid != 0 && */f.count > k*lostPacketStream.size()){ + if (currentFlows.containsKey(f.flowid)){ + currentFlows.put(f.flowid, currentFlows.get(f.flowid) + f.count); + duplicates[k_index]++; + } + else + currentFlows.put(f.flowid, f.count); + //System.out.println(f.flowid); + } + + for (Long f : currentFlows.keySet()){ + outputFlowsList.add(new FlowWithCount(f, currentFlows.get(f))); + } + } + else { + HashMap currentFlows = new HashMap(); + for (FlowWithCount f : lostFlowHashTable.getBuckets()){ + if (f.flowid != 0) + occupiedSlots[k_index]++; + + //System.out.println(f.flowid + " " + f.count); + //if (/*f.flowid != 0 && */f.count > threshold*lostPacketStream.size()){ + if (currentFlows.containsKey(f.flowid)){ + currentFlows.put(f.flowid, currentFlows.get(f.flowid)); + duplicates[k_index]++; + } + else + currentFlows.put(f.flowid, f.count); + //System.out.println(f.flowid); + } + + for (FlowWithCount f : lostFlowHashTable.getBuckets()){ + outputFlowsList.add(new FlowWithCount(f.flowid, f.count)); + } + } + + // observed flows in sorted order so that we can pick the hh as the top k + FlowWithCount[] outputFlowBuckets = new FlowWithCount[outputFlowsList.size()]; + outputFlowBuckets = outputFlowsList.toArray(outputFlowBuckets); + Arrays.sort(outputFlowBuckets); + + observedHH = new HashMap(); + for (int i = 0; i < k[k_index]; i++){ + observedHH.put(outputFlowBuckets[i].flowid, outputFlowBuckets[i].count); + } + + int bigLoserPacketsLost = 0; + int flag = 0; + double deviation = 
0; + double denominator = 0; + + + // first k in inputStream are expected hh - fix which heavy hitters you look at for cdf of competitors + expectedHH = new HashSet(); + for (int i = 0; i < k[k_index]; i++){ + expectedHH.add(inputFlowArray[i].flowid); + } + + for (long flowid : expectedHH){ + if (!observedHH.containsKey(flowid)){ + numberOfFalseNegatives[k_index]++; + } + else + bigLoserPacketReported[k_index] += flowSizes.get(flowid); + + int tempFlag = 0; + for (FlowWithCount f : outputFlowsList){ + if (f.flowid == flowid){ + tempFlag = 1; + break; + } + } + if (tempFlag == 0) + missingFromTable[k_index]++; + bigLoserPacketCount[k_index] += flowSizes.get(flowid); + } + + expectedSize[k_index] = expectedHH.size(); + observedSize[k_index] = observedHH.size(); + + for (long flowid : observedHH.keySet()){ + //System.out.println("hello"); + //System.out.println(FlowDataParser.convertLongToAddress(flowid)); + if (!expectedHH.contains(flowid)){ + //System.out.println(FlowDataParser.convertLongToAddress(flowid)); + numberOfFalsePositives[k_index]++; + } + if (flowSizes.get(flowid) == null) + System.out.println(FlowDataParser.convertLongToAddress(flowid)); + //System.out.print(observedHH.get(flowid)); + //System.out.print(" flowid=" + flowid + " " + flowSizes.get(flowid)); + /*if (!expectedHH.contains(flowid) && observedHH.get(flowid) > flowSizes.get(flowid)){ + System.out.println(flowid + " " + observedHH.get(flowid) + " " + flowSizes.get(flowid)); + }*/ + + deviation += Math.abs(observedHH.get(flowid) - flowSizes.get(flowid)); + denominator += flowSizes.get(flowid); + } + + cumDeviation[k_index] += deviation/denominator; + } + } + + for (int k_index = 0; k_index < k.length; k_index++){ + System.out.print(tableSize + "," + k[k_index] + "," + D + ","); + System.out.print((double) numberOfFalsePositives[k_index]/numberOfTrials/observedSize[k_index] + ","); + System.out.print((double) numberOfFalseNegatives[k_index]/numberOfTrials/expectedSize[k_index] + ","); + System.out.print(expectedSize[k_index] + "," + observedSize[k_index] + "," + (double) bigLoserPacketReported[k_index]/bigLoserPacketCount[k_index]); + System.out.print("," + cumDeviation[k_index]/numberOfTrials + "," + occupiedSlots[k_index]/tableSize/numberOfTrials + "," + duplicates[k_index]/tableSize/numberOfTrials); + System.out.println("," + (double) missingFromTable[k_index]/numberOfTrials/expectedSize[k_index]); + } + + //lostFlowHashTable.printBuckets(); + } + + public static void main(String[] args){ + ArrayList inputPacketStream; + flowAggWithSizes = new ArrayList(); // input stream in a convenient format + flowSizes = new HashMap(); + if(args[0].contains("caida") || args[0].contains("Caida")) + inputPacketStream = FlowDataParser.parseCAIDAPacketData(args[0]); + else + inputPacketStream = FlowDataParser.parsePacketData(args[0]); + + // read the flows to be lost from a file mentioned in the command line and create a new stream with that flow lost + int totalPacketsLost = 0; + File file = new File(args[1]); + try + { + Scanner scanner = new Scanner(file); + String line; + //int linenumber = 0; + String[] fields = new String[24]; + while (scanner.hasNextLine()) + { + line = scanner.nextLine(); + fields = line.split(","); + flowSizes.put(FlowDataParser.convertAddressToLong(fields[0]), Integer.parseInt(fields[1])); + totalPacketsLost+= Integer.parseInt(fields[1]); + FlowWithCount f = new FlowWithCount(FlowDataParser.convertAddressToLong(fields[0]), Integer.parseInt(fields[1])); + flowAggWithSizes.add(f); + } + scanner.close(); + } + 
catch (FileNotFoundException e) + { + System.err.format("Exception occurred trying to read '%s'.", args[1]); + e.printStackTrace(); + return; + } + + //final int tableSize[] = {30, 75, 150, 300, 500, 900, 1200, 1500, 2000}; + //final int tableSize[] = {/*100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1200, 1400, 1600, 1800, 1024, 2048/*, 4096, 8192*/}; + final int k[] = {50, 100, 150, 200, 250, 300, 350, 400, 450, 500, 550, 600, 650, 700, 750}; + final int tableSize[] = {/*2520, */5040, 7560, /*10080*/}; // LCM of the first 12 integers + //final int tableSize[] = {64}; + + if (args[2].equals("runTrial")) { + System.out.println("tableSize" + "," + "k" + "," + "D," + "FalsePositive %" + "," + "False Negative %" + "," + "expected number, reported number, hhReportedFraction, deviation, table occupancy, duplicates, fraction missing in table"); + for (int tableSize_index = 0; tableSize_index < tableSize.length; tableSize_index++) { + for (int D = 2; D <= 15; D++){ + if (D == 11 || D == 13) + continue; + + // run the loss identification trials for the appropriate heuristic + if (args[3].contains("Basic")) + runTopKIdentificationTrials(SummaryStructureType.BasicHeuristic, inputPacketStream, k, tableSize[tableSize_index], D); + else if (args[3].contains("Multi")) + runTopKIdentificationTrials(SummaryStructureType.RollingMinWihoutCoalescense, inputPacketStream, k, tableSize[tableSize_index], D); + else if (args[3].contains("Single")) + runTopKIdentificationTrials(SummaryStructureType.RollingMinSingleLookup , inputPacketStream, k, tableSize[tableSize_index], D); + else if (args[3].contains("coalesce")) + runTopKIdentificationTrials(SummaryStructureType.RollingMinWithBloomFilter , inputPacketStream, k, tableSize[tableSize_index], D); + } + } + } + } +} \ No newline at end of file diff --git a/code/TopKIdentifier2.java b/code/TopKIdentifier2.java new file mode 100644 index 0000000..3af8b8f --- /dev/null +++ b/code/TopKIdentifier2.java @@ -0,0 +1,268 @@ +import java.util.*; +import java.io.File; +import java.io.FileNotFoundException; + +/* high level procedure that uses packet info from a csv file, parses it, + induces loss on it, and produces a sketch for the lost packet on which + the big loser identification process is performed + + initially written for the sketches to all be reversible so that the + reversibility procedure would identify the lossy buckets - unused + code in the context of the hash table approach*/ +public class TopKIdentifier2{ + private static HashSet expectedHH; + private static HashMap flowSizes; + private static ArrayList flowAggWithSizes; + + private static double accuracy = 0.99; + + public static void runTopKIdentificationTrials(SummaryStructureType type, ArrayList inputStream, int[] k, int tableSize, int D){ + int numberOfTrials = 1000; + int cumDroppedPacketInfoCount = 0; + int observedSize[] = new int[k.length]; + int expectedSize[] = new int[k.length]; + int numberOfFalsePositives[] = new int[k.length]; + int numberOfFalseNegatives[] = new int[k.length]; + int missingFromTable[] = new int[k.length]; + long bigLoserPacketReported[] = new long[k.length]; + long bigLoserPacketCount[] = new long[k.length]; + float occupiedSlots[] = new float[k.length]; + float duplicates[] = new float[k.length]; + + // track the unique lost flows + DLeftHashTable lostFlowHashTable = null; + GroupCounters gcHashTable = null; + + double cumDeviation[] = new double[k.length]; + HashMap observedHH = new HashMap(); + ArrayList outputFlowsList = new ArrayList(); + for (int t = 0; t < 
numberOfTrials; t++){ + + // given input, so ideal order of heavy hitters + FlowWithCount[] inputFlowArray = new FlowWithCount[flowAggWithSizes.size()]; + inputFlowArray = flowAggWithSizes.toArray(inputFlowArray); + Arrays.sort(inputFlowArray); + + if (type == SummaryStructureType.GroupCounters) + gcHashTable = new GroupCounters(tableSize, type, inputStream.size(), D); + else + lostFlowHashTable = new DLeftHashTable(tableSize, type, inputStream.size(), D); + + Collections.shuffle(inputStream); // randomizing the order + + int count = 0; + for (Packet p : inputStream){ + //lostPacketSketch.updateCountInSketch(p); + //System.out.println(p.getSrcIp()); + if (type == SummaryStructureType.GroupCounters) + gcHashTable.processData(p.getSrcIp()); + else + lostFlowHashTable.processData(p.getSrcIp(), count++); + + } + + if (type == SummaryStructureType.GroupCounters) + cumDroppedPacketInfoCount += gcHashTable.getDroppedPacketInfoCount(); + else + cumDroppedPacketInfoCount += lostFlowHashTable.getDroppedPacketInfoCount(); + + + for (int k_index = 0; k_index < k.length; k_index++){ + outputFlowsList = new ArrayList(); + + + if (type == SummaryStructureType.EvictionWithoutCount){ + Sketch lossEstimateSketch = lostFlowHashTable.getSketch(); + for (Long f : lostFlowHashTable.getFlowIdBuckets()){ + if (f != 0) + occupiedSlots[k_index]++; + + outputFlowsList.add(new FlowWithCount(f, lossEstimateSketch.estimateCount(f))); + //System.out.println(f.flowid); + } + } + else if (type == SummaryStructureType.RollingMinSingleLookup){ + HashMap currentFlows = new HashMap(); + for (FlowWithCount f : lostFlowHashTable.getBuckets()){ + if (f.flowid != 0) + occupiedSlots[k_index]++; + + //System.out.println(f.flowid + " " + f.count); + //if (/*f.flowid != 0 && */f.count > k*lostPacketStream.size()){ + if (currentFlows.containsKey(f.flowid)){ + currentFlows.put(f.flowid, currentFlows.get(f.flowid) + f.count); + duplicates[k_index]++; + } + else + currentFlows.put(f.flowid, f.count); + //System.out.println(f.flowid); + } + + for (Long f : currentFlows.keySet()){ + outputFlowsList.add(new FlowWithCount(f, currentFlows.get(f))); + } + } + else { + HashMap currentFlows = new HashMap(); + for (FlowWithCount f : lostFlowHashTable.getBuckets()){ + if (f.flowid != 0) + occupiedSlots[k_index]++; + + //System.out.println(f.flowid + " " + f.count); + //if (/*f.flowid != 0 && */f.count > threshold*lostPacketStream.size()){ + if (currentFlows.containsKey(f.flowid)){ + currentFlows.put(f.flowid, currentFlows.get(f.flowid)); + duplicates[k_index]++; + } + else + currentFlows.put(f.flowid, f.count); + //System.out.println(f.flowid); + } + + for (FlowWithCount f : lostFlowHashTable.getBuckets()){ + outputFlowsList.add(new FlowWithCount(f.flowid, f.count)); + } + } + + // observed flows in sorted order so that we can pick the hh as the top k + FlowWithCount[] outputFlowBuckets = new FlowWithCount[outputFlowsList.size()]; + outputFlowBuckets = outputFlowsList.toArray(outputFlowBuckets); + Arrays.sort(outputFlowBuckets); + + observedHH = new HashMap(); + for (int i = 0; i < k[k_index]; i++){ + observedHH.put(outputFlowBuckets[i].flowid, outputFlowBuckets[i].count); + } + + int bigLoserPacketsLost = 0; + int flag = 0; + double deviation = 0; + double denominator = 0; + + + // first k in inputStream are expected hh - fix which heavy hitters you look at for cdf of competitors + expectedHH = new HashSet(); + for (int i = 0; i < k[k_index]; i++){ + expectedHH.add(inputFlowArray[i].flowid); + } + + for (long flowid : expectedHH){ + if 
(!observedHH.containsKey(flowid)){ + numberOfFalseNegatives[k_index]++; + } + else + bigLoserPacketReported[k_index] += flowSizes.get(flowid); + + int tempFlag = 0; + for (FlowWithCount f : outputFlowsList){ + if (f.flowid == flowid){ + tempFlag = 1; + break; + } + } + if (tempFlag == 0) + missingFromTable[k_index]++; + bigLoserPacketCount[k_index] += flowSizes.get(flowid); + } + + expectedSize[k_index] = expectedHH.size(); + observedSize[k_index] = observedHH.size(); + + for (long flowid : observedHH.keySet()){ + //System.out.println("hello"); + //System.out.println(FlowDataParser.convertLongToAddress(flowid)); + if (!expectedHH.contains(flowid)){ + //System.out.println(FlowDataParser.convertLongToAddress(flowid)); + numberOfFalsePositives[k_index]++; + } + if (flowSizes.get(flowid) == null) + System.out.println(FlowDataParser.convertLongToAddress(flowid)); + //System.out.print(observedHH.get(flowid)); + //System.out.print(" flowid=" + flowid + " " + flowSizes.get(flowid)); + /*if (!expectedHH.contains(flowid) && observedHH.get(flowid) > flowSizes.get(flowid)){ + System.out.println(flowid + " " + observedHH.get(flowid) + " " + flowSizes.get(flowid)); + }*/ + + deviation += Math.abs(observedHH.get(flowid) - flowSizes.get(flowid)); + denominator += flowSizes.get(flowid); + } + + cumDeviation[k_index] += deviation/denominator; + } + } + + for (int k_index = 0; k_index < k.length; k_index++){ + System.out.print(tableSize + "," + k[k_index] + "," + D + ","); + System.out.print((double) numberOfFalsePositives[k_index]/numberOfTrials/observedSize[k_index] + ","); + System.out.print((double) numberOfFalseNegatives[k_index]/numberOfTrials/expectedSize[k_index] + ","); + System.out.print(expectedSize[k_index] + "," + observedSize[k_index] + "," + (double) bigLoserPacketReported[k_index]/bigLoserPacketCount[k_index]); + System.out.print("," + cumDeviation[k_index]/numberOfTrials + "," + occupiedSlots[k_index]/tableSize/numberOfTrials + "," + duplicates[k_index]/tableSize/numberOfTrials); + System.out.println("," + (double) missingFromTable[k_index]/numberOfTrials/expectedSize[k_index]); + } + + //lostFlowHashTable.printBuckets(); + } + + public static void main(String[] args){ + ArrayList inputPacketStream; + flowAggWithSizes = new ArrayList(); // input stream in a convenient format + flowSizes = new HashMap(); + if(args[0].contains("caida") || args[0].contains("Caida")) + inputPacketStream = FlowDataParser.parseCAIDAPacketData(args[0]); + else + inputPacketStream = FlowDataParser.parsePacketData(args[0]); + + // read the flows to be lost from a file mentioned in the command line and create a new stream with that flow lost + int totalPacketsLost = 0; + File file = new File(args[1]); + try + { + Scanner scanner = new Scanner(file); + String line; + //int linenumber = 0; + String[] fields = new String[24]; + while (scanner.hasNextLine()) + { + line = scanner.nextLine(); + fields = line.split(","); + flowSizes.put(FlowDataParser.convertAddressToLong(fields[0]), Integer.parseInt(fields[1])); + totalPacketsLost+= Integer.parseInt(fields[1]); + FlowWithCount f = new FlowWithCount(FlowDataParser.convertAddressToLong(fields[0]), Integer.parseInt(fields[1])); + flowAggWithSizes.add(f); + } + scanner.close(); + } + catch (FileNotFoundException e) + { + System.err.format("Exception occurred trying to read '%s'.", args[1]); + e.printStackTrace(); + return; + } + + //final int tableSize[] = {30, 75, 150, 300, 500, 900, 1200, 1500, 2000}; + //final int tableSize[] = {/*100, 200, 300, 400, 500, 600, 700, 800, 900, 
1000, 1200, 1400, 1600, 1800, 1024, 2048/*, 4096, 8192*/}; + final int k[] = {50, 100, 150, 200, 250, 300, 350, 400, 450, 500, 550, 600, 650, 700, 750}; + final int tableSize[] = {/*2520, */5040, 7560, /*10080*/}; // LCM of the first 12 integers + //final int tableSize[] = {64}; + + if (args[2].equals("runTrial")) { + System.out.println("tableSize" + "," + "k" + "," + "D," + "FalsePositive %" + "," + "False Negative %" + "," + "expected number, reported number, hhReportedFraction, deviation, table occupancy, duplicates, fraction missing in table"); + for (int tableSize_index = 0; tableSize_index < tableSize.length; tableSize_index++) { + for (int D = 2; D <= 15; D++){ + if (D == 11 || D == 13) + continue; + + // run the loss identification trials for the appropriate heuristic + if (args[3].contains("Basic")) + runTopKIdentificationTrials(SummaryStructureType.BasicHeuristic, inputPacketStream, k, tableSize[tableSize_index], D); + else if (args[3].contains("Multi")) + runTopKIdentificationTrials(SummaryStructureType.RollingMinWihoutCoalescense, inputPacketStream, k, tableSize[tableSize_index], D); + else if (args[3].contains("Single")) + runTopKIdentificationTrials(SummaryStructureType.RollingMinSingleLookup , inputPacketStream, k, tableSize[tableSize_index], D); + else if (args[3].contains("coalesce")) + runTopKIdentificationTrials(SummaryStructureType.RollingMinWithBloomFilter , inputPacketStream, k, tableSize[tableSize_index], D); + } + } + } + } +} \ No newline at end of file diff --git a/code/runAggVerification.sh b/code/runAggVerification.sh index 745fa75..63fb96f 100644 --- a/code/runAggVerification.sh +++ b/code/runAggVerification.sh @@ -1,5 +1,9 @@ -java AggregateModelVerifier ../Analysis/Caida/Caida750ThSizeBySrcIp.csv > ../Analysis/Caida/Caida750ThAggVerification.csv 2> ../Analysis/Caida/Caida750ThAggInputStream.csv -java AggregateModelVerifier ../Analysis/Caida/Caida3MSizeBySrcIp.csv > ../Analysis/Caida/Caida3MAggVerification.csv 2> ../Analysis/Caida/Caida3MAggInputStream.csv -java AggregateModelVerifier ../Analysis/Caida/caida5MillionSplit1SizeBySrcIp.csv > ../Analysis/Caida/Caida5MAggVerification.csv 2> ../Analysis/Caida/Caida5MAggInputStream.csv -java AggregateModelVerifier ../Analysis/Caida/Caida23MSizeBySrcIp.csv > ../Analysis/Caida/Caida23MAggVerification.csv 2> ../Analysis/Caida/Caida23MAggInputStream.csv +#java AggregateModelVerifier ../Analysis/Caida/Caida750ThSizeBySrcIp.csv > ../Analysis/Caida/Caida750ThAggVerification.csv 2> ../Analysis/Caida/Caida750ThAggInputStream.csv +#java AggregateModelVerifier ../Analysis/Caida/Caida3MSizeBySrcIp.csv > ../Analysis/Caida/Caida3MAggVerification.csv 2> ../Analysis/Caida/Caida3MAggInputStream.csv +#java AggregateModelVerifier ../Analysis/Caida/caida5MillionSplit1SizeBySrcIp.csv > ../Analysis/Caida/Caida5MAggVerification.csv 2> ../Analysis/Caida/Caida5MAggInputStream.csv +#java AggregateModelVerifier ../Analysis/Caida/Caida23MSizeBySrcIp.csv > ../Analysis/Caida/Caida23MAggVerification.csv 2> ../Analysis/Caida/Caida23MAggInputStream.csv +java AggregateModelVerifier ../Analysis/Caida/Caida750ThSizeBySrcIp.csv 2> ../Analysis/Caida/Caida750ThAggNumCompFlowsFreq.csv +java AggregateModelVerifier ../Analysis/Caida/Caida3MSizeBySrcIp.csv 2> ../Analysis/Caida/Caida3MAggNumCompFlowsFreq.csv +java AggregateModelVerifier ../Analysis/Caida/caida5MillionSplit1SizeBySrcIp.csv 2> ../Analysis/Caida/Caida5MAggNumCompFlowsFreq.csv +java AggregateModelVerifier ../Analysis/Caida/Caida23MSizeBySrcIp.csv 2> 
../Analysis/Caida/Caida23MAggNumCompFlowsFreq.csv
diff --git a/code/runCMExperiments.sh b/code/runCMExperiments.sh
index 5412474..1351f03 100644
--- a/code/runCMExperiments.sh
+++ b/code/runCMExperiments.sh
@@ -1,3 +1,3 @@
-#java LossyFlowIdentifier ../Analysis/Caida/Caida750Thpackets.csv ../Analysis/Caida/Caida750ThSizeBySrcIp.csv countMin NoKeyNoRepBit > ../Analysis/Caida/Caida750ThCMNoKeysSummary.csv
-java LossyFlowIdentifier ../Analysis/Caida/Caida750Thpackets.csv ../Analysis/Caida/Caida750ThSizeBySrcIp.csv countMin Keys > ../Analysis/Caida/Caida750ThCMKeysSummary.csv
-# java LossyFlowIdentifier ../Analysis/Caida/caidaSplit1.csv ../Analysis/Caida/caidaSplit1SizeBySrcIp.csv countMin NoKeyRepBit > ../Analysis/Caida/CaidaSplit1CountMinNoKeyRepBitWithThrSummary.csv
+#java LossyFlowIdentifier ../Analysis/Caida/Caida750Thpackets.csv ../Analysis/Caida/Caida750ThSizeBySrcIp.csv perThreshold NoKeyNoRepBit > ../Analysis/Caida/Caida750ThCMNoKeysSummary.csv
+java LossyFlowIdentifier ../Analysis/Caida/Caida750Thpackets.csv ../Analysis/Caida/Caida750ThSizeBySrcIp.csv perThreshold Keys > ../Analysis/Caida/Caida750ThCMKeysSummary.csv
+# java LossyFlowIdentifier ../Analysis/Caida/caidaSplit1.csv ../Analysis/Caida/caidaSplit1SizeBySrcIp.csv perThreshold NoKeyRepBit > ../Analysis/Caida/CaidaSplit1CountMinNoKeyRepBitWithThrSummary.csv
diff --git a/code/runComparisons.sh b/code/runComparisons.sh
new file mode 100644
index 0000000..68dad2d
--- /dev/null
+++ b/code/runComparisons.sh
@@ -0,0 +1,13 @@
+#1M packets
+#java LossyFlowIdentifier ../Analysis/Caida/caidaSplit1.csv ../Analysis/Caida/caidaSplit1SizeBySrcIp.csv PerThreshold SampleAndHold > ../Analysis/Caida/Caida1MSampleAndHoldSummary.csv
+#java LossyFlowIdentifier ../Analysis/Caida/caidaSplit1.csv ../Analysis/Caida/caidaSplit1SizeBySrcIp.csv runTrial Basic > ../Analysis/Caida/Caida1MBaselineSummary.csv
+#java LossyFlowIdentifier ../Analysis/Caida/caidaSplit1.csv ../Analysis/Caida/caidaSplit1SizeBySrcIp.csv runTrial Single > ../Analysis/Caida/Caida1MSingleSummary.csv
+
+#23M packets
+java LossyFlowIdentifier ../Analysis/Caida/Caida23Mpackets.csv ../Analysis/Caida/Caida23MSizeBySrcIp.csv PerThreshold SampleAndHold > ../Analysis/Caida/Caida23MSampleAndHoldSummary.csv
+java LossyFlowIdentifier ../Analysis/Caida/Caida23Mpackets.csv ../Analysis/Caida/Caida23MSizeBySrcIp.csv runTrial Basic > ../Analysis/Caida/Caida23MBaselineSummaryWithMinVal.csv
+java LossyFlowIdentifier ../Analysis/Caida/Caida23Mpackets.csv ../Analysis/Caida/Caida23MSizeBySrcIp.csv runTrial Single > ../Analysis/Caida/Caida23MSingleSummary.csv
+
+
+
+
diff --git a/code/runExperiments.sh b/code/runExperiments.sh
index b0d4148..12af4c9 100644
--- a/code/runExperiments.sh
+++ b/code/runExperiments.sh
@@ -1,9 +1,9 @@
 #java LossyFlowIdentifier ../Analysis/Caida/caidaSplit1.csv ../Analysis/Caida/caidaSplit1SizeBySrcIp.csv noTrial Basic 1500 > ../Analysis/Caida/CaidaSplit1BaselineSize1500.csv
 #java LossyFlowIdentifier ../Analysis/Caida/caidaSplit1.csv ../Analysis/Caida/caidaSplit1SizeBySrcIp.csv noTrial Multi 1500 > ../Analysis/Caida/CaidaSplit1MultiLookUpSize1500.csv
 #java LossyFlowIdentifier ../Analysis/Caida/caidaSplit1.csv ../Analysis/Caida/caidaSplit1SizeBySrcIp.csv noTrial Single 1500 > ../Analysis/Caida/CaidaSplit1SingleLookUpSize1500.csv
-java LossyFlowIdentifier ../Analysis/Caida/caidaSplit1.csv ../Analysis/Caida/caidaSplit1SizeBySrcIp.csv runTrial Basic > ../Analysis/Caida/CaidaSplit1BaselineIncD.csv
-java LossyFlowIdentifier ../Analysis/Caida/caidaSplit1.csv ../Analysis/Caida/caidaSplit1SizeBySrcIp.csv runTrial Multi > ../Analysis/Caida/CaidaSplit1MultiLookupIncD.csv
-java LossyFlowIdentifier ../Analysis/Caida/caidaSplit1.csv ../Analysis/Caida/caidaSplit1SizeBySrcIp.csv runTrial Single > ../Analysis/Caida/CaidaSplit1SingleLookUpIncD.csv
+java TopKIdentifier2 ../Analysis/Caida/caidaSplit1.csv ../Analysis/Caida/caidaSplit1SizeBySrcIp.csv runTrial Basic > ../Analysis/Caida/Caida1MTopKBaselineSummaryWith1.csv
+java TopKIdentifier2 ../Analysis/Caida/caidaSplit1.csv ../Analysis/Caida/caidaSplit1SizeBySrcIp.csv runTrial Multi > ../Analysis/Caida/Caida1MTopKSingleSummary.csv
+#java LossyFlowIdentifier ../Analysis/Caida/caidaSplit1.csv ../Analysis/Caida/caidaSplit1SizeBySrcIp.csv runTrial Single > ../Analysis/Caida/CaidaSplit1SingleLookUpIncD.csv
 #java LossyFlowIdentifier ../Analysis/Caida/caidaSplit1.csv ../Analysis/Caida/caidaSplit1SizeBySrcIp.csv runTrial coalesce > ../Analysis/Caida/CaidaSplit1CoalesceSummary.csv
 #java LossyFlowIdentifier ../Analysis/Caida/caidaSplit1.csv ../Analysis/Caida/caidaSplit1SizeBySrcIp.csv noTrial coalesce 1500 > ../Analysis/Caida/CaidaSplit1CoalesceSize1500.csv
diff --git a/code/runSizeMeasurementOnSam2.sh b/code/runSizeMeasurementOnSam2.sh
deleted file mode 100644
index 73bff68..0000000
--- a/code/runSizeMeasurementOnSam2.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/usr/bin/env sh
-
-java LossyFlowIdentifier ../Analysis/sample2.csv ../Analysis/SizeOfAllFlowsSample2.csv no 30 > ../Analysis/30/Sam2SingleLookupSizeDifference30.csv
-
-java LossyFlowIdentifier ../Analysis/sample2.csv ../Analysis/SizeOfAllFlowsSample2.csv no 50 > ../Analysis/50/Sam2SingleLookupSizeDifference50.csv
-
-java LossyFlowIdentifier ../Analysis/sample2.csv ../Analysis/SizeOfAllFlowsSample2.csv no 100 > ../Analysis/100/Sam2SingleLookupSizeDifference100.csv
-
-java LossyFlowIdentifier ../Analysis/sample2.csv ../Analysis/SizeOfAllFlowsSample2.csv no 200 > ../Analysis/200/Sam2SingleLookupSizeDifference200.csv
-
-java LossyFlowIdentifier ../Analysis/sample2.csv ../Analysis/SizeOfAllFlowsSample2.csv no 300 > ../Analysis/300/Sam2SingleLookupSizeDifference300.csv
-
-java LossyFlowIdentifier ../Analysis/sample2.csv ../Analysis/SizeOfAllFlowsSample2.csv no 500 > ../Analysis/500/Sam2SingleLookupSizeDifference500.csv
-
-
-
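The nonHHCompetitors bookkeeping added to DLeftHashTable.processAggData counts, for each incoming flow, how many of its D candidate buckets are already held by an expected heavy hitter, and builds the frequency table ("cdf of competitors") that AggregateModelVerifier dumps. A minimal sketch of that bookkeeping, with hypothetical method and parameter names standing in for the patch's in-place code:

import java.util.HashSet;
import java.util.Set;

// Illustrative sketch: histogram the number of heavy-hitter "competitors"
// a flow meets among its D candidate buckets.
public class CompetitorHistogramSketch {
    static void recordCompetitors(long[] candidateBucketFlowIds,
                                  Set<Long> expectedHH,
                                  int[] nonHHCompetitors) {
        int currentCompetitors = 0;
        for (long resident : candidateBucketFlowIds) {
            if (expectedHH.contains(resident)) currentCompetitors++;
        }
        nonHHCompetitors[currentCompetitors]++;  // one bin per possible count, 0..D
    }

    public static void main(String[] args) {
        Set<Long> expectedHH = new HashSet<>();
        expectedHH.add(10L);
        expectedHH.add(20L);
        int D = 3;
        int[] nonHHCompetitors = new int[D + 1];
        recordCompetitors(new long[]{10L, 20L, 7L}, expectedHH, nonHHCompetitors); // 2 competitors
        recordCompetitors(new long[]{1L, 2L, 3L},  expectedHH, nonHHCompetitors);  // 0 competitors
        System.out.println(java.util.Arrays.toString(nonHHCompetitors)); // [1, 0, 1, 0]
    }
}
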
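The "// replace with min+1" comment added in DLeftHashTable.basicHeuristic points at a Space-Saving-style eviction: when all D candidate buckets are occupied by other flows, the new key takes over the minimum bucket and inherits its count plus one instead of restarting at 1. A minimal sketch of that variant, assuming a simple Bucket class in place of the repo's FlowWithCount (names illustrative, not the patch's code):

// Illustrative sketch of the min+1 eviction rule hinted at in basicHeuristic.
public class MinPlusOneEvictionSketch {
    static class Bucket { long flowId; long count; }   // flowId 0 means the slot is empty

    // candidates: the D buckets chosen by the hash functions for `key`.
    static void insert(Bucket[] candidates, long key) {
        // pass 1: the flow may already sit in one of its candidate buckets
        for (Bucket b : candidates) {
            if (b.flowId == key) { b.count++; return; }
        }
        // pass 2: take an empty bucket if there is one, otherwise track the minimum
        Bucket min = null;
        for (Bucket b : candidates) {
            if (b.flowId == 0) { b.flowId = key; b.count = 1; return; }
            if (min == null || b.count < min.count) min = b;
        }
        min.flowId = key;          // evict the smallest competitor...
        min.count = min.count + 1; // ...and inherit its count, plus one for this packet
    }

    public static void main(String[] args) {
        Bucket[] candidates = new Bucket[3];
        for (int i = 0; i < candidates.length; i++) candidates[i] = new Bucket();
        long[] stream = {5, 5, 7, 9, 11};   // 11 forces an eviction once all buckets are full
        for (long key : stream) insert(candidates, key);
        for (Bucket b : candidates) System.out.println(b.flowId + " -> " + b.count);
    }
}
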
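TopKIdentifier's per-trial evaluation sorts the recovered (flowid, count) buckets, takes the top k as the observed heavy hitters, and compares them against the true top k to count false positives and false negatives. A minimal sketch of that comparison step, using plain HashMaps in place of the repo's FlowWithCount arrays (class name and sample numbers are illustrative):

import java.util.*;

// Illustrative sketch: compare the top-k flows of a table dump with the
// ground-truth top-k and report false negatives / false positives.
public class TopKComparisonSketch {
    // Return the flow ids of the k largest counts in the given map.
    static Set<Long> topK(Map<Long, Long> counts, int k) {
        List<Map.Entry<Long, Long>> entries = new ArrayList<>(counts.entrySet());
        entries.sort((a, b) -> Long.compare(b.getValue(), a.getValue()));
        Set<Long> result = new LinkedHashSet<>();
        for (int i = 0; i < k && i < entries.size(); i++) result.add(entries.get(i).getKey());
        return result;
    }

    public static void main(String[] args) {
        Map<Long, Long> trueSizes = new HashMap<>();   // ground-truth flow sizes
        trueSizes.put(1L, 500L); trueSizes.put(2L, 300L);
        trueSizes.put(3L, 200L); trueSizes.put(4L, 10L);

        Map<Long, Long> tableDump = new HashMap<>();   // counts recovered from the table
        tableDump.put(1L, 480L); tableDump.put(2L, 310L);
        tableDump.put(4L, 600L); tableDump.put(3L, 5L);

        int k = 2;
        Set<Long> expectedHH = topK(trueSizes, k);
        Set<Long> observedHH = topK(tableDump, k);

        int falseNegatives = 0, falsePositives = 0;
        for (long f : expectedHH) if (!observedHH.contains(f)) falseNegatives++;
        for (long f : observedHH) if (!expectedHH.contains(f)) falsePositives++;
        System.out.println("FN=" + falseNegatives + ", FP=" + falsePositives);  // FN=1, FP=1
    }
}
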