From 556959fa43ab4ae4c775f8a32be5ab491704877f Mon Sep 17 00:00:00 2001 From: F5 Date: Sun, 30 Oct 2022 15:19:23 +0800 Subject: [PATCH] mtp: Add examples --- examples/mtp/CMakeLists.txt | 120 +++ examples/mtp/dctcp-example-mtp.cc | 567 +++++++++++ examples/mtp/dynamic-global-routing-mtp.cc | 231 +++++ examples/mtp/queue-discs-benchmark-mtp.cc | 315 ++++++ examples/mtp/ripng-simple-network-mtp.cc | 274 ++++++ examples/mtp/simple-multicast-flooding-mtp.cc | 211 ++++ .../socket-bound-tcp-static-routing-mtp.cc | 235 +++++ examples/mtp/tcp-bbr-example-mtp.cc | 265 +++++ examples/mtp/tcp-pacing-mtp.cc | 334 +++++++ examples/mtp/tcp-star-server-mtp.cc | 172 ++++ examples/mtp/tcp-validation-mtp.cc | 927 ++++++++++++++++++ 11 files changed, 3651 insertions(+) create mode 100644 examples/mtp/CMakeLists.txt create mode 100644 examples/mtp/dctcp-example-mtp.cc create mode 100644 examples/mtp/dynamic-global-routing-mtp.cc create mode 100644 examples/mtp/queue-discs-benchmark-mtp.cc create mode 100644 examples/mtp/ripng-simple-network-mtp.cc create mode 100644 examples/mtp/simple-multicast-flooding-mtp.cc create mode 100644 examples/mtp/socket-bound-tcp-static-routing-mtp.cc create mode 100644 examples/mtp/tcp-bbr-example-mtp.cc create mode 100644 examples/mtp/tcp-pacing-mtp.cc create mode 100644 examples/mtp/tcp-star-server-mtp.cc create mode 100644 examples/mtp/tcp-validation-mtp.cc diff --git a/examples/mtp/CMakeLists.txt b/examples/mtp/CMakeLists.txt new file mode 100644 index 000000000..42810c312 --- /dev/null +++ b/examples/mtp/CMakeLists.txt @@ -0,0 +1,120 @@ +if(${ENABLE_MTP}) + build_example( + NAME dctcp-example-mtp + SOURCE_FILES dctcp-example-mtp.cc + LIBRARIES_TO_LINK + ${libcore} + ${libnetwork} + ${libinternet} + ${libpoint-to-point} + ${libapplications} + ${libtraffic-control} + ${libmtp} + ) + + build_example( + NAME dynamic-global-routing-mtp + SOURCE_FILES dynamic-global-routing-mtp.cc + LIBRARIES_TO_LINK + ${libpoint-to-point} + ${libcsma} + ${libinternet} + 
${libapplications} + ${libmtp} + ) + + build_example( + NAME queue-discs-benchmark-mtp + SOURCE_FILES queue-discs-benchmark-mtp.cc + LIBRARIES_TO_LINK + ${libinternet} + ${libpoint-to-point} + ${libapplications} + ${libinternet-apps} + ${libtraffic-control} + ${libflow-monitor} + ${libmtp} + ) + + build_example( + NAME ripng-simple-network-mtp + SOURCE_FILES ripng-simple-network-mtp.cc + LIBRARIES_TO_LINK + ${libcsma} + ${libinternet} + ${libpoint-to-point} + ${libinternet-apps} + ${libmtp} + ) + + build_example( + NAME simple-multicast-flooding-mtp + SOURCE_FILES simple-multicast-flooding-mtp.cc + LIBRARIES_TO_LINK + ${libcore} + ${libnetwork} + ${libapplications} + ${libinternet} + ${libmtp} + ) + + build_example( + NAME socket-bound-tcp-static-routing-mtp + SOURCE_FILES socket-bound-tcp-static-routing-mtp.cc + LIBRARIES_TO_LINK + ${libnetwork} + ${libcsma} + ${libpoint-to-point} + ${libinternet} + ${libapplications} + ${libmtp} + ) + + build_example( + NAME tcp-bbr-example-mtp + SOURCE_FILES tcp-bbr-example-mtp.cc + LIBRARIES_TO_LINK + ${libpoint-to-point} + ${libinternet} + ${libapplications} + ${libtraffic-control} + ${libnetwork} + ${libinternet-apps} + ${libflow-monitor} + ${libmtp} + ) + + build_example( + NAME tcp-pacing-mtp + SOURCE_FILES tcp-pacing-mtp.cc + LIBRARIES_TO_LINK + ${libpoint-to-point} + ${libinternet} + ${libapplications} + ${libflow-monitor} + ${libmtp} + ) + + build_example( + NAME tcp-star-server-mtp + SOURCE_FILES tcp-star-server-mtp.cc + LIBRARIES_TO_LINK + ${libpoint-to-point} + ${libapplications} + ${libinternet} + ${libmtp} + ) + + build_example( + NAME tcp-validation-mtp + SOURCE_FILES tcp-validation-mtp.cc + LIBRARIES_TO_LINK + ${libpoint-to-point} + ${libinternet} + ${libapplications} + ${libtraffic-control} + ${libnetwork} + ${libinternet-apps} + ${libmtp} + ) +endif() diff --git a/examples/mtp/dctcp-example-mtp.cc b/examples/mtp/dctcp-example-mtp.cc new file mode 100644 index 000000000..cfaf68612 --- /dev/null +++ 
b/examples/mtp/dctcp-example-mtp.cc @@ -0,0 +1,567 @@ +/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */ +/* + * Copyright (c) 2017-20 NITK Surathkal + * Copyright (c) 2020 Tom Henderson (better alignment with experiment) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation; + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Authors: Shravya K.S. + * Apoorva Bhargava + * Shikha Bakshi + * Mohit P. Tahiliani + * Tom Henderson + */ + +// The network topology used in this example is based on Fig. 17 described in +// Mohammad Alizadeh, Albert Greenberg, David A. Maltz, Jitendra Padhye, +// Parveen Patel, Balaji Prabhakar, Sudipta Sengupta, and Murari Sridharan. +// "Data Center TCP (DCTCP)." In ACM SIGCOMM Computer Communication Review, +// Vol. 40, No. 4, pp. 63-74. ACM, 2010. + +// The topology is roughly as follows +// +// S1 S3 +// | | (1 Gbps) +// T1 ------- T2 -- R1 +// | | (1 Gbps) +// S2 R2 +// +// The link between switch T1 and T2 is 10 Gbps. All other +// links are 1 Gbps. In the SIGCOMM paper, there is a Scorpion switch +// between T1 and T2, but it doesn't contribute another bottleneck. 
+// +// S1 and S3 each have 10 senders sending to receiver R1 (20 total) +// S2 (20 senders) sends traffic to R2 (20 receivers) +// +// This sets up two bottlenecks: 1) T1 -> T2 interface (30 senders +// using the 10 Gbps link) and 2) T2 -> R1 (20 senders using 1 Gbps link) +// +// RED queues configured for ECN marking are used at the bottlenecks. +// +// Figure 17 published results are that each sender in S1 gets 46 Mbps +// and each in S3 gets 54 Mbps, while each S2 sender gets 475 Mbps, and +// that these are within 10% of their fair-share throughputs (Jain index +// of 0.99). +// +// This program runs the program by default for five seconds. The first +// second is devoted to flow startup (all 40 TCP flows are stagger started +// during this period). There is a three second convergence time where +// no measurement data is taken, and then there is a one second measurement +// interval to gather raw throughput for each flow. These time intervals +// can be changed at the command line. +// +// The program outputs six files. The first three: +// * dctcp-example-s1-r1-throughput.dat +// * dctcp-example-s2-r2-throughput.dat +// * dctcp-example-s3-r1-throughput.dat +// provide per-flow throughputs (in Mb/s) for each of the forty flows, summed +// over the measurement window. The fourth file, +// * dctcp-example-fairness.dat +// provides average throughputs for the three flow paths, and computes +// Jain's fairness index for each flow group (i.e. across each group of +// 10, 20, and 10 flows). It also sums the throughputs across each bottleneck. +// The fifth and sixth: +// * dctcp-example-t1-length.dat +// * dctcp-example-t2-length.dat +// report on the bottleneck queue length (in packets and microseconds +// of delay) at 10 ms intervals during the measurement window. +// +// By default, the throughput averages are 23 Mbps for S1 senders, 471 Mbps +// for S2 senders, and 74 Mbps for S3 senders, and the Jain index is greater +// than 0.99 for each group of flows. 
The average queue delay is about 1ms +// for the T2->R2 bottleneck, and about 200us for the T1->T2 bottleneck. +// +// The RED parameters (min_th and max_th) are set to the same values as +// reported in the paper, but we observed that throughput distributions +// and queue delays are very sensitive to these parameters, as was also +// observed in the paper; it is likely that the paper's throughput results +// could be achieved by further tuning of the RED parameters. However, +// the default results show that DCTCP is able to achieve high link +// utilization and low queueing delay and fairness across competing flows +// sharing the same path. + +#include +#include + +#include "ns3/core-module.h" +#include "ns3/network-module.h" +#include "ns3/internet-module.h" +#include "ns3/point-to-point-module.h" +#include "ns3/applications-module.h" +#include "ns3/traffic-control-module.h" +#include "ns3/mtp-interface.h" + +using namespace ns3; + +std::stringstream filePlotQueue1; +std::stringstream filePlotQueue2; +std::ofstream rxS1R1Throughput; +std::ofstream rxS2R2Throughput; +std::ofstream rxS3R1Throughput; +std::ofstream fairnessIndex; +std::ofstream t1QueueLength; +std::ofstream t2QueueLength; +std::vector rxS1R1Bytes; +std::vector rxS2R2Bytes; +std::vector rxS3R1Bytes; + +void +PrintProgress (Time interval) +{ + std::cout << "Progress to " << std::fixed << std::setprecision (1) << Simulator::Now ().GetSeconds () << " seconds simulation time" << std::endl; + Simulator::Schedule (interval, &PrintProgress, interval); +} + +void +TraceS1R1Sink (std::size_t index, Ptr p, const Address& a) +{ + rxS1R1Bytes[index] += p->GetSize (); +} + +void +TraceS2R2Sink (std::size_t index, Ptr p, const Address& a) +{ + rxS2R2Bytes[index] += p->GetSize (); +} + +void +TraceS3R1Sink (std::size_t index, Ptr p, const Address& a) +{ + rxS3R1Bytes[index] += p->GetSize (); +} + +void +InitializeCounters (void) +{ + for (std::size_t i = 0; i < 10; i++) + { + rxS1R1Bytes[i] = 0; + } + for 
(std::size_t i = 0; i < 20; i++) + { + rxS2R2Bytes[i] = 0; + } + for (std::size_t i = 0; i < 10; i++) + { + rxS3R1Bytes[i] = 0; + } +} + +void +PrintThroughput (Time measurementWindow) +{ + for (std::size_t i = 0; i < 10; i++) + { + rxS1R1Throughput << measurementWindow.GetSeconds () << "s " << i << " " << (rxS1R1Bytes[i] * 8) / (measurementWindow.GetSeconds ()) / 1e6 << std::endl; + } + for (std::size_t i = 0; i < 20; i++) + { + rxS2R2Throughput << Simulator::Now ().GetSeconds () << "s " << i << " " << (rxS2R2Bytes[i] * 8) / (measurementWindow.GetSeconds ()) / 1e6 << std::endl; + } + for (std::size_t i = 0; i < 10; i++) + { + rxS3R1Throughput << Simulator::Now ().GetSeconds () << "s " << i << " " << (rxS3R1Bytes[i] * 8) / (measurementWindow.GetSeconds ()) / 1e6 << std::endl; + } +} + +// Jain's fairness index: https://en.wikipedia.org/wiki/Fairness_measure +void +PrintFairness (Time measurementWindow) +{ + double average = 0; + uint64_t sumSquares = 0; + uint64_t sum = 0; + double fairness = 0; + for (std::size_t i = 0; i < 10; i++) + { + sum += rxS1R1Bytes[i]; + sumSquares += (rxS1R1Bytes[i] * rxS1R1Bytes[i]); + } + average = ((sum / 10) * 8 / measurementWindow.GetSeconds ()) / 1e6; + fairness = static_cast (sum * sum) / (10 * sumSquares); + fairnessIndex << "Average throughput for S1-R1 flows: " + << std::fixed << std::setprecision (2) << average << " Mbps; fairness: " + << std::fixed << std::setprecision (3) << fairness << std::endl; + average = 0; + sumSquares = 0; + sum = 0; + fairness = 0; + for (std::size_t i = 0; i < 20; i++) + { + sum += rxS2R2Bytes[i]; + sumSquares += (rxS2R2Bytes[i] * rxS2R2Bytes[i]); + } + average = ((sum / 20) * 8 / measurementWindow.GetSeconds ()) / 1e6; + fairness = static_cast (sum * sum) / (20 * sumSquares); + fairnessIndex << "Average throughput for S2-R2 flows: " + << std::fixed << std::setprecision (2) << average << " Mbps; fairness: " + << std::fixed << std::setprecision (3) << fairness << std::endl; + average = 0; + 
sumSquares = 0; + sum = 0; + fairness = 0; + for (std::size_t i = 0; i < 10; i++) + { + sum += rxS3R1Bytes[i]; + sumSquares += (rxS3R1Bytes[i] * rxS3R1Bytes[i]); + } + average = ((sum / 10) * 8 / measurementWindow.GetSeconds ()) / 1e6; + fairness = static_cast (sum * sum) / (10 * sumSquares); + fairnessIndex << "Average throughput for S3-R1 flows: " + << std::fixed << std::setprecision (2) << average << " Mbps; fairness: " + << std::fixed << std::setprecision (3) << fairness << std::endl; + sum = 0; + for (std::size_t i = 0; i < 10; i++) + { + sum += rxS1R1Bytes[i]; + } + for (std::size_t i = 0; i < 20; i++) + { + sum += rxS2R2Bytes[i]; + } + fairnessIndex << "Aggregate user-level throughput for flows through T1: " << static_cast (sum * 8) / 1e9 << " Gbps" << std::endl; + sum = 0; + for (std::size_t i = 0; i < 10; i++) + { + sum += rxS3R1Bytes[i]; + } + for (std::size_t i = 0; i < 10; i++) + { + sum += rxS1R1Bytes[i]; + } + fairnessIndex << "Aggregate user-level throughput for flows to R1: " << static_cast (sum * 8) / 1e9 << " Gbps" << std::endl; +} + +void +CheckT1QueueSize (Ptr queue) +{ + // 1500 byte packets + uint32_t qSize = queue->GetNPackets (); + Time backlog = Seconds (static_cast (qSize * 1500 * 8) / 1e10); // 10 Gb/s + // report size in units of packets and ms + t1QueueLength << std::fixed << std::setprecision (2) << Simulator::Now ().GetSeconds () << " " << qSize << " " << backlog.GetMicroSeconds () << std::endl; + // check queue size every 1/100 of a second + Simulator::Schedule (MilliSeconds (10), &CheckT1QueueSize, queue); +} + +void +CheckT2QueueSize (Ptr queue) +{ + uint32_t qSize = queue->GetNPackets (); + Time backlog = Seconds (static_cast (qSize * 1500 * 8) / 1e9); // 1 Gb/s + // report size in units of packets and ms + t2QueueLength << std::fixed << std::setprecision (2) << Simulator::Now ().GetSeconds () << " " << qSize << " " << backlog.GetMicroSeconds () << std::endl; + // check queue size every 1/100 of a second + Simulator::Schedule 
(MilliSeconds (10), &CheckT2QueueSize, queue); +} + +int main (int argc, char *argv[]) +{ + LogComponentEnable ("LogicalProcess", LOG_LEVEL_INFO); + LogComponentEnable ("MultithreadedSimulatorImpl", LOG_LEVEL_INFO); + MtpInterface::Enable (4); + + std::string outputFilePath = "."; + std::string tcpTypeId = "TcpDctcp"; + Time flowStartupWindow = Seconds (1); + Time convergenceTime = Seconds (3); + Time measurementWindow = Seconds (1); + bool enableSwitchEcn = true; + Time progressInterval = MilliSeconds (100); + + CommandLine cmd (__FILE__); + cmd.AddValue ("tcpTypeId", "ns-3 TCP TypeId", tcpTypeId); + cmd.AddValue ("flowStartupWindow", "startup time window (TCP staggered starts)", flowStartupWindow); + cmd.AddValue ("convergenceTime", "convergence time", convergenceTime); + cmd.AddValue ("measurementWindow", "measurement window", measurementWindow); + cmd.AddValue ("enableSwitchEcn", "enable ECN at switches", enableSwitchEcn); + cmd.Parse (argc, argv); + + Config::SetDefault ("ns3::TcpL4Protocol::SocketType", StringValue ("ns3::" + tcpTypeId)); + + Time startTime = Seconds (0); + Time stopTime = flowStartupWindow + convergenceTime + measurementWindow; + + Time clientStartTime = startTime; + + rxS1R1Bytes.reserve (10); + rxS2R2Bytes.reserve (20); + rxS3R1Bytes.reserve (10); + + NodeContainer S1, S2, S3, R2; + Ptr T1 = CreateObject (); + Ptr T2 = CreateObject (); + Ptr R1 = CreateObject (); + S1.Create (10); + S2.Create (20); + S3.Create (10); + R2.Create (20); + + Config::SetDefault ("ns3::TcpSocket::SegmentSize", UintegerValue (1448)); + Config::SetDefault ("ns3::TcpSocket::DelAckCount", UintegerValue (2)); + GlobalValue::Bind ("ChecksumEnabled", BooleanValue (false)); + + // Set default parameters for RED queue disc + Config::SetDefault ("ns3::RedQueueDisc::UseEcn", BooleanValue (enableSwitchEcn)); + // ARED may be used but the queueing delays will increase; it is disabled + // here because the SIGCOMM paper did not mention it + // Config::SetDefault 
("ns3::RedQueueDisc::ARED", BooleanValue (true)); + // Config::SetDefault ("ns3::RedQueueDisc::Gentle", BooleanValue (true)); + Config::SetDefault ("ns3::RedQueueDisc::UseHardDrop", BooleanValue (false)); + Config::SetDefault ("ns3::RedQueueDisc::MeanPktSize", UintegerValue (1500)); + // Triumph and Scorpion switches used in DCTCP Paper have 4 MB of buffer + // If every packet is 1500 bytes, 2666 packets can be stored in 4 MB + Config::SetDefault ("ns3::RedQueueDisc::MaxSize", QueueSizeValue (QueueSize ("2666p"))); + // DCTCP tracks instantaneous queue length only; so set QW = 1 + Config::SetDefault ("ns3::RedQueueDisc::QW", DoubleValue (1)); + Config::SetDefault ("ns3::RedQueueDisc::MinTh", DoubleValue (20)); + Config::SetDefault ("ns3::RedQueueDisc::MaxTh", DoubleValue (60)); + + PointToPointHelper pointToPointSR; + pointToPointSR.SetDeviceAttribute ("DataRate", StringValue ("1Gbps")); + pointToPointSR.SetChannelAttribute ("Delay", StringValue ("10us")); + + PointToPointHelper pointToPointT; + pointToPointT.SetDeviceAttribute ("DataRate", StringValue ("10Gbps")); + pointToPointT.SetChannelAttribute ("Delay", StringValue ("10us")); + + + // Create a total of 62 links. 
+ std::vector S1T1; + S1T1.reserve (10); + std::vector S2T1; + S2T1.reserve (20); + std::vector S3T2; + S3T2.reserve (10); + std::vector R2T2; + R2T2.reserve (20); + NetDeviceContainer T1T2 = pointToPointT.Install (T1, T2); + NetDeviceContainer R1T2 = pointToPointSR.Install (R1, T2); + + for (std::size_t i = 0; i < 10; i++) + { + Ptr n = S1.Get (i); + S1T1.push_back (pointToPointSR.Install (n, T1)); + } + for (std::size_t i = 0; i < 20; i++) + { + Ptr n = S2.Get (i); + S2T1.push_back (pointToPointSR.Install (n, T1)); + } + for (std::size_t i = 0; i < 10; i++) + { + Ptr n = S3.Get (i); + S3T2.push_back (pointToPointSR.Install (n, T2)); + } + for (std::size_t i = 0; i < 20; i++) + { + Ptr n = R2.Get (i); + R2T2.push_back (pointToPointSR.Install (n, T2)); + } + + InternetStackHelper stack; + stack.InstallAll (); + + TrafficControlHelper tchRed10; + // MinTh = 50, MaxTh = 150 recommended in ACM SIGCOMM 2010 DCTCP Paper + // This yields a target (MinTh) queue depth of 60us at 10 Gb/s + tchRed10.SetRootQueueDisc ("ns3::RedQueueDisc", + "LinkBandwidth", StringValue ("10Gbps"), + "LinkDelay", StringValue ("10us"), + "MinTh", DoubleValue (50), + "MaxTh", DoubleValue (150)); + QueueDiscContainer queueDiscs1 = tchRed10.Install (T1T2); + + TrafficControlHelper tchRed1; + // MinTh = 20, MaxTh = 60 recommended in ACM SIGCOMM 2010 DCTCP Paper + // This yields a target queue depth of 250us at 1 Gb/s + tchRed1.SetRootQueueDisc ("ns3::RedQueueDisc", + "LinkBandwidth", StringValue ("1Gbps"), + "LinkDelay", StringValue ("10us"), + "MinTh", DoubleValue (20), + "MaxTh", DoubleValue (60)); + QueueDiscContainer queueDiscs2 = tchRed1.Install (R1T2.Get (1)); + for (std::size_t i = 0; i < 10; i++) + { + tchRed1.Install (S1T1[i].Get (1)); + } + for (std::size_t i = 0; i < 20; i++) + { + tchRed1.Install (S2T1[i].Get (1)); + } + for (std::size_t i = 0; i < 10; i++) + { + tchRed1.Install (S3T2[i].Get (1)); + } + for (std::size_t i = 0; i < 20; i++) + { + tchRed1.Install (R2T2[i].Get (1)); + } + 
+ Ipv4AddressHelper address; + std::vector ipS1T1; + ipS1T1.reserve (10); + std::vector ipS2T1; + ipS2T1.reserve (20); + std::vector ipS3T2; + ipS3T2.reserve (10); + std::vector ipR2T2; + ipR2T2.reserve (20); + address.SetBase ("172.16.1.0", "255.255.255.0"); + Ipv4InterfaceContainer ipT1T2 = address.Assign (T1T2); + address.SetBase ("192.168.0.0", "255.255.255.0"); + Ipv4InterfaceContainer ipR1T2 = address.Assign (R1T2); + address.SetBase ("10.1.1.0", "255.255.255.0"); + for (std::size_t i = 0; i < 10; i++) + { + ipS1T1.push_back (address.Assign (S1T1[i])); + address.NewNetwork (); + } + address.SetBase ("10.2.1.0", "255.255.255.0"); + for (std::size_t i = 0; i < 20; i++) + { + ipS2T1.push_back (address.Assign (S2T1[i])); + address.NewNetwork (); + } + address.SetBase ("10.3.1.0", "255.255.255.0"); + for (std::size_t i = 0; i < 10; i++) + { + ipS3T2.push_back (address.Assign (S3T2[i])); + address.NewNetwork (); + } + address.SetBase ("10.4.1.0", "255.255.255.0"); + for (std::size_t i = 0; i < 20; i++) + { + ipR2T2.push_back (address.Assign (R2T2[i])); + address.NewNetwork (); + } + + Ipv4GlobalRoutingHelper::PopulateRoutingTables (); + + // Each sender in S2 sends to a receiver in R2 + std::vector > r2Sinks; + r2Sinks.reserve (20); + for (std::size_t i = 0; i < 20; i++) + { + uint16_t port = 50000 + i; + Address sinkLocalAddress (InetSocketAddress (Ipv4Address::GetAny (), port)); + PacketSinkHelper sinkHelper ("ns3::TcpSocketFactory", sinkLocalAddress); + ApplicationContainer sinkApp = sinkHelper.Install (R2.Get (i)); + Ptr packetSink = sinkApp.Get (0)->GetObject (); + r2Sinks.push_back (packetSink); + sinkApp.Start (startTime); + sinkApp.Stop (stopTime); + + OnOffHelper clientHelper1 ("ns3::TcpSocketFactory", Address ()); + clientHelper1.SetAttribute ("OnTime", StringValue ("ns3::ConstantRandomVariable[Constant=1]")); + clientHelper1.SetAttribute ("OffTime", StringValue ("ns3::ConstantRandomVariable[Constant=0]")); + clientHelper1.SetAttribute ("DataRate", 
DataRateValue (DataRate ("1Gbps"))); + clientHelper1.SetAttribute ("PacketSize", UintegerValue (1000)); + + ApplicationContainer clientApps1; + AddressValue remoteAddress (InetSocketAddress (ipR2T2[i].GetAddress (0), port)); + clientHelper1.SetAttribute ("Remote", remoteAddress); + clientApps1.Add (clientHelper1.Install (S2.Get (i))); + clientApps1.Start (i * flowStartupWindow / 20 + clientStartTime + MilliSeconds (i * 5)); + clientApps1.Stop (stopTime); + } + + // Each sender in S1 and S3 sends to R1 + std::vector > s1r1Sinks; + std::vector > s3r1Sinks; + s1r1Sinks.reserve (10); + s3r1Sinks.reserve (10); + for (std::size_t i = 0; i < 20; i++) + { + uint16_t port = 50000 + i; + Address sinkLocalAddress (InetSocketAddress (Ipv4Address::GetAny (), port)); + PacketSinkHelper sinkHelper ("ns3::TcpSocketFactory", sinkLocalAddress); + ApplicationContainer sinkApp = sinkHelper.Install (R1); + Ptr packetSink = sinkApp.Get (0)->GetObject (); + if (i < 10) + { + s1r1Sinks.push_back (packetSink); + } + else + { + s3r1Sinks.push_back (packetSink); + } + sinkApp.Start (startTime); + sinkApp.Stop (stopTime); + + OnOffHelper clientHelper1 ("ns3::TcpSocketFactory", Address ()); + clientHelper1.SetAttribute ("OnTime", StringValue ("ns3::ConstantRandomVariable[Constant=1]")); + clientHelper1.SetAttribute ("OffTime", StringValue ("ns3::ConstantRandomVariable[Constant=0]")); + clientHelper1.SetAttribute ("DataRate", DataRateValue (DataRate ("1Gbps"))); + clientHelper1.SetAttribute ("PacketSize", UintegerValue (1000)); + + ApplicationContainer clientApps1; + AddressValue remoteAddress (InetSocketAddress (ipR1T2.GetAddress (0), port)); + clientHelper1.SetAttribute ("Remote", remoteAddress); + if (i < 10) + { + clientApps1.Add (clientHelper1.Install (S1.Get (i))); + clientApps1.Start (i * flowStartupWindow / 10 + clientStartTime + MilliSeconds (i * 5)); + } + else + { + clientApps1.Add (clientHelper1.Install (S3.Get (i - 10))); + clientApps1.Start ((i - 10) * flowStartupWindow / 10 + 
clientStartTime + MilliSeconds (i * 5)); + } + + clientApps1.Stop (stopTime); + } + + rxS1R1Throughput.open ("dctcp-example-s1-r1-throughput.dat", std::ios::out); + rxS1R1Throughput << "#Time(s) flow thruput(Mb/s)" << std::endl; + rxS2R2Throughput.open ("dctcp-example-s2-r2-throughput.dat", std::ios::out); + rxS2R2Throughput << "#Time(s) flow thruput(Mb/s)" << std::endl; + rxS3R1Throughput.open ("dctcp-example-s3-r1-throughput.dat", std::ios::out); + rxS3R1Throughput << "#Time(s) flow thruput(Mb/s)" << std::endl; + fairnessIndex.open ("dctcp-example-fairness.dat", std::ios::out); + t1QueueLength.open ("dctcp-example-t1-length.dat", std::ios::out); + t1QueueLength << "#Time(s) qlen(pkts) qlen(us)" << std::endl; + t2QueueLength.open ("dctcp-example-t2-length.dat", std::ios::out); + t2QueueLength << "#Time(s) qlen(pkts) qlen(us)" << std::endl; + for (std::size_t i = 0; i < 10; i++) + { + s1r1Sinks[i]->TraceConnectWithoutContext ("Rx", MakeBoundCallback (&TraceS1R1Sink, i)); + } + for (std::size_t i = 0; i < 20; i++) + { + r2Sinks[i]->TraceConnectWithoutContext ("Rx", MakeBoundCallback (&TraceS2R2Sink, i)); + } + for (std::size_t i = 0; i < 10; i++) + { + s3r1Sinks[i]->TraceConnectWithoutContext ("Rx", MakeBoundCallback (&TraceS3R1Sink, i)); + } + Simulator::Schedule (flowStartupWindow + convergenceTime, &InitializeCounters); + Simulator::Schedule (flowStartupWindow + convergenceTime + measurementWindow, &PrintThroughput, measurementWindow); + Simulator::Schedule (flowStartupWindow + convergenceTime + measurementWindow, &PrintFairness, measurementWindow); + Simulator::Schedule (progressInterval, &PrintProgress, progressInterval); + Simulator::Schedule (flowStartupWindow + convergenceTime, &CheckT1QueueSize, queueDiscs1.Get (0)); + Simulator::Schedule (flowStartupWindow + convergenceTime, &CheckT2QueueSize, queueDiscs2.Get (0)); + Simulator::Stop (stopTime + TimeStep (1)); + + Simulator::Run (); + + rxS1R1Throughput.close (); + rxS2R2Throughput.close (); + 
rxS3R1Throughput.close (); + fairnessIndex.close (); + t1QueueLength.close (); + t2QueueLength.close (); + Simulator::Destroy (); + return 0; +} diff --git a/examples/mtp/dynamic-global-routing-mtp.cc b/examples/mtp/dynamic-global-routing-mtp.cc new file mode 100644 index 000000000..b4df0bfa4 --- /dev/null +++ b/examples/mtp/dynamic-global-routing-mtp.cc @@ -0,0 +1,231 @@ +/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation; + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Contributed by: Luis Cortes (cortes@gatech.edu) + */ + + +// This script exercises global routing code in a mixed point-to-point +// and csma/cd environment. We bring up and down interfaces and observe +// the effect on global routing. We explicitly enable the attribute +// to respond to interface events, so that routes are recomputed +// automatically. +// +// Network topology +// +// n0 +// \ p-p +// \ (shared csma/cd) +// n2 -------------------------n3 +// / | | +// / p-p n4 n5 ---------- n6 +// n1 p-p +// | | +// ---------------------------------------- +// p-p +// +// - at time 1 CBR/UDP flow from n1 to n6's IP address on the n5/n6 link +// - at time 10, start similar flow from n1 to n6's address on the n1/n6 link +// +// Order of events +// At pre-simulation time, configure global routes. 
Shortest path from +// n1 to n6 is via the direct point-to-point link +// At time 1s, start CBR traffic flow from n1 to n6 +// At time 2s, set the n1 point-to-point interface to down. Packets +// will be diverted to the n1-n2-n5-n6 path +// At time 4s, re-enable the n1/n6 interface to up. n1-n6 route restored. +// At time 6s, set the n6-n1 point-to-point Ipv4 interface to down (note, this +// keeps the point-to-point link "up" from n1's perspective). Traffic will +// flow through the path n1-n2-n5-n6 +// At time 8s, bring the interface back up. Path n1-n6 is restored +// At time 10s, stop the first flow. +// At time 11s, start a new flow, but to n6's other IP address (the one +// on the n1/n6 p2p link) +// At time 12s, bring the n1 interface down between n1 and n6. Packets +// will be diverted to the alternate path +// At time 14s, re-enable the n1/n6 interface to up. This will change +// routing back to n1-n6 since the interface up notification will cause +// a new local interface route, at higher priority than global routing +// At time 16s, stop the second flow. + +// - Tracing of queues and packet receptions to file "dynamic-global-routing.tr" + +#include +#include +#include +#include + +#include "ns3/core-module.h" +#include "ns3/network-module.h" +#include "ns3/csma-module.h" +#include "ns3/internet-module.h" +#include "ns3/point-to-point-module.h" +#include "ns3/applications-module.h" +#include "ns3/ipv4-global-routing-helper.h" +#include "ns3/mtp-module.h" + +using namespace ns3; + +NS_LOG_COMPONENT_DEFINE ("DynamicGlobalRoutingExample"); + +int +main (int argc, char *argv[]) +{ + LogComponentEnable ("LogicalProcess", LOG_LEVEL_INFO); + LogComponentEnable ("MultithreadedSimulatorImpl", LOG_LEVEL_INFO); + MtpInterface::Enable (); + + // The below value configures the default behavior of global routing. + // By default, it is disabled. 
To respond to interface events, set to true + Config::SetDefault ("ns3::Ipv4GlobalRouting::RespondToInterfaceEvents", BooleanValue (true)); + + // Allow the user to override any of the defaults and the above + // Bind ()s at run-time, via command-line arguments + CommandLine cmd (__FILE__); + cmd.Parse (argc, argv); + + NS_LOG_INFO ("Create nodes."); + NodeContainer c; + c.Create (7); + NodeContainer n0n2 = NodeContainer (c.Get (0), c.Get (2)); + NodeContainer n1n2 = NodeContainer (c.Get (1), c.Get (2)); + NodeContainer n5n6 = NodeContainer (c.Get (5), c.Get (6)); + NodeContainer n1n6 = NodeContainer (c.Get (1), c.Get (6)); + NodeContainer n2345 = NodeContainer (c.Get (2), c.Get (3), c.Get (4), c.Get (5)); + + InternetStackHelper internet; + internet.Install (c); + + // We create the channels first without any IP addressing information + NS_LOG_INFO ("Create channels."); + PointToPointHelper p2p; + p2p.SetDeviceAttribute ("DataRate", StringValue ("5Mbps")); + p2p.SetChannelAttribute ("Delay", StringValue ("2ms")); + NetDeviceContainer d0d2 = p2p.Install (n0n2); + NetDeviceContainer d1d6 = p2p.Install (n1n6); + + NetDeviceContainer d1d2 = p2p.Install (n1n2); + + p2p.SetDeviceAttribute ("DataRate", StringValue ("1500kbps")); + p2p.SetChannelAttribute ("Delay", StringValue ("10ms")); + NetDeviceContainer d5d6 = p2p.Install (n5n6); + + // We create the channels first without any IP addressing information + CsmaHelper csma; + csma.SetChannelAttribute ("DataRate", StringValue ("5Mbps")); + csma.SetChannelAttribute ("Delay", StringValue ("2ms")); + NetDeviceContainer d2345 = csma.Install (n2345); + + // Later, we add IP addresses. 
+ NS_LOG_INFO ("Assign IP Addresses."); + Ipv4AddressHelper ipv4; + ipv4.SetBase ("10.1.1.0", "255.255.255.0"); + ipv4.Assign (d0d2); + + ipv4.SetBase ("10.1.2.0", "255.255.255.0"); + ipv4.Assign (d1d2); + + ipv4.SetBase ("10.1.3.0", "255.255.255.0"); + Ipv4InterfaceContainer i5i6 = ipv4.Assign (d5d6); + + ipv4.SetBase ("10.250.1.0", "255.255.255.0"); + ipv4.Assign (d2345); + + ipv4.SetBase ("172.16.1.0", "255.255.255.0"); + Ipv4InterfaceContainer i1i6 = ipv4.Assign (d1d6); + + // Create router nodes, initialize routing database and set up the routing + // tables in the nodes. + Ipv4GlobalRoutingHelper::PopulateRoutingTables (); + + // Create the OnOff application to send UDP datagrams of size + // 210 bytes at a rate of 448 Kb/s + NS_LOG_INFO ("Create Applications."); + uint16_t port = 9; // Discard port (RFC 863) + OnOffHelper onoff ("ns3::UdpSocketFactory", + InetSocketAddress (i5i6.GetAddress (1), port)); + onoff.SetConstantRate (DataRate ("2kbps")); + onoff.SetAttribute ("PacketSize", UintegerValue (50)); + + ApplicationContainer apps = onoff.Install (c.Get (1)); + apps.Start (Seconds (1.0)); + apps.Stop (Seconds (10.0)); + + // Create a second OnOff application to send UDP datagrams of size + // 210 bytes at a rate of 448 Kb/s + OnOffHelper onoff2 ("ns3::UdpSocketFactory", + InetSocketAddress (i1i6.GetAddress (1), port)); + onoff2.SetAttribute ("OnTime", StringValue ("ns3::ConstantRandomVariable[Constant=1]")); + onoff2.SetAttribute ("OffTime", StringValue ("ns3::ConstantRandomVariable[Constant=0]")); + onoff2.SetAttribute ("DataRate", StringValue ("2kbps")); + onoff2.SetAttribute ("PacketSize", UintegerValue (50)); + + ApplicationContainer apps2 = onoff2.Install (c.Get (1)); + apps2.Start (Seconds (11.0)); + apps2.Stop (Seconds (16.0)); + + // Create an optional packet sink to receive these packets + PacketSinkHelper sink ("ns3::UdpSocketFactory", + Address (InetSocketAddress (Ipv4Address::GetAny (), port))); + apps = sink.Install (c.Get (6)); + apps.Start 
(Seconds (1.0)); + apps.Stop (Seconds (10.0)); + + PacketSinkHelper sink2 ("ns3::UdpSocketFactory", + Address (InetSocketAddress (Ipv4Address::GetAny (), port))); + apps2 = sink2.Install (c.Get (6)); + apps2.Start (Seconds (11.0)); + apps2.Stop (Seconds (16.0)); + + + AsciiTraceHelper ascii; + Ptr stream = ascii.CreateFileStream ("dynamic-global-routing.tr"); + p2p.EnableAsciiAll (stream); + csma.EnableAsciiAll (stream); + internet.EnableAsciiIpv4All (stream); + + p2p.EnablePcapAll ("dynamic-global-routing"); + csma.EnablePcapAll ("dynamic-global-routing", false); + + Ptr n1 = c.Get (1); + Ptr ipv41 = n1->GetObject (); + // The first ifIndex is 0 for loopback, then the first p2p is numbered 1, + // then the next p2p is numbered 2 + uint32_t ipv4ifIndex1 = 2; + + Simulator::Schedule (Seconds (2),&Ipv4::SetDown,ipv41, ipv4ifIndex1); + Simulator::Schedule (Seconds (4),&Ipv4::SetUp,ipv41, ipv4ifIndex1); + + Ptr n6 = c.Get (6); + Ptr ipv46 = n6->GetObject (); + // The first ifIndex is 0 for loopback, then the first p2p is numbered 1, + // then the next p2p is numbered 2 + uint32_t ipv4ifIndex6 = 2; + Simulator::Schedule (Seconds (6),&Ipv4::SetDown,ipv46, ipv4ifIndex6); + Simulator::Schedule (Seconds (8),&Ipv4::SetUp,ipv46, ipv4ifIndex6); + + Simulator::Schedule (Seconds (12),&Ipv4::SetDown,ipv41, ipv4ifIndex1); + Simulator::Schedule (Seconds (14),&Ipv4::SetUp,ipv41, ipv4ifIndex1); + + // Trace routing tables + Ipv4GlobalRoutingHelper g; + Ptr routingStream = Create ("dynamic-global-routing.routes", std::ios::out); + g.PrintRoutingTableAllAt (Seconds (12), routingStream); + + NS_LOG_INFO ("Run Simulation."); + Simulator::Run (); + Simulator::Destroy (); + NS_LOG_INFO ("Done."); +} diff --git a/examples/mtp/queue-discs-benchmark-mtp.cc b/examples/mtp/queue-discs-benchmark-mtp.cc new file mode 100644 index 000000000..2a7ad5127 --- /dev/null +++ b/examples/mtp/queue-discs-benchmark-mtp.cc @@ -0,0 +1,315 @@ +/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */ 
+/* + * Copyright (c) 2015 Universita' degli Studi di Napoli Federico II + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation; + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Authors: Pasquale Imputato + * Stefano Avallone + */ + +// This example serves as a benchmark for all the queue discs (with BQL enabled or not) +// +// Network topology +// +// 192.168.1.0 192.168.2.0 +// n1 ------------------------------------ n2 ----------------------------------- n3 +// point-to-point (access link) point-to-point (bottleneck link) +// 100 Mbps, 0.1 ms bandwidth [10 Mbps], delay [5 ms] +// qdiscs PfifoFast with capacity qdiscs queueDiscType in {PfifoFast, ARED, CoDel, FqCoDel, PIE} [PfifoFast] +// of 1000 packets with capacity of queueDiscSize packets [1000] +// netdevices queues with size of 100 packets netdevices queues with size of netdevicesQueueSize packets [100] +// without BQL bql BQL [false] +// *** fixed configuration *** +// +// Two TCP flows are generated: one from n1 to n3 and the other from n3 to n1. +// Additionally, n1 pings n3, so that the RTT can be measured. 
+// +// The output will consist of a number of ping Rtt such as: +// +// /NodeList/0/ApplicationList/2/$ns3::V4Ping/Rtt=111 ms +// /NodeList/0/ApplicationList/2/$ns3::V4Ping/Rtt=111 ms +// /NodeList/0/ApplicationList/2/$ns3::V4Ping/Rtt=110 ms +// /NodeList/0/ApplicationList/2/$ns3::V4Ping/Rtt=111 ms +// /NodeList/0/ApplicationList/2/$ns3::V4Ping/Rtt=111 ms +// /NodeList/0/ApplicationList/2/$ns3::V4Ping/Rtt=112 ms +// /NodeList/0/ApplicationList/2/$ns3::V4Ping/Rtt=111 ms +// +// The files output will consist of a trace file with bytes in queue and of a trace file for limits +// (when BQL is enabled) both for bottleneck NetDevice on n2, two files with upload and download +// goodput for flows configuration and a file with flow monitor stats. +// +// If you use an AQM as queue disc on the bottleneck netdevices, you can observe that the ping Rtt +// decrease. A further decrease can be observed when you enable BQL. + +#include "ns3/core-module.h" +#include "ns3/network-module.h" +#include "ns3/internet-module.h" +#include "ns3/point-to-point-module.h" +#include "ns3/applications-module.h" +#include "ns3/internet-apps-module.h" +#include "ns3/traffic-control-module.h" +#include "ns3/flow-monitor-module.h" +#include "ns3/mtp-interface.h" + +using namespace ns3; + +NS_LOG_COMPONENT_DEFINE ("BenchmarkQueueDiscs"); + +void +LimitsTrace (Ptr stream, uint32_t oldVal, uint32_t newVal) +{ + *stream->GetStream () << Simulator::Now ().GetSeconds () << " " << newVal << std::endl; +} + +void +BytesInQueueTrace (Ptr stream, uint32_t oldVal, uint32_t newVal) +{ + *stream->GetStream () << Simulator::Now ().GetSeconds () << " " << newVal << std::endl; +} + +static void +GoodputSampling (std::string fileName, ApplicationContainer app, Ptr stream, float period) +{ + Simulator::Schedule (Seconds (period), &GoodputSampling, fileName, app, stream, period); + double goodput; + uint64_t totalPackets = DynamicCast (app.Get (0))->GetTotalRx (); + goodput = totalPackets * 8 / (Simulator::Now 
().GetSeconds () * 1024); // Kbit/s + *stream->GetStream () << Simulator::Now ().GetSeconds () << " " << goodput << std::endl; +} + +static void PingRtt (std::string context, Time rtt) +{ + std::cout << context << "=" << rtt.GetMilliSeconds () << " ms" << std::endl; +} + +int main (int argc, char *argv[]) +{ + LogComponentEnable ("LogicalProcess", LOG_LEVEL_INFO); + LogComponentEnable ("MultithreadedSimulatorImpl", LOG_LEVEL_INFO); + MtpInterface::Enable (); + + std::string bandwidth = "10Mbps"; + std::string delay = "5ms"; + std::string queueDiscType = "PfifoFast"; + uint32_t queueDiscSize = 1000; + uint32_t netdevicesQueueSize = 50; + bool bql = false; + + std::string flowsDatarate = "20Mbps"; + uint32_t flowsPacketsSize = 1000; + + float startTime = 0.1f; // in s + float simDuration = 60; + float samplingPeriod = 1; + + CommandLine cmd (__FILE__); + cmd.AddValue ("bandwidth", "Bottleneck bandwidth", bandwidth); + cmd.AddValue ("delay", "Bottleneck delay", delay); + cmd.AddValue ("queueDiscType", "Bottleneck queue disc type in {PfifoFast, ARED, CoDel, FqCoDel, PIE, prio}", queueDiscType); + cmd.AddValue ("queueDiscSize", "Bottleneck queue disc size in packets", queueDiscSize); + cmd.AddValue ("netdevicesQueueSize", "Bottleneck netdevices queue size in packets", netdevicesQueueSize); + cmd.AddValue ("bql", "Enable byte queue limits on bottleneck netdevices", bql); + cmd.AddValue ("flowsDatarate", "Upload and download flows datarate", flowsDatarate); + cmd.AddValue ("flowsPacketsSize", "Upload and download flows packets sizes", flowsPacketsSize); + cmd.AddValue ("startTime", "Simulation start time", startTime); + cmd.AddValue ("simDuration", "Simulation duration in seconds", simDuration); + cmd.AddValue ("samplingPeriod", "Goodput sampling period in seconds", samplingPeriod); + cmd.Parse (argc, argv); + + float stopTime = startTime + simDuration; + + // Create nodes + NodeContainer n1, n2, n3; + n1.Create (1); + n2.Create (1); + n3.Create (1); + + // Create and 
configure access link and bottleneck link + PointToPointHelper accessLink; + accessLink.SetDeviceAttribute ("DataRate", StringValue ("100Mbps")); + accessLink.SetChannelAttribute ("Delay", StringValue ("0.1ms")); + accessLink.SetQueue ("ns3::DropTailQueue", "MaxSize", StringValue ("100p")); + + PointToPointHelper bottleneckLink; + bottleneckLink.SetDeviceAttribute ("DataRate", StringValue (bandwidth)); + bottleneckLink.SetChannelAttribute ("Delay", StringValue (delay)); + bottleneckLink.SetQueue ("ns3::DropTailQueue", "MaxSize", StringValue (std::to_string (netdevicesQueueSize) + "p")); + + InternetStackHelper stack; + stack.InstallAll (); + + // Access link traffic control configuration + TrafficControlHelper tchPfifoFastAccess; + tchPfifoFastAccess.SetRootQueueDisc ("ns3::PfifoFastQueueDisc", "MaxSize", StringValue ("1000p")); + + // Bottleneck link traffic control configuration + TrafficControlHelper tchBottleneck; + + if (queueDiscType.compare ("PfifoFast") == 0) + { + tchBottleneck.SetRootQueueDisc ("ns3::PfifoFastQueueDisc", "MaxSize", + QueueSizeValue (QueueSize (QueueSizeUnit::PACKETS, queueDiscSize))); + } + else if (queueDiscType.compare ("ARED") == 0) + { + tchBottleneck.SetRootQueueDisc ("ns3::RedQueueDisc"); + Config::SetDefault ("ns3::RedQueueDisc::ARED", BooleanValue (true)); + Config::SetDefault ("ns3::RedQueueDisc::MaxSize", + QueueSizeValue (QueueSize (QueueSizeUnit::PACKETS, queueDiscSize))); + } + else if (queueDiscType.compare ("CoDel") == 0) + { + tchBottleneck.SetRootQueueDisc ("ns3::CoDelQueueDisc"); + Config::SetDefault ("ns3::CoDelQueueDisc::MaxSize", + QueueSizeValue (QueueSize (QueueSizeUnit::PACKETS, queueDiscSize))); + } + else if (queueDiscType.compare ("FqCoDel") == 0) + { + tchBottleneck.SetRootQueueDisc ("ns3::FqCoDelQueueDisc"); + Config::SetDefault ("ns3::FqCoDelQueueDisc::MaxSize", + QueueSizeValue (QueueSize (QueueSizeUnit::PACKETS, queueDiscSize))); + } + else if (queueDiscType.compare ("PIE") == 0) + { + 
tchBottleneck.SetRootQueueDisc ("ns3::PieQueueDisc"); + Config::SetDefault ("ns3::PieQueueDisc::MaxSize", + QueueSizeValue (QueueSize (QueueSizeUnit::PACKETS, queueDiscSize))); + } + else if (queueDiscType.compare ("prio") == 0) + { + uint16_t handle = tchBottleneck.SetRootQueueDisc ("ns3::PrioQueueDisc", "Priomap", + StringValue ("0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1")); + TrafficControlHelper::ClassIdList cid = tchBottleneck.AddQueueDiscClasses (handle, 2, "ns3::QueueDiscClass"); + tchBottleneck.AddChildQueueDisc (handle, cid[0], "ns3::FifoQueueDisc"); + tchBottleneck.AddChildQueueDisc (handle, cid[1], "ns3::RedQueueDisc"); + } + else + { + NS_ABORT_MSG ("--queueDiscType not valid"); + } + + if (bql) + { + tchBottleneck.SetQueueLimits ("ns3::DynamicQueueLimits"); + } + + NetDeviceContainer devicesAccessLink = accessLink.Install (n1.Get (0), n2.Get (0)); + tchPfifoFastAccess.Install (devicesAccessLink); + Ipv4AddressHelper address; + address.SetBase ("192.168.0.0", "255.255.255.0"); + address.NewNetwork (); + Ipv4InterfaceContainer interfacesAccess = address.Assign (devicesAccessLink); + + NetDeviceContainer devicesBottleneckLink = bottleneckLink.Install (n2.Get (0), n3.Get (0)); + QueueDiscContainer qdiscs; + qdiscs = tchBottleneck.Install (devicesBottleneckLink); + + address.NewNetwork (); + Ipv4InterfaceContainer interfacesBottleneck = address.Assign (devicesBottleneckLink); + + Ptr interface = devicesBottleneckLink.Get (0)->GetObject (); + Ptr queueInterface = interface->GetTxQueue (0); + Ptr queueLimits = StaticCast (queueInterface->GetQueueLimits ()); + + AsciiTraceHelper ascii; + if (bql) + { + queueDiscType = queueDiscType + "-bql"; + Ptr streamLimits = ascii.CreateFileStream (queueDiscType + "-limits.txt"); + queueLimits->TraceConnectWithoutContext ("Limit",MakeBoundCallback (&LimitsTrace, streamLimits)); + } + Ptr > queue = StaticCast (devicesBottleneckLink.Get (0))->GetQueue (); + Ptr streamBytesInQueue = ascii.CreateFileStream (queueDiscType + 
"-bytesInQueue.txt"); + queue->TraceConnectWithoutContext ("BytesInQueue",MakeBoundCallback (&BytesInQueueTrace, streamBytesInQueue)); + + Ipv4InterfaceContainer n1Interface; + n1Interface.Add (interfacesAccess.Get (0)); + + Ipv4InterfaceContainer n3Interface; + n3Interface.Add (interfacesBottleneck.Get (1)); + + Ipv4GlobalRoutingHelper::PopulateRoutingTables (); + + Config::SetDefault ("ns3::TcpSocket::SegmentSize", UintegerValue (flowsPacketsSize)); + + // Flows configuration + // Bidirectional TCP streams with ping like flent tcp_bidirectional test. + uint16_t port = 7; + ApplicationContainer uploadApp, downloadApp, sourceApps; + // Configure and install upload flow + Address addUp (InetSocketAddress (Ipv4Address::GetAny (), port)); + PacketSinkHelper sinkHelperUp ("ns3::TcpSocketFactory", addUp); + sinkHelperUp.SetAttribute ("Protocol", TypeIdValue (TcpSocketFactory::GetTypeId ())); + uploadApp.Add (sinkHelperUp.Install (n3)); + + InetSocketAddress socketAddressUp = InetSocketAddress (n3Interface.GetAddress (0), port); + OnOffHelper onOffHelperUp ("ns3::TcpSocketFactory", Address ()); + onOffHelperUp.SetAttribute ("Remote", AddressValue (socketAddressUp)); + onOffHelperUp.SetAttribute ("OnTime", StringValue ("ns3::ConstantRandomVariable[Constant=1]")); + onOffHelperUp.SetAttribute ("OffTime", StringValue ("ns3::ConstantRandomVariable[Constant=0]")); + onOffHelperUp.SetAttribute ("PacketSize", UintegerValue (flowsPacketsSize)); + onOffHelperUp.SetAttribute ("DataRate", StringValue (flowsDatarate)); + sourceApps.Add (onOffHelperUp.Install (n1)); + + port = 8; + // Configure and install download flow + Address addDown (InetSocketAddress (Ipv4Address::GetAny (), port)); + PacketSinkHelper sinkHelperDown ("ns3::TcpSocketFactory", addDown); + sinkHelperDown.SetAttribute ("Protocol", TypeIdValue (TcpSocketFactory::GetTypeId ())); + downloadApp.Add (sinkHelperDown.Install (n1)); + + InetSocketAddress socketAddressDown = InetSocketAddress (n1Interface.GetAddress (0), 
port); + OnOffHelper onOffHelperDown ("ns3::TcpSocketFactory", Address ()); + onOffHelperDown.SetAttribute ("Remote", AddressValue (socketAddressDown)); + onOffHelperDown.SetAttribute ("OnTime", StringValue ("ns3::ConstantRandomVariable[Constant=1]")); + onOffHelperDown.SetAttribute ("OffTime", StringValue ("ns3::ConstantRandomVariable[Constant=0]")); + onOffHelperDown.SetAttribute ("PacketSize", UintegerValue (flowsPacketsSize)); + onOffHelperDown.SetAttribute ("DataRate", StringValue (flowsDatarate)); + sourceApps.Add (onOffHelperDown.Install (n3)); + + // Configure and install ping + V4PingHelper ping = V4PingHelper (n3Interface.GetAddress (0)); + ping.Install (n1); + + Config::Connect ("/NodeList/*/ApplicationList/*/$ns3::V4Ping/Rtt", MakeCallback (&PingRtt)); + + uploadApp.Start (Seconds (0)); + uploadApp.Stop (Seconds (stopTime)); + downloadApp.Start (Seconds (0)); + downloadApp.Stop (Seconds (stopTime)); + + sourceApps.Start (Seconds (0 + 0.1)); + sourceApps.Stop (Seconds (stopTime - 0.1)); + + Ptr uploadGoodputStream = ascii.CreateFileStream (queueDiscType + "-upGoodput.txt"); + Simulator::Schedule (Seconds (samplingPeriod), &GoodputSampling, queueDiscType + "-upGoodput.txt", uploadApp, + uploadGoodputStream, samplingPeriod); + Ptr downloadGoodputStream = ascii.CreateFileStream (queueDiscType + "-downGoodput.txt"); + Simulator::Schedule (Seconds (samplingPeriod), &GoodputSampling, queueDiscType + "-downGoodput.txt", downloadApp, + downloadGoodputStream, samplingPeriod); + + // Flow monitor + Ptr flowMonitor; + FlowMonitorHelper flowHelper; + flowMonitor = flowHelper.InstallAll(); + + Simulator::Stop (Seconds (stopTime)); + Simulator::Run (); + + flowMonitor->SerializeToXmlFile(queueDiscType + "-flowMonitor.xml", true, true); + + Simulator::Destroy (); + return 0; +} diff --git a/examples/mtp/ripng-simple-network-mtp.cc b/examples/mtp/ripng-simple-network-mtp.cc new file mode 100644 index 000000000..69c32f582 --- /dev/null +++ 
b/examples/mtp/ripng-simple-network-mtp.cc @@ -0,0 +1,274 @@ +/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */ +/* + * Copyright (c) 2014 Universita' di Firenze, Italy + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation; + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Author: Tommaso Pecorella + */ + +// Network topology +// +// SRC +// |<=== source network +// A-----B +// \ / \ all networks have cost 1, except +// \ / | for the direct link from C to D, which +// C / has cost 10 +// | / +// |/ +// D +// |<=== target network +// DST +// +// +// A, B, C and D are RIPng routers. +// A and D are configured with static addresses. +// SRC and DST will exchange packets. +// +// After about 3 seconds, the topology is built, and Echo Reply will be received. +// After 40 seconds, the link between B and D will break, causing a route failure. +// After 44 seconds from the failure, the routers will recovery from the failure. +// Split Horizoning should affect the recovery time, but it is not. See the manual +// for an explanation of this effect. +// +// If "showPings" is enabled, the user will see: +// 1) if the ping has been acknowledged +// 2) if a Destination Unreachable has been received by the sender +// 3) nothing, when the Echo Request has been received by the destination but +// the Echo Reply is unable to reach the sender. +// Examining the .pcap files with Wireshark can confirm this effect. 
+ + +#include +#include "ns3/core-module.h" +#include "ns3/internet-module.h" +#include "ns3/point-to-point-module.h" +#include "ns3/internet-apps-module.h" +#include "ns3/ipv6-static-routing-helper.h" +#include "ns3/ipv6-routing-table-entry.h" +#include "ns3/mtp-module.h" + +using namespace ns3; + +NS_LOG_COMPONENT_DEFINE ("RipNgSimpleRouting"); + +void TearDownLink (Ptr nodeA, Ptr nodeB, uint32_t interfaceA, uint32_t interfaceB) +{ + nodeA->GetObject ()->SetDown (interfaceA); + nodeB->GetObject ()->SetDown (interfaceB); +} + +int main (int argc, char **argv) +{ + LogComponentEnable ("LogicalProcess", LOG_LEVEL_INFO); + LogComponentEnable ("MultithreadedSimulatorImpl", LOG_LEVEL_INFO); + MtpInterface::Enable (); + + bool verbose = false; + bool printRoutingTables = false; + bool showPings = false; + std::string SplitHorizon ("PoisonReverse"); + + CommandLine cmd (__FILE__); + cmd.AddValue ("verbose", "turn on log components", verbose); + cmd.AddValue ("printRoutingTables", "Print routing tables at 30, 60 and 90 seconds", printRoutingTables); + cmd.AddValue ("showPings", "Show Ping6 reception", showPings); + cmd.AddValue ("splitHorizonStrategy", "Split Horizon strategy to use (NoSplitHorizon, SplitHorizon, PoisonReverse)", SplitHorizon); + cmd.Parse (argc, argv); + + if (verbose) + { + LogComponentEnable ("RipNgSimpleRouting", LOG_LEVEL_INFO); + LogComponentEnable ("RipNg", LOG_LEVEL_ALL); + LogComponentEnable ("Icmpv6L4Protocol", LOG_LEVEL_INFO); + LogComponentEnable ("Ipv6Interface", LOG_LEVEL_ALL); + LogComponentEnable ("Icmpv6L4Protocol", LOG_LEVEL_ALL); + LogComponentEnable ("NdiscCache", LOG_LEVEL_ALL); + LogComponentEnable ("Ping6Application", LOG_LEVEL_ALL); + } + + if (showPings) + { + LogComponentEnable ("Ping6Application", LOG_LEVEL_INFO); + } + + if (SplitHorizon == "NoSplitHorizon") + { + Config::SetDefault ("ns3::RipNg::SplitHorizon", EnumValue (RipNg::NO_SPLIT_HORIZON)); + } + else if (SplitHorizon == "SplitHorizon") + { + Config::SetDefault 
("ns3::RipNg::SplitHorizon", EnumValue (RipNg::SPLIT_HORIZON)); + } + else + { + Config::SetDefault ("ns3::RipNg::SplitHorizon", EnumValue (RipNg::POISON_REVERSE)); + } + + NS_LOG_INFO ("Create nodes."); + Ptr src = CreateObject (); + Names::Add ("SrcNode", src); + Ptr dst = CreateObject (); + Names::Add ("DstNode", dst); + Ptr a = CreateObject (); + Names::Add ("RouterA", a); + Ptr b = CreateObject (); + Names::Add ("RouterB", b); + Ptr c = CreateObject (); + Names::Add ("RouterC", c); + Ptr d = CreateObject (); + Names::Add ("RouterD", d); + NodeContainer net1 (src, a); + NodeContainer net2 (a, b); + NodeContainer net3 (a, c); + NodeContainer net4 (b, c); + NodeContainer net5 (c, d); + NodeContainer net6 (b, d); + NodeContainer net7 (d, dst); + NodeContainer routers (a, b, c, d); + NodeContainer nodes (src, dst); + + + NS_LOG_INFO ("Create channels."); + PointToPointHelper p2p; + p2p.SetDeviceAttribute ("DataRate", DataRateValue (5000000)); + p2p.SetChannelAttribute ("Delay", TimeValue (MilliSeconds (2))); + NetDeviceContainer ndc1 = p2p.Install (net1); + NetDeviceContainer ndc2 = p2p.Install (net2); + NetDeviceContainer ndc3 = p2p.Install (net3); + NetDeviceContainer ndc4 = p2p.Install (net4); + NetDeviceContainer ndc5 = p2p.Install (net5); + NetDeviceContainer ndc6 = p2p.Install (net6); + NetDeviceContainer ndc7 = p2p.Install (net7); + + NS_LOG_INFO ("Create IPv6 and routing"); + RipNgHelper ripNgRouting; + + // Rule of thumb: + // Interfaces are added sequentially, starting from 0 + // However, interface 0 is always the loopback... 
+ ripNgRouting.ExcludeInterface (a, 1); + ripNgRouting.ExcludeInterface (d, 3); + + ripNgRouting.SetInterfaceMetric (c, 3, 10); + ripNgRouting.SetInterfaceMetric (d, 1, 10); + + Ipv6ListRoutingHelper listRH; + listRH.Add (ripNgRouting, 0); + Ipv6StaticRoutingHelper staticRh; + listRH.Add (staticRh, 5); + + InternetStackHelper internetv6; + internetv6.SetIpv4StackInstall (false); + internetv6.SetRoutingHelper (listRH); + internetv6.Install (routers); + + InternetStackHelper internetv6Nodes; + internetv6Nodes.SetIpv4StackInstall (false); + internetv6Nodes.Install (nodes); + + // Assign addresses. + // The source and destination networks have global addresses + // The "core" network just needs link-local addresses for routing. + // We assign global addresses to the routers as well to receive + // ICMPv6 errors. + NS_LOG_INFO ("Assign IPv6 Addresses."); + Ipv6AddressHelper ipv6; + + ipv6.SetBase (Ipv6Address ("2001:1::"), Ipv6Prefix (64)); + Ipv6InterfaceContainer iic1 = ipv6.Assign (ndc1); + iic1.SetForwarding (1, true); + iic1.SetDefaultRouteInAllNodes (1); + + ipv6.SetBase (Ipv6Address ("2001:0:1::"), Ipv6Prefix (64)); + Ipv6InterfaceContainer iic2 = ipv6.Assign (ndc2); + iic2.SetForwarding (0, true); + iic2.SetForwarding (1, true); + + ipv6.SetBase (Ipv6Address ("2001:0:2::"), Ipv6Prefix (64)); + Ipv6InterfaceContainer iic3 = ipv6.Assign (ndc3); + iic3.SetForwarding (0, true); + iic3.SetForwarding (1, true); + + ipv6.SetBase (Ipv6Address ("2001:0:3::"), Ipv6Prefix (64)); + Ipv6InterfaceContainer iic4 = ipv6.Assign (ndc4); + iic4.SetForwarding (0, true); + iic4.SetForwarding (1, true); + + ipv6.SetBase (Ipv6Address ("2001:0:4::"), Ipv6Prefix (64)); + Ipv6InterfaceContainer iic5 = ipv6.Assign (ndc5); + iic5.SetForwarding (0, true); + iic5.SetForwarding (1, true); + + ipv6.SetBase (Ipv6Address ("2001:0:5::"), Ipv6Prefix (64)); + Ipv6InterfaceContainer iic6 = ipv6.Assign (ndc6); + iic6.SetForwarding (0, true); + iic6.SetForwarding (1, true); + + ipv6.SetBase 
(Ipv6Address ("2001:2::"), Ipv6Prefix (64)); + Ipv6InterfaceContainer iic7 = ipv6.Assign (ndc7); + iic7.SetForwarding (0, true); + iic7.SetDefaultRouteInAllNodes (0); + + if (printRoutingTables) + { + RipNgHelper routingHelper; + + Ptr routingStream = Create (&std::cout); + + routingHelper.PrintRoutingTableAt (Seconds (30.0), a, routingStream); + routingHelper.PrintRoutingTableAt (Seconds (30.0), b, routingStream); + routingHelper.PrintRoutingTableAt (Seconds (30.0), c, routingStream); + routingHelper.PrintRoutingTableAt (Seconds (30.0), d, routingStream); + + routingHelper.PrintRoutingTableAt (Seconds (60.0), a, routingStream); + routingHelper.PrintRoutingTableAt (Seconds (60.0), b, routingStream); + routingHelper.PrintRoutingTableAt (Seconds (60.0), c, routingStream); + routingHelper.PrintRoutingTableAt (Seconds (60.0), d, routingStream); + + routingHelper.PrintRoutingTableAt (Seconds (90.0), a, routingStream); + routingHelper.PrintRoutingTableAt (Seconds (90.0), b, routingStream); + routingHelper.PrintRoutingTableAt (Seconds (90.0), c, routingStream); + routingHelper.PrintRoutingTableAt (Seconds (90.0), d, routingStream); + } + + NS_LOG_INFO ("Create Applications."); + uint32_t packetSize = 1024; + uint32_t maxPacketCount = 100; + Time interPacketInterval = Seconds (1.0); + Ping6Helper ping6; + + ping6.SetLocal (iic1.GetAddress (0, 1)); + ping6.SetRemote (iic7.GetAddress (1, 1)); + ping6.SetAttribute ("MaxPackets", UintegerValue (maxPacketCount)); + ping6.SetAttribute ("Interval", TimeValue (interPacketInterval)); + ping6.SetAttribute ("PacketSize", UintegerValue (packetSize)); + ApplicationContainer apps = ping6.Install (src); + apps.Start (Seconds (1.0)); + apps.Stop (Seconds (110.0)); + + AsciiTraceHelper ascii; + p2p.EnableAsciiAll (ascii.CreateFileStream ("ripng-simple-routing.tr")); + p2p.EnablePcapAll ("ripng-simple-routing", true); + + Simulator::Schedule (Seconds (40), &TearDownLink, b, d, 3, 2); + + /* Now, do the actual simulation. 
*/ + NS_LOG_INFO ("Run Simulation."); + Simulator::Stop (Seconds (120)); + Simulator::Run (); + Simulator::Destroy (); + NS_LOG_INFO ("Done."); +} diff --git a/examples/mtp/simple-multicast-flooding-mtp.cc b/examples/mtp/simple-multicast-flooding-mtp.cc new file mode 100644 index 000000000..4cc8821d4 --- /dev/null +++ b/examples/mtp/simple-multicast-flooding-mtp.cc @@ -0,0 +1,211 @@ +/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */ +/* + * Copyright (c) 2013 Universita' di Firenze + * Copyright (c) 2019 Caliola Engineering, LLC : RFC 6621 multicast packet de-duplication + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation; + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Author: Tommaso Pecorella + * Modified (2019): Jared Dulmage + * Demonstrates dissemination of multicast packets across a mesh + * network to all nodes over multiple hops. 
+ */ + +#include "ns3/test.h" +#include "ns3/simulator.h" +#include "ns3/simple-channel.h" +#include "ns3/simple-net-device.h" +#include "ns3/socket.h" +#include "ns3/boolean.h" +#include "ns3/double.h" +#include "ns3/string.h" +#include "ns3/config.h" +#include "ns3/data-rate.h" +#include "ns3/uinteger.h" + +#include "ns3/names.h" +#include "ns3/log.h" +#include "ns3/node.h" +#include "ns3/inet-socket-address.h" +#include "ns3/random-variable-stream.h" + +#include "ns3/ipv4-l3-protocol.h" +#include "ns3/ipv4-static-routing.h" +#include "ns3/udp-socket-factory.h" +#include "ns3/udp-socket.h" +#include "ns3/packet-sink.h" + +#include "ns3/internet-stack-helper.h" +#include "ns3/ipv4-list-routing-helper.h" +#include "ns3/ipv4-static-routing-helper.h" +#include "ns3/ipv4-address-helper.h" +#include "ns3/simple-net-device-helper.h" +#include "ns3/packet-sink-helper.h" +#include "ns3/on-off-helper.h" +#include "ns3/trace-helper.h" + +#include "ns3/traffic-control-layer.h" +#include "ns3/mtp-module.h" + +#include +#include +#include + +using namespace ns3; + +/** + * Network topology: + * + * /---- B ----\ + * A ---- | ---- D ---- E + * \---- C ----/ + * + * This example demonstrates configuration of + * static routing to realize broadcast-like + * flooding of packets from node A + * across the illustrated topology. 
+ */ +int +main (int argc, char *argv[]) +{ + LogComponentEnable ("LogicalProcess", LOG_LEVEL_INFO); + LogComponentEnable ("MultithreadedSimulatorImpl", LOG_LEVEL_INFO); + MtpInterface::Enable (); + + // multicast target + const std::string targetAddr = "239.192.100.1"; + Config::SetDefault ("ns3::Ipv4L3Protocol::EnableDuplicatePacketDetection", BooleanValue (true)); + Config::SetDefault ("ns3::Ipv4L3Protocol::DuplicateExpire", TimeValue (Seconds (10))); + + // Create topology + + // Create nodes + auto nodes = NodeContainer (); + nodes.Create (5); + + // Name nodes + Names::Add ("A", nodes.Get (0)); + Names::Add ("B", nodes.Get (1)); + Names::Add ("C", nodes.Get (2)); + Names::Add ("D", nodes.Get (3)); + Names::Add ("E", nodes.Get (4)); + + SimpleNetDeviceHelper simplenet; + auto devices = simplenet.Install (nodes); + // name devices + Names::Add ("A/dev", devices.Get (0)); + Names::Add ("B/dev", devices.Get (1)); + Names::Add ("C/dev", devices.Get (2)); + Names::Add ("D/dev", devices.Get (3)); + Names::Add ("E/dev", devices.Get (4)); + + // setup static routes to facilitate multicast flood + Ipv4ListRoutingHelper listRouting; + Ipv4StaticRoutingHelper staticRouting; + listRouting.Add (staticRouting, 0); + + InternetStackHelper internet; + internet.SetIpv6StackInstall (false); + internet.SetIpv4ArpJitter (true); + internet.SetRoutingHelper (listRouting); + internet.Install (nodes); + + Ipv4AddressHelper ipv4address; + ipv4address.SetBase ("10.0.0.0", "255.255.255.0"); + ipv4address.Assign (devices); + + // add static routes for each node / device + for (auto diter = devices.Begin (); diter != devices.End (); ++diter) + { + Ptr node = (*diter)->GetNode (); + + if (Names::FindName (node) == "A") + { + // route for host + // Use host routing entry according to note in Ipv4StaticRouting::RouteOutput: + //// Note: Multicast routes for outbound packets are stored in the + //// normal unicast table. 
An implication of this is that it is not + //// possible to source multicast datagrams on multiple interfaces. + //// This is a well-known property of sockets implementation on + //// many Unix variants. + //// So, we just log it and fall through to LookupStatic () + auto ipv4 = node->GetObject (); + NS_ASSERT_MSG ((bool) ipv4, "Node " << Names::FindName (node) << " does not have Ipv4 aggregate"); + auto routing = staticRouting.GetStaticRouting (ipv4); + routing->AddHostRouteTo (targetAddr.c_str (), ipv4->GetInterfaceForDevice (*diter), 0); + } + else + { + // route for forwarding + staticRouting.AddMulticastRoute (node, Ipv4Address::GetAny (), targetAddr.c_str (), *diter, NetDeviceContainer (*diter)); + } + } + + // set the topology, by default fully-connected + auto channel = devices.Get (0)->GetChannel (); + auto simplechannel = channel->GetObject (); + simplechannel->BlackList (Names::Find ("A/dev"), Names::Find ("D/dev")); + simplechannel->BlackList (Names::Find ("D/dev"), Names::Find ("A/dev")); + + simplechannel->BlackList (Names::Find ("A/dev"), Names::Find ("E/dev")); + simplechannel->BlackList (Names::Find ("E/dev"), Names::Find ("A/dev")); + + simplechannel->BlackList (Names::Find ("B/dev"), Names::Find ("E/dev")); + simplechannel->BlackList (Names::Find ("E/dev"), Names::Find ("B/dev")); + + simplechannel->BlackList (Names::Find ("C/dev"), Names::Find ("E/dev")); + simplechannel->BlackList (Names::Find ("E/dev"), Names::Find ("C/dev")); + // ensure some time progress between re-transmissions + simplechannel->SetAttribute ("Delay", TimeValue (MilliSeconds (1))); + + // sinks + PacketSinkHelper sinkHelper ("ns3::UdpSocketFactory", InetSocketAddress (Ipv4Address::GetAny (), 9)); + auto sinks = sinkHelper.Install ("B"); + sinks.Add (sinkHelper.Install ("C")); + sinks.Add (sinkHelper.Install ("D")); + sinks.Add (sinkHelper.Install ("E")); + sinks.Start (Seconds (1)); + + // source + OnOffHelper onoffHelper ("ns3::UdpSocketFactory", InetSocketAddress 
(targetAddr.c_str (), 9)); + onoffHelper.SetAttribute ("DataRate", DataRateValue (DataRate ("8Mbps"))); + onoffHelper.SetAttribute ("MaxBytes", UintegerValue (10 * 1024)); + auto source = onoffHelper.Install ("A"); + source.Start (Seconds (1.1)); + + // pcap traces + for (auto end = nodes.End (), + iter = nodes.Begin (); iter != end; ++iter) + { + internet.EnablePcapIpv4 ("smf-trace", (*iter)->GetId (), 1, false); + } + + // run simulation + Simulator::Run (); + + std::cout << "Node A sent " << 10 * 1024 << " bytes" << std::endl; + for (auto end = sinks.End (), + iter = sinks.Begin (); iter != end; ++iter) + { + auto node = (*iter)->GetNode (); + auto sink = (*iter)->GetObject (); + std::cout << "Node " << Names::FindName (node) + << " received " << sink->GetTotalRx () << " bytes" << std::endl; + } + + Simulator::Destroy (); + + Names::Clear (); + return 0; +} diff --git a/examples/mtp/socket-bound-tcp-static-routing-mtp.cc b/examples/mtp/socket-bound-tcp-static-routing-mtp.cc new file mode 100644 index 000000000..bf75fdc53 --- /dev/null +++ b/examples/mtp/socket-bound-tcp-static-routing-mtp.cc @@ -0,0 +1,235 @@ +/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation; + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +/* Test program for multi-interface host, static routing + + Destination host (10.20.1.2) + | + | 10.20.1.0/24 + DSTRTR + 10.10.1.0/24 / \ 10.10.2.0/24 + / \ + Rtr1 Rtr2 + 10.1.1.0/24 | | 10.1.2.0/24 + | / + \ / + Source +*/ + +#include +#include +#include +#include + +#include "ns3/core-module.h" +#include "ns3/network-module.h" +#include "ns3/internet-module.h" +#include "ns3/point-to-point-module.h" +#include "ns3/applications-module.h" +#include "ns3/ipv4-static-routing-helper.h" +#include "ns3/ipv4-list-routing-helper.h" +#include "ns3/mtp-module.h" + +using namespace ns3; + +NS_LOG_COMPONENT_DEFINE ("SocketBoundTcpRoutingExample"); + +static const uint32_t totalTxBytes = 20000; +static uint32_t currentTxBytes = 0; +static const uint32_t writeSize = 1040; +uint8_t data[writeSize]; + + +void StartFlow (Ptr, Ipv4Address, uint16_t); +void WriteUntilBufferFull (Ptr, uint32_t); + +void SendStuff (Ptr sock, Ipv4Address dstaddr, uint16_t port); +void BindSock (Ptr sock, Ptr netdev); +void srcSocketRecv (Ptr socket); +void dstSocketRecv (Ptr socket); + +int +main (int argc, char *argv[]) +{ + LogComponentEnable ("LogicalProcess", LOG_LEVEL_INFO); + LogComponentEnable ("MultithreadedSimulatorImpl", LOG_LEVEL_INFO); + MtpInterface::Enable (); + + // Allow the user to override any of the defaults and the above + // DefaultValue::Bind ()s at run-time, via command-line arguments + CommandLine cmd (__FILE__); + cmd.Parse (argc, argv); + + Ptr nSrc = CreateObject (); + Ptr nDst = CreateObject (); + Ptr nRtr1 = CreateObject (); + Ptr nRtr2 = CreateObject (); + Ptr nDstRtr = CreateObject (); + + NodeContainer c = NodeContainer (nSrc, nDst, nRtr1, nRtr2, nDstRtr); + + InternetStackHelper internet; + internet.Install (c); + + // Point-to-point links + 
NodeContainer nSrcnRtr1 = NodeContainer (nSrc, nRtr1); + NodeContainer nSrcnRtr2 = NodeContainer (nSrc, nRtr2); + NodeContainer nRtr1nDstRtr = NodeContainer (nRtr1, nDstRtr); + NodeContainer nRtr2nDstRtr = NodeContainer (nRtr2, nDstRtr); + NodeContainer nDstRtrnDst = NodeContainer (nDstRtr, nDst); + + // We create the channels first without any IP addressing information + PointToPointHelper p2p; + p2p.SetDeviceAttribute ("DataRate", StringValue ("5Mbps")); + p2p.SetChannelAttribute ("Delay", StringValue ("2ms")); + NetDeviceContainer dSrcdRtr1 = p2p.Install (nSrcnRtr1); + NetDeviceContainer dSrcdRtr2 = p2p.Install (nSrcnRtr2); + NetDeviceContainer dRtr1dDstRtr = p2p.Install (nRtr1nDstRtr); + NetDeviceContainer dRtr2dDstRtr = p2p.Install (nRtr2nDstRtr); + NetDeviceContainer dDstRtrdDst = p2p.Install (nDstRtrnDst); + + Ptr SrcToRtr1=dSrcdRtr1.Get (0); + Ptr SrcToRtr2=dSrcdRtr2.Get (0); + + // Later, we add IP addresses. + Ipv4AddressHelper ipv4; + ipv4.SetBase ("10.1.1.0", "255.255.255.0"); + Ipv4InterfaceContainer iSrciRtr1 = ipv4.Assign (dSrcdRtr1); + ipv4.SetBase ("10.1.2.0", "255.255.255.0"); + Ipv4InterfaceContainer iSrciRtr2 = ipv4.Assign (dSrcdRtr2); + ipv4.SetBase ("10.10.1.0", "255.255.255.0"); + Ipv4InterfaceContainer iRtr1iDstRtr = ipv4.Assign (dRtr1dDstRtr); + ipv4.SetBase ("10.10.2.0", "255.255.255.0"); + Ipv4InterfaceContainer iRtr2iDstRtr = ipv4.Assign (dRtr2dDstRtr); + ipv4.SetBase ("10.20.1.0", "255.255.255.0"); + Ipv4InterfaceContainer iDstRtrDst = ipv4.Assign (dDstRtrdDst); + + Ptr ipv4Src = nSrc->GetObject (); + Ptr ipv4Rtr1 = nRtr1->GetObject (); + Ptr ipv4Rtr2 = nRtr2->GetObject (); + Ptr ipv4DstRtr = nDstRtr->GetObject (); + Ptr ipv4Dst = nDst->GetObject (); + + Ipv4StaticRoutingHelper ipv4RoutingHelper; + Ptr staticRoutingSrc = ipv4RoutingHelper.GetStaticRouting (ipv4Src); + Ptr staticRoutingRtr1 = ipv4RoutingHelper.GetStaticRouting (ipv4Rtr1); + Ptr staticRoutingRtr2 = ipv4RoutingHelper.GetStaticRouting (ipv4Rtr2); + Ptr staticRoutingDstRtr = 
ipv4RoutingHelper.GetStaticRouting (ipv4DstRtr); + Ptr staticRoutingDst = ipv4RoutingHelper.GetStaticRouting (ipv4Dst); + + // Create static routes from Src to Dst + staticRoutingRtr1->AddHostRouteTo (Ipv4Address ("10.20.1.2"), Ipv4Address ("10.10.1.2"), 2); + staticRoutingRtr2->AddHostRouteTo (Ipv4Address ("10.20.1.2"), Ipv4Address ("10.10.2.2"), 2); + + // Two routes to same destination - setting separate metrics. + // You can switch these to see how traffic gets diverted via different routes + staticRoutingSrc->AddHostRouteTo (Ipv4Address ("10.20.1.2"), Ipv4Address ("10.1.1.2"), 1,5); + staticRoutingSrc->AddHostRouteTo (Ipv4Address ("10.20.1.2"), Ipv4Address ("10.1.2.2"), 2,10); + + // Creating static routes from DST to Source pointing to Rtr1 VIA Rtr2(!) + staticRoutingDst->AddHostRouteTo (Ipv4Address ("10.1.1.1"), Ipv4Address ("10.20.1.1"), 1); + staticRoutingDstRtr->AddHostRouteTo (Ipv4Address ("10.1.1.1"), Ipv4Address ("10.10.2.1"), 2); + staticRoutingRtr2->AddHostRouteTo (Ipv4Address ("10.1.1.1"), Ipv4Address ("10.1.2.1"), 1); + + staticRoutingDst->AddHostRouteTo (Ipv4Address ("10.1.2.1"), Ipv4Address ("10.20.1.1"), 1); + staticRoutingDstRtr->AddHostRouteTo (Ipv4Address ("10.1.2.1"), Ipv4Address ("10.10.2.1"), 2); + staticRoutingRtr2->AddHostRouteTo (Ipv4Address ("10.1.2.1"), Ipv4Address ("10.1.2.1"), 1); + + // There are no apps that can utilize the Socket Option so doing the work directly.. 
+ // Taken from tcp-large-transfer example + + Ptr srcSocket1 = Socket::CreateSocket (nSrc, TypeId::LookupByName ("ns3::TcpSocketFactory")); + Ptr srcSocket2 = Socket::CreateSocket (nSrc, TypeId::LookupByName ("ns3::TcpSocketFactory")); + Ptr srcSocket3 = Socket::CreateSocket (nSrc, TypeId::LookupByName ("ns3::TcpSocketFactory")); + Ptr srcSocket4 = Socket::CreateSocket (nSrc, TypeId::LookupByName ("ns3::TcpSocketFactory")); + + + uint16_t dstport = 12345; + Ipv4Address dstaddr ("10.20.1.2"); + + PacketSinkHelper sink ("ns3::TcpSocketFactory", InetSocketAddress (Ipv4Address::GetAny (), dstport)); + ApplicationContainer apps = sink.Install (nDst); + apps.Start (Seconds (0.0)); + apps.Stop (Seconds (10.0)); + + AsciiTraceHelper ascii; + p2p.EnableAsciiAll (ascii.CreateFileStream ("socket-bound-tcp-static-routing.tr")); + p2p.EnablePcapAll ("socket-bound-tcp-static-routing"); + + LogComponentEnableAll (LOG_PREFIX_TIME); + LogComponentEnable ("SocketBoundTcpRoutingExample", LOG_LEVEL_INFO); + + // First packet as normal (goes via Rtr1) + Simulator::Schedule (Seconds (0.1),&StartFlow, srcSocket1, dstaddr, dstport); + // Second via Rtr1 explicitly + Simulator::Schedule (Seconds (1.0),&BindSock, srcSocket2, SrcToRtr1); + Simulator::Schedule (Seconds (1.1),&StartFlow, srcSocket2, dstaddr, dstport); + // Third via Rtr2 explicitly + Simulator::Schedule (Seconds (2.0),&BindSock, srcSocket3, SrcToRtr2); + Simulator::Schedule (Seconds (2.1),&StartFlow, srcSocket3, dstaddr, dstport); + // Fourth again as normal (goes via Rtr1) + Simulator::Schedule (Seconds (3.0),&BindSock, srcSocket4, Ptr(0)); + Simulator::Schedule (Seconds (3.1),&StartFlow, srcSocket4, dstaddr, dstport); + // If you uncomment what's below, it results in ASSERT failing since you can't + // bind to a socket not existing on a node + // Simulator::Schedule(Seconds(4.0),&BindSock, srcSocket, dDstRtrdDst.Get(0)); + Simulator::Run (); + Simulator::Destroy (); + + return 0; +} + +void BindSock (Ptr sock, Ptr netdev) 
+{ + sock->BindToNetDevice (netdev); + return; +} + +void StartFlow (Ptr localSocket, + Ipv4Address servAddress, + uint16_t servPort) +{ + NS_LOG_INFO ("Starting flow at time " << Simulator::Now ().GetSeconds ()); + currentTxBytes = 0; + localSocket->Bind (); + localSocket->Connect (InetSocketAddress (servAddress, servPort)); //connect + + // tell the tcp implementation to call WriteUntilBufferFull again + // if we blocked and new tx buffer space becomes available + localSocket->SetSendCallback (MakeCallback (&WriteUntilBufferFull)); + WriteUntilBufferFull (localSocket, localSocket->GetTxAvailable ()); +} + +void WriteUntilBufferFull (Ptr localSocket, uint32_t txSpace) +{ + while (currentTxBytes < totalTxBytes && localSocket->GetTxAvailable () > 0) + { + uint32_t left = totalTxBytes - currentTxBytes; + uint32_t dataOffset = currentTxBytes % writeSize; + uint32_t toWrite = writeSize - dataOffset; + toWrite = std::min (toWrite, left); + toWrite = std::min (toWrite, localSocket->GetTxAvailable ()); + int amountSent = localSocket->Send (&data[dataOffset], toWrite, 0); + if(amountSent < 0) + { + // we will be called again when new tx space becomes available. + return; + } + currentTxBytes += amountSent; + } + localSocket->Close (); +} diff --git a/examples/mtp/tcp-bbr-example-mtp.cc b/examples/mtp/tcp-bbr-example-mtp.cc new file mode 100644 index 000000000..334ea83ee --- /dev/null +++ b/examples/mtp/tcp-bbr-example-mtp.cc @@ -0,0 +1,265 @@ +/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */ +/* + * Copyright (c) 2018-20 NITK Surathkal + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation; + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Authors: Aarti Nandagiri + * Vivek Jain + * Mohit P. Tahiliani + */ + +// This program simulates the following topology: +// +// 1000 Mbps 10Mbps 1000 Mbps +// Sender -------------- R1 -------------- R2 -------------- Receiver +// 5ms 10ms 5ms +// +// The link between R1 and R2 is a bottleneck link with 10 Mbps. All other +// links are 1000 Mbps. +// +// This program runs by default for 100 seconds and creates a new directory +// called 'bbr-results' in the ns-3 root directory. The program creates one +// sub-directory called 'pcap' in 'bbr-results' directory (if pcap generation +// is enabled) and three .dat files. +// +// (1) 'pcap' sub-directory contains six PCAP files: +// * bbr-0-0.pcap for the interface on Sender +// * bbr-1-0.pcap for the interface on Receiver +// * bbr-2-0.pcap for the first interface on R1 +// * bbr-2-1.pcap for the second interface on R1 +// * bbr-3-0.pcap for the first interface on R2 +// * bbr-3-1.pcap for the second interface on R2 +// (2) cwnd.dat file contains congestion window trace for the sender node +// (3) throughput.dat file contains sender side throughput trace +// (4) queueSize.dat file contains queue length trace from the bottleneck link +// +// BBR algorithm enters PROBE_RTT phase in every 10 seconds. The congestion +// window is fixed to 4 segments in this phase with a goal to achieve a better +// estimate of minimum RTT (because queue at the bottleneck link tends to drain +// when the congestion window is reduced to 4 segments). +// +// The congestion window and queue occupancy traces output by this program show +// periodic drops every 10 seconds when BBR algorithm is in PROBE_RTT phase. 
+ +#include "ns3/core-module.h" +#include "ns3/network-module.h" +#include "ns3/internet-module.h" +#include "ns3/point-to-point-module.h" +#include "ns3/applications-module.h" +#include "ns3/traffic-control-module.h" +#include "ns3/flow-monitor-module.h" +#include "ns3/mtp-module.h" + +using namespace ns3; + +std::string dir; +uint32_t prev = 0; +Time prevTime = Seconds (0); + +// Calculate throughput +static void +TraceThroughput (Ptr monitor) +{ + FlowMonitor::FlowStatsContainer stats = monitor->GetFlowStats (); + auto itr = stats.begin (); + Time curTime = Now (); + std::ofstream thr (dir + "/throughput.dat", std::ios::out | std::ios::app); + thr << curTime << " " << 8 * (itr->second.txBytes - prev) / (1000 * 1000 * (curTime.GetSeconds () - prevTime.GetSeconds ())) << std::endl; + prevTime = curTime; + prev = itr->second.txBytes; + Simulator::Schedule (Seconds (0.2), &TraceThroughput, monitor); +} + +// Check the queue size +void CheckQueueSize (Ptr qd) +{ + uint32_t qsize = qd->GetCurrentSize ().GetValue (); + Simulator::Schedule (Seconds (0.2), &CheckQueueSize, qd); + std::ofstream q (dir + "/queueSize.dat", std::ios::out | std::ios::app); + q << Simulator::Now ().GetSeconds () << " " << qsize << std::endl; + q.close (); +} + +// Trace congestion window +static void CwndTracer (Ptr stream, uint32_t oldval, uint32_t newval) +{ + *stream->GetStream () << Simulator::Now ().GetSeconds () << " " << newval / 1448.0 << std::endl; +} + +void TraceCwnd (uint32_t nodeId, uint32_t socketId) +{ + AsciiTraceHelper ascii; + Ptr stream = ascii.CreateFileStream (dir + "/cwnd.dat"); + Config::ConnectWithoutContext ("/NodeList/" + std::to_string (nodeId) + "/$ns3::TcpL4Protocol/SocketList/" + std::to_string (socketId) + "/CongestionWindow", MakeBoundCallback (&CwndTracer, stream)); +} + +int main (int argc, char *argv []) +{ + LogComponentEnable ("LogicalProcess", LOG_LEVEL_INFO); + LogComponentEnable ("MultithreadedSimulatorImpl", LOG_LEVEL_INFO); + MtpInterface::Enable (); + 
+ // Naming the output directory using local system time + time_t rawtime; + struct tm * timeinfo; + char buffer [80]; + time (&rawtime); + timeinfo = localtime (&rawtime); + strftime (buffer, sizeof (buffer), "%d-%m-%Y-%I-%M-%S", timeinfo); + std::string currentTime (buffer); + + std::string tcpTypeId = "TcpBbr"; + std::string queueDisc = "FifoQueueDisc"; + uint32_t delAckCount = 2; + bool bql = true; + bool enablePcap = false; + Time stopTime = Seconds (100); + + CommandLine cmd (__FILE__); + cmd.AddValue ("tcpTypeId", "Transport protocol to use: TcpNewReno, TcpBbr", tcpTypeId); + cmd.AddValue ("delAckCount", "Delayed ACK count", delAckCount); + cmd.AddValue ("enablePcap", "Enable/Disable pcap file generation", enablePcap); + cmd.AddValue ("stopTime", "Stop time for applications / simulation time will be stopTime + 1", stopTime); + cmd.Parse (argc, argv); + + queueDisc = std::string ("ns3::") + queueDisc; + + Config::SetDefault ("ns3::TcpL4Protocol::SocketType", StringValue ("ns3::" + tcpTypeId)); + Config::SetDefault ("ns3::TcpSocket::SndBufSize", UintegerValue (4194304)); + Config::SetDefault ("ns3::TcpSocket::RcvBufSize", UintegerValue (6291456)); + Config::SetDefault ("ns3::TcpSocket::InitialCwnd", UintegerValue (10)); + Config::SetDefault ("ns3::TcpSocket::DelAckCount", UintegerValue (delAckCount)); + Config::SetDefault ("ns3::TcpSocket::SegmentSize", UintegerValue (1448)); + Config::SetDefault ("ns3::DropTailQueue::MaxSize", QueueSizeValue (QueueSize ("1p"))); + Config::SetDefault (queueDisc + "::MaxSize", QueueSizeValue (QueueSize ("100p"))); + + NodeContainer sender, receiver; + NodeContainer routers; + sender.Create (1); + receiver.Create (1); + routers.Create (2); + + // Create the point-to-point link helpers + PointToPointHelper bottleneckLink; + bottleneckLink.SetDeviceAttribute ("DataRate", StringValue ("10Mbps")); + bottleneckLink.SetChannelAttribute ("Delay", StringValue ("10ms")); + + PointToPointHelper edgeLink; + edgeLink.SetDeviceAttribute 
("DataRate", StringValue ("1000Mbps")); + edgeLink.SetChannelAttribute ("Delay", StringValue ("5ms")); + + // Create NetDevice containers + NetDeviceContainer senderEdge = edgeLink.Install (sender.Get (0), routers.Get (0)); + NetDeviceContainer r1r2 = bottleneckLink.Install (routers.Get (0), routers.Get (1)); + NetDeviceContainer receiverEdge = edgeLink.Install (routers.Get (1), receiver.Get (0)); + + // Install Stack + InternetStackHelper internet; + internet.Install (sender); + internet.Install (receiver); + internet.Install (routers); + + // Configure the root queue discipline + TrafficControlHelper tch; + tch.SetRootQueueDisc (queueDisc); + + if (bql) + { + tch.SetQueueLimits ("ns3::DynamicQueueLimits", "HoldTime", StringValue ("1000ms")); + } + + tch.Install (senderEdge); + tch.Install (receiverEdge); + + // Assign IP addresses + Ipv4AddressHelper ipv4; + ipv4.SetBase ("10.0.0.0", "255.255.255.0"); + + Ipv4InterfaceContainer i1i2 = ipv4.Assign (r1r2); + + ipv4.NewNetwork (); + Ipv4InterfaceContainer is1 = ipv4.Assign (senderEdge); + + ipv4.NewNetwork (); + Ipv4InterfaceContainer ir1 = ipv4.Assign (receiverEdge); + + // Populate routing tables + Ipv4GlobalRoutingHelper::PopulateRoutingTables (); + + // Select sender side port + uint16_t port = 50001; + + // Install application on the sender + BulkSendHelper source ("ns3::TcpSocketFactory", InetSocketAddress (ir1.GetAddress (1), port)); + source.SetAttribute ("MaxBytes", UintegerValue (0)); + ApplicationContainer sourceApps = source.Install (sender.Get (0)); + sourceApps.Start (Seconds (0.1)); + // Hook trace source after application starts + Simulator::Schedule (Seconds (0.1) + MilliSeconds (1), &TraceCwnd, 0, 0); + sourceApps.Stop (stopTime); + + // Install application on the receiver + PacketSinkHelper sink ("ns3::TcpSocketFactory", InetSocketAddress (Ipv4Address::GetAny (), port)); + ApplicationContainer sinkApps = sink.Install (receiver.Get (0)); + sinkApps.Start (Seconds (0.0)); + sinkApps.Stop (stopTime); 
+ + // Create a new directory to store the output of the program + dir = "bbr-results/" + currentTime + "/"; + std::string dirToSave = "mkdir -p " + dir; + if (system (dirToSave.c_str ()) == -1) + { + exit (1); + } + + // The plotting scripts are provided in the following repository, if needed: + // https://github.com/mohittahiliani/BBR-Validation/ + // + // Download 'PlotScripts' directory (which is inside ns-3 scripts directory) + // from the link given above and place it in the ns-3 root directory. + // Uncomment the following three lines to generate plots for Congestion + // Window, sender side throughput and queue occupancy on the bottleneck link. + // + // system (("cp -R PlotScripts/gnuplotScriptCwnd " + dir).c_str ()); + // system (("cp -R PlotScripts/gnuplotScriptThroughput " + dir).c_str ()); + // system (("cp -R PlotScripts/gnuplotScriptQueueSize " + dir).c_str ()); + + // Trace the queue occupancy on the second interface of R1 + tch.Uninstall (routers.Get (0)->GetDevice (1)); + QueueDiscContainer qd; + qd = tch.Install (routers.Get (0)->GetDevice (1)); + Simulator::ScheduleNow (&CheckQueueSize, qd.Get (0)); + + // Generate PCAP traces if it is enabled + if (enablePcap) + { + if (system ((dirToSave + "/pcap/").c_str ()) == -1) + { + exit (1); + } + bottleneckLink.EnablePcapAll (dir + "/pcap/bbr", true); + } + + // Check for dropped packets using Flow Monitor + FlowMonitorHelper flowmon; + Ptr monitor = flowmon.InstallAll (); + Simulator::Schedule (Seconds (0 + 0.000001), &TraceThroughput, monitor); + + Simulator::Stop (stopTime + TimeStep (1)); + Simulator::Run (); + Simulator::Destroy (); + + return 0; +} diff --git a/examples/mtp/tcp-pacing-mtp.cc b/examples/mtp/tcp-pacing-mtp.cc new file mode 100644 index 000000000..4e2a675c7 --- /dev/null +++ b/examples/mtp/tcp-pacing-mtp.cc @@ -0,0 +1,334 @@ +/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */ +/* + * Copyright (c) 2020 NITK Surathkal + * + * This program is free software; you can 
redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation; + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Authors: Vivek Jain + * Deepak Kumaraswamy + */ + +// The following network topology is used in this example, and is taken from +// Figure 2 of https://homes.cs.washington.edu/~tom/pubs/pacing.pdf +// +// n0 n4 +// | | +// |(4x Mbps, 5ms) |(4x Mbps, 5ms) +// | | +// | | +// | (x Mbps, 40ms) | +// n2 ------------------------ n3 +// | | +// | | +// |(4x Mbps, 5ms) |(4x Mbps, 5ms) +// | | +// n1 n5 +// +// + +// This example illustrates how TCP pacing can be enabled on a socket. +// Two long-running TCP flows are instantiated at nodes n0 and n1 to +// send data over a bottleneck link (n2->n3) to sink nodes n4 and n5. +// At the end of the simulation, the IP-level flow monitor tool will +// print out summary statistics of the flows. The flow monitor detects +// four flows, but that is because the flow records are unidirectional; +// the latter two flows reported are actually ack streams. +// +// At the end of this simulation, data files are also generated +// that track changes in Congestion Window, Slow Start threshold and +// TCP pacing rate for the first flow (n0). Additionally, a data file +// that contains information about packet transmission and reception times +// (collected through TxTrace and RxTrace respectively) is also produced. +// This transmission and reception (ack) trace is the most direct way to +// observe the effects of pacing. 
All the above information is traced +// just for the single node n0. +// +// A small amount of randomness is introduced to the program to control +// the start time of the flows. +// +// This example has pacing enabled by default, which means that TCP +// does not send packets back-to-back, but instead paces them out over +// an RTT. The size of initial congestion window is set to 10, and pacing +// of the initial window is enabled. The available command-line options and +// their default values can be observed in the usual way by running the +// program to print the help info; i.e.: ./ns3 run 'tcp-pacing --PrintHelp' +// +// When pacing is disabled, TCP sends eligible packets back-to-back. The +// differences in behaviour when pacing is disabled can be observed from the +// packet transmission data file. For instance, one can observe that +// packets in the initial window are sent one after the other simultaneously, +// without any inter-packet gaps. Another instance is when n0 receives a +// packet in the form of an acknowledgement, and sends out data packets without +// pacing them. +// +// Although this example serves as a useful demonstration of how pacing could +// be enabled/disabled in ns-3 TCP congestion controls, we could not observe +// significant improvements in throughput for the above topology when pacing +// was enabled. In future, one could try and incorporate models such as +// TCP Prague and ACK-filtering, which may show a stronger performance +// impact for TCP pacing. 
+ +#include +#include +#include +#include +#include "ns3/core-module.h" +#include "ns3/point-to-point-module.h" +#include "ns3/internet-module.h" +#include "ns3/applications-module.h" +#include "ns3/network-module.h" +#include "ns3/packet-sink.h" +#include "ns3/flow-monitor-module.h" +#include "ns3/ipv4-global-routing-helper.h" +#include "ns3/traffic-control-module.h" +#include "ns3/mtp-module.h" + +using namespace ns3; + +NS_LOG_COMPONENT_DEFINE ("TcpPacingExample"); + +std::ofstream cwndStream; +std::ofstream pacingRateStream; +std::ofstream ssThreshStream; +std::ofstream packetTraceStream; + +static void +CwndTracer (uint32_t oldval, uint32_t newval) +{ + cwndStream << std::fixed << std::setprecision (6) << Simulator::Now ().GetSeconds () << std::setw (12) << newval << std::endl; +} + +static void +PacingRateTracer (DataRate oldval, DataRate newval) +{ + pacingRateStream << std::fixed << std::setprecision (6) << Simulator::Now ().GetSeconds () << std::setw (12) << newval.GetBitRate () / 1e6 << std::endl; +} + +static void +SsThreshTracer (uint32_t oldval, uint32_t newval) +{ + ssThreshStream << std::fixed << std::setprecision (6) << Simulator::Now ().GetSeconds () << std::setw (12) << newval << std::endl; +} + +static void +TxTracer (Ptr p, Ptr ipv4, uint32_t interface) +{ + packetTraceStream << std::fixed << std::setprecision (6) << Simulator::Now ().GetSeconds () << " tx " << p->GetSize () << std::endl; +} + +static void +RxTracer (Ptr p, Ptr ipv4, uint32_t interface) +{ + packetTraceStream << std::fixed << std::setprecision (6) << Simulator::Now ().GetSeconds () << " rx " << p->GetSize () << std::endl; +} + +void +ConnectSocketTraces (void) +{ + Config::ConnectWithoutContext ("/NodeList/0/$ns3::TcpL4Protocol/SocketList/0/CongestionWindow", MakeCallback (&CwndTracer)); + Config::ConnectWithoutContext ("/NodeList/0/$ns3::TcpL4Protocol/SocketList/0/PacingRate", MakeCallback (&PacingRateTracer)); + Config::ConnectWithoutContext 
("/NodeList/0/$ns3::TcpL4Protocol/SocketList/0/SlowStartThreshold", MakeCallback (&SsThreshTracer)); + Config::ConnectWithoutContext ("/NodeList/0/$ns3::Ipv4L3Protocol/Tx", MakeCallback (&TxTracer)); + Config::ConnectWithoutContext ("/NodeList/0/$ns3::Ipv4L3Protocol/Rx", MakeCallback (&RxTracer)); +} + +int +main (int argc, char *argv[]) +{ + LogComponentEnable ("LogicalProcess", LOG_LEVEL_INFO); + LogComponentEnable ("MultithreadedSimulatorImpl", LOG_LEVEL_INFO); + MtpInterface::Enable (); + + bool tracing = false; + + uint32_t maxBytes = 0; // value of zero corresponds to unlimited send + std::string transportProtocol = "ns3::TcpCubic"; + + Time simulationEndTime = Seconds (5); + DataRate bottleneckBandwidth ("10Mbps"); // value of x as shown in the above network topology + Time bottleneckDelay = MilliSeconds (40); + DataRate regLinkBandwidth = DataRate (4 * bottleneckBandwidth.GetBitRate ()); + Time regLinkDelay = MilliSeconds (5); + DataRate maxPacingRate ("4Gbps"); + + bool isPacingEnabled = true; + bool useEcn = true; + bool useQueueDisc = true; + bool shouldPaceInitialWindow = true; + + // Configure defaults that are not based on explicit command-line arguments + // They may be overridden by general attribute configuration of command line + Config::SetDefault ("ns3::TcpL4Protocol::SocketType", TypeIdValue (TypeId::LookupByName (transportProtocol))); + Config::SetDefault ("ns3::TcpSocket::InitialCwnd", UintegerValue (10)); + + CommandLine cmd (__FILE__); + cmd.AddValue ("tracing", "Flag to enable/disable Ascii and Pcap tracing", tracing); + cmd.AddValue ("maxBytes", "Total number of bytes for application to send", maxBytes); + cmd.AddValue ("isPacingEnabled", "Flag to enable/disable pacing in TCP", isPacingEnabled); + cmd.AddValue ("maxPacingRate", "Max Pacing Rate", maxPacingRate); + cmd.AddValue ("useEcn", "Flag to enable/disable ECN", useEcn); + cmd.AddValue ("useQueueDisc", "Flag to enable/disable queue disc on bottleneck", useQueueDisc); + cmd.AddValue 
("shouldPaceInitialWindow", "Flag to enable/disable pacing of TCP initial window", shouldPaceInitialWindow); + cmd.AddValue ("simulationEndTime", "Simulation end time", simulationEndTime); + cmd.Parse (argc, argv); + + // Configure defaults based on command-line arguments + Config::SetDefault ("ns3::TcpSocketState::EnablePacing", BooleanValue (isPacingEnabled)); + Config::SetDefault ("ns3::TcpSocketState::PaceInitialWindow", BooleanValue (shouldPaceInitialWindow)); + Config::SetDefault ("ns3::TcpSocketBase::UseEcn", (useEcn ? EnumValue (TcpSocketState::On) : EnumValue (TcpSocketState::Off))); + Config::SetDefault ("ns3::TcpSocketState::MaxPacingRate", DataRateValue (maxPacingRate)); + + NS_LOG_INFO ("Create nodes."); + NodeContainer c; + c.Create (6); + + NS_LOG_INFO ("Create channels."); + NodeContainer n0n2 = NodeContainer (c.Get (0), c.Get (2)); + NodeContainer n1n2 = NodeContainer (c.Get (1), c.Get (2)); + + NodeContainer n2n3 = NodeContainer (c.Get (2), c.Get (3)); + + NodeContainer n3n4 = NodeContainer (c.Get (3), c.Get (4)); + NodeContainer n3n5 = NodeContainer (c.Get (3), c.Get (5)); + + //Define Node link properties + PointToPointHelper regLink; + regLink.SetDeviceAttribute ("DataRate", DataRateValue (regLinkBandwidth)); + regLink.SetChannelAttribute ("Delay", TimeValue (regLinkDelay)); + + NetDeviceContainer d0d2 = regLink.Install (n0n2); + NetDeviceContainer d1d2 = regLink.Install (n1n2); + NetDeviceContainer d3d4 = regLink.Install (n3n4); + NetDeviceContainer d3d5 = regLink.Install (n3n5); + + PointToPointHelper bottleNeckLink; + bottleNeckLink.SetDeviceAttribute ("DataRate", DataRateValue (bottleneckBandwidth)); + bottleNeckLink.SetChannelAttribute ("Delay", TimeValue (bottleneckDelay)); + + NetDeviceContainer d2d3 = bottleNeckLink.Install (n2n3); + + //Install Internet stack + InternetStackHelper stack; + stack.Install (c); + + // Install traffic control + if (useQueueDisc) + { + TrafficControlHelper tchBottleneck; + tchBottleneck.SetRootQueueDisc 
("ns3::FqCoDelQueueDisc"); + tchBottleneck.Install (d2d3); + } + + NS_LOG_INFO ("Assign IP Addresses."); + Ipv4AddressHelper ipv4; + ipv4.SetBase ("10.1.1.0", "255.255.255.0"); + Ipv4InterfaceContainer regLinkInterface0 = ipv4.Assign (d0d2); + + ipv4.SetBase ("10.1.2.0", "255.255.255.0"); + Ipv4InterfaceContainer regLinkInterface1 = ipv4.Assign (d1d2); + + ipv4.SetBase ("10.1.3.0", "255.255.255.0"); + Ipv4InterfaceContainer bottleneckInterface = ipv4.Assign (d2d3); + + ipv4.SetBase ("10.1.4.0", "255.255.255.0"); + Ipv4InterfaceContainer regLinkInterface4 = ipv4.Assign (d3d4); + + ipv4.SetBase ("10.1.5.0", "255.255.255.0"); + Ipv4InterfaceContainer regLinkInterface5 = ipv4.Assign (d3d5); + + Ipv4GlobalRoutingHelper::PopulateRoutingTables (); + + NS_LOG_INFO ("Create Applications."); + + // Two Sink Applications at n4 and n5 + uint16_t sinkPort = 8080; + Address sinkAddress4 (InetSocketAddress (regLinkInterface4.GetAddress (1), sinkPort)); // interface of n4 + Address sinkAddress5 (InetSocketAddress (regLinkInterface5.GetAddress (1), sinkPort)); // interface of n5 + PacketSinkHelper packetSinkHelper ("ns3::TcpSocketFactory", InetSocketAddress (Ipv4Address::GetAny (), sinkPort)); + ApplicationContainer sinkApps4 = packetSinkHelper.Install (c.Get (4)); //n4 as sink + ApplicationContainer sinkApps5 = packetSinkHelper.Install (c.Get (5)); //n5 as sink + + sinkApps4.Start (Seconds (0)); + sinkApps4.Stop (simulationEndTime); + sinkApps5.Start (Seconds (0)); + sinkApps5.Stop (simulationEndTime); + + // Randomize the start time between 0 and 1ms + Ptr uniformRv = CreateObject (); + uniformRv->SetStream (0); + + // Two Source Applications at n0 and n1 + BulkSendHelper source0 ("ns3::TcpSocketFactory", sinkAddress4); + BulkSendHelper source1 ("ns3::TcpSocketFactory", sinkAddress5); + // Set the amount of data to send in bytes. Zero is unlimited. 
+ source0.SetAttribute ("MaxBytes", UintegerValue (maxBytes)); + source1.SetAttribute ("MaxBytes", UintegerValue (maxBytes)); + ApplicationContainer sourceApps0 = source0.Install (c.Get (0)); + ApplicationContainer sourceApps1 = source1.Install (c.Get (1)); + + sourceApps0.Start (MicroSeconds (uniformRv->GetInteger (0, 1000))); + sourceApps0.Stop (simulationEndTime); + sourceApps1.Start (MicroSeconds (uniformRv->GetInteger (0, 1000))); + sourceApps1.Stop (simulationEndTime); + + if (tracing) + { + AsciiTraceHelper ascii; + regLink.EnableAsciiAll (ascii.CreateFileStream ("tcp-dynamic-pacing.tr")); + regLink.EnablePcapAll ("tcp-dynamic-pacing", false); + } + + cwndStream.open ("tcp-dynamic-pacing-cwnd.dat", std::ios::out); + cwndStream << "#Time(s) Congestion Window (B)" << std::endl; + + pacingRateStream.open ("tcp-dynamic-pacing-pacing-rate.dat", std::ios::out); + pacingRateStream << "#Time(s) Pacing Rate (Mb/s)" << std::endl; + + ssThreshStream.open ("tcp-dynamic-pacing-ssthresh.dat", std::ios::out); + ssThreshStream << "#Time(s) Slow Start threshold (B)" << std::endl; + + packetTraceStream.open ("tcp-dynamic-pacing-packet-trace.dat", std::ios::out); + packetTraceStream << "#Time(s) tx/rx size (B)" << std::endl; + + Simulator::Schedule (MicroSeconds (1001), &ConnectSocketTraces); + + FlowMonitorHelper flowmon; + Ptr monitor = flowmon.InstallAll (); + + NS_LOG_INFO ("Run Simulation."); + Simulator::Stop (simulationEndTime); + Simulator::Run (); + + monitor->CheckForLostPackets (); + Ptr classifier = DynamicCast (flowmon.GetClassifier ()); + FlowMonitor::FlowStatsContainer stats = monitor->GetFlowStats (); + for (std::map::const_iterator i = stats.begin (); i != stats.end (); ++i) + { + Ipv4FlowClassifier::FiveTuple t = classifier->FindFlow (i->first); + + std::cout << "Flow " << i->first << " (" << t.sourceAddress << " -> " << t.destinationAddress << ")\n"; + std::cout << " Tx Packets: " << i->second.txPackets << "\n"; + std::cout << " Tx Bytes: " << 
i->second.txBytes << "\n"; + std::cout << " TxOffered: " << i->second.txBytes * 8.0 / simulationEndTime.GetSeconds () / 1000 / 1000 << " Mbps\n"; + std::cout << " Rx Packets: " << i->second.rxPackets << "\n"; + std::cout << " Rx Bytes: " << i->second.rxBytes << "\n"; + std::cout << " Throughput: " << i->second.rxBytes * 8.0 / simulationEndTime.GetSeconds () / 1000 / 1000 << " Mbps\n"; + } + + + cwndStream.close (); + pacingRateStream.close (); + ssThreshStream.close (); + Simulator::Destroy (); +} diff --git a/examples/mtp/tcp-star-server-mtp.cc b/examples/mtp/tcp-star-server-mtp.cc new file mode 100644 index 000000000..5f214028d --- /dev/null +++ b/examples/mtp/tcp-star-server-mtp.cc @@ -0,0 +1,172 @@ +/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation; + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + + +// Default Network topology, 9 nodes in a star +/* + n2 n3 n4 + \ | / + \|/ + n1---n0---n5 + /| \ + / | \ + n8 n7 n6 +*/ +// - CBR Traffic goes from the star "arms" to the "hub" +// - Tracing of queues and packet receptions to file +// "tcp-star-server.tr" +// - pcap traces also generated in the following files +// "tcp-star-server-$n-$i.pcap" where n and i represent node and interface +// numbers respectively +// Usage examples for things you might want to tweak: +// ./ns3 run="tcp-star-server" +// ./ns3 run="tcp-star-server --nNodes=25" +// ./ns3 run="tcp-star-server --ns3::OnOffApplication::DataRate=10000" +// ./ns3 run="tcp-star-server --ns3::OnOffApplication::PacketSize=500" +// See the ns-3 tutorial for more info on the command line: +// http://www.nsnam.org/tutorials.html + + + + +#include +#include +#include +#include + +#include "ns3/core-module.h" +#include "ns3/network-module.h" +#include "ns3/internet-module.h" +#include "ns3/point-to-point-module.h" +#include "ns3/applications-module.h" +#include "ns3/ipv4-global-routing-helper.h" +#include "ns3/mtp-module.h" + +using namespace ns3; + +NS_LOG_COMPONENT_DEFINE ("TcpServer"); + +int +main (int argc, char *argv[]) +{ + LogComponentEnable ("LogicalProcess", LOG_LEVEL_INFO); + LogComponentEnable ("MultithreadedSimulatorImpl", LOG_LEVEL_INFO); + MtpInterface::Enable (); + + // Users may find it convenient to turn on explicit debugging + // for selected modules; the below lines suggest how to do this + + //LogComponentEnable ("TcpServer", LOG_LEVEL_INFO); + //LogComponentEnable ("TcpL4Protocol", LOG_LEVEL_ALL); + //LogComponentEnable ("TcpSocketImpl", LOG_LEVEL_ALL); + //LogComponentEnable ("PacketSink", LOG_LEVEL_ALL); + + // Set up some default values for the simulation. 
+ Config::SetDefault ("ns3::OnOffApplication::PacketSize", UintegerValue (250)); + Config::SetDefault ("ns3::OnOffApplication::DataRate", StringValue ("5kb/s")); + uint32_t N = 9; //number of nodes in the star + + // Allow the user to override any of the defaults and the above + // Config::SetDefault()s at run-time, via command-line arguments + CommandLine cmd (__FILE__); + cmd.AddValue ("nNodes", "Number of nodes to place in the star", N); + cmd.Parse (argc, argv); + + // Here, we will create N nodes in a star. + NS_LOG_INFO ("Create nodes."); + NodeContainer serverNode; + NodeContainer clientNodes; + serverNode.Create (1); + clientNodes.Create (N-1); + NodeContainer allNodes = NodeContainer (serverNode, clientNodes); + + // Install network stacks on the nodes + InternetStackHelper internet; + internet.Install (allNodes); + + //Collect an adjacency list of nodes for the p2p topology + std::vector nodeAdjacencyList (N-1); + for(uint32_t i=0; i deviceAdjacencyList (N-1); + for(uint32_t i=0; i interfaceAdjacencyList (N-1); + for(uint32_t i=0; i downstream (primary data transfer from servers to clients) +// <--- upstream (return acks and ICMP echo response) +// +// ---- bottleneck link ---- +// servers ---| WR |--------------------| LR |--- clients +// ---- ---- +// ns-3 node IDs: +// nodes 0-2 3 4 5-7 +// +// - The box WR is notionally a WAN router, aggregating all server links +// - The box LR is notionally a LAN router, aggregating all client links +// - Three servers are connected to WR, three clients are connected to LR +// +// clients and servers are configured for ICMP measurements and TCP throughput +// and latency measurements in the downstream direction +// +// All link rates are enforced by a point-to-point (P2P) ns-3 model with full +// duplex operation. Dynamic queue limits +// (BQL) are enabled to allow for queueing to occur at the priority queue layer; +// the notional P2P hardware device queue is limited to three packets. 
+// +// One-way link delays and link rates +// ----------------------------------- +// (1) server to WR links, 1000 Mbps, 1us delay +// (2) bottleneck link: configurable rate, configurable delay +// (3) client to LR links, 1000 Mbps, 1us delay +// +// By default, ns-3 FQ-CoDel model is installed on all interfaces, but +// the bottleneck queue uses CoDel by default and is configurable. +// +// The ns-3 FQ-CoDel model uses ns-3 defaults: +// - 100ms interval +// - 5ms target +// - drop batch size of 64 packets +// - minbytes of 1500 +// +// Default simulation time is 70 sec. For single flow experiments, the flow is +// started at simulation time 5 sec; if a second flow is used, it starts +// at 15 sec. +// +// ping frequency is set at 100ms. +// +// A command-line option to enable a step-threshold CE threshold +// from the CoDel queue model is provided. +// +// Measure: +// - ping RTT +// - TCP RTT estimate +// - TCP throughput +// +// IPv4 addressing +// ---------------------------- +// pingServer 10.1.1.2 (ping source) +// firstServer 10.1.2.2 (data sender) +// secondServer 10.1.3.2 (data sender) +// pingClient 192.168.1.2 +// firstClient 192.168.2.2 +// secondClient 192.168.3.2 +// +// Program Options: +// --------------- +// --firstTcpType: first TCP type (cubic, dctcp, or reno) [cubic] +// --secondTcpType: second TCP type (cubic, dctcp, or reno) [] +// --queueType: bottleneck queue type (fq, codel, pie, or red) [codel] +// --baseRtt: base RTT [+80ms] +// --ceThreshold: CoDel CE threshold (for DCTCP) [+1ms] +// --linkRate: data rate of bottleneck link [50000000bps] +// --stopTime: simulation stop time [+1.16667min] +// --queueUseEcn: use ECN on queue [false] +// --enablePcap: enable Pcap [false] +// --validate: validation case to run [] +// +// validation cases (and syntax of how to run): +// ------------ +// Case 'dctcp-10ms': DCTCP single flow, 10ms base RTT, 50 Mbps link, ECN enabled, CoDel: +// ./ns3 run 'tcp-validation --firstTcpType=dctcp --linkRate=50Mbps 
--baseRtt=10ms --queueUseEcn=1 --stopTime=15s --validate=dctcp-10ms'
+// - Throughput between 48 Mbps and 49 Mbps for time greater than 5.6s
+// - DCTCP alpha below 0.1 for time greater than 5.4s
+// - DCTCP alpha between 0.06 and 0.085 for time greater than 7s
+//
+// Case 'dctcp-80ms': DCTCP single flow, 80ms base RTT, 50 Mbps link, ECN enabled, CoDel:
+// ./ns3 run 'tcp-validation --firstTcpType=dctcp --linkRate=50Mbps --baseRtt=80ms --queueUseEcn=1 --stopTime=40s --validate=dctcp-80ms'
+// - Throughput less than 20 Mbps for time less than 14s
+// - Throughput less than 48 Mbps for time less than 30s
+// - Throughput between 47.5 Mbps and 48.5 for time greater than 32s
+// - DCTCP alpha above 0.1 for time less than 7.5
+// - DCTCP alpha below 0.01 for time greater than 11 and less than 30
+// - DCTCP alpha between 0.015 and 0.025 for time greater than 34
+//
+// Case 'cubic-50ms-no-ecn': CUBIC single flow, 50ms base RTT, 50 Mbps link, ECN disabled, CoDel:
+// ./ns3 run 'tcp-validation --firstTcpType=cubic --linkRate=50Mbps --baseRtt=50ms --queueUseEcn=0 --stopTime=20s --validate=cubic-50ms-no-ecn'
+// - Maximum value of cwnd is 511 segments at 5.4593 seconds
+// - cwnd decreases to 173 segments at 5.80304 seconds
+// - cwnd reaches another local maxima around 14.2815 seconds of 236 segments
+// - cwnd reaches a second maximum around 18.048 seconds of 234 segments
+//
+// Case 'cubic-50ms-ecn': CUBIC single flow, 50ms base RTT, 50 Mbps link, ECN enabled, CoDel:
+// ./ns3 run 'tcp-validation --firstTcpType=cubic --linkRate=50Mbps --baseRtt=50ms --queueUseEcn=1 --stopTime=20s --validate=cubic-50ms-ecn'
+// - Maximum value of cwnd is 511 segments at 5.4593 seconds
+// - cwnd decreases to 173 segments at 5.7939 seconds
+// - cwnd reaches another local maxima around 14.3477 seconds of 236 segments
+// - cwnd reaches a second maximum around 18.064 seconds of 234 segments
+
+#include
+#include
+#include
+#include "ns3/core-module.h" +#include "ns3/network-module.h" +#include "ns3/applications-module.h" +#include "ns3/traffic-control-module.h" +#include "ns3/internet-module.h" +#include "ns3/internet-apps-module.h" +#include "ns3/point-to-point-module.h" +#include "ns3/mtp-module.h" + +using namespace ns3; + +NS_LOG_COMPONENT_DEFINE ("TcpValidation"); + +// These variables are declared outside of main() so that they can +// be used in trace sinks. +std::atomic g_firstBytesReceived = 0; +std::atomic g_secondBytesReceived = 0; +std::atomic g_marksObserved = 0; +std::atomic g_dropsObserved = 0; +std::string g_validate = ""; // Empty string disables this mode +bool g_validationFailed = false; + +void +TraceFirstCwnd (std::ofstream* ofStream, uint32_t oldCwnd, uint32_t newCwnd) +{ + // TCP segment size is configured below to be 1448 bytes + // so that we can report cwnd in units of segments + if (g_validate == "") + { + *ofStream << Simulator::Now ().GetSeconds () << " " << static_cast (newCwnd) / 1448 << std::endl; + } + // Validation checks; both the ECN enabled and disabled cases are similar + if (g_validate == "cubic-50ms-no-ecn" || g_validate == "cubic-50ms-ecn") + { + double now = Simulator::Now ().GetSeconds (); + double cwnd = static_cast (newCwnd) / 1448; + if ((now > 5.43) && (now < 5.465) && (cwnd < 500)) + { + NS_LOG_WARN ("now " << Now ().As (Time::S) << " cwnd " << cwnd << " (expected >= 500)"); + g_validationFailed = true; + } + else if ((now > 5.795) && (now < 6) && (cwnd > 190)) + { + NS_LOG_WARN ("now " << Now ().As (Time::S) << " cwnd " << cwnd << " (expected <= 190)"); + g_validationFailed = true; + } + else if ((now > 14) && (now < 14.197) && (cwnd < 224)) + { + NS_LOG_WARN ("now " << Now ().As (Time::S) << " cwnd " << cwnd << " (expected >= 224)"); + g_validationFailed = true; + } + else if ((now > 17) && (now < 18.026) && (cwnd < 212)) + { + NS_LOG_WARN ("now " << Now ().As (Time::S) << " cwnd " << cwnd << " (expected >= 212)"); + 
g_validationFailed = true;
+        }
+    }
+}
+
+void
+TraceFirstDctcp (std::ofstream* ofStream, uint32_t bytesMarked, uint32_t bytesAcked, double alpha)
+{
+  if (g_validate == "")
+    {
+      *ofStream << Simulator::Now ().GetSeconds () << " " << alpha << std::endl;
+    }
+  // Validation checks
+  if (g_validate == "dctcp-80ms")
+    {
+      double now = Simulator::Now ().GetSeconds ();
+      if ((now < 7.5) && (alpha < 0.1))
+        {
+          NS_LOG_WARN ("now " << Now ().As (Time::S) << " alpha " << alpha << " (expected >= 0.1)");
+          g_validationFailed = true;
+        }
+      else if ((now > 11) && (now < 30) && (alpha > 0.01))
+        {
+          NS_LOG_WARN ("now " << Now ().As (Time::S) << " alpha " << alpha << " (expected <= 0.01)");
+          g_validationFailed = true;
+        }
+      else if ((now > 34) && ((alpha < 0.015) || (alpha > 0.025))) // was '&&': unsatisfiable, check was dead; flag alpha outside [0.015, 0.025]
+        {
+          NS_LOG_WARN ("now " << Now ().As (Time::S) << " alpha " << alpha << " (expected 0.015 <= alpha <= 0.025)");
+          g_validationFailed = true;
+        }
+    }
+  else if (g_validate == "dctcp-10ms")
+    {
+      double now = Simulator::Now ().GetSeconds ();
+      if ((now > 5.6) && (alpha > 0.1))
+        {
+          NS_LOG_WARN ("now " << Now ().As (Time::S) << " alpha " << alpha << " (expected <= 0.1)");
+          g_validationFailed = true;
+        }
+      if ((now > 7) && ((alpha > 0.09) || (alpha < 0.055)))
+        {
+          NS_LOG_WARN ("now " << Now ().As (Time::S) << " alpha " << alpha << " (expected 0.055 <= alpha <= 0.09)"); // bounds were reversed vs. the condition above
+          g_validationFailed = true;
+        }
+    }
+}
+
+void
+TraceFirstRtt (std::ofstream* ofStream, Time oldRtt, Time newRtt)
+{
+  if (g_validate == "")
+    {
+      *ofStream << Simulator::Now ().GetSeconds () << " " << newRtt.GetSeconds () * 1000 << std::endl;
+    }
+}
+
+void
+TraceSecondCwnd (std::ofstream* ofStream, uint32_t oldCwnd, uint32_t newCwnd)
+{
+  // TCP segment size is configured below to be 1448 bytes
+  // so that we can report cwnd in units of segments
+  if (g_validate == "")
+    {
+      *ofStream << Simulator::Now ().GetSeconds () << " " << static_cast (newCwnd) / 1448 << std::endl;
+    }
+}
+
+void
+TraceSecondRtt (std::ofstream*
Time oldRtt, Time newRtt) +{ + if (g_validate == "") + { + *ofStream << Simulator::Now ().GetSeconds () << " " << newRtt.GetSeconds () * 1000 << std::endl; + } +} + +void +TraceSecondDctcp (std::ofstream* ofStream, uint32_t bytesMarked, uint32_t bytesAcked, double alpha) +{ + if (g_validate == "") + { + *ofStream << Simulator::Now ().GetSeconds () << " " << alpha << std::endl; + } +} + +void +TracePingRtt (std::ofstream* ofStream, Time rtt) +{ + if (g_validate == "") + { + *ofStream << Simulator::Now ().GetSeconds () << " " << rtt.GetSeconds () * 1000 << std::endl; + } +} + +void +TraceFirstRx (Ptr packet, const Address &address) +{ + g_firstBytesReceived += packet->GetSize (); +} + +void +TraceSecondRx (Ptr packet, const Address &address) +{ + g_secondBytesReceived += packet->GetSize (); +} + +void +TraceQueueDrop (std::ofstream* ofStream, Ptr item) +{ + if (g_validate == "") + { + *ofStream << Simulator::Now ().GetSeconds () << " " << std::hex << item->Hash () << std::endl; + } + g_dropsObserved++; +} + +void +TraceQueueMark (std::ofstream* ofStream, Ptr item, const char* reason) +{ + if (g_validate == "") + { + *ofStream << Simulator::Now ().GetSeconds () << " " << std::hex << item->Hash () << std::endl; + } + g_marksObserved++; +} + +void +TraceQueueLength (std::ofstream* ofStream, DataRate queueLinkRate, uint32_t oldVal, uint32_t newVal) +{ + // output in units of ms + if (g_validate == "") + { + *ofStream << Simulator::Now ().GetSeconds () << " " << std::fixed << static_cast (newVal * 8) / (queueLinkRate.GetBitRate () / 1000) << std::endl; + } +} + +void +TraceMarksFrequency (std::ofstream* ofStream, Time marksSamplingInterval) +{ + if (g_validate == "") + { + *ofStream << Simulator::Now ().GetSeconds () << " " << g_marksObserved << std::endl; + } + g_marksObserved = 0; + Simulator::Schedule (marksSamplingInterval, &TraceMarksFrequency, ofStream, marksSamplingInterval); +} + +void +TraceFirstThroughput (std::ofstream* ofStream, Time throughputInterval) +{ + 
double throughput = g_firstBytesReceived * 8 / throughputInterval.GetSeconds () / 1e6; + if (g_validate == "") + { + *ofStream << Simulator::Now ().GetSeconds () << " " << throughput << std::endl; + } + g_firstBytesReceived = 0; + Simulator::Schedule (throughputInterval, &TraceFirstThroughput, ofStream, throughputInterval); + if (g_validate == "dctcp-80ms") + { + double now = Simulator::Now ().GetSeconds (); + if ((now < 14) && (throughput > 20)) + { + NS_LOG_WARN ("now " << Now ().As (Time::S) << " throughput " << throughput << " (expected <= 20)"); + g_validationFailed = true; + } + if ((now < 30) && (throughput > 48)) + { + NS_LOG_WARN ("now " << Now ().As (Time::S) << " throughput " << throughput << " (expected <= 48)"); + g_validationFailed = true; + } + if ((now > 32) && ((throughput < 47.5) || (throughput > 48.5))) + { + NS_LOG_WARN ("now " << Now ().As (Time::S) << " throughput " << throughput << " (expected 47.5 <= throughput <= 48.5)"); + g_validationFailed = true; + } + } + else if (g_validate == "dctcp-10ms") + { + double now = Simulator::Now ().GetSeconds (); + if ((now > 5.6) && ((throughput < 48) || (throughput > 49))) + { + NS_LOG_WARN ("now " << Now ().As (Time::S) << " throughput " << throughput << " (expected 48 <= throughput <= 49)"); + g_validationFailed = true; + } + } +} + +void +TraceSecondThroughput (std::ofstream* ofStream, Time throughputInterval) +{ + if (g_validate == "") + { + *ofStream << Simulator::Now ().GetSeconds () << " " << g_secondBytesReceived * 8 / throughputInterval.GetSeconds () / 1e6 << std::endl; + } + g_secondBytesReceived = 0; + Simulator::Schedule (throughputInterval, &TraceSecondThroughput, ofStream, throughputInterval); +} + +void +ScheduleFirstTcpCwndTraceConnection (std::ofstream* ofStream) +{ + Config::ConnectWithoutContextFailSafe ("/NodeList/1/$ns3::TcpL4Protocol/SocketList/0/CongestionWindow", MakeBoundCallback (&TraceFirstCwnd, ofStream)); +} + +void +ScheduleFirstTcpRttTraceConnection (std::ofstream* 
ofStream) +{ + Config::ConnectWithoutContextFailSafe ("/NodeList/1/$ns3::TcpL4Protocol/SocketList/0/RTT", MakeBoundCallback (&TraceFirstRtt, ofStream)); +} + +void +ScheduleFirstDctcpTraceConnection (std::ofstream* ofStream) +{ + Config::ConnectWithoutContextFailSafe ("/NodeList/1/$ns3::TcpL4Protocol/SocketList/0/CongestionOps/$ns3::TcpDctcp/CongestionEstimate", MakeBoundCallback (&TraceFirstDctcp, ofStream)); +} + +void +ScheduleSecondDctcpTraceConnection (std::ofstream* ofStream) +{ + Config::ConnectWithoutContextFailSafe ("/NodeList/2/$ns3::TcpL4Protocol/SocketList/0/CongestionOps/$ns3::TcpDctcp/CongestionEstimate", MakeBoundCallback (&TraceSecondDctcp, ofStream)); +} + +void +ScheduleFirstPacketSinkConnection (void) +{ + Config::ConnectWithoutContextFailSafe ("/NodeList/6/ApplicationList/*/$ns3::PacketSink/Rx", MakeCallback (&TraceFirstRx)); +} + +void +ScheduleSecondTcpCwndTraceConnection (std::ofstream* ofStream) +{ + Config::ConnectWithoutContext ("/NodeList/2/$ns3::TcpL4Protocol/SocketList/0/CongestionWindow", MakeBoundCallback (&TraceSecondCwnd, ofStream)); +} + +void +ScheduleSecondTcpRttTraceConnection (std::ofstream* ofStream) +{ + Config::ConnectWithoutContext ("/NodeList/2/$ns3::TcpL4Protocol/SocketList/0/RTT", MakeBoundCallback (&TraceSecondRtt, ofStream)); +} + +void +ScheduleSecondPacketSinkConnection (void) +{ + Config::ConnectWithoutContext ("/NodeList/7/ApplicationList/*/$ns3::PacketSink/Rx", MakeCallback (&TraceSecondRx)); +} + +int +main (int argc, char *argv[]) +{ + LogComponentEnable ("LogicalProcess", LOG_LEVEL_INFO); + LogComponentEnable ("MultithreadedSimulatorImpl", LOG_LEVEL_INFO); + MtpInterface::Enable (); + + //////////////////////////////////////////////////////////// + // variables not configured at command line // + //////////////////////////////////////////////////////////// + uint32_t pingSize = 100; // bytes + bool enableSecondTcp = false; + bool enableLogging = false; + Time pingInterval = MilliSeconds (100); + Time 
marksSamplingInterval = MilliSeconds (100); + Time throughputSamplingInterval = MilliSeconds (200); + std::string pingTraceFile = "tcp-validation-ping.dat"; + std::string firstTcpRttTraceFile = "tcp-validation-first-tcp-rtt.dat"; + std::string firstTcpCwndTraceFile = "tcp-validation-first-tcp-cwnd.dat"; + std::string firstDctcpTraceFile = "tcp-validation-first-dctcp-alpha.dat"; + std::string firstTcpThroughputTraceFile = "tcp-validation-first-tcp-throughput.dat"; + std::string secondTcpRttTraceFile = "tcp-validation-second-tcp-rtt.dat"; + std::string secondTcpCwndTraceFile = "tcp-validation-second-tcp-cwnd.dat"; + std::string secondTcpThroughputTraceFile = "tcp-validation-second-tcp-throughput.dat"; + std::string secondDctcpTraceFile = "tcp-validation-second-dctcp-alpha.dat"; + std::string queueMarkTraceFile = "tcp-validation-queue-mark.dat"; + std::string queueDropTraceFile = "tcp-validation-queue-drop.dat"; + std::string queueMarksFrequencyTraceFile = "tcp-validation-queue-marks-frequency.dat"; + std::string queueLengthTraceFile = "tcp-validation-queue-length.dat"; + + //////////////////////////////////////////////////////////// + // variables configured at command line // + //////////////////////////////////////////////////////////// + std::string firstTcpType = "cubic"; + std::string secondTcpType = ""; + std::string queueType = "codel"; + Time stopTime = Seconds (70); + Time baseRtt = MilliSeconds (80); + DataRate linkRate ("50Mbps"); + bool queueUseEcn = false; + Time ceThreshold = MilliSeconds (1); + bool enablePcap = false; + + //////////////////////////////////////////////////////////// + // Override ns-3 defaults // + //////////////////////////////////////////////////////////// + Config::SetDefault ("ns3::TcpSocket::SegmentSize", UintegerValue (1448)); + // Increase default buffer sizes to improve throughput over long delay paths + //Config::SetDefault ("ns3::TcpSocket::SndBufSize",UintegerValue (8192000)); + //Config::SetDefault 
("ns3::TcpSocket::RcvBufSize",UintegerValue (8192000)); + Config::SetDefault ("ns3::TcpSocket::SndBufSize",UintegerValue (32768000)); + Config::SetDefault ("ns3::TcpSocket::RcvBufSize",UintegerValue (32768000)); + Config::SetDefault ("ns3::TcpSocket::InitialCwnd", UintegerValue (10)); + Config::SetDefault ("ns3::TcpL4Protocol::RecoveryType", TypeIdValue (TcpPrrRecovery::GetTypeId ())); + + //////////////////////////////////////////////////////////// + // command-line argument parsing // + //////////////////////////////////////////////////////////// + CommandLine cmd (__FILE__); + cmd.AddValue ("firstTcpType", "first TCP type (cubic, dctcp, or reno)", firstTcpType); + cmd.AddValue ("secondTcpType", "second TCP type (cubic, dctcp, or reno)", secondTcpType); + cmd.AddValue ("queueType", "bottleneck queue type (fq, codel, pie, or red)", queueType); + cmd.AddValue ("baseRtt", "base RTT", baseRtt); + cmd.AddValue ("ceThreshold", "CoDel CE threshold (for DCTCP)", ceThreshold); + cmd.AddValue ("linkRate", "data rate of bottleneck link", linkRate); + cmd.AddValue ("stopTime", "simulation stop time", stopTime); + cmd.AddValue ("queueUseEcn", "use ECN on queue", queueUseEcn); + cmd.AddValue ("enablePcap", "enable Pcap", enablePcap); + cmd.AddValue ("validate", "validation case to run", g_validate); + cmd.Parse (argc, argv); + + // If validation is selected, perform some configuration checks + if (g_validate != "") + { + NS_ABORT_MSG_UNLESS (g_validate == "dctcp-10ms" + || g_validate == "dctcp-80ms" + || g_validate == "cubic-50ms-no-ecn" + || g_validate == "cubic-50ms-ecn", "Unknown test"); + if (g_validate == "dctcp-10ms" || g_validate == "dctcp-80ms") + { + NS_ABORT_MSG_UNLESS (firstTcpType == "dctcp", "Incorrect TCP"); + NS_ABORT_MSG_UNLESS (secondTcpType == "", "Incorrect TCP"); + NS_ABORT_MSG_UNLESS (linkRate == DataRate ("50Mbps"), "Incorrect data rate"); + NS_ABORT_MSG_UNLESS (queueUseEcn == true, "Incorrect ECN configuration"); + NS_ABORT_MSG_UNLESS (stopTime >= 
Seconds (15), "Incorrect stopTime"); + if (g_validate == "dctcp-10ms") + { + NS_ABORT_MSG_UNLESS (baseRtt == MilliSeconds (10), "Incorrect RTT"); + } + else if (g_validate == "dctcp-80ms") + { + NS_ABORT_MSG_UNLESS (baseRtt == MilliSeconds (80), "Incorrect RTT"); + } + } + else if (g_validate == "cubic-50ms-no-ecn" || g_validate == "cubic-50ms-ecn") + { + NS_ABORT_MSG_UNLESS (firstTcpType == "cubic", "Incorrect TCP"); + NS_ABORT_MSG_UNLESS (secondTcpType == "", "Incorrect TCP"); + NS_ABORT_MSG_UNLESS (baseRtt == MilliSeconds (50), "Incorrect RTT"); + NS_ABORT_MSG_UNLESS (linkRate == DataRate ("50Mbps"), "Incorrect data rate"); + NS_ABORT_MSG_UNLESS (stopTime >= Seconds (20), "Incorrect stopTime"); + if (g_validate == "cubic-50ms-no-ecn") + { + NS_ABORT_MSG_UNLESS (queueUseEcn == false, "Incorrect ECN configuration"); + } + else if (g_validate == "cubic-50ms-ecn") + { + NS_ABORT_MSG_UNLESS (queueUseEcn == true, "Incorrect ECN configuration"); + } + } + } + + if (enableLogging) + { + LogComponentEnable ("TcpSocketBase", (LogLevel)(LOG_PREFIX_FUNC | LOG_PREFIX_NODE | LOG_PREFIX_TIME | LOG_LEVEL_ALL)); + LogComponentEnable ("TcpDctcp", (LogLevel)(LOG_PREFIX_FUNC | LOG_PREFIX_NODE | LOG_PREFIX_TIME | LOG_LEVEL_ALL)); + } + + Time oneWayDelay = baseRtt / 2; + + TypeId firstTcpTypeId; + if (firstTcpType == "reno") + { + firstTcpTypeId = TcpLinuxReno::GetTypeId (); + } + else if (firstTcpType == "cubic") + { + firstTcpTypeId = TcpCubic::GetTypeId (); + } + else if (firstTcpType == "dctcp") + { + firstTcpTypeId = TcpDctcp::GetTypeId (); + Config::SetDefault ("ns3::CoDelQueueDisc::CeThreshold", TimeValue (ceThreshold)); + Config::SetDefault ("ns3::FqCoDelQueueDisc::CeThreshold", TimeValue (ceThreshold)); + if (queueUseEcn == false) + { + std::cout << "Warning: using DCTCP with queue ECN disabled" << std::endl; + } + } + else + { + NS_FATAL_ERROR ("Fatal error: tcp unsupported"); + } + TypeId secondTcpTypeId; + if (secondTcpType == "reno") + { + enableSecondTcp = true; + 
secondTcpTypeId = TcpLinuxReno::GetTypeId (); + } + else if (secondTcpType == "cubic") + { + enableSecondTcp = true; + secondTcpTypeId = TcpCubic::GetTypeId (); + } + else if (secondTcpType == "dctcp") + { + enableSecondTcp = true; + secondTcpTypeId = TcpDctcp::GetTypeId (); + } + else if (secondTcpType == "") + { + enableSecondTcp = false; + NS_LOG_DEBUG ("No second TCP selected"); + } + else + { + NS_FATAL_ERROR ("Fatal error: tcp unsupported"); + } + TypeId queueTypeId; + if (queueType == "fq") + { + queueTypeId = FqCoDelQueueDisc::GetTypeId (); + } + else if (queueType == "codel") + { + queueTypeId = CoDelQueueDisc::GetTypeId (); + } + else if (queueType == "pie") + { + queueTypeId = PieQueueDisc::GetTypeId (); + } + else if (queueType == "red") + { + queueTypeId = RedQueueDisc::GetTypeId (); + } + else + { + NS_FATAL_ERROR ("Fatal error: queueType unsupported"); + } + + if (queueUseEcn) + { + Config::SetDefault ("ns3::CoDelQueueDisc::UseEcn", BooleanValue (true)); + Config::SetDefault ("ns3::FqCoDelQueueDisc::UseEcn", BooleanValue (true)); + Config::SetDefault ("ns3::PieQueueDisc::UseEcn", BooleanValue (true)); + Config::SetDefault ("ns3::RedQueueDisc::UseEcn", BooleanValue (true)); + } + // Enable TCP to use ECN regardless + Config::SetDefault ("ns3::TcpSocketBase::UseEcn", StringValue ("On")); + + // Report on configuration + if (enableSecondTcp) + { + NS_LOG_DEBUG ("first TCP: " << firstTcpTypeId.GetName () << "; second TCP: " << secondTcpTypeId.GetName () << "; queue: " << queueTypeId.GetName () << "; ceThreshold: " << ceThreshold.GetSeconds () * 1000 << "ms"); + } + else + { + NS_LOG_DEBUG ("first TCP: " << firstTcpTypeId.GetName () << "; queue: " << queueTypeId.GetName () << "; ceThreshold: " << ceThreshold.GetSeconds () * 1000 << "ms"); + } + + // Write traces only if we are not in validation mode (g_validate == "") + std::ofstream pingOfStream; + std::ofstream firstTcpRttOfStream; + std::ofstream firstTcpCwndOfStream; + std::ofstream 
firstTcpThroughputOfStream; + std::ofstream firstTcpDctcpOfStream; + std::ofstream secondTcpRttOfStream; + std::ofstream secondTcpCwndOfStream; + std::ofstream secondTcpThroughputOfStream; + std::ofstream secondTcpDctcpOfStream; + std::ofstream queueDropOfStream; + std::ofstream queueMarkOfStream; + std::ofstream queueMarksFrequencyOfStream; + std::ofstream queueLengthOfStream; + if (g_validate == "") + { + pingOfStream.open (pingTraceFile.c_str (), std::ofstream::out); + firstTcpRttOfStream.open (firstTcpRttTraceFile.c_str (), std::ofstream::out); + firstTcpCwndOfStream.open (firstTcpCwndTraceFile.c_str (), std::ofstream::out); + firstTcpThroughputOfStream.open (firstTcpThroughputTraceFile.c_str (), std::ofstream::out); + if (firstTcpType == "dctcp") + { + firstTcpDctcpOfStream.open (firstDctcpTraceFile.c_str (), std::ofstream::out); + } + if (enableSecondTcp) + { + secondTcpRttOfStream.open (secondTcpRttTraceFile.c_str (), std::ofstream::out); + secondTcpCwndOfStream.open (secondTcpCwndTraceFile.c_str (), std::ofstream::out); + secondTcpThroughputOfStream.open (secondTcpThroughputTraceFile.c_str (), std::ofstream::out); + if (secondTcpType == "dctcp") + { + secondTcpDctcpOfStream.open (secondDctcpTraceFile.c_str (), std::ofstream::out); + } + } + queueDropOfStream.open (queueDropTraceFile.c_str (), std::ofstream::out); + queueMarkOfStream.open (queueMarkTraceFile.c_str (), std::ofstream::out); + queueMarksFrequencyOfStream.open (queueMarksFrequencyTraceFile.c_str (), std::ofstream::out); + queueLengthOfStream.open (queueLengthTraceFile.c_str (), std::ofstream::out); + } + + //////////////////////////////////////////////////////////// + // scenario setup // + //////////////////////////////////////////////////////////// + Ptr pingServer = CreateObject (); + Ptr firstServer = CreateObject (); + Ptr secondServer = CreateObject (); + Ptr wanRouter = CreateObject (); + Ptr lanRouter = CreateObject (); + Ptr pingClient = CreateObject (); + Ptr firstClient = CreateObject 
(); + Ptr secondClient = CreateObject (); + + // Device containers + NetDeviceContainer pingServerDevices; + NetDeviceContainer firstServerDevices; + NetDeviceContainer secondServerDevices; + NetDeviceContainer wanLanDevices; + NetDeviceContainer pingClientDevices; + NetDeviceContainer firstClientDevices; + NetDeviceContainer secondClientDevices; + + PointToPointHelper p2p; + p2p.SetQueue ("ns3::DropTailQueue", "MaxSize", QueueSizeValue (QueueSize ("3p"))); + p2p.SetDeviceAttribute ("DataRate", DataRateValue (DataRate ("1000Mbps"))); + // Add delay only on the WAN links + p2p.SetChannelAttribute ("Delay", TimeValue (MicroSeconds (1))); + pingServerDevices = p2p.Install (wanRouter, pingServer); + firstServerDevices = p2p.Install (wanRouter, firstServer); + secondServerDevices = p2p.Install (wanRouter, secondServer); + p2p.SetChannelAttribute ("Delay", TimeValue (oneWayDelay)); + wanLanDevices = p2p.Install (wanRouter, lanRouter); + p2p.SetQueue ("ns3::DropTailQueue", "MaxSize", QueueSizeValue (QueueSize ("3p"))); + p2p.SetChannelAttribute ("Delay", TimeValue (MicroSeconds (1))); + pingClientDevices = p2p.Install (lanRouter, pingClient); + firstClientDevices = p2p.Install (lanRouter, firstClient); + secondClientDevices = p2p.Install (lanRouter, secondClient); + + // Limit the bandwidth on the wanRouter->lanRouter interface + Ptr p = wanLanDevices.Get (0)->GetObject (); + p->SetAttribute ("DataRate", DataRateValue (linkRate)); + + InternetStackHelper stackHelper; + stackHelper.Install (pingServer); + Ptr proto; + stackHelper.Install (firstServer); + proto = firstServer->GetObject (); + proto->SetAttribute ("SocketType", TypeIdValue (firstTcpTypeId)); + stackHelper.Install (secondServer); + stackHelper.Install (wanRouter); + stackHelper.Install (lanRouter); + stackHelper.Install (pingClient); + + stackHelper.Install (firstClient); + // Set the per-node TCP type here + proto = firstClient->GetObject (); + proto->SetAttribute ("SocketType", TypeIdValue (firstTcpTypeId)); 
+ stackHelper.Install (secondClient); + + if (enableSecondTcp) + { + proto = secondClient->GetObject (); + proto->SetAttribute ("SocketType", TypeIdValue (secondTcpTypeId)); + proto = secondServer->GetObject (); + proto->SetAttribute ("SocketType", TypeIdValue (secondTcpTypeId)); + } + + // InternetStackHelper will install a base TrafficControLayer on the node, + // but the Ipv4AddressHelper below will install the default FqCoDelQueueDisc + // on all single device nodes. The below code overrides the configuration + // that is normally done by the Ipv4AddressHelper::Install() method by + // instead explicitly configuring the queue discs we want on each device. + TrafficControlHelper tchFq; + tchFq.SetRootQueueDisc ("ns3::FqCoDelQueueDisc"); + tchFq.SetQueueLimits ("ns3::DynamicQueueLimits", "HoldTime", StringValue ("1ms")); + tchFq.Install (pingServerDevices); + tchFq.Install (firstServerDevices); + tchFq.Install (secondServerDevices); + tchFq.Install (wanLanDevices.Get (1)); + tchFq.Install (pingClientDevices); + tchFq.Install (firstClientDevices); + tchFq.Install (secondClientDevices); + // Install queue for bottleneck link + TrafficControlHelper tchBottleneck; + tchBottleneck.SetRootQueueDisc (queueTypeId.GetName ()); + tchBottleneck.SetQueueLimits ("ns3::DynamicQueueLimits", "HoldTime", StringValue ("1ms")); + tchBottleneck.Install (wanLanDevices.Get (0)); + + Ipv4AddressHelper ipv4; + ipv4.SetBase ("10.1.1.0", "255.255.255.0"); + Ipv4InterfaceContainer pingServerIfaces = ipv4.Assign (pingServerDevices); + ipv4.SetBase ("10.1.2.0", "255.255.255.0"); + Ipv4InterfaceContainer firstServerIfaces = ipv4.Assign (firstServerDevices); + ipv4.SetBase ("10.1.3.0", "255.255.255.0"); + Ipv4InterfaceContainer secondServerIfaces = ipv4.Assign (secondServerDevices); + ipv4.SetBase ("172.16.1.0", "255.255.255.0"); + Ipv4InterfaceContainer wanLanIfaces = ipv4.Assign (wanLanDevices); + ipv4.SetBase ("192.168.1.0", "255.255.255.0"); + Ipv4InterfaceContainer pingClientIfaces = 
ipv4.Assign (pingClientDevices); + ipv4.SetBase ("192.168.2.0", "255.255.255.0"); + Ipv4InterfaceContainer firstClientIfaces = ipv4.Assign (firstClientDevices); + ipv4.SetBase ("192.168.3.0", "255.255.255.0"); + Ipv4InterfaceContainer secondClientIfaces = ipv4.Assign (secondClientDevices); + + Ipv4GlobalRoutingHelper::PopulateRoutingTables (); + + //////////////////////////////////////////////////////////// + // application setup // + //////////////////////////////////////////////////////////// + V4PingHelper pingHelper ("192.168.1.2"); + pingHelper.SetAttribute ("Interval", TimeValue (pingInterval)); + pingHelper.SetAttribute ("Size", UintegerValue (pingSize)); + ApplicationContainer pingContainer = pingHelper.Install (pingServer); + Ptr v4Ping = pingContainer.Get (0)->GetObject (); + v4Ping->TraceConnectWithoutContext ("Rtt", MakeBoundCallback (&TracePingRtt, &pingOfStream)); + pingContainer.Start (Seconds (1)); + pingContainer.Stop (stopTime - Seconds (1)); + + ApplicationContainer firstApp; + uint16_t firstPort = 5000; + BulkSendHelper tcp ("ns3::TcpSocketFactory", Address ()); + // set to large value: e.g. 
1000 Mb/s for 60 seconds = 7500000000 bytes + tcp.SetAttribute ("MaxBytes", UintegerValue (7500000000)); + // Configure first TCP client/server pair + InetSocketAddress firstDestAddress (firstClientIfaces.GetAddress (1), firstPort); + tcp.SetAttribute ("Remote", AddressValue (firstDestAddress)); + firstApp = tcp.Install (firstServer); + firstApp.Start (Seconds (5)); + firstApp.Stop (stopTime - Seconds (1)); + + Address firstSinkAddress (InetSocketAddress (Ipv4Address::GetAny (), firstPort)); + ApplicationContainer firstSinkApp; + PacketSinkHelper firstSinkHelper ("ns3::TcpSocketFactory", firstSinkAddress); + firstSinkApp = firstSinkHelper.Install (firstClient); + firstSinkApp.Start (Seconds (5)); + firstSinkApp.Stop (stopTime - MilliSeconds (500)); + + // Configure second TCP client/server pair + if (enableSecondTcp) + { + BulkSendHelper tcp ("ns3::TcpSocketFactory", Address ()); + uint16_t secondPort = 5000; + ApplicationContainer secondApp; + InetSocketAddress secondDestAddress (secondClientIfaces.GetAddress (1), secondPort); + tcp.SetAttribute ("Remote", AddressValue (secondDestAddress)); + secondApp = tcp.Install (secondServer); + secondApp.Start (Seconds (15)); + secondApp.Stop (stopTime - Seconds (1)); + + Address secondSinkAddress (InetSocketAddress (Ipv4Address::GetAny (), secondPort)); + PacketSinkHelper secondSinkHelper ("ns3::TcpSocketFactory", secondSinkAddress); + ApplicationContainer secondSinkApp; + secondSinkApp = secondSinkHelper.Install (secondClient); + secondSinkApp.Start (Seconds (15)); + secondSinkApp.Stop (stopTime - MilliSeconds (500)); + } + + // Setup traces that can be hooked now + Ptr tc; + Ptr qd; + // Trace drops and marks for bottleneck + tc = wanLanDevices.Get (0)->GetNode ()->GetObject (); + qd = tc->GetRootQueueDiscOnDevice (wanLanDevices.Get (0)); + qd->TraceConnectWithoutContext ("Drop", MakeBoundCallback (&TraceQueueDrop, &queueDropOfStream)); + qd->TraceConnectWithoutContext ("Mark", MakeBoundCallback (&TraceQueueMark, 
&queueMarkOfStream)); + qd->TraceConnectWithoutContext ("BytesInQueue", MakeBoundCallback (&TraceQueueLength, &queueLengthOfStream, linkRate)); + + // Setup scheduled traces; TCP traces must be hooked after socket creation + Simulator::Schedule (Seconds (5) + MilliSeconds (100), &ScheduleFirstTcpRttTraceConnection, &firstTcpRttOfStream); + Simulator::Schedule (Seconds (5) + MilliSeconds (100), &ScheduleFirstTcpCwndTraceConnection, &firstTcpCwndOfStream); + Simulator::Schedule (Seconds (5) + MilliSeconds (100), &ScheduleFirstPacketSinkConnection); + if (firstTcpType == "dctcp") + { + Simulator::Schedule (Seconds (5) + MilliSeconds (100), &ScheduleFirstDctcpTraceConnection, &firstTcpDctcpOfStream); + } + Simulator::Schedule (throughputSamplingInterval, &TraceFirstThroughput, &firstTcpThroughputOfStream, throughputSamplingInterval); + if (enableSecondTcp) + { + // Setup scheduled traces; TCP traces must be hooked after socket creation + Simulator::Schedule (Seconds (15) + MilliSeconds (100), &ScheduleSecondTcpRttTraceConnection, &secondTcpRttOfStream); + Simulator::Schedule (Seconds (15) + MilliSeconds (100), &ScheduleSecondTcpCwndTraceConnection, &secondTcpCwndOfStream); + Simulator::Schedule (Seconds (15) + MilliSeconds (100), &ScheduleSecondPacketSinkConnection); + Simulator::Schedule (throughputSamplingInterval, &TraceSecondThroughput, &secondTcpThroughputOfStream, throughputSamplingInterval); + if (secondTcpType == "dctcp") + { + Simulator::Schedule (Seconds (15) + MilliSeconds (100), &ScheduleSecondDctcpTraceConnection, &secondTcpDctcpOfStream); + } + } + Simulator::Schedule (marksSamplingInterval, &TraceMarksFrequency, &queueMarksFrequencyOfStream, marksSamplingInterval); + + if (enablePcap) + { + p2p.EnablePcapAll ("tcp-validation", false); + } + + Simulator::Stop (stopTime); + Simulator::Run (); + Simulator::Destroy (); + + if (g_validate == "") + { + pingOfStream.close (); + firstTcpCwndOfStream.close (); + firstTcpRttOfStream.close (); + if (firstTcpType == 
"dctcp") + { + firstTcpDctcpOfStream.close (); + } + firstTcpThroughputOfStream.close (); + if (enableSecondTcp) + { + secondTcpCwndOfStream.close (); + secondTcpRttOfStream.close (); + secondTcpThroughputOfStream.close (); + if (secondTcpType == "dctcp") + { + secondTcpDctcpOfStream.close (); + } + } + queueDropOfStream.close (); + queueMarkOfStream.close (); + queueMarksFrequencyOfStream.close (); + queueLengthOfStream.close (); + } + + if (g_validationFailed) + { + NS_FATAL_ERROR ("Validation failed"); + } +} +