tcp: Some TcpLinuxReno documentation and example program edits

This commit is contained in:
Tom Henderson
2020-08-06 13:23:37 -07:00
parent 721dc72499
commit 337ab206d1
4 changed files with 63 additions and 62 deletions

View File

@@ -54,7 +54,7 @@ us a note on ns-developers mailing list.</p>
<h1>Changes from ns-3.31 to ns-3.32</h1>
<h2>New API:</h2>
<ul>
<li></li>
<li>A new TCP congestion control, <b>TcpLinuxReno</b>, has been added.</li>
</ul>
<h2>Changes to existing API:</h2>
<ul>
@@ -68,6 +68,7 @@ us a note on ns-developers mailing list.</p>
<h2>Changed behavior:</h2>
<ul>
<li>Support for <b>RIFS</b> has been dropped from wifi. RIFS has been obsoleted by the 802.11 standard and support for it was not implemented according to the standard.</li>
<li>The behavior of <b>TcpPrrRecovery</b> algorithm was aligned to that of Linux.</li>
</ul>
<hr>

View File

@@ -16,6 +16,7 @@ New user-visible features
-------------------------
- (build system) Added "--enable-asserts" and "--enable-logs" to waf configure,
to selectively enable asserts and/or logs in release and optimized builds.
- (tcp) Added TcpLinuxReno congestion control (aligns with Linux 'reno' congestion control).
Bugs fixed
----------

View File

@@ -25,9 +25,9 @@
// 1 ms 10 ms 1 ms
//
// - TCP flow from n0 to n3 using BulkSendApplication.
// - The following simulation output is stored in result/ in ns-3 root directory:
// - The following simulation output is stored in results/ in ns-3 top-level directory:
// - cwnd traces are stored in cwndTraces folder
// - queue length statistics are stored in queue-size.plotme file
// - queue length statistics are stored in queue-size.dat file
// - pcaps are stored in pcap folder
// - queueTraces folder contain the drop statistics at queue
// - queueStats.txt file contains the queue stats and config.txt file contains
@@ -48,10 +48,9 @@
#include "ns3/traffic-control-module.h"
using namespace ns3;
Ptr<UniformRandomVariable> uv = CreateObject<UniformRandomVariable> ();
std::string dir = "result/";
double stopTime = 300;
uint32_t dataSize = 524;
std::string dir = "results/";
Time stopTime = Seconds (60);
uint32_t segmentSize = 524;
// Function to check queue length of Router 1
void
@@ -61,7 +60,7 @@ CheckQueueSize (Ptr<QueueDisc> queue)
// Check queue size every 1/100 of a second
Simulator::Schedule (Seconds (0.001), &CheckQueueSize, queue);
std::ofstream fPlotQueue (std::stringstream (dir + "queue-size.plotme").str ().c_str (), std::ios::out | std::ios::app);
std::ofstream fPlotQueue (std::stringstream (dir + "queue-size.dat").str ().c_str (), std::ios::out | std::ios::app);
fPlotQueue << Simulator::Now ().GetSeconds () << " " << qSize << std::endl;
fPlotQueue.close ();
}
@@ -70,8 +69,8 @@ CheckQueueSize (Ptr<QueueDisc> queue)
static void
CwndChange (uint32_t oldCwnd, uint32_t newCwnd)
{
std::ofstream fPlotQueue (dir + "cwndTraces/n0.plotme", std::ios::out | std::ios::app);
fPlotQueue << Simulator::Now ().GetSeconds () << " " << newCwnd / dataSize << std::endl;
std::ofstream fPlotQueue (dir + "cwndTraces/n0.dat", std::ios::out | std::ios::app);
fPlotQueue << Simulator::Now ().GetSeconds () << " " << newCwnd / segmentSize << std::endl;
fPlotQueue.close ();
}
@@ -91,60 +90,53 @@ TraceCwnd (uint32_t node, uint32_t cwndWindow,
}
// Function to install BulkSend application
void InstallBulkSend (Ptr<Node> node, Ipv4Address address, uint16_t port, std::string sock_factory,
void InstallBulkSend (Ptr<Node> node, Ipv4Address address, uint16_t port, std::string socketFactory,
uint32_t nodeId, uint32_t cwndWindow,
Callback <void, uint32_t, uint32_t> CwndTrace)
{
BulkSendHelper source (sock_factory, InetSocketAddress (address, port));
BulkSendHelper source (socketFactory, InetSocketAddress (address, port));
source.SetAttribute ("MaxBytes", UintegerValue (0));
ApplicationContainer sourceApps = source.Install (node);
sourceApps.Start (Seconds (10.0));
Simulator::Schedule (Seconds (10.0) + Seconds (0.001), &TraceCwnd, nodeId, cwndWindow, CwndTrace);
sourceApps.Stop (Seconds (stopTime));
sourceApps.Stop (stopTime);
}
// Function to install sink application
void InstallPacketSink (Ptr<Node> node, uint16_t port, std::string sock_factory)
void InstallPacketSink (Ptr<Node> node, uint16_t port, std::string socketFactory)
{
PacketSinkHelper sink (sock_factory, InetSocketAddress (Ipv4Address::GetAny (), port));
PacketSinkHelper sink (socketFactory, InetSocketAddress (Ipv4Address::GetAny (), port));
ApplicationContainer sinkApps = sink.Install (node);
sinkApps.Start (Seconds (10.0));
sinkApps.Stop (Seconds (stopTime));
sinkApps.Stop (stopTime);
}
int main (int argc, char *argv[])
{
uint32_t stream = 1;
std::string sock_factory = "ns3::TcpSocketFactory";
std::string transport_prot = "TcpLinuxReno";
std::string queue_disc_type = "FifoQueueDisc";
std::string socketFactory = "ns3::TcpSocketFactory";
std::string tcpTypeId = "ns3::TcpLinuxReno";
std::string qdiscTypeId = "ns3::FifoQueueDisc";
bool isSack = true;
uint32_t delAckCount = 1;
std::string recovery = "TcpClassicRecovery";
std::string recovery = "ns3::TcpClassicRecovery";
CommandLine cmd;
cmd.AddValue ("stream", "Seed value for random variable", stream);
cmd.AddValue ("transport_prot", "Transport protocol to use: TcpNewReno, TcpLinuxReno", transport_prot);
cmd.AddValue ("queue_disc_type", "Queue disc type for gateway (e.g. ns3::CoDelQueueDisc)", queue_disc_type);
cmd.AddValue ("dataSize", "Data packet size", dataSize);
cmd.AddValue ("tcpTypeId", "TCP variant to use (e.g., ns3::TcpNewReno, ns3::TcpLinuxReno, etc.)", tcpTypeId);
cmd.AddValue ("qdiscTypeId", "Queue disc for gateway (e.g., ns3::CoDelQueueDisc)", qdiscTypeId);
cmd.AddValue ("segmentSize", "TCP segment size (bytes)", segmentSize);
cmd.AddValue ("delAckCount", "Delayed ack count", delAckCount);
cmd.AddValue ("Sack", "Flag to enable/disable sack in TCP", isSack);
cmd.AddValue ("enableSack", "Flag to enable/disable sack in TCP", isSack);
cmd.AddValue ("stopTime", "Stop time for applications / simulation time will be stopTime", stopTime);
cmd.AddValue ("recovery", "Recovery algorithm type to use (e.g., ns3::TcpPrrRecovery", recovery);
cmd.Parse (argc, argv);
uv->SetStream (stream);
queue_disc_type = std::string ("ns3::") + queue_disc_type;
transport_prot = std::string ("ns3::") + transport_prot;
recovery = std::string ("ns3::") + recovery;
TypeId qdTid;
NS_ABORT_MSG_UNLESS (TypeId::LookupByNameFailSafe (queue_disc_type, &qdTid), "TypeId " << queue_disc_type << " not found");
NS_ABORT_MSG_UNLESS (TypeId::LookupByNameFailSafe (qdiscTypeId, &qdTid), "TypeId " << qdiscTypeId << " not found");
// Set recovery algorithm and TCP variant
Config::SetDefault ("ns3::TcpL4Protocol::RecoveryType", TypeIdValue (TypeId::LookupByName (recovery)));
if (transport_prot.compare ("ns3::TcpWestwoodPlus") == 0)
if (tcpTypeId.compare ("ns3::TcpWestwoodPlus") == 0)
{
// TcpWestwoodPlus is not an actual TypeId name; we need TcpWestwood here
Config::SetDefault ("ns3::TcpL4Protocol::SocketType", TypeIdValue (TcpWestwood::GetTypeId ()));
@@ -154,8 +146,8 @@ int main (int argc, char *argv[])
else
{
TypeId tcpTid;
NS_ABORT_MSG_UNLESS (TypeId::LookupByNameFailSafe (transport_prot, &tcpTid), "TypeId " << transport_prot << " not found");
Config::SetDefault ("ns3::TcpL4Protocol::SocketType", TypeIdValue (TypeId::LookupByName (transport_prot)));
NS_ABORT_MSG_UNLESS (TypeId::LookupByNameFailSafe (tcpTypeId, &tcpTid), "TypeId " << tcpTypeId << " not found");
Config::SetDefault ("ns3::TcpL4Protocol::SocketType", TypeIdValue (TypeId::LookupByName (tcpTypeId)));
}
// Create nodes
@@ -212,12 +204,12 @@ int main (int argc, char *argv[])
Config::SetDefault ("ns3::TcpSocket::DelAckCount", UintegerValue (delAckCount));
// Set default segment size of TCP packet to a specified value
Config::SetDefault ("ns3::TcpSocket::SegmentSize", UintegerValue (dataSize));
Config::SetDefault ("ns3::TcpSocket::SegmentSize", UintegerValue (segmentSize));
// Enable/Disable SACK in TCP
Config::SetDefault ("ns3::TcpSocketBase::Sack", BooleanValue (isSack));
// Create directories to store plotme files
// Create directories to store dat files
struct stat buffer;
int retVal;
if ((stat (dir.c_str (), &buffer)) == 0)
@@ -238,11 +230,11 @@ int main (int argc, char *argv[])
NS_UNUSED (retVal);
// Set default parameters for queue discipline
Config::SetDefault (queue_disc_type + "::MaxSize", QueueSizeValue (QueueSize ("100p")));
Config::SetDefault (qdiscTypeId + "::MaxSize", QueueSizeValue (QueueSize ("100p")));
// Install queue discipline on router
TrafficControlHelper tch;
tch.SetRootQueueDisc (queue_disc_type);
tch.SetRootQueueDisc (qdiscTypeId);
QueueDiscContainer qd;
tch.Uninstall (routers.Get (0)->GetDevice (0));
qd.Add (tch.Install (routers.Get (0)->GetDevice (0)).Get (0));
@@ -256,8 +248,8 @@ int main (int argc, char *argv[])
AsciiTraceHelper asciiTraceHelper;
Ptr<OutputStreamWrapper> streamWrapper;
// Create plotme to store packets dropped and marked at the router
streamWrapper = asciiTraceHelper.CreateFileStream (dir + "/queueTraces/drop-0.plotme");
// Create dat to store packets dropped and marked at the router
streamWrapper = asciiTraceHelper.CreateFileStream (dir + "/queueTraces/drop-0.dat");
qd.Get (0)->TraceConnectWithoutContext ("Drop", MakeBoundCallback (&DropAtQueue, streamWrapper));
// Install packet sink at receiver side
@@ -265,12 +257,12 @@ int main (int argc, char *argv[])
InstallPacketSink (rightNodes.Get (0), port, "ns3::TcpSocketFactory");
// Install BulkSend application
InstallBulkSend (leftNodes.Get (0), routerToRightIPAddress [0].GetAddress (1), port, sock_factory, 2, 0, MakeCallback (&CwndChange));
InstallBulkSend (leftNodes.Get (0), routerToRightIPAddress [0].GetAddress (1), port, socketFactory, 2, 0, MakeCallback (&CwndChange));
// Enable PCAP on all the point to point interfaces
pointToPointLeaf.EnablePcapAll (dir + "pcap/ns-3", true);
Simulator::Stop (Seconds (stopTime));
Simulator::Stop (stopTime);
Simulator::Run ();
// Store queue stats in a file
@@ -283,11 +275,11 @@ int main (int argc, char *argv[])
// Store configuration of the simulation in a file
myfile.open (dir + "config.txt", std::fstream::in | std::fstream::out | std::fstream::app);
myfile << "queue_disc_type " << queue_disc_type << "\n";
myfile << "qdiscTypeId " << qdiscTypeId << "\n";
myfile << "stream " << stream << "\n";
myfile << "dataSize " << dataSize << "\n";
myfile << "segmentSize " << segmentSize << "\n";
myfile << "delAckCount " << delAckCount << "\n";
myfile << "stopTime " << stopTime << "\n";
myfile << "stopTime " << stopTime.As (Time::S) << "\n";
myfile.close ();
Simulator::Destroy ();

View File

@@ -393,17 +393,23 @@ start threshold is halved.
Linux Reno
^^^^^^^^^^
TCP Linux Reno is designed to provide users Linux like implementation of
TCP New Reno. The current implementation of TCP New Reno in ns-3 follows
RFC standards which increases cwnd more conservatively than Linux Reno.
TCP Linux Reno (class :cpp:class:`TcpLinuxReno`) is designed to provide a
Linux-like implementation of
TCP NewReno. The implementation of class :cpp:class:`TcpNewReno` in ns-3
follows RFC standards, and increases cwnd more conservatively than does Linux Reno.
Linux Reno modifies slow start and congestion avoidance algorithms to
increase cwnd based on the number of bytes being acknowledged by each
arriving ACK, rather than by the number of ACKs that arrive.
arriving ACK, rather than by the number of ACKs that arrive. Another major
difference in implementation is that Linux maintains the congestion window
in units of segments, while the RFCs define the congestion window in units of
bytes.
In slow start phase, on each incoming ACK at the TCP sender side cwnd
is increased by the number of previously unacknowledged bytes ACKed by the
incoming acknowledgment. Whereas in default ns-3 New Reno, cwnd is increased
one segment.
incoming acknowledgment. In contrast, in ns-3 NewReno, cwnd is increased
by one segment per acknowledgment. In standards terminology, this
difference is referred to as Appropriate Byte Counting (RFC 3465); Linux
follows Appropriate Byte Counting while ns-3 NewReno does not.
.. math:: cwnd += segAcked * segmentSize
:label: linuxrenoslowstart
@@ -415,7 +421,7 @@ In congestion avoidance phase, the number of bytes that have been ACKed at
the TCP sender side are stored in a 'bytes_acked' variable in the TCP control
block. When 'bytes_acked' becomes greater than or equal to the value of the
cwnd, 'bytes_acked' is reduced by the value of cwnd. Next, cwnd is incremented
by a full-sized segment (SMSS). Whereas in ns-3 New Reno, cwnd is increased
by a full-sized segment (SMSS). In contrast, in ns-3 NewReno, cwnd is increased
by (1/cwnd) with a rounding off due to type casting into int.
.. code-block:: c++
@@ -444,23 +450,24 @@ by (1/cwnd) with a rounding off due to type casting into int.
:label: newrenocongavoid
So, there are two main difference between the TCP Linux Reno and TCP New Reno
So, there are two main differences between TCP Linux Reno and TCP NewReno
in ns-3:
1) In TCP Linux Reno absence or presence of delayed acknowledgement make no
difference whereas in TCP New Reno it makes a difference.
1) In TCP Linux Reno, delayed acknowledgement configuration does not affect
congestion window growth, while in TCP NewReno, delayed acknowledgments cause
a slower congestion window growth.
2) In congestion avoidance phase, the arithmetic for counting the number of
segments acked and deciding when to increment the cwnd is different for TCP
Linux Reno and TCP New Reno.
Linux Reno and TCP NewReno.
The following graphs show the behavior of window growth in TCP Linux Reno and
TCP New Reno with delayed acknowledgement of 2 segments:
TCP NewReno with delayed acknowledgement of 2 segments:
.. _fig-ns3-new-reno-vs-ns3-linux-reno:
.. figure:: figures/ns3-new-reno-vs-ns3-linux-reno.*
:align: center
ns-3 TCP New Reno v/s ns-3 TCP Linux Reno
ns-3 TCP NewReno v/s ns-3 TCP Linux Reno
HighSpeed
^^^^^^^^^
@@ -1199,15 +1206,15 @@ Several TCP validation test results can also be found in the
`wiki page <http://www.nsnam.org/wiki/New_TCP_Socket_Architecture>`_
describing this implementation.
The ns-3 implementation of TCP Linux Reno was validated against the New Reno
The ns-3 implementation of TCP Linux Reno was validated against the NewReno
implementation of Linux kernel 4.4.0 using ns-3 Direct Code Execution (DCE).
DCE is a framework which allows the users to run kernel space protocol inside
ns-3 without changing the source code.
In this validation, cwnd traces of DCE Linux Reno were compared to ns-3 Linux Reno
and New Reno for delayed acknowledgement of 1 segment. And it was observed that
and NewReno for delayed acknowledgement of 1 segment. It was observed that
cwnd traces for ns-3 Linux Reno were closely overlapping with DCE Linux Reno whereas
for ns-3 New Reno there was deviation in congestion avoidance phase.
for ns-3 NewReno there was deviation in congestion avoidance phase.
.. _fig-dce-Linux-reno-vs-ns3-linux-reno: