Do not miss ACKs due to cumulative ACK strategy

Reworked slow start and congestion avoidance, in order not to miss ACKs (due
to the cumulative ACK strategy) in Tcp NewReno.
This commit is contained in:
Natale Patriciello
2015-10-16 10:42:18 -07:00
parent 2e5a2c1301
commit 958b641e2a
2 changed files with 100 additions and 15 deletions

View File

@@ -73,9 +73,6 @@ void
TcpNewReno::NewAck (const SequenceNumber32& seq)
{
NS_LOG_FUNCTION (this << seq);
NS_LOG_LOGIC ("TcpNewReno received ACK for seq " << seq <<
" cwnd " << m_tcb->m_cWnd <<
" ssthresh " << m_tcb->m_ssThresh);
// No cWnd management while recovering
if (m_ackState == RECOVERY && seq < m_recover)
@@ -84,25 +81,109 @@ TcpNewReno::NewAck (const SequenceNumber32& seq)
return;
}
// Increase of cwnd based on current phase (slow start or congestion avoidance)
if (m_tcb->m_cWnd < m_tcb->m_ssThresh)
{ // Slow start mode, add one segSize to cWnd. Default m_tcb->m_ssThresh is 65535. (RFC2001, sec.1)
m_tcb->m_cWnd += m_tcb->m_segmentSize;
NS_LOG_INFO ("In SlowStart, ACK of seq " << seq << "; update cwnd to " << m_tcb->m_cWnd << "; ssthresh " << m_tcb->m_ssThresh);
uint32_t segmentsAcked = (seq - m_txBuffer->HeadSequence ()) / m_tcb->m_segmentSize;
NS_LOG_LOGIC ("TcpNewReno received ACK for seq " << seq <<
" cwnd " << m_tcb->m_cWnd <<
" ssthresh " << m_tcb->m_ssThresh <<
" acked " << segmentsAcked);
if (m_tcb->m_cWnd <= m_tcb->m_ssThresh)
{
segmentsAcked = SlowStart (segmentsAcked);
}
else
{ // Congestion avoidance mode, increase by (segSize*segSize)/cwnd. (RFC2581, sec.3.1)
// To increase cwnd for one segSize per RTT, it should be (ackBytes*segSize)/cwnd
double adder = static_cast<double> (m_tcb->m_segmentSize * m_tcb->m_segmentSize) / m_tcb->m_cWnd.Get ();
adder = std::max (1.0, adder);
m_tcb->m_cWnd += static_cast<uint32_t> (adder);
NS_LOG_INFO ("In CongAvoid, updated to cwnd " << m_tcb->m_cWnd << " ssthresh " << m_tcb->m_ssThresh);
if (m_tcb->m_cWnd > m_tcb->m_ssThresh)
{
CongestionAvoidance (segmentsAcked);
}
// Complete newAck processing
TcpSocketBase::NewAck (seq);
}
/**
* \brief Tcp NewReno slow start algorithm
*
* Defined in RFC 5681 as
*
* > During slow start, a TCP increments cwnd by at most SMSS bytes for
* > each ACK received that cumulatively acknowledges new data. Slow
* > start ends when cwnd exceeds ssthresh (or, optionally, when it
* > reaches it, as noted above) or when congestion is observed. While
* > traditionally TCP implementations have increased cwnd by precisely
* > SMSS bytes upon receipt of an ACK covering new data, we RECOMMEND
* > that TCP implementations increase cwnd, per:
* >
* > cwnd += min (N, SMSS) (2)
* >
* > where N is the number of previously unacknowledged bytes acknowledged
* > in the incoming ACK.
*
* The ns-3 implementation respects the RFC definition. Linux does something
* different:
* \verbatim
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
{
u32 cwnd = tp->snd_cwnd + acked;
if (cwnd > tp->snd_ssthresh)
cwnd = tp->snd_ssthresh + 1;
acked -= cwnd - tp->snd_cwnd;
tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);
return acked;
}
\endverbatim
*
* As stated, we want to avoid the case when a cumulative ACK increases cWnd more
* than a segment size, but we keep count of how many segments we have ignored,
* and return them.
*
* \param segmentsAcked count of segments acked
* \return the number of segments not considered for increasing the cWnd
*/
uint32_t
TcpNewReno::SlowStart (uint32_t segmentsAcked)
{
  NS_LOG_FUNCTION (this << segmentsAcked);

  // Nothing new was cumulatively acknowledged: leave cWnd untouched.
  if (segmentsAcked == 0)
    {
      return 0;
    }

  // Open the window by exactly one MSS per invocation, no matter how many
  // segments this cumulative ACK covered; the caller receives the leftover
  // count so the remaining acked segments are not lost.
  m_tcb->m_cWnd = m_tcb->m_cWnd.Get () + m_tcb->m_segmentSize;
  NS_LOG_INFO ("In SlowStart, updated to cwnd " << m_tcb->m_cWnd <<
               " ssthresh " << m_tcb->m_ssThresh << " MSS " << m_tcb->m_segmentSize);
  return segmentsAcked - 1;
}
/**
* \brief NewReno congestion avoidance
*
* During congestion avoidance, cwnd is incremented by roughly 1 full-sized
* segment per round-trip time (RTT).
*
* \param segmentsAcked count of segments acked
*/
void
TcpNewReno::CongestionAvoidance (uint32_t segmentsAcked)
{
  NS_LOG_FUNCTION (this << segmentsAcked);

  // No newly acknowledged segments: nothing to do.
  if (segmentsAcked == 0)
    {
      return;
    }

  // Grow cWnd by MSS*MSS/cWnd, i.e. roughly one full segment per RTT
  // (RFC 2581, sec. 3.1), but never by less than one byte.
  double increment = static_cast<double> (m_tcb->m_segmentSize * m_tcb->m_segmentSize) / m_tcb->m_cWnd.Get ();
  increment = std::max (1.0, increment);
  m_tcb->m_cWnd += static_cast<uint32_t> (increment);
  NS_LOG_INFO ("In CongAvoid, updated to cwnd " << m_tcb->m_cWnd <<
               " ssthresh " << m_tcb->m_ssThresh);
}
uint32_t
TcpNewReno::GetSsThresh ()
{

View File

@@ -56,6 +56,10 @@ protected:
virtual Ptr<TcpSocketBase> Fork (void); // Call CopyObject<TcpNewReno> to clone me
virtual void NewAck (SequenceNumber32 const& seq); // Inc cwnd and call NewAck() of parent
virtual uint32_t GetSsThresh ();
private:
uint32_t SlowStart (uint32_t segmentsAcked);
void CongestionAvoidance (uint32_t segmentsAcked);
};
} // namespace ns3