Move tutorial to Sphinx

This commit is contained in:
Tom Henderson
2011-01-02 22:57:32 -08:00
parent aa3e2ff1dd
commit 7502977115
30 changed files with 3539 additions and 9267 deletions

View File

@@ -1,39 +1,150 @@
# NOTE(review): this span is a rendered commit diff — the removed texi2html
# variables (TEXI2HTML, TEXI2PDF, CSS, SPLIT, IMAGES_EPS) appear interleaved
# with the added Sphinx variables, so EPSTOPDF is defined twice and the text
# as shown is not a runnable Makefile on its own.
TEXI2HTML = texi2html
TEXI2PDF = texi2dvi --pdf
EPSTOPDF = epstopdf
# Makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = build
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
# Doctrees are cached under $(BUILDDIR)/doctrees; "source" is the input dir.
# PAPEROPT_$(PAPER) expands to nothing when PAPER is unset.
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
# Additional variables for figures, not sphinx default:
DIA = dia
CONVERT = convert
CSS = --css-include=tutorial.css
SPLIT = --split section
EPSTOPDF = epstopdf
FIGURES = source/figures
# NOTE(review): the continuation below was truncated by the diff rendering —
# the original IMAGES_EPS figure list was deleted by this commit, so as shown
# the backslash joins the next assignment into IMAGES_EPS's value.
IMAGES_EPS = \
DIA_SOURCES = pp.dia dumbbell.dia star.dia
# Derived file lists: substitution references swap the extension per name.
IMAGES_PNG = ${IMAGES_EPS:.eps=.png}
IMAGES_PDF = ${IMAGES_EPS:.eps=.pdf}
DIA_EPS = ${DIA_SOURCES:.dia=.eps}
DIA_PNG = ${DIA_SOURCES:.dia=.png}
DIA_PDF = ${DIA_SOURCES:.dia=.pdf}
IMAGES = $(IMAGES_EPS) $(IMAGES_PNG) $(IMAGES_PDF)
# Old texi2html default target (deleted by this commit); the Sphinx flow has
# no "all" target and relies on explicit builder targets below.
all: html split-html pdf
# Every Sphinx builder target is a command, not a file product.
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest
# Note: tgif requires a valid x display to convert from .obj to .png.
# If running this makefile on a remote console, the X virtual frame
# buffer may be needed (xorg-x11-server-Xvfb) to provide a "fake"
# display
# Batch-convert every .dia source to .png/.eps, then each .eps to .pdf.
# NOTE(review): recipe lines in this rendered diff have lost their leading
# tabs; in the real Makefile each command line below begins with a hard tab.
images:
cd figures/; $(DIA) -t png $(DIA_SOURCES)
cd figures/; $(DIA) -t eps $(DIA_SOURCES)
cd figures/; $(foreach FILE,$(DIA_EPS),$(EPSTOPDF) $(FILE);)
# Per-file pattern rules used by the $(IMAGES) prerequisites of each builder.
# NOTE(review): "-o=$@" looks unusual for epstopdf (typically --outfile=FILE);
# confirm against the installed epstopdf before relying on the %.pdf rule.
%.eps : %.dia; $(DIA) -t eps $< -e $@
%.png : %.dia; $(DIA) -t png $< -e $@
%.pdf : %.eps; $(EPSTOPDF) $< -o=$@
# Old texi2html "html" target (deleted); superseded by the Sphinx html
# target further below — the duplicate target names here are a diff artifact.
html:
$(TEXI2HTML) ${CSS} tutorial.texi
# Self-documenting help: one line per supported Sphinx builder.
help:
@echo "Please use \`make <target>' where <target> is one of"
@echo "  html       to make standalone HTML files"
@echo "  dirhtml    to make HTML files named index.html in directories"
@echo "  singlehtml to make a single large HTML file"
@echo "  pickle     to make pickle files"
@echo "  json       to make JSON files"
@echo "  htmlhelp   to make HTML files and a HTML help project"
@echo "  qthelp     to make HTML files and a qthelp project"
@echo "  devhelp    to make HTML files and a Devhelp project"
@echo "  epub       to make an epub"
@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
@echo "  text       to make text files"
@echo "  man        to make manual pages"
@echo "  changes    to make an overview of all changed/added/deprecated items"
@echo "  linkcheck  to check all external links for integrity"
@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
# Old texi2html split output target (deleted by this commit).
split-html:
$(TEXI2HTML) ${CSS} ${SPLIT} --output tutorial tutorial.texi
# New clean: wipe all Sphinx build products; "-" ignores rm failures.
clean:
-rm -rf $(BUILDDIR)/*
# Old texi2pdf target (deleted by this commit).
pdf:
$(TEXI2PDF) tutorial.texi
# Build HTML fragments from the pickle output for embedding in the website.
# NOTE(review): pushd/popd are bashisms — this recipe assumes SHELL is bash,
# which make does not guarantee by default; verify before reuse.
frag: pickle
@if test ! -d $(BUILDDIR)/frag; then mkdir $(BUILDDIR)/frag; fi
pushd $(BUILDDIR)/frag && ../../pickle-to-xml.py ../pickle/index.fpickle > navigation.xml && popd
cp -r $(BUILDDIR)/pickle/_images $(BUILDDIR)/frag
# Remove all generated figure formats (sources *.dia are kept).
figures-clean:
cd figures/; rm -rf $(DIA_EPS); rm -rf $(DIA_PNG); rm -rf $(DIA_PDF)
# New Sphinx html target: figures are regenerated first via $(IMAGES).
html: $(IMAGES)
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
# Old texi2html clean target (deleted); duplicate of the Sphinx clean above.
clean: # figures-clean
rm -rf tutorial.aux tutorial.cp tutorial.cps tutorial.fn tutorial.ky tutorial.pg tutorial.tp tutorial.vr tutorial.toc tutorial.log tutorial.pdf tutorial.html tutorial/
# Each target below invokes sphinx-build with a different builder (-b) and
# writes into a builder-specific subdirectory of $(BUILDDIR).
dirhtml: $(IMAGES)
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
singlehtml: $(IMAGES)
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
@echo
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
pickle: $(IMAGES)
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
@echo
@echo "Build finished; now you can process the pickle files."
json: $(IMAGES)
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
@echo
@echo "Build finished; now you can process the JSON files."
htmlhelp: $(IMAGES)
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
@echo
@echo "Build finished; now you can run HTML Help Workshop with the" \
".hhp project file in $(BUILDDIR)/htmlhelp."
qthelp: $(IMAGES)
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
@echo
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/ns-3.qhcp"
@echo "To view the help file:"
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/ns-3.qhc"
devhelp: $(IMAGES)
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
@echo
@echo "Build finished."
@echo "To view the help file:"
@echo "# mkdir -p $$HOME/.local/share/devhelp/ns-3"
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/ns-3"
@echo "# devhelp"
epub: $(IMAGES)
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
@echo
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
latex: $(IMAGES)
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
@echo "Run \`make' in that directory to run these through (pdf)latex" \
"(use \`make latexpdf' here to do that automatically)."
# latexpdf chains the latex builder with the generated latex Makefile.
# NOTE(review): uses literal "make" rather than $(MAKE) for the recursive
# invocation, so -j/-n flags are not propagated.
latexpdf: $(IMAGES)
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through pdflatex..."
make -C $(BUILDDIR)/latex all-pdf
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
text: $(IMAGES)
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
@echo
@echo "Build finished. The text files are in $(BUILDDIR)/text."
man: $(IMAGES)
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
@echo
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
changes: $(IMAGES)
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
@echo
@echo "The overview file is in $(BUILDDIR)/changes."
# Check all external links in the documentation for integrity.
# Fix: the prerequisite list read $(IMAGEs) — note the lowercase "s" — which
# expands to an undefined (empty) variable, so "make linkcheck" silently
# skipped the figure generation that every sibling builder target depends on.
# It now uses $(IMAGES) like the other targets. Recipe tabs are restored
# (they were stripped by the diff rendering).
linkcheck: $(IMAGES)
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	"or in $(BUILDDIR)/linkcheck/output.txt."
# Run all doctests embedded in the documentation (requires the sphinx
# doctest extension to be enabled in conf.py); results land in
# $(BUILDDIR)/doctest/output.txt.
doctest: $(IMAGES)
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
@echo "Testing of doctests in the sources finished, look at the " \
"results in $(BUILDDIR)/doctest/output.txt."

View File

@@ -1,847 +0,0 @@
@c ========================================================================
@c Begin document body here
@c ========================================================================
@c ========================================================================
@c Conceptual Overview
@c ========================================================================
@node Conceptual Overview
@chapter Conceptual Overview
@menu
* Key Abstractions::
* A First ns-3 Script::
@end menu
The first thing we need to do before actually starting to look at or write
@command{ns-3} code is to explain a few core concepts and abstractions in the
system. Much of this may appear transparently obvious to some, but we
recommend taking the time to read through this section just to ensure you
are starting on a firm foundation.
@node Key Abstractions
@section Key Abstractions
In this section, we'll review some terms that are commonly used in
networking, but have a specific meaning in @command{ns-3}.
@subsection Node
@cindex Node
In Internet jargon, a computing device that connects to a network is called
a @emph{host} or sometimes an @emph{end system}. Because @command{ns-3} is a
@emph{network} simulator, not specifically an @emph{Internet} simulator, we
intentionally do not use the term host since it is closely associated with
the Internet and its protocols. Instead, we use a more generic term also
used by other simulators that originates in Graph Theory --- the @emph{node}.
@cindex class Node
In @command{ns-3} the basic computing device abstraction is called the
node. This abstraction is represented in C++ by the class @code{Node}. The
@code{Node} class provides methods for managing the representations of
computing devices in simulations.
You should think of a @code{Node} as a computer to which you will add
functionality. One adds things like applications, protocol stacks and
peripheral cards with their associated drivers to enable the computer to do
useful work. We use the same basic model in @command{ns-3}.
@subsection Application
@cindex Application
Typically, computer software is divided into two broad classes. @emph{System
Software} organizes various computer resources such as memory, processor
cycles, disk, network, etc., according to some computing model. System
software usually does not use those resources to complete tasks that directly
benefit a user. A user would typically run an @emph{application} that acquires
and uses the resources controlled by the system software to accomplish some
goal.
@cindex system call
Often, the line of separation between system and application software is made
at the privilege level change that happens in operating system traps.
In @command{ns-3} there is no real concept of operating system and especially
no concept of privilege levels or system calls. We do, however, have the
idea of an application. Just as software applications run on computers to
perform tasks in the ``real world,'' @command{ns-3} applications run on
@command{ns-3} @code{Nodes} to drive simulations in the simulated world.
@cindex class Application
In @command{ns-3} the basic abstraction for a user program that generates some
activity to be simulated is the application. This abstraction is represented
in C++ by the class @code{Application}. The @code{Application} class provides
methods for managing the representations of our version of user-level
applications in simulations. Developers are expected to specialize the
@code{Application} class in the object-oriented programming sense to create new
applications. In this tutorial, we will use specializations of class
@code{Application} called @code{UdpEchoClientApplication} and
@code{UdpEchoServerApplication}. As you might expect, these applications
compose a client/server application set used to generate and echo simulated
network packets.
@subsection Channel
@cindex Channel
In the real world, one can connect a computer to a network. Often the media
over which data flows in these networks are called @emph{channels}. When
you connect your Ethernet cable to the plug in the wall, you are connecting
your computer to an Ethernet communication channel. In the simulated world
of @command{ns-3}, one connects a @code{Node} to an object representing a
communication channel. Here the basic communication subnetwork abstraction
is called the channel and is represented in C++ by the class @code{Channel}.
The @code{Channel} class provides methods for managing communication
subnetwork objects and connecting nodes to them. @code{Channels} may also be
specialized by developers in the object oriented programming sense. A
@code{Channel} specialization may model something as simple as a wire. The
specialized @code{Channel} can also model things as complicated as a large
Ethernet switch, or three-dimensional space full of obstructions in the case
of wireless networks.
We will use specialized versions of the @code{Channel} called
@code{CsmaChannel}, @code{PointToPointChannel} and @code{WifiChannel} in this
tutorial. The @code{CsmaChannel}, for example, models a version of a
communication subnetwork that implements a @emph{carrier sense multiple
access} communication medium. This gives us Ethernet-like functionality.
@subsection Net Device
@cindex NetDevice
@cindex Ethernet
It used to be the case that if you wanted to connect a computer to a network,
you had to buy a specific kind of network cable and a hardware device called
(in PC terminology) a @emph{peripheral card} that needed to be installed in
your computer. If the peripheral card implemented some networking function,
they were called Network Interface Cards, or @emph{NICs}. Today most
computers come with the network interface hardware built in and users don't
see these building blocks.
A NIC will not work without a software driver to control the hardware. In
Unix (or Linux), a piece of peripheral hardware is classified as a
@emph{device}. Devices are controlled using @emph{device drivers}, and network
devices (NICs) are controlled using @emph{network device drivers}
collectively known as @emph{net devices}. In Unix and Linux you refer
to these net devices by names such as @emph{eth0}.
In @command{ns-3} the @emph{net device} abstraction covers both the software
driver and the simulated hardware. A net device is ``installed'' in a
@code{Node} in order to enable the @code{Node} to communicate with other
@code{Nodes} in the simulation via @code{Channels}. Just as in a real
computer, a @code{Node} may be connected to more than one @code{Channel} via
multiple @code{NetDevices}.
The net device abstraction is represented in C++ by the class @code{NetDevice}.
The @code{NetDevice} class provides methods for managing connections to
@code{Node} and @code{Channel} objects; and may be specialized by developers
in the object-oriented programming sense. We will use the several specialized
versions of the @code{NetDevice} called @code{CsmaNetDevice},
@code{PointToPointNetDevice}, and @code{WifiNetDevice} in this tutorial.
Just as an Ethernet NIC is designed to work with an Ethernet network, the
@code{CsmaNetDevice} is designed to work with a @code{CsmaChannel}; the
@code{PointToPointNetDevice} is designed to work with a
@code{PointToPointChannel} and a @code{WifiNetDevice} is designed to work with
a @code{WifiChannel}.
@subsection Topology Helpers
@cindex helper
@cindex topology
@cindex topology helper
In a real network, you will find host computers with added (or built-in)
NICs. In @command{ns-3} we would say that you will find @code{Nodes} with
attached @code{NetDevices}. In a large simulated network you will need to
arrange many connections between @code{Nodes}, @code{NetDevices} and
@code{Channels}.
Since connecting @code{NetDevices} to @code{Nodes}, @code{NetDevices}
to @code{Channels}, assigning IP addresses, etc., are such common tasks
in @command{ns-3}, we provide what we call @emph{topology helpers} to make
this as easy as possible. For example, it may take many distinct
@command{ns-3} core operations to create a NetDevice, add a MAC address,
install that net device on a @code{Node}, configure the node's protocol stack,
and then connect the @code{NetDevice} to a @code{Channel}. Even more
operations would be required to connect multiple devices onto multipoint
channels and then to connect individual networks together into internetworks.
We provide topology helper objects that combine those many distinct operations
into an easy to use model for your convenience.
@c ========================================================================
@c A First ns-3 script
@c ========================================================================
@node A First ns-3 Script
@section A First ns-3 Script
@cindex first script
If you downloaded the system as was suggested above, you will have a release
of @command{ns-3} in a directory called @code{repos} under your home
directory. Change into that release directory, and you should find a
directory structure something like the following:
@verbatim
AUTHORS doc/ README src/ waf.bat*
bindings/ examples/ RELEASE_NOTES utils/ wscript
build/ LICENSE samples/ VERSION wutils.py
CHANGES.html ns3/ scratch/ waf* wutils.pyc
@end verbatim
@cindex first.cc
Change into the @code{examples/tutorial} directory. You should see a file named
@code{first.cc} located there. This is a script that will create a simple
point-to-point link between two nodes and echo a single packet between the
nodes. Let's take a look at that script line by line, so go ahead and open
@code{first.cc} in your favorite editor.
@subsection Boilerplate
The first line in the file is an emacs mode line. This tells emacs about the
formatting conventions (coding style) we use in our source code.
@verbatim
/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
@end verbatim
This is always a somewhat controversial subject, so we might as well get it
out of the way immediately. The @code{ns-3} project, like most large
projects, has adopted a coding style to which all contributed code must
adhere. If you want to contribute your code to the project, you will
eventually have to conform to the @command{ns-3} coding standard as described
in the file @code{doc/codingstd.txt} or shown on the project web page
@uref{http://www.nsnam.org/codingstyle.html,,here}.
We recommend that you, well, just get used to the look and feel of @code{ns-3}
code and adopt this standard whenever you are working with our code. All of
the development team and contributors have done so with various amounts of
grumbling. The emacs mode line above makes it easier to get the formatting
correct if you use the emacs editor.
The @command{ns-3} simulator is licensed using the GNU General Public
License. You will see the appropriate GNU legalese at the head of every file
in the @command{ns-3} distribution. Often you will see a copyright notice for
one of the institutions involved in the @code{ns-3} project above the GPL
text and an author listed below.
@verbatim
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
@end verbatim
@subsection Module Includes
The code proper starts with a number of include statements.
@verbatim
#include "ns3/core-module.h"
#include "ns3/simulator-module.h"
#include "ns3/node-module.h"
#include "ns3/helper-module.h"
@end verbatim
To help our high-level script users deal with the large number of include
files present in the system, we group includes according to relatively large
modules. We provide a single include file that will recursively load all of
the include files used in each module. Rather than having to look up exactly
what header you need, and possibly have to get a number of dependencies right,
we give you the ability to load a group of files at a large granularity. This
is not the most efficient approach but it certainly makes writing scripts much
easier.
Each of the @command{ns-3} include files is placed in a directory called
@code{ns3} (under the build directory) during the build process to help avoid
include file name collisions. The @code{ns3/core-module.h} file corresponds
to the ns-3 module you will find in the directory @code{src/core} in your
downloaded release distribution. If you list this directory you will find a
large number of header files. When you do a build, Waf will place public
header files in an @code{ns3} directory under the appropriate
@code{build/debug} or @code{build/optimized} directory depending on your
configuration. Waf will also automatically generate a module include file to
load all of the public header files.
Since you are, of course, following this tutorial religiously, you will
already have done a
@verbatim
./waf -d debug configure
@end verbatim
in order to configure the project to perform debug builds. You will also have
done a
@verbatim
./waf
@end verbatim
to build the project. So now if you look in the directory
@code{../../build/debug/ns3} you will find the four module include files shown
above. You can take a look at the contents of these files and find that they
do include all of the public include files in their respective modules.
@subsection Ns3 Namespace
The next line in the @code{first.cc} script is a namespace declaration.
@verbatim
using namespace ns3;
@end verbatim
The @command{ns-3} project is implemented in a C++ namespace called
@code{ns3}. This groups all @command{ns-3}-related declarations in a scope
outside the global namespace, which we hope will help with integration with
other code. The C++ @code{using} statement introduces the @code{ns-3}
namespace into the current (global) declarative region. This is a fancy way
of saying that after this declaration, you will not have to type @code{ns3::}
scope resolution operator before all of the @code{ns-3} code in order to use
it. If you are unfamiliar with namespaces, please consult almost any C++
tutorial and compare the @code{ns3} namespace and usage here with instances of
the @code{std} namespace and the @code{using namespace std;} statements you
will often find in discussions of @code{cout} and streams.
@subsection Logging
The next line of the script is the following,
@verbatim
NS_LOG_COMPONENT_DEFINE ("FirstScriptExample");
@end verbatim
We will use this statement as a convenient place to talk about our Doxygen
documentation system. If you look at the project web site,
@uref{http://www.nsnam.org,,ns-3 project}, you will find a link to ``Doxygen
(ns-3-dev)'' in the navigation bar. If you select this link, you will be
taken to our documentation page for the current development release. There
is also a link to ``Doxygen (stable)'' that will take you to the documentation
for the latest stable release of @code{ns-3}.
Along the left side, you will find a graphical representation of the structure
of the documentation. A good place to start is the @code{NS-3 Modules}
``book'' in the @code{ns-3} navigation tree. If you expand @code{Modules}
you will see a list of @command{ns-3} module documentation. The concept of
module here ties directly into the module include files discussed above. It
turns out that the @command{ns-3} logging subsystem is part of the @code{core}
module, so go ahead and expand that documentation node. Now, expand the
@code{Debugging} book and then select the @code{Logging} page.
You should now be looking at the Doxygen documentation for the Logging module.
In the list of @code{#define}s at the top of the page you will see the entry
for @code{NS_LOG_COMPONENT_DEFINE}. Before jumping in, it would probably be
good to look for the ``Detailed Description'' of the logging module to get a
feel for the overall operation. You can either scroll down or select the
``More...'' link under the collaboration diagram to do this.
Once you have a general idea of what is going on, go ahead and take a look at
the specific @code{NS_LOG_COMPONENT_DEFINE} documentation. I won't duplicate
the documentation here, but to summarize, this line declares a logging
component called @code{FirstScriptExample} that allows you to enable and
disable console message logging by reference to the name.
@subsection Main Function
The next lines of the script you will find are,
@verbatim
int
main (int argc, char *argv[])
{
@end verbatim
This is just the declaration of the main function of your program (script).
Just as in any C++ program, you need to define a main function that will be
the first function run. There is nothing at all special here. Your
@command{ns-3} script is just a C++ program.
The next two lines of the script are used to enable two logging components that
are built into the Echo Client and Echo Server applications:
@verbatim
LogComponentEnable("UdpEchoClientApplication", LOG_LEVEL_INFO);
LogComponentEnable("UdpEchoServerApplication", LOG_LEVEL_INFO);
@end verbatim
If you have read over the Logging component documentation you will have seen
that there are a number of levels of logging verbosity/detail that you can
enable on each component. These two lines of code enable debug logging at the
INFO level for echo clients and servers. This will result in the application
printing out messages as packets are sent and received during the simulation.
Now we will get directly to the business of creating a topology and running
a simulation. We use the topology helper objects to make this job as
easy as possible.
@subsection Topology Helpers
@subsubsection NodeContainer
The next two lines of code in our script will actually create the
@command{ns-3} @code{Node} objects that will represent the computers in the
simulation.
@verbatim
NodeContainer nodes;
nodes.Create (2);
@end verbatim
Let's find the documentation for the @code{NodeContainer} class before we
continue. Another way to get into the documentation for a given class is via
the @code{Classes} tab in the Doxygen pages. If you still have the Doxygen
handy, just scroll up to the top of the page and select the @code{Classes}
tab. You should see a new set of tabs appear, one of which is
@code{Class List}. Under that tab you will see a list of all of the
@command{ns-3} classes. Scroll down, looking for @code{ns3::NodeContainer}.
When you find the class, go ahead and select it to go to the documentation for
the class.
You may recall that one of our key abstractions is the @code{Node}. This
represents a computer to which we are going to add things like protocol stacks,
applications and peripheral cards. The @code{NodeContainer} topology helper
provides a convenient way to create, manage and access any @code{Node} objects
that we create in order to run a simulation. The first line above just
declares a NodeContainer which we call @code{nodes}. The second line calls the
@code{Create} method on the @code{nodes} object and asks the container to
create two nodes. As described in the Doxygen, the container calls down into
the @command{ns-3} system proper to create two @code{Node} objects and stores
pointers to those objects internally.
The nodes as they stand in the script do nothing. The next step in
constructing a topology is to connect our nodes together into a network.
The simplest form of network we support is a single point-to-point link
between two nodes. We'll construct one of those links here.
@subsubsection PointToPointHelper
We are constructing a point to point link, and, in a pattern which will become
quite familiar to you, we use a topology helper object to do the low-level
work required to put the link together. Recall that two of our key
abstractions are the @code{NetDevice} and the @code{Channel}. In the real
world, these terms correspond roughly to peripheral cards and network cables.
Typically these two things are intimately tied together and one cannot expect
to interchange, for example, Ethernet devices and wireless channels. Our
Topology Helpers follow this intimate coupling and therefore you will use a
single @code{PointToPointHelper} to configure and connect @command{ns-3}
@code{PointToPointNetDevice} and @code{PointToPointChannel} objects in this
script.
The next three lines in the script are,
@verbatim
PointToPointHelper pointToPoint;
pointToPoint.SetDeviceAttribute ("DataRate", StringValue ("5Mbps"));
pointToPoint.SetChannelAttribute ("Delay", StringValue ("2ms"));
@end verbatim
The first line,
@verbatim
PointToPointHelper pointToPoint;
@end verbatim
instantiates a @code{PointToPointHelper} object on the stack. From a
high-level perspective the next line,
@verbatim
pointToPoint.SetDeviceAttribute ("DataRate", StringValue ("5Mbps"));
@end verbatim
tells the @code{PointToPointHelper} object to use the value ``5Mbps''
(five megabits per second) as the ``DataRate'' when it creates a
@code{PointToPointNetDevice} object.
From a more detailed perspective, the string ``DataRate'' corresponds
to what we call an @code{Attribute} of the @code{PointToPointNetDevice}.
If you look at the Doxygen for class @code{ns3::PointToPointNetDevice} and
find the documentation for the @code{GetTypeId} method, you will find a list
of @code{Attributes} defined for the device. Among these is the ``DataRate''
@code{Attribute}. Most user-visible @command{ns-3} objects have similar lists of
@code{Attributes}. We use this mechanism to easily configure simulations without
recompiling as you will see in a following section.
Similar to the ``DataRate'' on the @code{PointToPointNetDevice} you will find a
``Delay'' @code{Attribute} associated with the @code{PointToPointChannel}. The
final line,
@verbatim
pointToPoint.SetChannelAttribute ("Delay", StringValue ("2ms"));
@end verbatim
tells the @code{PointToPointHelper} to use the value ``2ms'' (two milliseconds)
as the value of the transmission delay of every point to point channel it
subsequently creates.
@subsubsection NetDeviceContainer
At this point in the script, we have a @code{NodeContainer} that contains
two nodes. We have a @code{PointToPointHelper} that is primed and ready to
make @code{PointToPointNetDevices} and wire @code{PointToPointChannel} objects
between them. Just as we used the @code{NodeContainer} topology helper object
to create the @code{Nodes} for our simulation, we will ask the
@code{PointToPointHelper} to do the work involved in creating, configuring and
installing our devices for us. We will need to have a list of all of the
NetDevice objects that are created, so we use a NetDeviceContainer to hold
them just as we used a NodeContainer to hold the nodes we created. The
following two lines of code,
@verbatim
NetDeviceContainer devices;
devices = pointToPoint.Install (nodes);
@end verbatim
will finish configuring the devices and channel. The first line declares the
device container mentioned above and the second does the heavy lifting. The
@code{Install} method of the @code{PointToPointHelper} takes a
@code{NodeContainer} as a parameter. Internally, a @code{NetDeviceContainer}
is created. For each node in the @code{NodeContainer} (there must be exactly
two for a point-to-point link) a @code{PointToPointNetDevice} is created and
saved in the device container. A @code{PointToPointChannel} is created and
the two @code{PointToPointNetDevices} are attached. When objects are created
by the @code{PointToPointHelper}, the @code{Attributes} previously set in the
helper are used to initialize the corresponding @code{Attributes} in the
created objects.
After executing the @code{pointToPoint.Install (nodes)} call we will have
two nodes, each with an installed point-to-point net device and a single
point-to-point channel between them. Both devices will be configured to
transmit data at five megabits per second over the channel which has a two
millisecond transmission delay.
@subsubsection InternetStackHelper
We now have nodes and devices configured, but we don't have any protocol stacks
installed on our nodes. The next two lines of code will take care of that.
@verbatim
InternetStackHelper stack;
stack.Install (nodes);
@end verbatim
The @code{InternetStackHelper} is a topology helper that is to internet stacks
what the @code{PointToPointHelper} is to point-to-point net devices. The
@code{Install} method takes a @code{NodeContainer} as a parameter. When it is
executed, it will install an Internet Stack (TCP, UDP, IP, etc.) on each of
the nodes in the node container.
@subsubsection Ipv4AddressHelper
Next we need to associate the devices on our nodes with IP addresses. We
provide a topology helper to manage the allocation of IP addresses. The only
user-visible API is to set the base IP address and network mask to use when
performing the actual address allocation (which is done at a lower level
inside the helper).
The next two lines of code in our example script, @code{first.cc},
@verbatim
Ipv4AddressHelper address;
address.SetBase ("10.1.1.0", "255.255.255.0");
@end verbatim
declare an address helper object and tell it that it should begin allocating IP
addresses from the network 10.1.1.0 using the mask 255.255.255.0 to define
the allocatable bits. By default the addresses allocated will start at one
and increase monotonically, so the first address allocated from this base will
be 10.1.1.1, followed by 10.1.1.2, etc. The low level @command{ns-3} system
actually remembers all of the IP addresses allocated and will generate a
fatal error if you accidentally cause the same address to be generated twice
(which is a very hard to debug error, by the way).
The next line of code,
@verbatim
Ipv4InterfaceContainer interfaces = address.Assign (devices);
@end verbatim
performs the actual address assignment. In @command{ns-3} we make the
association between an IP address and a device using an @code{Ipv4Interface}
object. Just as we sometimes need a list of net devices created by a helper
for future reference we sometimes need a list of @code{Ipv4Interface} objects.
The @code{Ipv4InterfaceContainer} provides this functionality.
Now we have a point-to-point network built, with stacks installed and IP
addresses assigned. What we need at this point are applications to generate
traffic.
@subsection Applications
Another one of the core abstractions of the ns-3 system is the
@code{Application}. In this script we use two specializations of the core
@command{ns-3} class @code{Application} called @code{UdpEchoServerApplication}
and @code{UdpEchoClientApplication}. Just as we have in our previous
explanations, we use helper objects to help configure and manage the
underlying objects. Here, we use @code{UdpEchoServerHelper} and
@code{UdpEchoClientHelper} objects to make our lives easier.
@subsubsection UdpEchoServerHelper
The following lines of code in our example script, @code{first.cc}, are used
to set up a UDP echo server application on one of the nodes we have previously
created.
@verbatim
UdpEchoServerHelper echoServer (9);
ApplicationContainer serverApps = echoServer.Install (nodes.Get (1));
serverApps.Start (Seconds (1.0));
serverApps.Stop (Seconds (10.0));
@end verbatim
The first line of code in the above snippet declares the
@code{UdpEchoServerHelper}. As usual, this isn't the application itself, it
is an object used to help us create the actual applications. One of our
conventions is to place @emph{required} @code{Attributes} in the helper constructor.
In this case, the helper can't do anything useful unless it is provided with
a port number that the client also knows about. Rather than just picking one
and hoping it all works out, we require the port number as a parameter to the
constructor. The constructor, in turn, simply does a @code{SetAttribute}
with the passed value. If you want, you can set the ``Port'' @code{Attribute}
to another value later using @code{SetAttribute}.
Similar to many other helper objects, the @code{UdpEchoServerHelper} object
has an @code{Install} method. It is the execution of this method that actually
causes the underlying echo server application to be instantiated and attached
to a node. Interestingly, the @code{Install} method takes a
@code{NodeContainer} as a parameter just as the other @code{Install} methods
we have seen. This is actually what is passed to the method even though it
doesn't look so in this case. There is a C++ @emph{implicit conversion} at
work here that takes the result of @code{nodes.Get (1)} (which returns a smart
pointer to a node object --- @code{Ptr<Node>}) and uses that in a constructor
for an unnamed @code{NodeContainer} that is then passed to @code{Install}.
If you are ever at a loss to find a particular method signature in C++ code
that compiles and runs just fine, look for these kinds of implicit conversions.
We now see that @code{echoServer.Install} is going to install a
@code{UdpEchoServerApplication} on the node found at index number one of the
@code{NodeContainer} we used to manage our nodes. @code{Install} will return
a container that holds pointers to all of the applications (one in this case
since we passed a @code{NodeContainer} containing one node) created by the
helper.
Applications require a time to ``start'' generating traffic and may take an
optional time to ``stop''. We provide both. These times are set using the
@code{ApplicationContainer} methods @code{Start} and @code{Stop}. These
methods take @code{Time} parameters. In this case, we use an @emph{explicit}
C++ conversion sequence to take the C++ double 1.0 and convert it to an
@command{ns-3} @code{Time} object using a @code{Seconds} cast. Be aware that
the conversion rules may be controlled by the model author, and C++ has its
own rules, so you can't always just assume that parameters will be happily
converted for you. The two lines,
@verbatim
serverApps.Start (Seconds (1.0));
serverApps.Stop (Seconds (10.0));
@end verbatim
will cause the echo server application to @code{Start} (enable itself) at one
second into the simulation and to @code{Stop} (disable itself) at ten seconds
into the simulation. By virtue of the fact that we have declared a simulation
event (the application stop event) to be executed at ten seconds, the simulation
will last @emph{at least} ten seconds.
@subsubsection UdpEchoClientHelper
The echo client application is set up in a method substantially similar to
that for the server. There is an underlying @code{UdpEchoClientApplication}
that is managed by an @code{UdpEchoClientHelper}.
@verbatim
UdpEchoClientHelper echoClient (interfaces.GetAddress (1), 9);
echoClient.SetAttribute ("MaxPackets", UintegerValue (1));
echoClient.SetAttribute ("Interval", TimeValue (Seconds (1.)));
echoClient.SetAttribute ("PacketSize", UintegerValue (1024));
ApplicationContainer clientApps = echoClient.Install (nodes.Get (0));
clientApps.Start (Seconds (2.0));
clientApps.Stop (Seconds (10.0));
@end verbatim
For the echo client, however, we need to set five different @code{Attributes}.
The first two @code{Attributes} are set during construction of the
@code{UdpEchoClientHelper}. We pass parameters that are used (internally to
the helper) to set the ``RemoteAddress'' and ``RemotePort'' @code{Attributes}
in accordance with our convention to make required @code{Attributes} parameters
in the helper constructors.
Recall that we used an @code{Ipv4InterfaceContainer} to keep track of the IP
addresses we assigned to our devices. The zeroth interface in the
@code{interfaces} container is going to correspond to the IP address of the
zeroth node in the @code{nodes} container. The first interface in the
@code{interfaces} container corresponds to the IP address of the first node
in the @code{nodes} container. So, in the first line of code (from above), we
are creating the helper and telling it to set the remote address of the client
to be the IP address assigned to the node on which the server resides. We
also tell it to arrange to send packets to port nine.
The ``MaxPackets'' @code{Attribute} tells the client the maximum number of
packets we allow it to send during the simulation. The ``Interval''
@code{Attribute} tells the client how long to wait between packets, and the
``PacketSize'' @code{Attribute} tells the client how large its packet payloads
should be. With this particular combination of @code{Attributes}, we are
telling the client to send one 1024-byte packet.
Just as in the case of the echo server, we tell the echo client to @code{Start}
and @code{Stop}, but here we start the client one second after the server is
enabled (at two seconds into the simulation).
@subsection Simulator
What we need to do at this point is to actually run the simulation. This is
done using the global function @code{Simulator::Run}.
@verbatim
Simulator::Run ();
@end verbatim
When we previously called the methods,
@verbatim
serverApps.Start (Seconds (1.0));
serverApps.Stop (Seconds (10.0));
...
clientApps.Start (Seconds (2.0));
clientApps.Stop (Seconds (10.0));
@end verbatim
we actually scheduled events in the simulator at 1.0 seconds, 2.0 seconds and
two events at 10.0 seconds. When @code{Simulator::Run} is called, the system
will begin looking through the list of scheduled events and executing them.
First it will run the event at 1.0 seconds, which will enable the echo server
application (this event may, in turn, schedule many other events). Then it
will run the event scheduled for t=2.0 seconds which will start the echo client
application. Again, this event may schedule many more events. The start event
implementation in the echo client application will begin the data transfer phase
of the simulation by sending a packet to the server.
The act of sending the packet to the server will trigger a chain of events
that will be automatically scheduled behind the scenes and which will perform
the mechanics of the packet echo according to the various timing parameters
that we have set in the script.
Eventually, since we only send one packet (recall the @code{MaxPackets}
@code{Attribute} was set to one), the chain of events triggered by
that single client echo request will taper off and the simulation will go
idle. Once this happens, the remaining events will be the @code{Stop} events
for the server and the client. When these events are executed, there are
no further events to process and @code{Simulator::Run} returns. The simulation
is then complete.
All that remains is to clean up. This is done by calling the global function
@code{Simulator::Destroy}. As the helper functions (or low level
@command{ns-3} code) executed, they arranged it so that hooks were inserted in
the simulator to destroy all of the objects that were created. You did not
have to keep track of any of these objects yourself --- all you had to do
was to call @code{Simulator::Destroy} and exit. The @command{ns-3} system
took care of the hard part for you. The remaining lines of our first
@command{ns-3} script, @code{first.cc}, do just that:
@verbatim
Simulator::Destroy ();
return 0;
}
@end verbatim
@subsection Building Your Script
We have made it trivial to build your simple scripts. All you have to do is
to drop your script into the scratch directory and it will automatically be
built if you run Waf. Let's try it. Copy @code{examples/tutorial/first.cc} into
the @code{scratch} directory after changing back into the top level directory.
@verbatim
cd ..
cp examples/tutorial/first.cc scratch/myfirst.cc
@end verbatim
Now build your first example script using waf:
@verbatim
./waf
@end verbatim
You should see messages reporting that your @code{myfirst} example was built
successfully.
@verbatim
Waf: Entering directory `/home/craigdo/repos/ns-3-allinone/ns-3-dev/build'
[614/708] cxx: scratch/myfirst.cc -> build/debug/scratch/myfirst_3.o
[706/708] cxx_link: build/debug/scratch/myfirst_3.o -> build/debug/scratch/myfirst
Waf: Leaving directory `/home/craigdo/repos/ns-3-allinone/ns-3-dev/build'
'build' finished successfully (2.357s)
@end verbatim
You can now run the example (note that if you build your program in the scratch
directory you must run it out of the scratch directory):
@verbatim
./waf --run scratch/myfirst
@end verbatim
You should see some output:
@verbatim
Waf: Entering directory `/home/craigdo/repos/ns-3-allinone/ns-3-dev/build'
Waf: Leaving directory `/home/craigdo/repos/ns-3-allinone/ns-3-dev/build'
'build' finished successfully (0.418s)
Sent 1024 bytes to 10.1.1.2
Received 1024 bytes from 10.1.1.1
Received 1024 bytes from 10.1.1.2
@end verbatim
Here you see that the build system checks to make sure that the file has been
built and then runs it. You see the logging component on the echo client
indicate that it has sent one 1024 byte packet to the Echo Server on
10.1.1.2. You also see the logging component on the echo server say that
it has received the 1024 bytes from 10.1.1.1. The echo server silently
echoes the packet and you see the echo client log that it has received its
packet back from the server.
@c ========================================================================
@c Browsing ns-3
@c ========================================================================
@node Ns-3 Source Code
@section Ns-3 Source Code
Now that you have used some of the @command{ns-3} helpers you may want to
have a look at some of the source code that implements that functionality.
The most recent code can be browsed on our web server at the following link:
@uref{http://code.nsnam.org/ns-3-dev}. There, you will see the Mercurial
summary page for our @command{ns-3} development tree.
At the top of the page, you will see a number of links,
@verbatim
summary | shortlog | changelog | graph | tags | files
@end verbatim
Go ahead and select the @code{files} link. This is what the top-level of
most of our @emph{repositories} will look like:
@verbatim
drwxr-xr-x [up]
drwxr-xr-x bindings python files
drwxr-xr-x doc files
drwxr-xr-x examples files
drwxr-xr-x ns3 files
drwxr-xr-x samples files
drwxr-xr-x scratch files
drwxr-xr-x src files
drwxr-xr-x utils files
-rw-r--r-- 2009-07-01 12:47 +0200 560 .hgignore file | revisions | annotate
-rw-r--r-- 2009-07-01 12:47 +0200 1886 .hgtags file | revisions | annotate
-rw-r--r-- 2009-07-01 12:47 +0200 1276 AUTHORS file | revisions | annotate
-rw-r--r-- 2009-07-01 12:47 +0200 30961 CHANGES.html file | revisions | annotate
-rw-r--r-- 2009-07-01 12:47 +0200 17987 LICENSE file | revisions | annotate
-rw-r--r-- 2009-07-01 12:47 +0200 3742 README file | revisions | annotate
-rw-r--r-- 2009-07-01 12:47 +0200 16171 RELEASE_NOTES file | revisions | annotate
-rw-r--r-- 2009-07-01 12:47 +0200 6 VERSION file | revisions | annotate
-rwxr-xr-x 2009-07-01 12:47 +0200 88110 waf file | revisions | annotate
-rwxr-xr-x 2009-07-01 12:47 +0200 28 waf.bat file | revisions | annotate
-rw-r--r-- 2009-07-01 12:47 +0200 35395 wscript file | revisions | annotate
-rw-r--r-- 2009-07-01 12:47 +0200 7673 wutils.py file | revisions | annotate
@end verbatim
Our example scripts are in the @code{examples} directory. If you click on @code{examples}
you will see a list of files. One of the files in that directory is @code{first.cc}. If
you click on @code{first.cc} you will find the code you just walked through.
The source code is mainly in the @code{src} directory. You can view source
code either by clicking on the directory name or by clicking on the @code{files}
link to the right of the directory name. If you click on the @code{src}
directory, you will be taken to the listing of the @code{src} subdirectories. If you
then click on the @code{core} subdirectory, you will find a list of files. The first file
you will find (as of this writing) is @code{abort.h}. If you click on the
@code{abort.h} link, you will be sent to the source file for @code{abort.h} which
contains useful macros for exiting scripts if abnormal conditions are detected.
The source code for the helpers we have used in this chapter can be found in the
@code{src/helper} directory. Feel free to poke around in the directory tree to
get a feel for what is there and the style of @command{ns-3} programs.

View File

@@ -1,60 +0,0 @@
@c ============================================================================
@c Begin document body here
@c ============================================================================
@c ============================================================================
@c PART: Closing Remarks
@c ============================================================================
@c The below chapters are under the major heading "Closing Remarks"
@c This is similar to the Latex \part command
@c
@c ============================================================================
@c Closing Remarks
@c ============================================================================
@node Closing Remarks
@chapter Closing Remarks
@menu
* Futures::
* Closing::
@end menu
@c ============================================================================
@c Futures
@c ============================================================================
@node Futures
@section Futures
This document is a work in progress. We hope and expect it to grow over time
to cover more and more of the nuts and bolts of @command{ns-3}.
We hope to add the following chapters over the next few releases:
@itemize @bullet
@item The Callback System
@item The Object System and Memory Management
@item The Routing System
@item Adding a New NetDevice and Channel
@item Adding a New Protocol
@item Working with Real Networks and Hosts
@end itemize
Writing manual and tutorial chapters is not something we all get excited about,
but it is very important to the project. If you are an expert in one of these
areas, please consider contributing to @command{ns-3} by providing one of these
chapters; or any other chapter you may think is important.
@c ============================================================================
@c Closing
@c ============================================================================
@node Closing
@section Closing
@code{ns-3} is a large and complicated system. It is impossible to cover all
of the things you will need to know in one small tutorial.
We have really just scratched the surface of @command{ns-3} in this tutorial,
but we hope to have covered enough to get you started doing useful networking
research using our favorite simulator.
-- The @command{ns-3} development team.

View File

@@ -1,49 +0,0 @@
# Makefile for building the ns-3 tutorial from its Texinfo source.
# Figures are drawn with dia (.dia) and tgif (.obj) and converted to
# png/eps/pdf so both the HTML and PDF outputs can embed them.

# Conversion tools
TEXI2HTML = texi2html
TEXI2PDF = texi2dvi --pdf
EPSTOPDF = epstopdf
TGIF = tgif
DIA = dia
CONVERT = convert

# texi2html options: stylesheet and per-section splitting
CSS = --css-include=tutorial.css
SPLIT = --split section

# Figure sources (relative to the figures/ directory)
DIA_SOURCES = pp.dia dumbbell.dia star.dia
TGIF_SOURCES = helpers.obj

# Derived image file lists, one per output format
DIA_EPS = ${DIA_SOURCES:.dia=.eps}
DIA_PNG = ${DIA_SOURCES:.dia=.png}
DIA_PDF = ${DIA_SOURCES:.dia=.pdf}
TGIF_EPS = ${TGIF_SOURCES:.obj=.eps}
TGIF_PNG = ${TGIF_SOURCES:.obj=.png}
TGIF_PDF = ${TGIF_SOURCES:.obj=.pdf}

# None of these targets name files they create; declare them phony so a
# stray file with the same name (e.g. "clean") cannot shadow the rule.
.PHONY: all images html split-html pdf figures-clean clean

all: images html split-html pdf

# Note: tgif requires a valid x display to convert from .obj to .png.
# If running this makefile on a remote console, the X virtual frame
# buffer may be needed (xorg-x11-server-Xvfb) to provide a "fake"
# display
images:
	cd figures/; $(DIA) -t png $(DIA_SOURCES)
	cd figures/; $(DIA) -t eps $(DIA_SOURCES)
	cd figures/; $(foreach FILE,$(DIA_EPS),$(EPSTOPDF) $(FILE);)
	cd figures/; $(TGIF) -print -png $(TGIF_SOURCES)
	cd figures/; $(TGIF) -print -eps $(TGIF_SOURCES)
	cd figures/; $(foreach FILE,$(TGIF_EPS),$(EPSTOPDF) $(FILE);)

html: images
	$(TEXI2HTML) ${CSS} tutorial.texi

split-html: images
	$(TEXI2HTML) ${CSS} ${SPLIT} tutorial.texi

pdf: images
	$(TEXI2PDF) tutorial.texi

# Remove only the generated image files, leaving the .dia/.obj sources.
figures-clean:
	cd figures/; rm -rf $(DIA_EPS); rm -rf $(DIA_PNG); rm -rf $(DIA_PDF)
	cd figures/; rm -rf $(TGIF_EPS); rm -rf $(TGIF_PNG); rm -rf $(TGIF_PDF)

# Remove all texi2dvi/texi2html byproducts and the generated outputs.
clean: figures-clean
	rm -rf tutorial.aux tutorial.cp tutorial.cps tutorial.fn tutorial.ky tutorial.pg tutorial.tp tutorial.vr tutorial.toc tutorial.log tutorial.pdf tutorial.html tutorial/

View File

@@ -1,535 +0,0 @@
@node ns-3 Attributes
@chapter ns-3 Attributes
@anchor{chap:Attributes}
In ns-3 simulations, there are two main aspects to configuration:
@itemize @bullet
@item the simulation topology and how objects are connected
@item the values used by the models instantiated in the topology
@end itemize
This chapter focuses on the second item above: how the many values
in use in ns-3 are organized, documented, and modifiable by ns-3 users.
The ns-3 attribute system is also the underpinning of how traces
and statistics are gathered in the simulator.
Before delving into details of the attribute value system,
it will help to review some basic properties of @code{class ns3::Object}.
@node Object Overview
@section Object Overview
ns-3 is fundamentally a C++ object-based system. By this we mean that
new C++ classes (types) can be declared, defined, and subclassed
as usual.
Many ns-3 objects inherit from the @code{ns3::Object} base class. These
objects have some additional properties that we exploit for
organizing the system and improving the memory management
of our objects:
@itemize @bullet
@item a "metadata" system that links the class name to a lot of
meta-information about the object, including the base class of the subclass,
the set of accessible constructors in the subclass, and the set of
"attributes" of the subclass
@item a reference counting smart pointer implementation, for memory
management.
@end itemize
ns-3 objects that use the attribute system derive from either
@code{ns3::Object} or @code{ns3::ObjectBase}. Most ns-3 objects
we will discuss derive from @code{ns3::Object}, but a few that
are outside the smart pointer memory management framework derive
from @code{ns3::ObjectBase}.
Let's review a couple of properties of these objects.
@node Smart pointers
@subsection Smart pointers
As introduced above in @ref{Smart Pointers 101}, ns-3 objects
are memory managed by a
@uref{http://en.wikipedia.org/wiki/Smart_pointer,,reference counting smart pointer implementation}, @code{class ns3::Ptr}.
Smart pointers are used extensively in the ns-3 APIs, to avoid passing
references to heap-allocated objects that may cause memory leaks.
For most basic usage (syntax), treat a smart pointer like a regular pointer:
@verbatim
Ptr<WifiNetDevice> nd = ...;
nd->CallSomeFunction ();
// etc.
@end verbatim
@node CreateObject
@subsection CreateObject
As we discussed above in @ref{Object Creation},
at the lowest-level API, objects of type @code{ns3::Object} are
not instantiated using @code{operator new} as usual but instead by
a templated function called @code{CreateObject()}.
A typical way to create such an object is as follows:
@verbatim
Ptr<WifiNetDevice> nd = CreateObject<WifiNetDevice> ();
@end verbatim
You can think of this as being functionally equivalent to:
@verbatim
WifiNetDevice* nd = new WifiNetDevice ();
@end verbatim
Objects that derive from @code{ns3::Object} must be allocated
on the heap using CreateObject(). Those deriving from
@code{ns3::ObjectBase}, such as ns-3 helper functions and packet
headers and trailers, can be allocated on the stack.
In some scripts, you may not see a lot of CreateObject() calls
in the code;
this is because there are some helper objects in effect that
are doing the CreateObject()s for you.
@node TypeId
@subsection TypeId
ns-3 classes that derive from class ns3::Object can include
a metadata class called @code{TypeId} that records meta-information
about the class, for use in the object aggregation and component
manager systems:
@itemize @bullet
@item a unique string identifying the class
@item the base class of the subclass, within the metadata system
@item the set of accessible constructors in the subclass
@end itemize
@node Object Summary
@subsection Object Summary
Putting all of these concepts together, let's look at a specific
example: @code{class ns3::Node}.
The public header file node.h has a declaration that includes
a static GetTypeId function call:
@verbatim
class Node : public Object
{
public:
static TypeId GetTypeId (void);
...
@end verbatim
This is defined in the node.cc file as follows:
@verbatim
TypeId
Node::GetTypeId (void)
{
static TypeId tid = TypeId ("ns3::Node")
.SetParent<Object> ();
return tid;
}
@end verbatim
Finally, when users want to create Nodes, they call:
@verbatim
Ptr<Node> n = CreateObject<Node> ();
@end verbatim
We next discuss how attributes (values associated with member variables
or functions of the class) are plumbed into the above TypeId.
@node Attribute Overview
@section Attribute Overview
The goal of the attribute system is to organize the access of
internal member objects of a simulation. This goal arises because,
typically in simulation, users will cut and paste/modify existing
simulation scripts, or will use higher-level simulation constructs,
but often will be interested in studying or tracing particular
internal variables. For instance, use cases such as:
@itemize @bullet
@item "I want to trace the packets on the wireless interface only on
the first access point"
@item "I want to trace the value of the TCP congestion window (every
time it changes) on a particular TCP socket"
@item "I want a dump of all values that were used in my simulation."
@end itemize
Similarly, users may want fine-grained access to internal
variables in the simulation, or may want to broadly change the
initial value used for a particular parameter in all subsequently
created objects. Finally, users may wish to know what variables
are settable and retrievable in a simulation configuration. This
is not just for direct simulation interaction on the command line;
consider also a (future) graphical user interface
that would like to be able to provide a feature whereby a user
might right-click on a node on the canvas and see a hierarchical,
organized list of parameters that are settable on the node and its
constituent member objects, and help text and default values for
each parameter.
@node Functional overview
@subsection Functional overview
We provide a way for users to access values deep in the system, without
having to plumb accessors (pointers) through the system and walk
pointer chains to get to them. Consider a class DropTailQueue that
has a member variable that is an unsigned integer @code{m_maxPackets};
this member variable controls the depth of the queue.
If we look at the declaration of DropTailQueue, we see the following:
@verbatim
class DropTailQueue : public Queue {
public:
static TypeId GetTypeId (void);
...
private:
std::queue<Ptr<Packet> > m_packets;
uint32_t m_maxPackets;
};
@end verbatim
Let's consider things that a user may want to do with the value of
m_maxPackets:
@itemize @bullet
@item Set a default value for the system, such that whenever a new
DropTailQueue is created, this member is initialized to that default.
@item Set or get the value on an already instantiated queue.
@end itemize
The above things typically require providing Set() and Get() functions,
and some type of global default value.
In the ns-3 attribute system, these value definitions and accessor
functions are moved into the TypeId class; e.g.:
@verbatim
TypeId DropTailQueue::GetTypeId (void)
{
static TypeId tid = TypeId ("ns3::DropTailQueue")
.SetParent<Queue> ()
.AddConstructor<DropTailQueue> ()
.AddAttribute ("MaxPackets", "The maximum number of packets accepted by this DropTailQueue.",
Uinteger (100),
MakeUintegerAccessor (&DropTailQueue::m_maxPackets),
MakeUintegerChecker<uint32_t> ())
;
return tid;
}
@end verbatim
The AddAttribute() method is performing a number of things with this
value:
@itemize @bullet
@item Binding the variable m_maxPackets to a string "MaxPackets"
@item Providing a default value (100 packets)
@item Providing some help text defining the value
@item Providing a "checker" (not used in this example) that can be used to set
bounds on the allowable range of values
@end itemize
The key point is that now the value of this variable and its default
value are accessible in the attribute namespace, which is based on
strings such as "MaxPackets" and TypeId strings. In the next
section, we will provide an example script that shows how users
may manipulate these values.
@node Basic usage
@subsection Basic usage
Let's look at how a user script might access these values.
This is based on the script found at @code{samples/main-attribute-value.cc},
with some details stripped out.
@verbatim
//
// This is a basic example of how to use the attribute system to
// set and get a value in the underlying system; namely, an unsigned
// integer of the maximum number of packets in a queue
//
int
main (int argc, char *argv[])
{
// By default, the MaxPackets attribute has a value of 100 packets
// (this default can be observed in the function DropTailQueue::GetTypeId)
//
// Here, we set it to 80 packets. We could use one of two value types:
// a string-based value or a Uinteger value
Config::SetDefault ("ns3::DropTailQueue::MaxPackets", String ("80"));
// The below function call is redundant
Config::SetDefault ("ns3::DropTailQueue::MaxPackets", Uinteger(80));
// Allow the user to override any of the defaults and the above
// SetDefaults() at run-time, via command-line arguments
CommandLine cmd;
cmd.Parse (argc, argv);
@end verbatim
The main thing to notice in the above are the two calls to
@code{Config::SetDefault}. This is how we set the default value
for all subsequently instantiated DropTailQueues. We illustrate
that two types of Value classes, a String and a Uinteger class,
can be used to assign the value to the attribute named by
"ns3::DropTailQueue::MaxPackets".
Now, we will create a few objects using the low-level API; here,
our newly created queues will not have a m_maxPackets initialized to
100 packets but to 80 packets, because of what we did above with
default values.
@verbatim
Ptr<Node> n0 = CreateObject<Node> ();
Ptr<PointToPointNetDevice> net0 = CreateObject<PointToPointNetDevice> ();
n0->AddDevice (net0);
Ptr<Queue> q = CreateObject<DropTailQueue> ();
net0->AddQueue(q);
@end verbatim
At this point, we have created a single node (Node 0) and a
single PointToPointNetDevice (NetDevice 0) and added a
DropTailQueue to it.
Now, we can manipulate the MaxPackets value of the already
instantiated DropTailQueue. Here are various ways to do that.
@subsubsection Pointer-based access
We assume that a smart pointer (Ptr) to a relevant network device is
in hand; here, it is the net0 pointer.
One way to change the value is to access a pointer to the
underlying queue and modify its attribute.
First, we observe that we can get a pointer to the (base class)
queue via the PointToPointNetDevice attributes, where it is called
TxQueue
@verbatim
Ptr<Queue> txQueue = net0->GetAttribute ("TxQueue");
@end verbatim
Using the GetObject function, we can perform a safe downcast
to a DropTailQueue, where MaxPackets is a member
@verbatim
Ptr<DropTailQueue> dtq = txQueue->GetObject <DropTailQueue> ();
NS_ASSERT (dtq);
@end verbatim
Next, we can get the value of an attribute on this queue
We have introduced wrapper "Value" classes for the underlying
data types, similar to Java wrappers around these types, since
the attribute system stores values and not disparate types.
Here, the attribute value is assigned to a Uinteger, and
the Get() method on this value produces the (unwrapped) uint32_t.
@verbatim
Uinteger limit = dtq->GetAttribute ("MaxPackets");
NS_LOG_INFO ("1. dtq limit: " << limit.Get () << " packets");
@end verbatim
Note that the above downcast is not really needed; we could have
done the same using the Ptr<Queue> even though the attribute
is a member of the subclass
@verbatim
limit = txQueue->GetAttribute ("MaxPackets");
NS_LOG_INFO ("2. txQueue limit: " << limit.Get () << " packets");
@end verbatim
Now, let's set it to another value (60 packets)
@verbatim
txQueue->SetAttribute("MaxPackets", Uinteger (60));
limit = txQueue->GetAttribute ("MaxPackets");
NS_LOG_INFO ("3. txQueue limit changed: " << limit.Get () << " packets");
@end verbatim
@subsubsection Namespace-based access
An alternative way to get at the attribute is to use the configuration
namespace. Here, this attribute resides on a known path in this
namespace; this approach is useful if one doesn't have access to
the underlying pointers and would like to configure a specific
attribute with a single statement.
@verbatim
Config::Set ("/NodeList/0/DeviceList/0/TxQueue/MaxPackets", Uinteger (25));
limit = txQueue->GetAttribute ("MaxPackets");
NS_LOG_INFO ("4. txQueue limit changed through namespace: " <<
limit.Get () << " packets");
@end verbatim
We could have also used wildcards to set this value for all nodes
and all net devices (which in this simple example has the same
effect as the previous Set())
@verbatim
Config::Set ("/NodeList/*/DeviceList/*/TxQueue/MaxPackets", Uinteger (15));
limit = txQueue->GetAttribute ("MaxPackets");
NS_LOG_INFO ("5. txQueue limit changed through wildcarded namespace: " <<
limit.Get () << " packets");
@end verbatim
@node Setting through constructors and helper classes
@subsection Setting through constructors and helper classes
Arbitrary combinations of attributes can be set and fetched from
the helper and low-level APIs; either from the constructors themselves:
@verbatim
Ptr<Object> p = CreateObject<MyNewObject> ("n1", v1, "n2", v2, ...);
@end verbatim
or from the higher-level helper APIs, such as:
@verbatim
mobility.SetPositionAllocator ("GridPositionAllocator",
"MinX", FpValue (-100.0),
"MinY", FpValue (-100.0),
"DeltaX", FpValue (5.0),
"DeltaY", FpValue (20.0),
"GridWidth", UintValue (20),
"LayoutType", "RowFirst");
@end verbatim
@node Value classes
@subsection Value classes
Readers will note the new Value classes. These can be thought of as
an intermediate class that can be used to convert from raw types to the
Values that are used by the system. Recall that this database is holding
objects of many types with a single generic type. Conversions to this
type can either be done using an intermediate class (IntValue, FpValue for
"floating point") or via strings. Direct implicit conversion of types
to Value is not really practical. So in the above, users have a choice
of using strings or values:
@verbatim
p->Set ("cwnd", "100"); // string-based setter
p->Set ("cwnd", IntValue(100)); // value-based setter
@end verbatim
The system provides some macros that help users declare and define
new Value subclasses for new types that they want to introduce into
the attribute system.
@node Extending attributes
@section Extending attributes
The ns-3 system will place a number of internal values under the
attribute system, but undoubtedly users will want to extend this
to pick up ones we have missed, or to add their own classes to this.
@subsection Adding an existing internal variable to the metadata system
// XXX revise me
Consider this variable in class TcpSocket:
@verbatim
uint32_t m_cWnd; // Congestion window
@end verbatim
Suppose that someone working with Tcp wanted to get or set the
value of that variable using the metadata system. If it were not
already provided by ns-3, the user could declare the following addition
in the metadata system (to the TypeId declaration for TcpSocket):
@verbatim
.AddParameter ("Congestion window",
"Tcp congestion window (bytes)",
MakeUIntParamSpec (&TcpSocket::m_cWnd, 1));
@end verbatim
Now, the user with a pointer to the TcpSocket can perform operations
such as setting and getting the value, without having to add these
functions explicitly. Furthermore, access controls can be applied, such
as allowing the parameter to be read and not written, or bounds
checking on the permissible values can be applied.
@subsection Adding a new TypeId
Here, we discuss the impact on a user who wants to add a new class to
ns-3; what additional things must be done to hook it into this system.
We've already introduced what a TypeId definition looks like:
@verbatim
TypeId
RandomWalk2dMobilityModel::GetTypeId (void)
{
static TypeId tid = TypeId ("RandomWalkMobilityModel")
.SetParent<MobilityModel> ()
.SetGroupName ("Mobility")
.AddConstructor<RandomWalk2dMobilityModel> ()
// followed by a number of Parameters
.AddParameter ("bounds",
"Bounds of the area to cruise.",
MakeRectangleParamSpec (&RandomWalk2dMobilityModel::m_bounds, Rectangle (0.0, 0.0, 100.0, 100.0)))
.AddParameter ("time",
"Change current direction and speed after moving for this delay.",
MakeTimeParamSpec (&RandomWalk2dMobilityModel::m_modeTime,
Seconds (1.0)))
// etc (more parameters).
@end verbatim
The declaration for this in the class declaration is one-line public
member method:
@verbatim
public:
static TypeId GetTypeId (void);
@end verbatim
@section Adding new class type to the Value system
From the perspective of the user who writes a new class in the system and
wants to hook it in to the attribute system, there is mainly the matter
of writing
the conversions to/from strings and Values. Most of this can be
copy/pasted with macro-ized code. For instance, consider class
Rectangle in the @code{src/mobility/} directory:
One line is added to the class declaration:
@verbatim
/**
* \brief a 2d rectangle
*/
class Rectangle
{
...
VALUE_HELPER_HEADER_1 (Rectangle);
};
@end verbatim
One templatized declaration, and two operators, are added below the
class declaration:
@verbatim
std::ostream &operator << (std::ostream &os, const Rectangle &rectangle);
std::istream &operator >> (std::istream &is, Rectangle &rectangle);
VALUE_HELPER_HEADER_2 (Rectangle);
@end verbatim
In the class definition, the code looks like this:
@verbatim
VALUE_HELPER_CPP (Rectangle);
std::ostream &
operator << (std::ostream &os, const Rectangle &rectangle)
{
os << rectangle.xMin << "|" << rectangle.xMax << "|" << rectangle.yMin << "|" << rectangle.yMax;
return os;
}
std::istream &
operator >> (std::istream &is, Rectangle &rectangle)
{
char c1, c2, c3;
is >> rectangle.xMin >> c1 >> rectangle.xMax >> c2 >> rectangle.yMin >> c3 >> rectangle.yMax;
if (c1 != '|' ||
c2 != '|' ||
c3 != '|')
{
is.setstate (std::ios_base::failbit);
}
return is;
}
@end verbatim
These stream operators simply convert from a string representation of the
Rectangle ("xMin|xMax|yMin|yMax") to the underlying Rectangle, and the
modeler must specify these operators and the string syntactical representation
of an instance of the new class.

View File

@@ -1,20 +0,0 @@
@node Helper Functions
@chapter Helper Functions
@anchor{chap:Helpers}
This chapter describes an intermediate API for the simulator; what we
call the "helper API". The helper API is implemented in
@code{src/helper/} directory; it depends on (and wraps) the low-level
API which is implemented everywhere else in @code{src/}. The following
figure shows this relationship.
@center @image{figures/helpers,,,,png}
The use of the helper API is optional. It has two main goals:
@itemize @bullet
@item Provide "syntactic sugar" to wrap a number of related low-level
API calls together, that would normally be grouped together often, into
something that is more user-friendly.
@item Handle configuration of larger topological units (e.g., a set
of nodes or a set of nodes on a particular link) .
@end itemize
(more to follow)

File diff suppressed because it is too large Load Diff

View File

@@ -1,24 +0,0 @@
@node Logging
@chapter Logging
@anchor{chap:Logging}
This chapter is the first in a series of chapters discussing things that
one can do to modify the input or output of existing ns-3 scripts.
Examples:
@itemize @bullet
@item Enable or disable the generation of log messages, with fine granularity
@item Set default values for configuration values in the system
@item Generate a report of all configuration values used during a simulation
run (not yet implemented)
@item Set or get values of member variables on objects already instantiated
@item Customizing the tracing output of the script
@item Generate statistics on (not yet implemented)
@item Perform a large number of independent runs of the same simulation
@end itemize
@node Logging Basics
@section Logging Basics
@node Enabling Log Output
@section Enabling Log Output

File diff suppressed because it is too large Load Diff

View File

@@ -1,462 +0,0 @@
@c ========================================================================
@c Simulation Output
@c ========================================================================
@node Simulation Output
@chapter Simulation Output
At this point, you should be able to execute any of the built-in
programs distributed with @command{ns-3}. Next, we will look at
how to generate and tailor the simulation output, before turning
to how to modify simulation scripts to do different things.
@node Tracing Basics
@section Tracing Basics
The whole point of simulation is to generate output for further
study, and the @command{ns-3} tracing system is a primary
mechanism for this.
Since @command{ns-3} is a C++ program, standard facilities for
generating output from C++ programs apply:
@verbatim
#include <iostream>
...
int main ()
{
...
std::cout << "The value of x is " << x << std::endl;
...
}
@end verbatim
The goal of the @command{ns-3} tracing system is to
provide a structured way to configure the simulator to output results
in standard or modifiable formats.
@itemize @bullet
@item For basic tasks, the tracing system should allow the user to
generate standard tracing for popular tracing sources, and to customize
which objects generate the tracing.
@item Intermediate users will be able to extend the tracing system to
modify the output format generated, or to insert new tracing sources,
without modifying the core of the simulator.
@item Advanced users can modify the simulator core to add new
tracing sources and sinks.
@end itemize
The @command{ns-3} tracing system is fundamentally built on the
concept of separating tracing sources from sinks.
@enumerate
@item Trace sources (e.g., provide access to every packet received)
@item Trace sinks (e.g., print out the packet)
@item A mechanism to tie together sources and sinks
@end enumerate
The rationale for this division is to allow users to attach new
types of sinks to existing tracing sources, without requiring
users to edit and recompile the core of the simulator.
Thus, in the example above, a user could write a new tracing sink
and attach it to an existing tracing source. What remains to
be defined is a way for users to find these hooks (tracing sources)
and attach sinks to them. A new tracing namespace is defined for
this purpose.
We will first walk through how some pre-defined sources and sinks
are provided and may be customized with little user effort. We
return later in this chapter to advanced tracing configuration including
extending the tracing namespace and creating new tracing sources.
@subsection ASCII tracing
@cindex ASCII
For Internet nodes, the ASCII trace wrapper is a wrapper around
the @command{ns-3} low-level
tracing system that lets you get access to underlying trace events easily.
The output of a trace of a simulation run is an ASCII file --- thus the name.
In the spirit of keeping things simple, you won't be able to control or
configure the output at this stage.
For those familiar with @command{ns-2} output, this type of trace is
analogous to the @command{out.tr} generated by many scripts.
@cindex tracing packets
Let's just jump right in. As usual, we need to include the definitions
related to using ASCII tracing (don't edit any files quite yet):
@verbatim
#include "ns3/ascii-trace.h"
@end verbatim
We then need to add the code to the script to actually enable the ASCII tracing
code. The following code must be inserted before the call to
@code{Simulator::Run ();}:
@verbatim
AsciiTrace asciitrace ("tutorial.tr");
asciitrace.TraceAllQueues ();
asciitrace.TraceAllNetDeviceRx ();
@end verbatim
The first line declares an object of type @code{AsciiTrace} named
@code{asciitrace} and passes a string parameter to its constructor. This
parameter is a file name to which all of the trace information will be written.
The second line, @code{asciitrace.TraceAllQueues ();} asks the trace object to
arrange that all queue operations (enqueue, dequeue, drop) on the queues
in all of the nodes of the system be traced. On the receive side,
@code{asciitrace.TraceAllNetDeviceRx ()} traces packets received by
a NetDevice. For those familiar with @command{ns-2}, these are equivalent
to the popular trace points that log "+", "-", "d", and "r" events.
Try running the following program from the command line:
@verbatim
./waf --run tutorial-csma-echo-ascii-trace
@end verbatim
@cindex tutorial.tr
Just as you have seen previously, you will see some messages from @emph{Waf}
and then the ``Compilation finished successfully'' message. The
next message, @code{UDP Echo Simulation} is from the running program. When
it ran, the program will have created a file named @code{tutorial.tr}.
Because of the way that Waf works, the file is not created in the local
directory, it is created at the top-level directory of the repository. So,
change into the top level directory and take a look at the file
@code{tutorial.tr} in your favorite editor.
@subsubsection Parsing Ascii Traces
@cindex parsing ascii traces
This section parses in detail the structure of the ascii tracing
output. If you find this output format self explanatory (it
resembles tcpdump output), you may skip to the next
section on pcap tracing.
@cindex trace event
There's a lot of information there in a pretty dense form, but the first thing
to notice is that there are a number of distinct lines in this file. It may
be difficult to see this clearly unless you widen your windows considerably.
Each line in the file corresponds to a @emph{trace event}. A trace event
happens whenever specific conditions happen in the simulation. In this case
we are tracing events on the @emph{device queue} present in every net device
on every node in the simulation. The device queue is a queue through which
every packet destined for a channel must pass --- it is the device
@emph{transmit} queue. Note that each line in the trace file begins with a
lone character (has a space after it). This character will have the following
meaning:
@cindex enqueue
@cindex dequeue
@cindex drop
@itemize @bullet
@item @code{+}: An enqueue operation occurred on the device queue;
@item @code{-}: A dequeue operation occurred on the device queue;
@item @code{d}: A packet was dropped, typically because the queue was full.
@end itemize
Let's take a more detailed view of the first line. I'll break it down into
sections (indented for clarity) with a two digit reference number on the
left side:
@verbatim
00 +
01 2
02 nodeid=0
03 device=0
04 queue-enqueue
05 pkt-uid=9
06 ETHERNET
07 length/type=0x806,
08 source=08:00:2e:00:00:00,
09 destination=ff:ff:ff:ff:ff:ff
10 ARP(request
11 source mac: 08:00:2e:00:00:00
12 source ipv4: 10.1.1.1
13 dest ipv4: 10.1.1.2)
14 ETHERNET fcs=0
@end verbatim
@cindex trace event
@cindex simulation time
The first line of this expanded trace event (reference number 00) is the
queue operation. We have a @code{+} character, so this corresponds to an
@emph{enqueue} operation. The second line (reference 01) is the simulation
time expressed in seconds. You may recall that we asked the
@code{UdpEchoClient} to start sending packets at two seconds. Here we see
confirmation that this is, indeed, happening.
@cindex node number
@cindex net device number
@cindex smart pointer
The next lines of the example listing (references 02 and 03) tell us that
this trace event originated in a given node and net device. Each time a node
is created it is given an identifying number that monotonically increases from
zero. Therefore, @code{nodeid=0} means that the node in which the given trace
event originated is the first node we created. In the case of our script,
this first node is the node pointed to by the smart pointer @code{n0}. Not
too surprisingly, this is also the node to which we attached the
@code{UdpEchoClient}. The device number is local to each node, and so the
device given by @code{device=0} is the first net device that we added to the
node in question. In our simulation, this corresponds to the
@code{CsmaNetDevice} we added to node zero (@code{n0}).
@cindex uid
@cindex unique ID
@cindex packet
The next line (reference 04) is a more readable form of the operation code
seen in the first line --- i.e., the character @code{+} means
@code{queue-enqueue}. Reference number 05 indicates that the @emph{unique id}
of the packet being enqueued is @code{9}. The fact that the first packet we
see has a unique ID of 9 should indicate to you that other things have
happened in the protocol stack before we got to this point. This will become
clear momentarily.
@cindex Ethernet
@cindex MAC address
Reference items 06 and 14 indicate that this is an Ethernet packet with
a zero (not computed) checksum (note the indentation to make parsing this
trace event a little easier). Reference 08 and 09 are the source and
destination addresses of this packet. The packet is from the MAC address we
assigned to the node zero net device in the script, and is destined for the
broadcast address --- this is a broadcast packet.
@cindex Address Resolution Protocol
@cindex ARP
@cindex ARP|request
Reference items 10 through 13 make clear what is happening. This is an ARP
(Address Resolution Protocol) request for the MAC address of the node on
which the @code{UdpEchoServer} resides. The protocol stack can't send a UDP
packet to be echoed until it knows (resolves) the MAC address; and this trace
event corresponds to an ARP request being queued for transmission to the local
network. The next line in the trace file (partially expanded),
@verbatim
00 -
01 2
02 nodeid=0
03 device=0
04 queue-dequeue
05 pkt-uid=9
...
@end verbatim
shows the (same) ARP request packet being dequeued from the device queue by
the net device and (implicitly) being sent down the channel to the broadcast
MAC address. We are not tracing net device reception events so we don't
actually see all of the net devices receiving the broadcast packet. We do,
however see the following in the third line of the trace file:
@verbatim
00 +
01 2.00207
02 nodeid=1
03 device=0
04 queue-enqueue
05 pkt-uid=10
06 ETHERNET
07 length/type=0x806,
08 source=08:00:2e:00:00:01,
09 destination=08:00:2e:00:00:00,
10 ARP(reply
11 source mac: 08:00:2e:00:00:01
12 source ipv4: 10.1.1.2
13 dest mac: 08:00:2e:00:00:00
14 dest ipv4: 10.1.1.1)
15 ETHERNET fcs=0
@end verbatim
@cindex simulation time
@cindex ARP|response
Notice that this is a queue-enqueue operation (references 00 and 04) happening
on node one (reference 02) at simulation time 2.00207 seconds (reference 01).
Looking at the packet payload (references 10-14) we see that this is an ARP
reply to the request sent by node zero. Note that the simulation time
(reference 01) is now 2.00207 seconds. This is a direct result of the data rate
(5 mb/s) and latency (2 ms) parameters that we passed to the
@code{CsmaChannel} when we created it. Clearly the ARP request packet was
sent over the channel and received approximately 2 ms later by node one. A
corresponding ARP response packet was created and enqueued on node one's net
device. It is this enqueue trace event that has been logged.
@cindex queue
@cindex queue|transmit
@cindex echo
Given the current state of affairs, the next thing you may expect to see is
this ARP reply being received by node zero, but remember we are only looking
at trace events on the device @emph{transmit} queue. The reception of the ARP
response by node zero will not directly trigger any trace event in this case,
but it will enable the protocol stack to continue what it was originally doing
(trying to send an echo packet). Thus, the next line we see in the trace file
(@code{tutorial.tr}) is the first UDP echo packet being sent to the net device.
@verbatim
00 +
01 2.00415
02 nodeid=0
03 device=0
04 queue-enqueue
05 pkt-uid=7
06 ETHERNET
07 length/type=0x800,
08 source=08:00:2e:00:00:00,
09 destination=08:00:2e:00:00:01
10 IPV4(
11 tos 0x0
12 ttl 64
13 id 0
14 offset 0
15 flags [none]
16 length: 1052) 10.1.1.1 > 10.1.1.2
17 UDP(length: 1032)
18 49153 > 7
19 DATA (length 1024)
20 ETHERNET fcs=0
@end verbatim
@cindex simulation time
@cindex echo
@cindex ARP
@cindex ARP|request
@cindex ARP|response
@cindex IP
@cindex Ipv4
I won't go into too much detail about this packet, but I will point out a
few key items in the trace. First, the packet was enqueued at simulation time
of 2.00415 seconds. This time reflects the fact that the echo client
application started at 2.0 seconds and there were two ARP packets transmitted
across the network (two milliseconds + data transmission time each way). The
packet unique identifier (reference 05) is 7. Notice that this is a lower
number than the ARP request packet, which had a unique ID of 9. This tells
us that the UDP packet was actually created before the ARP request packet ---
which makes perfect sense since it was the attempt to send packet 7 that
triggered sending the ARP request packet 9. Note that this is an Ethernet
packet (reference 06) like all other packets in this simulation, however this
particular packet carries an IPV4 payload and therefore has an IP version 4
header (indicated by references 10-16). This Ipv4 in turn contains a UDP
header (references 17, 18) and finally 1024 bytes of data (reference 20).
Clearly, this is the UDP echo packet emitted by the
@code{UdpEchoClient Application}.
The next trace event is an ARP request from node one. We can infer that node
one has received the UDP echo packet and the @code{UdpEchoServer Application}
on that node has turned the packet around. Just as node zero needed to ARP
for the MAC address of node one, now node one must ARP for the MAC address of
node zero. We see the ARP request enqueued on the transmit queue of node one;
then we see the ARP request dequeued from the transmit queue of node one (and
implicitly transmitted to node zero). Then we see an ARP response enqueued
on the transmit queue of node zero; and finally the ARP response dequeued (and
implicitly transmitted back to node one).
This exchange is summarized in the following trace event excerpts,
@verbatim
+ 2.00786 nodeid=1 ... ARP(request ...
- 2.00786 nodeid=1 ... ARP(request ...
+ 2.00994 nodeid=0 ... ARP(reply ...
- 2.00994 nodeid=0 ... ARP(reply ...
@end verbatim
The final two trace events in the @code{tutorial.tr} file correspond to the
echoed packet being enqueued for transmission on the net device for node one,
and that packet being dequeued (and implicitly transmitted back to node zero).
@cindex AsciiTrace!TraceAllNetDeviceRx
@cindex ARP!request
If you look at the trace file (@code{tutorial.tr}) you will also see some
entries with an @code{r} event, indicating a
@emph{receive} trace event. Recall that the first packet sent on the network
was a broadcast ARP request. We should then see all four nodes receive a
copy of this request. This is the case, as the first four receive trace
events are,
@verbatim
r 2.00207 nodeid=0 device=0 dev-rx pkt-uid=9 ARP(request ...
r 2.00207 nodeid=1 device=0 dev-rx pkt-uid=9 ARP(request ...
r 2.00207 nodeid=2 device=0 dev-rx pkt-uid=9 ARP(request ...
r 2.00207 nodeid=3 device=0 dev-rx pkt-uid=9 ARP(request ...
@end verbatim
@cindex unique ID
You can see that a copy of the broadcast packet with unique ID 9 was received
by the net devices on nodes 0, 1, 2 and 3. We leave it up to you to parse the
rest of the trace file and understand the remaining reception events.
@subsection PCAP Trace Wrapper
@cindex pcap
@cindex Wireshark
The @command{ns-3} @emph{pcap trace wrapper} is used to create trace files in
@code{.pcap} format. The acronym pcap (usually written in lower case) stands
for @emph{p}acket @emph{cap}ture, and is actually an API that includes the
definition of a @code{.pcap} file format. The most popular program that can
read and display this format is Wireshark (formerly called Ethereal).
However, there are many traffic trace analyzers that use this packet
format, including X, Y, and Z. We encourage users to exploit the
many tools available for analyzing pcap traces; below, we show how
tcpdump and Wireshark can be used.
@cindex tutorial-csma-echo-ascii-trace.cc
@cindex tutorial-csma-echo-pcap-trace.cc
The code used to enable pcap tracing is similar to that for ASCII tracing.
We have provided another file, @code{tutorial-csma-echo-pcap-trace.cc} that
uses the pcap trace wrapper. We have added the code to include the pcap
trace wrapper definitions:
@verbatim
#include "ns3/pcap-trace.h"
@end verbatim
And then added the following code below the AsciiTrace methods:
@cindex PcapTrace
@cindex PcapTrace!TraceAllIp
@verbatim
PcapTrace pcaptrace ("tutorial.pcap");
pcaptrace.TraceAllIp ();
@end verbatim
The first line of the code immediately above declares an object of type
@code{PcapTrace} named @code{pcaptrace} and passes a string parameter to its
constructor. This object is used to hide the details of the actual tracing
subsystem. The parameter is a base file name from which the actual trace file
names will be built. The second line of code tells the @code{PcapTrace}
object to trace all IP activity in all of the nodes present in the simulation.
@cindex interface index
Trace files are not created until trace activity is detected. Each file name
is composed of the base file name, followed by a @code{'-'}, a node id followed
by a @code{'-'}, and an IP interface index. You will soon see a file named
@code{tutorial.pcap-0-1}, for example. This will be the trace file generated
as events are detected on node zero, interface index one. N.B. Interface
indices are different than net device indices --- interface index zero
corresponds to the loopback interface and interface index one corresponds to
the first net device you added to a node.
You may run the new program just like all of the others so far:
@cindex Waf
@verbatim
./waf --run tutorial-csma-echo-pcap-trace
@end verbatim
If you look at the top level directory of your distribution, you should now
see three log files: @code{tutorial.tr} is the ASCII trace file we have
previously examined. @code{tutorial.pcap-0-1} and @code{tutorial.pcap-1-1}
are the new pcap files we just generated. There will not be files
corresponding to nodes two and three since we have not sent any IP packets to
those nodes.
@subsubsection Reading output with tcpdump
@cindex tcpdump
@subsubsection Reading output with Wireshark
@cindex Wireshark
If you are unfamiliar with Wireshark, there is a web site available from which
you can download programs and documentation: @uref{http://www.wireshark.org/}.
If you have Wireshark available, you can open each of the trace files and
display the contents as if you had captured the packets using a
@emph{packet sniffer}. Note that only IP packets are traced using this
wrapper, so you will not see the ARP exchanges that were logged when using
the ASCII trace wrapper. You are encouraged to take a look at the contents
of these pcap files using your favorite pcap software (or Wireshark).
@node Advanced Tracing
@section Advanced Tracing

View File

@@ -1,9 +0,0 @@
@node Statistics
@chapter Statistics
@anchor{chap:Statistics}
ns-3 does not presently have support for statistics (automatically generated
statistical output). This is planned
for development later in 2008. If you are interested in contributing,
please see @uref{http://www.nsnam.org/wiki/index.php/Suggested_Projects,,our suggested projects page} or contact the ns-developers
list.

View File

@@ -1,82 +0,0 @@
@node Troubleshooting
@chapter Troubleshooting
This chapter posts some information about possibly common errors in building
or running ns-3 programs.
Please note that the wiki (@uref{http://www.nsnam.org/wiki/index.php/Troubleshooting}) may have contributed items.
@node Build errors
@section Build errors
@node Run-time errors
@section Run-time errors
Sometimes, errors can occur with a program after a successful build. These
are run-time errors, and can commonly occur when memory is corrupted or
pointer values are unexpectedly null.
Here is an example of what might occur:
@verbatim
ns-old:~/ns-3-nsc$ ./waf --run tcp-point-to-point
Entering directory `/home/tomh/ns-3-nsc/build'
Compilation finished successfully
Command ['/home/tomh/ns-3-nsc/build/debug/examples/tcp-point-to-point'] exited with code -11
@end verbatim
The error message says that the program terminated unsuccessfully, but it is
not clear from this information what might be wrong. To examine more
closely, try running it under the @uref{http://sources.redhat.com/gdb/,,gdb debugger}:
@verbatim
ns-old:~/ns-3-nsc$ ./waf --run tcp-point-to-point --command-template="gdb %s"
Entering directory `/home/tomh/ns-3-nsc/build'
Compilation finished successfully
GNU gdb Red Hat Linux (6.3.0.0-1.134.fc5rh)
Copyright 2004 Free Software Foundation, Inc.
GDB is free software, covered by the GNU General Public License, and you are
welcome to change it and/or distribute copies of it under certain conditions.
Type "show copying" to see the conditions.
There is absolutely no warranty for GDB. Type "show warranty" for details.
This GDB was configured as "i386-redhat-linux-gnu"...Using host libthread_db library "/lib/libthread_db.so.1".
(gdb) run
Starting program: /home/tomh/ns-3-nsc/build/debug/examples/tcp-point-to-point
Reading symbols from shared object read from target memory...done.
Loaded system supplied DSO at 0xf5c000
Program received signal SIGSEGV, Segmentation fault.
0x0804aa12 in main (argc=1, argv=0xbfdfefa4)
at ../examples/tcp-point-to-point.cc:136
136 Ptr<Socket> localSocket = socketFactory->CreateSocket ();
(gdb) p localSocket
$1 = {m_ptr = 0x3c5d65}
(gdb) p socketFactory
$2 = {m_ptr = 0x0}
(gdb) quit
The program is running. Exit anyway? (y or n) y
@end verbatim
Note first the way the program was invoked --- pass the command to run as
an argument to the command template "gdb %s".
This tells us that there was an attempt to dereference a null pointer
socketFactory.
Let's look around line 136 of tcp-point-to-point, as gdb suggests:
@verbatim
Ptr<SocketFactory> socketFactory = n2->GetObject<SocketFactory> (Tcp::iid);
Ptr<Socket> localSocket = socketFactory->CreateSocket ();
localSocket->Bind ();
@end verbatim
The culprit here is that the return value of GetObject is not being
checked and may be null.
Sometimes you may need to use the @uref{http://valgrind.org,,valgrind memory
checker} for more subtle errors. Again, you invoke the use of valgrind
similarly:
@verbatim
ns-old:~/ns-3-nsc$ ./waf --run tcp-point-to-point --command-template="valgrind %s"
@end verbatim

View File

@@ -1,156 +0,0 @@
body {
font-family: "Trebuchet MS", "Bitstream Vera Sans", verdana, lucida, arial, helvetica, sans-serif;
background: white;
color: black;
font-size: 11pt;
}
h1, h2, h3, h4, h5, h6 {
# color: #990000;
color: #009999;
}
pre {
font-size: 10pt;
background: #e0e0e0;
color: black;
}
a:link, a:visited {
font-weight: normal;
text-decoration: none;
color: #0047b9;
}
a:hover {
font-weight: normal;
text-decoration: underline;
color: #0047b9;
}
img {
border: 0px;
}
#main th {
font-size: 12pt;
background: #b0b0b0;
}
.odd {
font-size: 12pt;
background: white;
}
.even {
font-size: 12pt;
background: #e0e0e0;
}
.answer {
font-size: large;
font-weight: bold;
}
.answer p {
font-size: 12pt;
font-weight: normal;
}
.answer ul {
font-size: 12pt;
font-weight: normal;
}
#container {
position: absolute;
width: 100%;
height: 100%;
top: 0px;
}
#feedback {
color: #b0b0b0;
font-size: 9pt;
font-style: italic;
}
#header {
position: absolute;
margin: 0px;
top: 10px;
height:96px;
left: 175px;
right: 10em;
bottom: auto;
background: white;
clear: both;
}
#middle {
position: absolute;
left: 0;
height: auto;
width: 100%;
}
#main {
position: absolute;
top: 50px;
left: 175px;
right: 100px;
background: white;
padding: 0em 0em 0em 0em;
}
#navbar {
position: absolute;
top: 75px;
left: 0em;
width: 146px;
padding: 0px;
margin: 0px;
font-size: 10pt;
}
#navbar a:link, #navbar a:visited {
font-weight: normal;
text-decoration: none;
color: #0047b9;
}
#navbar a:hover {
font-weight: normal;
text-decoration: underline;
color: #0047b9;
}
#navbar dl {
width: 146px;
padding: 0;
margin: 0 0 10px 0px;
background: #99ffff url(images/box_bottom2.gif) no-repeat bottom left;
}
#navbar dt {
padding: 6px 10px;
font-size: 100%;
font-weight: bold;
background: #009999;
margin: 0px;
border-bottom: 1px solid #fff;
color: white;
background: #009999 url(images/box_top2.gif) no-repeat top left;
}
#navbar dd {
font-size: 100%;
margin: 0 0 0 0px;
padding: 6px 10px;
color: #0047b9;
}
dd#selected {
background: #99ffff url(images/arrow.gif) no-repeat;
background-position: 4px 10px;
}

View File

@@ -1,105 +0,0 @@
\input texinfo @c -*-texinfo-*-
@c %**start of header
@setfilename ns-3.info
@settitle ns-3 tutorial
@c @setchapternewpage odd
@c %**end of header
@ifinfo
Primary documentation for the @command{ns-3} project is available in
three forms:
@itemize @bullet
@item @uref{http://www.nsnam.org/doxygen/index.html,,ns-3 Doxygen/Manual}: Documentation of the public APIs of the simulator
@item Tutorial (this document)
@item @uref{http://www.nsnam.org/wiki/index.php,, ns-3 wiki}
@end itemize
This document is written in GNU Texinfo and is to be maintained in
revision control on the @command{ns-3} code server. Both PDF and HTML versions
should be available on the server. Changes to
the document should be discussed on the ns-developers@@isi.edu mailing list.
@end ifinfo
@copying
This is an @command{ns-3} tutorial.
Primary documentation for the @command{ns-3} project is available in
three forms:
@itemize @bullet
@item @uref{http://www.nsnam.org/doxygen/index.html,,ns-3 Doxygen/Manual}: Documentation of the public APIs of the simulator
@item Tutorial (this document)
@item @uref{http://www.nsnam.org/wiki/index.php,, ns-3 wiki}
@end itemize
This document is written in GNU Texinfo and is to be maintained in
revision control on the @command{ns-3} code server. Both PDF and HTML
versions should be available on the server. Changes to
the document should be discussed on the ns-developers@@isi.edu mailing list.
This software is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see @uref{http://www.gnu.org/licenses/}.
@end copying
@titlepage
@title ns-3 Tutorial
@author ns-3 project
@author feedback: ns-developers@@isi.edu
@today{}
@c @page
@vskip 0pt plus 1filll
@insertcopying
@end titlepage
@c So the toc is printed at the start.
@anchor{Full Table of Contents}
@contents
@ifnottex
@node Top, Overview, Full Table of Contents
@top ns-3 Tutorial (html version)
For a pdf version of this tutorial,
see @uref{http://www.nsnam.org/docs/tutorial.pdf}.
@insertcopying
@end ifnottex
@menu
* Tutorial Goals::
Part 1: Getting Started with ns-3
* Overview::
* Browsing::
* Resources::
* Downloading and Compiling::
* Some-Prerequisites::
Part 2: Reading ns-3 Programs
* A-First-ns-3-Script::
Part 3: Reconfiguring Existing ns-3 Scripts
* Logging::
* ns-3 Attributes::
* Tracing::
* Statistics::
Part 4: Creating New or Revised Topologies
* Helper Functions::
@end menu
@include introduction.texi
@include log.texi
@include attributes.texi
@include statistics.texi
@include helpers.texi
@printindex cp
@bye

View File

@@ -1,310 +0,0 @@
@c ========================================================================
@c Begin document body here
@c ========================================================================
@c ========================================================================
@c PART: Introduction
@c ========================================================================
@c The below chapters are under the major heading "Introduction"
@c This is similar to the Latex \part command
@c
@c ========================================================================
@c Introduction
@c ========================================================================
@node Introduction
@chapter Introduction
@menu
* For ns-2 Users::
* Contributing::
* Tutorial Organization::
@end menu
The @command{ns-3} simulator is a discrete-event network simulator targeted
primarily for research and educational use. The
@uref{http://www.nsnam.org,,ns-3 project},
started in 2006, is an open-source project developing @command{ns-3}.
Primary documentation for the @command{ns-3} project is available in four
forms:
@itemize @bullet
@item @uref{http://www.nsnam.org/doxygen/index.html,,ns-3 Doxygen/Manual}:
Documentation of the public APIs of the simulator
@item Tutorial (this document)
@item @uref{http://www.nsnam.org/docs/manual.html,,Reference Manual}: Reference Manual
@item @uref{http://www.nsnam.org/wiki/index.php,, ns-3 wiki}
@end itemize
The purpose of this tutorial is to introduce new @command{ns-3} users to the
system in a structured way. It is sometimes difficult for new users to
glean essential information from detailed manuals and to convert this
information into working simulations. In this tutorial, we will build
several example simulations, introducing and explaining key concepts and
features as we go.
As the tutorial unfolds, we will introduce the full @command{ns-3} documentation
and provide pointers to source code for those interested in delving deeper
into the workings of the system.
A few key points are worth noting at the outset:
@itemize @bullet
@item Ns-3 is not an extension of @uref{http://www.isi.edu/nsnam/ns,,ns-2};
it is a new simulator. The two simulators are both written in C++ but
@command{ns-3} is a new simulator that does not support the ns-2 APIs. Some
models from ns-2 have already been ported from ns-2 to @command{ns-3}. The
project will continue to maintain ns-2 while @command{ns-3} is being built,
and will study transition and integration mechanisms.
@item @command{Ns-3} is open-source, and the project strives to maintain an
open environment for researchers to contribute and share their software.
@end itemize
@node For ns-2 Users
@section For ns-2 Users
For those familiar with ns-2, the most visible outward change when moving to
@command{ns-3} is the choice of scripting language. Ns-2 is
scripted in OTcl and results of simulations can be visualized using the
Network Animator @command{nam}. It is not possible to run a simulation
in ns-2 purely from C++ (i.e., as a main() program without any OTcl).
Moreover, some components of ns-2 are written in C++ and others in OTcl.
In @command{ns-3}, the simulator is written entirely in C++, with optional
Python bindings. Simulation scripts can therefore be written in C++
or in Python. The results of some simulations can be visualized by
@command{nam}, but new animators are under development. Since @command{ns-3}
generates pcap packet trace files, other utilities can be used to
analyze traces as well.
In this tutorial, we will first concentrate on scripting
directly in C++ and interpreting results via trace files.
But there are similarities as well (both, for example, are based on C++
objects, and some code from ns-2 has already been ported to @command{ns-3}).
We will try to highlight differences between ns-2 and @command{ns-3}
as we proceed in this tutorial.
A question that we often hear is "Should I still use ns-2 or move to
@command{ns-3}?" The answer is that it depends. @command{ns-3} does not have
all of the models that ns-2 currently has, but on the other hand, @command{ns-3}
does have new capabilities (such as handling multiple interfaces on nodes
correctly, use of IP addressing and more alignment with Internet
protocols and designs, more detailed 802.11 models, etc.). ns-2
models can usually be ported to @command{ns-3} (a porting guide is under
development). There is active development on multiple fronts for
@command{ns-3}. The @command{ns-3} developers believe (and certain early users
have proven) that @command{ns-3} is ready for active use, and should be an
attractive alternative for users looking to start new simulation projects.
@node Contributing
@section Contributing
@cindex contributing
@command{Ns-3} is a research and educational simulator, by and for the
research community. It will rely on the ongoing contributions of the
community to develop new models, debug or maintain existing ones, and share
results. There are a few policies that we hope will encourage people to
contribute to @command{ns-3} like they have for ns-2:
@itemize @bullet
@item Open source licensing based on GNU GPLv2 compatibility;
@item @uref{http://www.nsnam.org/wiki/index.php,,wiki};
@item @uref{http://www.nsnam.org/wiki/index.php/Contributed_Code,,Contributed Code} page, similar to ns-2's popular
@uref{http://nsnam.isi.edu/nsnam/index.php/Contributed_Code,,Contributed Code}
page;
@item @code{src/contrib} directory (we will host your contributed code);
@item Open @uref{http://www.nsnam.org/bugzilla,,bug tracker};
@item @command{Ns-3} developers will gladly help potential contributors to get
started with the simulator (please contact @uref{http://www.nsnam.org/people.html,,one of us}).
@end itemize
We realize that if you are reading this document, contributing back to
the project is probably not your foremost concern at this point, but
we want you to be aware that contributing is in the spirit of the project and
that even the act of dropping us a note about your early experience
with @command{ns-3} (e.g. "this tutorial section was not clear..."),
reports of stale documentation, etc. are much appreciated.
@node Tutorial Organization
@section Tutorial Organization
The tutorial assumes that new users might initially follow a path such as the
following:
@itemize @bullet
@item Try to download and build a copy;
@item Try to run a few sample programs;
@item Look at simulation output, and try to adjust it.
@end itemize
As a result, we have tried to organize the tutorial along the above
broad sequences of events.
@c ========================================================================
@c Resources
@c ========================================================================
@node Resources
@chapter Resources
@menu
* The Web::
* Mercurial::
* Waf::
* Development Environment::
* Socket Programming::
@end menu
@node The Web
@section The Web
@cindex www.nsnam.org
@cindex documentation
@cindex architecture
There are several important resources of which any @command{ns-3} user must be
aware. The main web site is located at @uref{http://www.nsnam.org} and
provides access to basic information about the @command{ns-3} system. Detailed
documentation is available through the main web site at
@uref{http://www.nsnam.org/documents.html}. You can also find documents
relating to the system architecture from this page.
There is a Wiki that complements the main @command{ns-3} web site which you will
find at @uref{http://www.nsnam.org/wiki/}. You will find user and developer
FAQs there, as well as troubleshooting guides, third-party contributed code,
papers, etc.
@cindex mercurial repository
@cindex ns-3-dev repository
@cindex release repository
The source code may be found and browsed at @uref{http://code.nsnam.org/}.
There you will find the current development tree in the repository named
@code{ns-3-dev}. Past releases and experimental repositories of the core
developers may also be found there.
@node Mercurial
@section Mercurial
Complex software systems need some way to manage the organization and
changes to the underlying code and documentation. There are many ways to
perform this feat, and you may have heard of some of the systems that are
currently used to do this. The Concurrent Version System (CVS) is probably
the most well known.
@cindex software configuration management
@cindex Mercurial
The @command{ns-3} project uses Mercurial as its source code management system.
Although you do not need to know much about Mercurial in order to complete
this tutorial, we recommend becoming familiar with Mercurial and using it
to access the source code. Mercurial has a web site at
@uref{http://www.selenic.com/mercurial/},
from which you can get binary or source releases of this Software
Configuration Management (SCM) system. Selenic (the developer of Mercurial)
also provides a tutorial at
@uref{http://www.selenic.com/mercurial/wiki/index.cgi/Tutorial/},
and a QuickStart guide at
@uref{http://www.selenic.com/mercurial/wiki/index.cgi/QuickStart/}.
You can also find vital information about using Mercurial and @command{ns-3}
on the main @command{ns-3} web site.
@node Waf
@section Waf
@cindex Waf
@cindex make
@cindex build
Once you have source code downloaded to your local system, you will need
to compile that source to produce usable programs. Just as in the case of
source code management, there are many tools available to perform this
function. Probably the most well known of these tools is @code{make}. Along
with being the most well known, @code{make} is probably the most difficult to
use in a very large and highly configurable system. Because of this, many
alternatives have been developed. Recently these systems have been developed
using the Python language.
The build system @code{Waf} is used on the @command{ns-3} project. It is one
of the new generation of Python-based build systems. You will not need to
understand any Python to build the existing @command{ns-3} system, and will
only have to understand a tiny and intuitively obvious subset of Python in
order to extend the system in most cases.
For those interested in the gory details of Waf, the main web site can be
found at @uref{http://code.google.com/p/waf/}.
@node Development Environment
@section Development Environment
@cindex C++
@cindex Python
As mentioned above, scripting in @command{ns-3} is done in C++ or Python.
As of ns-3.2, most of the @command{ns-3} API is available in Python, but the
models are written in C++ in either case. A working
knowledge of C++ and object-oriented concepts is assumed in this document.
We will take some time to review some of the more advanced concepts or
possibly unfamiliar language features, idioms and design patterns as they
appear. We don't want this tutorial to devolve into a C++ tutorial, though,
so we do expect a basic command of the language. There are an almost
unimaginable number of sources of information on C++ available on the web or
in print.
If you are new to C++, you may want to find a tutorial- or cookbook-based
book or web site and work through at least the basic features of the language
before proceeding. For instance,
@uref{http://www.cplusplus.com/doc/tutorial/,,this tutorial}.
@cindex toolchain
@cindex GNU
The @command{ns-3} system uses several components of the GNU ``toolchain''
for development. A
software toolchain is the set of programming tools available in the given
environment. For a quick review of what is included in the GNU toolchain see,
@uref{http://en.wikipedia.org/wiki/GNU_toolchain}. @command{ns-3} uses gcc,
GNU binutils, and gdb. However, we do not use the GNU build system tools,
neither make nor autotools. We use Waf for these functions.
@cindex Linux
Typically an @command{ns-3} author will work in Linux or a Linux-like
environment. For those running under Windows, there do exist environments
which simulate the Linux environment to various degrees. The @command{ns-3}
project supports development in the Cygwin environment for
these users. See @uref{http://www.cygwin.com/}
for details on downloading (MinGW is presently not officially supported,
although some of the project maintainers do work with it). Cygwin provides
many of the popular Linux system commands. It can, however, sometimes be
problematic due to the way it actually does its emulation, and sometimes
interactions with other Windows software can cause problems.
@cindex Cygwin
@cindex MinGW
If you do use Cygwin or MinGW and use Logitech products, we will save you
quite a bit of heartburn right off the bat and encourage you to take a look
at the @uref{http://oldwiki.mingw.org/index.php/FAQ,,MinGW FAQ}.
@cindex Logitech
Search for ``Logitech'' and read the FAQ entry, ``why does make often
crash creating a sh.exe.stackdump file when I try to compile my source code.''
Believe it or not, the @code{Logitech Process Monitor} insinuates itself into
every DLL in the system when it is running. It can cause your Cygwin or
MinGW DLLs to die in mysterious ways and often prevents debuggers from
running. Beware of Logitech software when using Cygwin.
Another alternative to Cygwin is to install a virtual machine environment
such as VMware server and install a Linux virtual machine.
@node Socket Programming
@section Socket Programming
@cindex sockets
We will assume a basic facility with the Berkeley Sockets API in the examples
used in this tutorial. If you are new to sockets, we recommend reviewing the
API and some common usage cases. For a good overview of programming TCP/IP
sockets we recommend @uref{http://www.elsevier.com/wps/find/bookdescription.cws_home/717656/description#description,,TCP/IP Sockets in C, Donahoo and Calvert}.
There is an associated web site that includes source for the examples in the
book, which you can find at:
@uref{http://cs.baylor.edu/~donahoo/practical/CSockets/}.
If you understand the first four chapters of the book (or for those who do
not have access to a copy of the book, the echo clients and servers shown in
the website above) you will be in good shape to understand the tutorial.
There is a similar book on Multicast Sockets,
@uref{http://www.elsevier.com/wps/find/bookdescription.cws_home/700736/description#description,,Multicast Sockets, Makofske and Almeroth},
that covers material you may need to understand if you look at the multicast
examples in the distribution.

42
doc/tutorial/pickle-to-xml.py Executable file
View File

@@ -0,0 +1,42 @@
#!/usr/bin/python
# output xml format:
# <pages>
# <page url="xx"><prev url="yyy">zzz</prev><next url="hhh">lll</next><fragment>file.frag</fragment></page>
# ...
# </pages>
import pickle
import os
import codecs
def dump_pickles(out, dirname, filename, path):
    """Write a <page> XML entry for the Sphinx pickle `filename` (found in
    `dirname`) and for every page reachable through its 'next' links.

    For each page, the page body is also written to a standalone
    '<pagename>.frag' file (UTF-8) in the current working directory.

    Parameters:
      out      -- writable text stream receiving the XML index
      dirname  -- directory containing the .fpickle files
      filename -- basename of the pickle file to start from
      path     -- URL path associated with the starting page
    """
    # Iterate instead of recursing so that a long chain of pages cannot
    # exhaust the interpreter's recursion limit (the original recursed
    # once per page).
    while filename is not None:
        # Pickle files are binary: open in 'rb' (text mode breaks under
        # Python 3 and on platforms that translate newlines).
        f = open(os.path.join(dirname, filename), 'rb')
        try:
            data = pickle.load(f)
        finally:
            f.close()

        # Emit the rendered page body as a UTF-8 fragment file.
        fragment_file = codecs.open(data['current_page_name'] + '.frag',
                                    mode='w', encoding='utf-8')
        try:
            fragment_file.write(data['body'])
        finally:
            fragment_file.close()

        out.write(' <page url="%s">\n' % path)
        out.write(' <fragment>%s.frag</fragment>\n' % data['current_page_name'])
        if data['prev'] is not None:
            out.write(' <prev url="%s">%s</prev>\n' %
                      (os.path.normpath(os.path.join(path, data['prev']['link'])),
                       data['prev']['title']))
        if data['next'] is not None:
            out.write(' <next url="%s">%s</next>\n' %
                      (os.path.normpath(os.path.join(path, data['next']['link'])),
                       data['next']['title']))
        out.write(' </page>\n')

        # Advance to the next page in the chain, if any.
        if data['next'] is not None:
            path = os.path.normpath(os.path.join(path, data['next']['link']))
            filename = os.path.basename(path) + '.fpickle'
        else:
            filename = None
# Entry point: argv[1] is the path of the first .fpickle file produced by
# the Sphinx pickle builder.  Walk the chain of pages starting there and
# emit the <pages> index XML on stdout.
import sys
sys.stdout.write('<pages>\n')
# Root the walk at URL path '/' and at the directory containing argv[1].
dump_pickles(sys.stdout, os.path.dirname(sys.argv[1]), os.path.basename(sys.argv[1]), '/')
sys.stdout.write('</pages>')

View File

@@ -0,0 +1,840 @@
.. include:: replace.txt
Conceptual Overview
-------------------
The first thing we need to do before actually starting to look at or write
|ns3| code is to explain a few core concepts and abstractions in the
system. Much of this may appear transparently obvious to some, but we
recommend taking the time to read through this section just to ensure you
are starting on a firm foundation.
Key Abstractions
****************
In this section, we'll review some terms that are commonly used in
networking, but have a specific meaning in |ns3|.
Node
++++
In Internet jargon, a computing device that connects to a network is called
a *host* or sometimes an *end system*. Because |ns3| is a
*network* simulator, not specifically an *Internet* simulator, we
intentionally do not use the term host since it is closely associated with
the Internet and its protocols. Instead, we use a more generic term also
used by other simulators that originates in Graph Theory --- the *node*.
In |ns3| the basic computing device abstraction is called the
node. This abstraction is represented in C++ by the class ``Node``. The
``Node`` class provides methods for managing the representations of
computing devices in simulations.
You should think of a ``Node`` as a computer to which you will add
functionality. One adds things like applications, protocol stacks and
peripheral cards with their associated drivers to enable the computer to do
useful work. We use the same basic model in |ns3|.
Application
+++++++++++
Typically, computer software is divided into two broad classes. *System
Software* organizes various computer resources such as memory, processor
cycles, disk, network, etc., according to some computing model. System
software usually does not use those resources to complete tasks that directly
benefit a user. A user would typically run an *application* that acquires
and uses the resources controlled by the system software to accomplish some
goal.
Often, the line of separation between system and application software is made
at the privilege level change that happens in operating system traps.
In |ns3| there is no real concept of operating system and especially
no concept of privilege levels or system calls. We do, however, have the
idea of an application. Just as software applications run on computers to
perform tasks in the "real world," |ns3| applications run on
|ns3| ``Nodes`` to drive simulations in the simulated world.
In |ns3| the basic abstraction for a user program that generates some
activity to be simulated is the application. This abstraction is represented
in C++ by the class ``Application``. The ``Application`` class provides
methods for managing the representations of our version of user-level
applications in simulations. Developers are expected to specialize the
``Application`` class in the object-oriented programming sense to create new
applications. In this tutorial, we will use specializations of class
``Application`` called ``UdpEchoClientApplication`` and
``UdpEchoServerApplication``. As you might expect, these applications
compose a client/server application set used to generate and echo simulated
network packets.
Channel
+++++++
In the real world, one can connect a computer to a network. Often the media
over which data flows in these networks are called *channels*. When
you connect your Ethernet cable to the plug in the wall, you are connecting
your computer to an Ethernet communication channel. In the simulated world
of |ns3|, one connects a ``Node`` to an object representing a
communication channel. Here the basic communication subnetwork abstraction
is called the channel and is represented in C++ by the class ``Channel``.
The ``Channel`` class provides methods for managing communication
subnetwork objects and connecting nodes to them. ``Channels`` may also be
specialized by developers in the object oriented programming sense. A
``Channel`` specialization may model something as simple as a wire. The
specialized ``Channel`` can also model things as complicated as a large
Ethernet switch, or three-dimensional space full of obstructions in the case
of wireless networks.
We will use specialized versions of the ``Channel`` called
``CsmaChannel``, ``PointToPointChannel`` and ``WifiChannel`` in this
tutorial. The ``CsmaChannel``, for example, models a version of a
communication subnetwork that implements a *carrier sense multiple
access* communication medium. This gives us Ethernet-like functionality.
Net Device
++++++++++
It used to be the case that if you wanted to connect a computer to a network,
you had to buy a specific kind of network cable and a hardware device called
(in PC terminology) a *peripheral card* that needed to be installed in
your computer. If the peripheral card implemented some networking function,
they were called Network Interface Cards, or *NICs*. Today most
computers come with the network interface hardware built in and users don't
see these building blocks.
A NIC will not work without a software driver to control the hardware. In
Unix (or Linux), a piece of peripheral hardware is classified as a
*device*. Devices are controlled using *device drivers*, and network
devices (NICs) are controlled using *network device drivers*
collectively known as *net devices*. In Unix and Linux you refer
to these net devices by names such as *eth0*.
In |ns3| the *net device* abstraction covers both the software
driver and the simulated hardware. A net device is "installed" in a
``Node`` in order to enable the ``Node`` to communicate with other
``Nodes`` in the simulation via ``Channels``. Just as in a real
computer, a ``Node`` may be connected to more than one ``Channel`` via
multiple ``NetDevices``.
The net device abstraction is represented in C++ by the class ``NetDevice``.
The ``NetDevice`` class provides methods for managing connections to
``Node`` and ``Channel`` objects; and may be specialized by developers
in the object-oriented programming sense. We will use the several specialized
versions of the ``NetDevice`` called ``CsmaNetDevice``,
``PointToPointNetDevice``, and ``WifiNetDevice`` in this tutorial.
Just as an Ethernet NIC is designed to work with an Ethernet network, the
``CsmaNetDevice`` is designed to work with a ``CsmaChannel``; the
``PointToPointNetDevice`` is designed to work with a
``PointToPointChannel`` and a ``WifiNetDevice`` is designed to work with
a ``WifiChannel``.
Topology Helpers
++++++++++++++++
In a real network, you will find host computers with added (or built-in)
NICs. In |ns3| we would say that you will find ``Nodes`` with
attached ``NetDevices``. In a large simulated network you will need to
arrange many connections between ``Nodes``, ``NetDevices`` and
``Channels``.
Since connecting ``NetDevices`` to ``Nodes``, ``NetDevices``
to ``Channels``, assigning IP addresses, etc., are such common tasks
in |ns3|, we provide what we call *topology helpers* to make
this as easy as possible. For example, it may take many distinct
|ns3| core operations to create a NetDevice, add a MAC address,
install that net device on a ``Node``, configure the node's protocol stack,
and then connect the ``NetDevice`` to a ``Channel``. Even more
operations would be required to connect multiple devices onto multipoint
channels and then to connect individual networks together into internetworks.
We provide topology helper objects that combine those many distinct operations
into an easy to use model for your convenience.
A First ns-3 Script
*******************
If you downloaded the system as was suggested above, you will have a release
of |ns3| in a directory called ``repos`` under your home
directory. Change into that release directory, and you should find a
directory structure something like the following:
::
AUTHORS doc/ README src/ waf.bat*
bindings/ examples/ RELEASE_NOTES utils/ wscript
build/ LICENSE samples/ VERSION wutils.py
CHANGES.html ns3/ scratch/ waf* wutils.pyc
Change into the ``examples/tutorial`` directory. You should see a file named
``first.cc`` located there. This is a script that will create a simple
point-to-point link between two nodes and echo a single packet between the
nodes. Let's take a look at that script line by line, so go ahead and open
``first.cc`` in your favorite editor.
Boilerplate
+++++++++++
The first line in the file is an emacs mode line. This tells emacs about the
formatting conventions (coding style) we use in our source code.
::
/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
This is always a somewhat controversial subject, so we might as well get it
out of the way immediately. The |ns3| project, like most large
projects, has adopted a coding style to which all contributed code must
adhere. If you want to contribute your code to the project, you will
eventually have to conform to the |ns3| coding standard as described
in the file ``doc/codingstd.txt`` or shown on the project web page
`here
<http://www.nsnam.org/codingstyle.html>`_.
We recommend that you, well, just get used to the look and feel of |ns3|
code and adopt this standard whenever you are working with our code. All of
the development team and contributors have done so with various amounts of
grumbling. The emacs mode line above makes it easier to get the formatting
correct if you use the emacs editor.
The |ns3| simulator is licensed using the GNU General Public
License. You will see the appropriate GNU legalese at the head of every file
in the |ns3| distribution. Often you will see a copyright notice for
one of the institutions involved in the |ns3| project above the GPL
text and an author listed below.
::
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
Module Includes
+++++++++++++++
The code proper starts with a number of include statements.
::
#include "ns3/core-module.h"
#include "ns3/simulator-module.h"
#include "ns3/node-module.h"
#include "ns3/helper-module.h"
To help our high-level script users deal with the large number of include
files present in the system, we group includes according to relatively large
modules. We provide a single include file that will recursively load all of
the include files used in each module. Rather than having to look up exactly
what header you need, and possibly have to get a number of dependencies right,
we give you the ability to load a group of files at a large granularity. This
is not the most efficient approach but it certainly makes writing scripts much
easier.
Each of the |ns3| include files is placed in a directory called
``ns3`` (under the build directory) during the build process to help avoid
include file name collisions. The ``ns3/core-module.h`` file corresponds
to the ns-3 module you will find in the directory ``src/core`` in your
downloaded release distribution. If you list this directory you will find a
large number of header files. When you do a build, Waf will place public
header files in an ``ns3`` directory under the appropriate
``build/debug`` or ``build/optimized`` directory depending on your
configuration. Waf will also automatically generate a module include file to
load all of the public header files.
Since you are, of course, following this tutorial religiously, you will
already have done a
::
./waf -d debug configure
in order to configure the project to perform debug builds. You will also have
done a
::
./waf
to build the project. So now if you look in the directory
``../../build/debug/ns3`` you will find the four module include files shown
above. You can take a look at the contents of these files and find that they
do include all of the public include files in their respective modules.
Ns3 Namespace
+++++++++++++
The next line in the ``first.cc`` script is a namespace declaration.
::
using namespace ns3;
The |ns3| project is implemented in a C++ namespace called
``ns3``. This groups all |ns3|-related declarations in a scope
outside the global namespace, which we hope will help with integration with
other code. The C++ ``using`` statement introduces the |ns3|
namespace into the current (global) declarative region. This is a fancy way
of saying that after this declaration, you will not have to type ``ns3::``
scope resolution operator before all of the |ns3| code in order to use
it. If you are unfamiliar with namespaces, please consult almost any C++
tutorial and compare the ``ns3`` namespace and usage here with instances of
the ``std`` namespace and the ``using namespace std;`` statements you
will often find in discussions of ``cout`` and streams.
Logging
+++++++
The next line of the script is the following,
::
NS_LOG_COMPONENT_DEFINE ("FirstScriptExample");
We will use this statement as a convenient place to talk about our Doxygen
documentation system. If you look at the project web site,
`ns-3 project
<http://www.nsnam.org>`_, you will find a link to "Doxygen
(ns-3-dev)" in the navigation bar. If you select this link, you will be
taken to our documentation page for the current development release. There
is also a link to "Doxygen (stable)" that will take you to the documentation
for the latest stable release of |ns3|.
Along the left side, you will find a graphical representation of the structure
of the documentation. A good place to start is the ``NS-3 Modules``
"book" in the |ns3| navigation tree. If you expand ``Modules``
you will see a list of |ns3| module documentation. The concept of
module here ties directly into the module include files discussed above. It
turns out that the |ns3| logging subsystem is part of the ``core``
module, so go ahead and expand that documentation node. Now, expand the
``Debugging`` book and then select the ``Logging`` page.
You should now be looking at the Doxygen documentation for the Logging module.
In the list of ``#define``s at the top of the page you will see the entry
for ``NS_LOG_COMPONENT_DEFINE``. Before jumping in, it would probably be
good to look for the "Detailed Description" of the logging module to get a
feel for the overall operation. You can either scroll down or select the
"More..." link under the collaboration diagram to do this.
Once you have a general idea of what is going on, go ahead and take a look at
the specific ``NS_LOG_COMPONENT_DEFINE`` documentation. I won't duplicate
the documentation here, but to summarize, this line declares a logging
component called ``FirstScriptExample`` that allows you to enable and
disable console message logging by reference to the name.
Main Function
+++++++++++++
The next lines of the script you will find are,
::
int
main (int argc, char *argv[])
{
This is just the declaration of the main function of your program (script).
Just as in any C++ program, you need to define a main function that will be
the first function run. There is nothing at all special here. Your
|ns3| script is just a C++ program.
The next two lines of the script are used to enable two logging components that
are built into the Echo Client and Echo Server applications:
::
LogComponentEnable("UdpEchoClientApplication", LOG_LEVEL_INFO);
LogComponentEnable("UdpEchoServerApplication", LOG_LEVEL_INFO);
If you have read over the Logging component documentation you will have seen
that there are a number of levels of logging verbosity/detail that you can
enable on each component. These two lines of code enable debug logging at the
INFO level for echo clients and servers. This will result in the application
printing out messages as packets are sent and received during the simulation.
Now we will get directly to the business of creating a topology and running
a simulation. We use the topology helper objects to make this job as
easy as possible.
Topology Helpers
++++++++++++++++
NodeContainer
~~~~~~~~~~~~~
The next two lines of code in our script will actually create the
|ns3| ``Node`` objects that will represent the computers in the
simulation.
::
NodeContainer nodes;
nodes.Create (2);
Let's find the documentation for the ``NodeContainer`` class before we
continue. Another way to get into the documentation for a given class is via
the ``Classes`` tab in the Doxygen pages. If you still have the Doxygen
handy, just scroll up to the top of the page and select the ``Classes``
tab. You should see a new set of tabs appear, one of which is
``Class List``. Under that tab you will see a list of all of the
|ns3| classes. Scroll down, looking for ``ns3::NodeContainer``.
When you find the class, go ahead and select it to go to the documentation for
the class.
You may recall that one of our key abstractions is the ``Node``. This
represents a computer to which we are going to add things like protocol stacks,
applications and peripheral cards. The ``NodeContainer`` topology helper
provides a convenient way to create, manage and access any ``Node`` objects
that we create in order to run a simulation. The first line above just
declares a NodeContainer which we call ``nodes``. The second line calls the
``Create`` method on the ``nodes`` object and asks the container to
create two nodes. As described in the Doxygen, the container calls down into
the |ns3| system proper to create two ``Node`` objects and stores
pointers to those objects internally.
The nodes as they stand in the script do nothing. The next step in
constructing a topology is to connect our nodes together into a network.
The simplest form of network we support is a single point-to-point link
between two nodes. We'll construct one of those links here.
PointToPointHelper
~~~~~~~~~~~~~~~~~~
We are constructing a point to point link, and, in a pattern which will become
quite familiar to you, we use a topology helper object to do the low-level
work required to put the link together. Recall that two of our key
abstractions are the ``NetDevice`` and the ``Channel``. In the real
world, these terms correspond roughly to peripheral cards and network cables.
Typically these two things are intimately tied together and one cannot expect
to interchange, for example, Ethernet devices and wireless channels. Our
Topology Helpers follow this intimate coupling and therefore you will use a
single ``PointToPointHelper`` to configure and connect |ns3|
``PointToPointNetDevice`` and ``PointToPointChannel`` objects in this
script.
The next three lines in the script are,
::
PointToPointHelper pointToPoint;
pointToPoint.SetDeviceAttribute ("DataRate", StringValue ("5Mbps"));
pointToPoint.SetChannelAttribute ("Delay", StringValue ("2ms"));
The first line,
::
PointToPointHelper pointToPoint;
instantiates a ``PointToPointHelper`` object on the stack. From a
high-level perspective the next line,
::
pointToPoint.SetDeviceAttribute ("DataRate", StringValue ("5Mbps"));
tells the ``PointToPointHelper`` object to use the value "5Mbps"
(five megabits per second) as the "DataRate" when it creates a
``PointToPointNetDevice`` object.
From a more detailed perspective, the string "DataRate" corresponds
to what we call an ``Attribute`` of the ``PointToPointNetDevice``.
If you look at the Doxygen for class ``ns3::PointToPointNetDevice`` and
find the documentation for the ``GetTypeId`` method, you will find a list
of ``Attributes`` defined for the device. Among these is the "DataRate"
``Attribute``. Most user-visible |ns3| objects have similar lists of
``Attributes``. We use this mechanism to easily configure simulations without
recompiling as you will see in a following section.
Similar to the "DataRate" on the ``PointToPointNetDevice`` you will find a
"Delay" ``Attribute`` associated with the ``PointToPointChannel``. The
final line,
::
pointToPoint.SetChannelAttribute ("Delay", StringValue ("2ms"));
tells the ``PointToPointHelper`` to use the value "2ms" (two milliseconds)
as the value of the transmission delay of every point to point channel it
subsequently creates.
NetDeviceContainer
~~~~~~~~~~~~~~~~~~
At this point in the script, we have a ``NodeContainer`` that contains
two nodes. We have a ``PointToPointHelper`` that is primed and ready to
make ``PointToPointNetDevices`` and wire ``PointToPointChannel`` objects
between them. Just as we used the ``NodeContainer`` topology helper object
to create the ``Nodes`` for our simulation, we will ask the
``PointToPointHelper`` to do the work involved in creating, configuring and
installing our devices for us. We will need to have a list of all of the
NetDevice objects that are created, so we use a NetDeviceContainer to hold
them just as we used a NodeContainer to hold the nodes we created. The
following two lines of code,
::
NetDeviceContainer devices;
devices = pointToPoint.Install (nodes);
will finish configuring the devices and channel. The first line declares the
device container mentioned above and the second does the heavy lifting. The
``Install`` method of the ``PointToPointHelper`` takes a
``NodeContainer`` as a parameter. Internally, a ``NetDeviceContainer``
is created. For each node in the ``NodeContainer`` (there must be exactly
two for a point-to-point link) a ``PointToPointNetDevice`` is created and
saved in the device container. A ``PointToPointChannel`` is created and
the two ``PointToPointNetDevices`` are attached. When objects are created
by the ``PointToPointHelper``, the ``Attributes`` previously set in the
helper are used to initialize the corresponding ``Attributes`` in the
created objects.
After executing the ``pointToPoint.Install (nodes)`` call we will have
two nodes, each with an installed point-to-point net device and a single
point-to-point channel between them. Both devices will be configured to
transmit data at five megabits per second over the channel which has a two
millisecond transmission delay.
InternetStackHelper
~~~~~~~~~~~~~~~~~~~
We now have nodes and devices configured, but we don't have any protocol stacks
installed on our nodes. The next two lines of code will take care of that.
::
InternetStackHelper stack;
stack.Install (nodes);
The ``InternetStackHelper`` is a topology helper that is to internet stacks
what the ``PointToPointHelper`` is to point-to-point net devices. The
``Install`` method takes a ``NodeContainer`` as a parameter. When it is
executed, it will install an Internet Stack (TCP, UDP, IP, etc.) on each of
the nodes in the node container.
Ipv4AddressHelper
~~~~~~~~~~~~~~~~~
Next we need to associate the devices on our nodes with IP addresses. We
provide a topology helper to manage the allocation of IP addresses. The only
user-visible API is to set the base IP address and network mask to use when
performing the actual address allocation (which is done at a lower level
inside the helper).
The next two lines of code in our example script, ``first.cc``,
::
Ipv4AddressHelper address;
address.SetBase ("10.1.1.0", "255.255.255.0");
declare an address helper object and tell it that it should begin allocating IP
addresses from the network 10.1.1.0 using the mask 255.255.255.0 to define
the allocatable bits. By default the addresses allocated will start at one
and increase monotonically, so the first address allocated from this base will
be 10.1.1.1, followed by 10.1.1.2, etc. The low level |ns3| system
actually remembers all of the IP addresses allocated and will generate a
fatal error if you accidentally cause the same address to be generated twice
(which is a very hard to debug error, by the way).
The next line of code,
::
Ipv4InterfaceContainer interfaces = address.Assign (devices);
performs the actual address assignment. In |ns3| we make the
association between an IP address and a device using an ``Ipv4Interface``
object. Just as we sometimes need a list of net devices created by a helper
for future reference we sometimes need a list of ``Ipv4Interface`` objects.
The ``Ipv4InterfaceContainer`` provides this functionality.
Now we have a point-to-point network built, with stacks installed and IP
addresses assigned. What we need at this point are applications to generate
traffic.
Applications
++++++++++++
Another one of the core abstractions of the ns-3 system is the
``Application``. In this script we use two specializations of the core
|ns3| class ``Application`` called ``UdpEchoServerApplication``
and ``UdpEchoClientApplication``. Just as we have in our previous
explanations, we use helper objects to help configure and manage the
underlying objects. Here, we use ``UdpEchoServerHelper`` and
``UdpEchoClientHelper`` objects to make our lives easier.
UdpEchoServerHelper
~~~~~~~~~~~~~~~~~~~
The following lines of code in our example script, ``first.cc``, are used
to set up a UDP echo server application on one of the nodes we have previously
created.
::
UdpEchoServerHelper echoServer (9);
ApplicationContainer serverApps = echoServer.Install (nodes.Get (1));
serverApps.Start (Seconds (1.0));
serverApps.Stop (Seconds (10.0));
The first line of code in the above snippet declares the
``UdpEchoServerHelper``. As usual, this isn't the application itself, it
is an object used to help us create the actual applications. One of our
conventions is to place *required* ``Attributes`` in the helper constructor.
In this case, the helper can't do anything useful unless it is provided with
a port number that the client also knows about. Rather than just picking one
and hoping it all works out, we require the port number as a parameter to the
constructor. The constructor, in turn, simply does a ``SetAttribute``
with the passed value. If you want, you can set the "Port" ``Attribute``
to another value later using ``SetAttribute``.
Similar to many other helper objects, the ``UdpEchoServerHelper`` object
has an ``Install`` method. It is the execution of this method that actually
causes the underlying echo server application to be instantiated and attached
to a node. Interestingly, the ``Install`` method takes a
``NodeContainer`` as a parameter just as the other ``Install`` methods
we have seen. This is actually what is passed to the method even though it
doesn't look so in this case. There is a C++ *implicit conversion* at
work here that takes the result of ``nodes.Get (1)`` (which returns a smart
pointer to a node object --- ``Ptr<Node>``) and uses that in a constructor
for an unnamed ``NodeContainer`` that is then passed to ``Install``.
If you are ever at a loss to find a particular method signature in C++ code
that compiles and runs just fine, look for these kinds of implicit conversions.
We now see that ``echoServer.Install`` is going to install a
``UdpEchoServerApplication`` on the node found at index number one of the
``NodeContainer`` we used to manage our nodes. ``Install`` will return
a container that holds pointers to all of the applications (one in this case
since we passed a ``NodeContainer`` containing one node) created by the
helper.
Applications require a time to "start" generating traffic and may take an
optional time to "stop". We provide both. These times are set using the
``ApplicationContainer`` methods ``Start`` and ``Stop``. These
methods take ``Time`` parameters. In this case, we use an *explicit*
C++ conversion sequence to take the C++ double 1.0 and convert it to an
|ns3| ``Time`` object using a ``Seconds`` cast. Be aware that
the conversion rules may be controlled by the model author, and C++ has its
own rules, so you can't always just assume that parameters will be happily
converted for you. The two lines,
::
serverApps.Start (Seconds (1.0));
serverApps.Stop (Seconds (10.0));
will cause the echo server application to ``Start`` (enable itself) at one
second into the simulation and to ``Stop`` (disable itself) at ten seconds
into the simulation. By virtue of the fact that we have declared a simulation
event (the application stop event) to be executed at ten seconds, the simulation
will last *at least* ten seconds.
UdpEchoClientHelper
~~~~~~~~~~~~~~~~~~~
The echo client application is set up in a manner substantially similar to
that for the server. There is an underlying ``UdpEchoClientApplication``
that is managed by an ``UdpEchoClientHelper``.
::
UdpEchoClientHelper echoClient (interfaces.GetAddress (1), 9);
echoClient.SetAttribute ("MaxPackets", UintegerValue (1));
echoClient.SetAttribute ("Interval", TimeValue (Seconds (1.)));
echoClient.SetAttribute ("PacketSize", UintegerValue (1024));
ApplicationContainer clientApps = echoClient.Install (nodes.Get (0));
clientApps.Start (Seconds (2.0));
clientApps.Stop (Seconds (10.0));
For the echo client, however, we need to set five different ``Attributes``.
The first two ``Attributes`` are set during construction of the
``UdpEchoClientHelper``. We pass parameters that are used (internally to
the helper) to set the "RemoteAddress" and "RemotePort" ``Attributes``
in accordance with our convention to make required ``Attributes`` parameters
in the helper constructors.
Recall that we used an ``Ipv4InterfaceContainer`` to keep track of the IP
addresses we assigned to our devices. The zeroth interface in the
``interfaces`` container is going to correspond to the IP address of the
zeroth node in the ``nodes`` container. The first interface in the
``interfaces`` container corresponds to the IP address of the first node
in the ``nodes`` container. So, in the first line of code (from above), we
are creating the helper and telling it to set the remote address of the client
to be the IP address assigned to the node on which the server resides. We
also tell it to arrange to send packets to port nine.
The "MaxPackets" ``Attribute`` tells the client the maximum number of
packets we allow it to send during the simulation. The "Interval"
``Attribute`` tells the client how long to wait between packets, and the
"PacketSize" ``Attribute`` tells the client how large its packet payloads
should be. With this particular combination of ``Attributes``, we are
telling the client to send one 1024-byte packet.
Just as in the case of the echo server, we tell the echo client to ``Start``
and ``Stop``, but here we start the client one second after the server is
enabled (at two seconds into the simulation).
Simulator
+++++++++
What we need to do at this point is to actually run the simulation. This is
done using the global function ``Simulator::Run``.
::
Simulator::Run ();
When we previously called the methods,
::
serverApps.Start (Seconds (1.0));
serverApps.Stop (Seconds (10.0));
...
clientApps.Start (Seconds (2.0));
clientApps.Stop (Seconds (10.0));
we actually scheduled events in the simulator at 1.0 seconds, 2.0 seconds and
two events at 10.0 seconds. When ``Simulator::Run`` is called, the system
will begin looking through the list of scheduled events and executing them.
First it will run the event at 1.0 seconds, which will enable the echo server
application (this event may, in turn, schedule many other events). Then it
will run the event scheduled for t=2.0 seconds which will start the echo client
application. Again, this event may schedule many more events. The start event
implementation in the echo client application will begin the data transfer phase
of the simulation by sending a packet to the server.
The act of sending the packet to the server will trigger a chain of events
that will be automatically scheduled behind the scenes and which will perform
the mechanics of the packet echo according to the various timing parameters
that we have set in the script.
Eventually, since we only send one packet (recall the ``MaxPackets``
``Attribute`` was set to one), the chain of events triggered by
that single client echo request will taper off and the simulation will go
idle. Once this happens, the remaining events will be the ``Stop`` events
for the server and the client. When these events are executed, there are
no further events to process and ``Simulator::Run`` returns. The simulation
is then complete.
All that remains is to clean up. This is done by calling the global function
``Simulator::Destroy``. As the helper functions (or low level
|ns3| code) executed, they arranged it so that hooks were inserted in
the simulator to destroy all of the objects that were created. You did not
have to keep track of any of these objects yourself --- all you had to do
was to call ``Simulator::Destroy`` and exit. The |ns3| system
took care of the hard part for you. The remaining lines of our first
|ns3| script, ``first.cc``, do just that:
::
Simulator::Destroy ();
return 0;
}
Building Your Script
++++++++++++++++++++
We have made it trivial to build your simple scripts. All you have to do is
to drop your script into the scratch directory and it will automatically be
built if you run Waf. Let's try it. Copy ``examples/tutorial/first.cc`` into
the ``scratch`` directory after changing back into the top level directory.
::
cd ..
cp examples/tutorial/first.cc scratch/myfirst.cc
Now build your first example script using waf:
::
./waf
You should see messages reporting that your ``myfirst`` example was built
successfully.
::
Waf: Entering directory `/home/craigdo/repos/ns-3-allinone/ns-3-dev/build'
[614/708] cxx: scratch/myfirst.cc -> build/debug/scratch/myfirst_3.o
[706/708] cxx_link: build/debug/scratch/myfirst_3.o -> build/debug/scratch/myfirst
Waf: Leaving directory `/home/craigdo/repos/ns-3-allinone/ns-3-dev/build'
'build' finished successfully (2.357s)
You can now run the example (note that if you build your program in the scratch
directory you must run it out of the scratch directory):
::
./waf --run scratch/myfirst
You should see some output:
::
Waf: Entering directory `/home/craigdo/repos/ns-3-allinone/ns-3-dev/build'
Waf: Leaving directory `/home/craigdo/repos/ns-3-allinone/ns-3-dev/build'
'build' finished successfully (0.418s)
Sent 1024 bytes to 10.1.1.2
Received 1024 bytes from 10.1.1.1
Received 1024 bytes from 10.1.1.2
Here you see that the build system checks to make sure that the file has been
built and then runs it. You see the logging component on the echo client
indicate that it has sent one 1024 byte packet to the Echo Server on
10.1.1.2. You also see the logging component on the echo server say that
it has received the 1024 bytes from 10.1.1.1. The echo server silently
echoes the packet and you see the echo client log that it has received its
packet back from the server.
Ns-3 Source Code
****************
Now that you have used some of the |ns3| helpers you may want to
have a look at some of the source code that implements that functionality.
The most recent code can be browsed on our web server at the following link:
http://code.nsnam.org/ns-3-dev. There, you will see the Mercurial
summary page for our |ns3| development tree.
At the top of the page, you will see a number of links,
::
summary | shortlog | changelog | graph | tags | files
Go ahead and select the ``files`` link. This is what the top-level of
most of our *repositories* will look like:
::
drwxr-xr-x [up]
drwxr-xr-x bindings python files
drwxr-xr-x doc files
drwxr-xr-x examples files
drwxr-xr-x ns3 files
drwxr-xr-x samples files
drwxr-xr-x scratch files
drwxr-xr-x src files
drwxr-xr-x utils files
-rw-r--r-- 2009-07-01 12:47 +0200 560 .hgignore file | revisions | annotate
-rw-r--r-- 2009-07-01 12:47 +0200 1886 .hgtags file | revisions | annotate
-rw-r--r-- 2009-07-01 12:47 +0200 1276 AUTHORS file | revisions | annotate
-rw-r--r-- 2009-07-01 12:47 +0200 30961 CHANGES.html file | revisions | annotate
-rw-r--r-- 2009-07-01 12:47 +0200 17987 LICENSE file | revisions | annotate
-rw-r--r-- 2009-07-01 12:47 +0200 3742 README file | revisions | annotate
-rw-r--r-- 2009-07-01 12:47 +0200 16171 RELEASE_NOTES file | revisions | annotate
-rw-r--r-- 2009-07-01 12:47 +0200 6 VERSION file | revisions | annotate
-rwxr-xr-x 2009-07-01 12:47 +0200 88110 waf file | revisions | annotate
-rwxr-xr-x 2009-07-01 12:47 +0200 28 waf.bat file | revisions | annotate
-rw-r--r-- 2009-07-01 12:47 +0200 35395 wscript file | revisions | annotate
-rw-r--r-- 2009-07-01 12:47 +0200 7673 wutils.py file | revisions | annotate
Our example scripts are in the ``examples`` directory. If you click on ``examples``
you will see a list of files. One of the files in that directory is ``first.cc``. If
you click on ``first.cc`` you will find the code you just walked through.
The source code is mainly in the ``src`` directory. You can view source
code either by clicking on the directory name or by clicking on the ``files``
link to the right of the directory name. If you click on the ``src``
directory, you will be taken to the listing of the ``src`` subdirectories. If you
then click on the ``core`` subdirectory, you will find a list of files. The first file
you will find (as of this writing) is ``abort.h``. If you click on the
``abort.h`` link, you will be sent to the source file for ``abort.h`` which
contains useful macros for exiting scripts if abnormal conditions are detected.
The source code for the helpers we have used in this chapter can be found in the
``src/helper`` directory. Feel free to poke around in the directory tree to
get a feel for what is there and the style of |ns3| programs.

View File

@@ -0,0 +1,37 @@
.. include:: replace.txt
Conclusion
----------
Futures
*******
This document is a work in process. We hope and expect it to grow over time
to cover more and more of the nuts and bolts of |ns3|.
We hope to add the following chapters over the next few releases:
* The Callback System
* The Object System and Memory Management
* The Routing System
* Adding a New NetDevice and Channel
* Adding a New Protocol
* Working with Real Networks and Hosts
Writing manual and tutorial chapters is not something we all get excited about,
but it is very important to the project. If you are an expert in one of these
areas, please consider contributing to |ns3| by providing one of these
chapters; or any other chapter you may think is important.
Closing
*******
|ns3| is a large and complicated system. It is impossible to cover all
of the things you will need to know in one small tutorial.
We have really just scratched the surface of |ns3| in this tutorial,
but we hope to have covered enough to get you started doing useful networking
research using our favorite simulator.
-- The |ns3| development team.

216
doc/tutorial/source/conf.py Normal file
View File

@@ -0,0 +1,216 @@
# -*- coding: utf-8 -*-
#
# ns-3 documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 14 09:00:39 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# NOTE(review): 'sphinx.ext.pngmath' renders math markup as PNG images; it was
# later deprecated and removed in Sphinx 1.8 in favor of 'sphinx.ext.imgmath'
# -- confirm the Sphinx version this build targets before upgrading.
extensions = ['sphinx.ext.pngmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ns-3'
copyright = u'2010, ns-3 project'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'ns-3.10'
# The full version, including alpha/beta/rc tags.
release = 'ns-3.10'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ns-3doc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'ns-3.tex', u'ns-3 Tutorial',
u'ns-3 project', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ns-3', u'ns-3 Tutorial',
[u'ns-3 project'], 1)
]

1
doc/tutorial/source/figures Symbolic link
View File

@@ -0,0 +1 @@
../figures

View File

@@ -1,101 +1,71 @@
.. include:: replace.txt
@c ========================================================================
@c Begin document body here
@c ========================================================================
@c ========================================================================
@c PART: Getting Started
@c ========================================================================
@c The below chapters are under the major heading "Getting Started"
@c This is similar to the Latex \part command
@c
@c ========================================================================
@c Getting Started
@c ========================================================================
@node Getting Started
@chapter Getting Started
Getting Started
---------------
@menu
* Downloading ns-3::
* Building ns-3::
* Testing ns-3::
* Running a Script::
@end menu
Downloading ns-3
****************
@c ========================================================================
@c Downloading ns-3
@c ========================================================================
@node Downloading ns-3
@section Downloading ns-3
@cindex Prerequisites
@cindex Dependencies
The @command{ns-3} system as a whole is a fairly complex system and has a
The |ns3| system as a whole is a fairly complex system and has a
number of dependencies on other components. Along with the systems you will
most likely deal with every day (the GNU toolchain, Mercurial, your programmer
editor) you will need to ensure that a number of additional libraries are
present on your system before proceeding. @command{ns-3} provides a wiki
present on your system before proceeding. |ns3| provides a wiki
for your reading pleasure that includes pages with many useful hints and tips.
One such page is the ``Installation'' page,
@uref{http://www.nsnam.org/wiki/index.php/Installation}.
One such page is the "Installation" page,
http://www.nsnam.org/wiki/index.php/Installation.
The ``Prerequisites'' section of this wiki page explains which packages are
required to support common @command{ns-3} options, and also provides the
The "Prerequisites" section of this wiki page explains which packages are
required to support common |ns3| options, and also provides the
commands used to install them for common Linux variants. Cygwin users will
have to use the Cygwin installer (if you are a Cygwin user, you used it to
install Cygwin).
You may want to take this opportunity to explore the @command{ns-3} wiki
You may want to take this opportunity to explore the |ns3| wiki
a bit since there really is a wealth of information there.
@cindex Linux
@cindex Cygwin
@cindex GNU
@cindex toolchain
@cindex Mercurial
@cindex Waf
From this point forward, we are going to assume that the reader is working in
Linux or a Linux emulation environment (Linux, Cygwin, etc.) and has the GNU
toolchain installed and verified along with the prerequisites mentioned
above. We are also going to assume that you have Mercurial and Waf installed
and running on the target system as described in the ``Getting Started'' section
of the @command{ns-3} web site:
@uref{http://www.nsnam.org/getting_started.html}.
and running on the target system as described in the "Getting Started" section
of the |ns3| web site:
http://www.nsnam.org/getting_started.html.
@cindex tarball
The @command{ns-3} code is available in Mercurial repositories on the server
@uref{http://code.nsnam.org}. You can also download a tarball release at
@uref{http://www.nsnam.org/releases/}, or you can work with repositories
The |ns3| code is available in Mercurial repositories on the server
http://code.nsnam.org. You can also download a tarball release at
http://www.nsnam.org/releases/, or you can work with repositories
using Mercurial. We recommend using Mercurial unless there's a good reason
not to. See the end of this section for instructions on how to get a tarball
release.
@cindex repository
The simplest way to get started using Mercurial repositories is to use the
@code{ns-3-allinone} environment. This is a set of scripts that manages the
downloading and building of various subsystems of @command{ns-3} for you. We
recommend that you begin your @command{ns-3} adventures in this environment
``ns-3-allinone`` environment. This is a set of scripts that manages the
downloading and building of various subsystems of |ns3| for you. We
recommend that you begin your |ns3| adventures in this environment
as it can really simplify your life at this point.
@subsection Downloading ns-3 Using Mercurial
One practice is to create a directory called @code{repos} in one's home
Downloading ns-3 Using Mercurial
++++++++++++++++++++++++++++++++
One practice is to create a directory called ``repos`` in one's home
directory under which one can keep local Mercurial repositories.
@emph{Hint: we will assume you do this later in the tutorial.} If you adopt
that approach, you can get a copy of @code{ns-3-allinone} by typing the
*Hint: we will assume you do this later in the tutorial.* If you adopt
that approach, you can get a copy of ``ns-3-allinone`` by typing the
following into your Linux shell (assuming you have installed Mercurial):
@verbatim
::
cd
mkdir repos
cd repos
hg clone http://code.nsnam.org/ns-3-allinone
@end verbatim
As the hg (Mercurial) command executes, you should see something like the
following displayed,
@verbatim
::
destination directory: ns-3-allinone
requesting all changes
adding changesets
@@ -103,31 +73,29 @@ following displayed,
adding file changes
added 31 changesets with 45 changes to 7 files
7 files updated, 0 files merged, 0 files removed, 0 files unresolved
@end verbatim
After the clone command completes, you should have a directory called
@code{ns-3-allinone} under your @code{~/repos} directory, the contents of which should
``ns-3-allinone`` under your ``~/repos`` directory, the contents of which should
look something like the following:
@verbatim
::
build.py* constants.py dist.py* download.py* README util.py
@end verbatim
Notice that you really just downloaded some Python scripts. The next step
will be to use those scripts to download and build the @command{ns-3}
will be to use those scripts to download and build the |ns3|
distribution of your choice.
@cindex repository
If you go to the following link: @uref{http://code.nsnam.org/},
If you go to the following link: http://code.nsnam.org/,
you will see a number of repositories. Many are the private repositories of
the @command{ns-3} development team. The repositories of interest to you will
be prefixed with ``ns-3''. Official releases of @command{ns-3} will be
numbered as @code{ns-3.<release>.<hotfix>}. For example, a second hotfix to a
still hypothetical release nine of @command{ns-3} would be numbered as
@code{ns-3.9.2}.
the |ns3| development team. The repositories of interest to you will
be prefixed with "ns-3". Official releases of |ns3| will be
numbered as ``ns-3.<release>.<hotfix>``. For example, a second hotfix to a
still hypothetical release nine of |ns3| would be numbered as
``ns-3.9.2``.
The current development snapshot (unreleased) of @command{ns-3} may be found
at @uref{http://code.nsnam.org/ns-3-dev/}. The
The current development snapshot (unreleased) of |ns3| may be found
at http://code.nsnam.org/ns-3-dev/. The
developers attempt to keep these repositories in consistent, working states but
they are in a development area with unreleased code present, so you may want
to consider staying with an official release if you do not need newly-
@@ -135,38 +103,40 @@ introduced features.
Since the release numbers are going to be changing, I will stick with
the more constant ns-3-dev here in the tutorial, but you can replace the
string ``ns-3-dev'' with your choice of release (e.g., ns-3.6) in the
string "ns-3-dev" with your choice of release (e.g., ns-3.10) in the
text below. You can find the latest version of the
code either by inspection of the repository list or by going to the
@uref{http://www.nsnam.org/getting_started.html,,``Getting Started''}
`"Getting Started"
<http://www.nsnam.org/getting_started.html>`_
web page and looking for the latest release identifier.
Go ahead and change into the @code{ns-3-allinone} directory you created when
you cloned that repository. We are now going to use the @code{download.py}
script to pull down the various pieces of @command{ns-3} you will be using.
Go ahead and change into the ``ns-3-allinone`` directory you created when
you cloned that repository. We are now going to use the ``download.py``
script to pull down the various pieces of |ns3| you will be using.
Go ahead and type the following into your shell (remember you can substitute
the name of your chosen release number instead of @code{ns-3-dev} -- like
@code{"ns-3.6"} if you want to work with a
the name of your chosen release number instead of ``ns-3-dev`` -- like
``"ns-3.10"`` if you want to work with a
stable release).
@verbatim
./download.py -n ns-3-dev
@end verbatim
::
Note that the default for the @code{-n} option is @code{ns-3-dev} and so the
./download.py -n ns-3-dev
Note that the default for the ``-n`` option is ``ns-3-dev`` and so the
above is actually redundant. We provide this example to illustrate how to
specify alternate repositories. In order to download @code{ns-3-dev} you
specify alternate repositories. In order to download ``ns-3-dev`` you
can actually use the defaults and simply type,
@verbatim
::
./download.py
@end verbatim
As the hg (Mercurial) command executes, you should see something like the
following,
@verbatim
::
#
# Get NS-3
#
@@ -179,9 +149,8 @@ following,
adding file changes
added 4634 changesets with 16500 changes to 1762 files
870 files updated, 0 files merged, 0 files removed, 0 files unresolved
@end verbatim
This is output by the download script as it fetches the actual @code{ns-3}
This is output by the download script as it fetches the actual ``ns-3``
code from the repository.
The download script is smart enough to know that on some platforms various
@@ -189,7 +158,8 @@ pieces of ns-3 are not supported. On your platform you may not see some
of these pieces come down. However, on most platforms, the process should
continue with something like,
@verbatim
::
#
# Get PyBindGen
#
@@ -198,14 +168,14 @@ continue with something like,
Trying to fetch pybindgen; this will fail if no network connection is available. Hit Ctrl-C to skip.
=> bzr checkout -rrevno:640 https://launchpad.net/pybindgen pybindgen
Fetch was successful.
@end verbatim
This was the download script getting the Python bindings generator for you.
Note that you will need bazaar (bzr), a version control system, to download
PyBindGen. Next you should see (modulo platform variations) something along
the lines of,
@verbatim
::
#
# Get NSC
#
@@ -219,114 +189,106 @@ the lines of,
adding file changes
added 273 changesets with 17565 changes to 15175 files
10622 files updated, 0 files merged, 0 files removed, 0 files unresolved
@end verbatim
This part of the process is the script downloading the Network Simulation
Cradle for you. Note that NSC is not supported on OSX or Cygwin and works
best with gcc-3.4 or gcc-4.2 or greater series.
After the download.py script completes, you should have several new directories
under @code{~/repos/ns-3-allinone}:
under ``~/repos/ns-3-allinone``:
::
@verbatim
build.py* constants.pyc download.py* nsc/ README util.pyc
constants.py dist.py* ns-3-dev/ pybindgen/ util.py
@end verbatim
Go ahead and change into @code{ns-3-dev} under your @code{~/repos/ns-3-allinone}
Go ahead and change into ``ns-3-dev`` under your ``~/repos/ns-3-allinone``
directory. You should see something like the following there:
@verbatim
::
AUTHORS examples/ RELEASE_NOTES utils/ wscript
bindings/ LICENSE samples/ VERSION wutils.py
CHANGES.html ns3/ scratch/ waf*
doc/ README src/ waf.bat*
@end verbatim
You are now ready to build the @command{ns-3} distribution.
You are now ready to build the |ns3| distribution.
@subsection Downloading ns-3 Using a Tarball
The process for downloading @command{ns-3} via tarball is simpler than the
Downloading ns-3 Using a Tarball
++++++++++++++++++++++++++++++++
The process for downloading |ns3| via tarball is simpler than the
Mercurial process since all of the pieces are pre-packaged for you. You just
have to pick a release, download it and decompress it.
As mentioned above, one practice is to create a directory called @code{repos}
As mentioned above, one practice is to create a directory called ``repos``
in one's home directory under which one can keep local Mercurial repositories.
One could also keep a @code{tarballs} directory. @emph{Hint: the tutorial
will assume you downloaded into a @code{repos} directory, so remember the
placekeeper.} If you adopt the @code{tarballs} directory approach, you can
One could also keep a ``tarballs`` directory. *Hint: the tutorial
will assume you downloaded into a ``repos`` directory, so remember the
placekeeper.* If you adopt the ``tarballs`` directory approach, you can
get a copy of a release by typing the following into your Linux shell
(substitute the appropriate version numbers, of course):
@verbatim
::
cd
mkdir tarballs
cd tarballs
wget http://www.nsnam.org/releases/ns-allinone-3.6.tar.bz2
tar xjf ns-allinone-3.6.tar.bz2
@end verbatim
wget http://www.nsnam.org/releases/ns-allinone-3.10.tar.bz2
tar xjf ns-allinone-3.10.tar.bz2
If you change into the directory @code{ns-allinone-3.6} you should see a
If you change into the directory ``ns-allinone-3.10`` you should see a
number of files:
@verbatim
build.py* ns-3.6/ pybindgen-0.12.0.700/ util.py
constants.py nsc-0.5.1/ README
@end verbatim
::
You are now ready to build the @command{ns-3} distribution.
build.py ns-3.10/ pybindgen-0.15.0/ util.py
constants.py nsc-0.5.2/ README
@c ========================================================================
@c Building ns-3
@c ========================================================================
You are now ready to build the |ns3| distribution.
@node Building ns-3
@section Building ns-3
Building ns-3
*************
@subsection Building with build.py
@cindex building with build.py
The first time you build the @command{ns-3} project you should build using the
@command{allinone} environment. This will get the project configured for you
Building with build.py
++++++++++++++++++++++
The first time you build the |ns3| project you should build using the
``allinone`` environment. This will get the project configured for you
in the most commonly useful way.
Change into the directory you created in the download section above. If you
downloaded using Mercurial you should have a directory called
@code{ns-3-allinone} under your @code{~/repos} directory. If you downloaded
``ns-3-allinone`` under your ``~/repos`` directory. If you downloaded
using a tarball you should have a directory called something like
@code{ns-allinone-3.6} under your @code{~/tarballs} directory. Take a deep
``ns-allinone-3.10`` under your ``~/tarballs`` directory. Take a deep
breath and type the following:
@verbatim
::
./build.py
@end verbatim
You will see lots of typical compiler output messages displayed as the build
script builds the various pieces you downloaded. Eventually you should see the
following magic words:
@verbatim
::
Waf: Leaving directory `/home/craigdo/repos/ns-3-allinone/ns-3-dev/build'
'build' finished successfully (2m30.586s)
@end verbatim
Once the project has built you can say goodbye to your old friends, the
@code{ns-3-allinone} scripts. You got what you needed from them and will now
interact directly with Waf and we do it in the @code{ns-3-dev} directory,
not in the @code{ns-3-allinone} directory. Go ahead and change into the
@code{ns-3-dev} directory (or the directory for the appropriate release you
``ns-3-allinone`` scripts. You got what you needed from them and will now
interact directly with Waf and we do it in the ``ns-3-dev`` directory,
not in the ``ns-3-allinone`` directory. Go ahead and change into the
``ns-3-dev`` directory (or the directory for the appropriate release you
downloaded).
@verbatim
cd ns-3-dev
@end verbatim
::
@subsection Building with Waf
@cindex building with Waf
@cindex configuring Waf
@cindex building debug version with Waf
@cindex compiling with Waf
@cindex unit tests with Waf
We use Waf to configure and build the @command{ns-3} project. It's not
cd ns-3-dev
Building with Waf
+++++++++++++++++
We use Waf to configure and build the |ns3| project. It's not
strictly required at this point, but it will be valuable to take a slight
detour and look at how to make changes to the configuration of the project.
Probably the most useful configuration change you can make will be to
@@ -335,15 +297,16 @@ your project to build the debug version. Let's tell the project to do
make an optimized build. To explain to Waf that it should do optimized
builds you will need to execute the following command,
@verbatim
::
./waf -d optimized configure
@end verbatim
This runs Waf out of the local directory (which is provided as a convenience
for you). As the build system checks for various dependencies you should see
output that looks similar to the following,
@verbatim
::
Checking for program g++ : ok /usr/bin/g++
Checking for program cpp : ok /usr/bin/cpp
Checking for program ar : ok /usr/bin/ar
@@ -401,79 +364,74 @@ output that looks similar to the following,
Build examples and samples : enabled
Static build : not enabled (option --enable-static not selected)
'configure' finished successfully (2.870s)
@end verbatim
Note the last part of the above output. Some ns-3 options are not enabled by
default or require support from the underlying system to work properly.
For instance, to enable XmlTo, the library libxml-2.0 must be found on the
system. If this library were not found, the corresponding @command{ns-3} feature
system. If this library were not found, the corresponding |ns3| feature
would not be enabled and a message would be displayed. Note further that there is
a feature to use the program @code{sudo} to set the suid bit of certain programs.
This is not enabled by default and so this feature is reported as ``not enabled.''
a feature to use the program ``sudo`` to set the suid bit of certain programs.
This is not enabled by default and so this feature is reported as "not enabled."
Now go ahead and switch back to the debug build.
@verbatim
::
./waf -d debug configure
@end verbatim
The build system is now configured and you can build the debug versions of
the @command{ns-3} programs by simply typing,
the |ns3| programs by simply typing,
::
@verbatim
./waf
@end verbatim
Some waf commands are meaningful during the build phase and some commands are valid
in the configuration phase. For example, if you wanted to use the emulation
features of @command{ns-3} you might want to enable setting the suid bit using
features of |ns3| you might want to enable setting the suid bit using
sudo as described above. This turns out to be a configuration-time command, and so
you could reconfigure using the following command
@verbatim
::
./waf -d debug --enable-sudo configure
@end verbatim
If you do this, waf will have run sudo to change the socket creator programs of the
emulation code to run as root. There are many other configure- and build-time options
available in waf. To explore these options, type:
@verbatim
::
./waf --help
@end verbatim
We'll use some of the testing-related commands in the next section.
Okay, sorry, I made you build the @command{ns-3} part of the system twice,
Okay, sorry, I made you build the |ns3| part of the system twice,
but now you know how to change the configuration and build optimized code.
@c ========================================================================
@c Testing ns-3
@c ========================================================================
Testing ns-3
************
@node Testing ns-3
@section Testing ns-3
You can run the unit tests of the |ns3| distribution by running the
"./test.py -c core" script,
@cindex unit tests
You can run the unit tests of the @command{ns-3} distribution by running the
``./test.py -c core'' script,
::
@verbatim
./test.py -c core
@end verbatim
These tests are run in parallel by waf. You should eventually
see a report saying that,
@verbatim
::
47 of 47 tests passed (47 passed, 0 failed, 0 crashed, 0 valgrind errors)
@end verbatim
This is the important message.
You will also see output from the test runner and the output will actually look something like,
@verbatim
::
Waf: Entering directory `/home/craigdo/repos/ns-3-allinone/ns-3-dev/build'
Waf: Leaving directory `/home/craigdo/repos/ns-3-allinone/ns-3-dev/build'
'build' finished successfully (1.799s)
@@ -494,66 +452,61 @@ You will also see output from the test runner and the output will actually look
PASS: TestSuite object
PASS: TestSuite random-number-generators
47 of 47 tests passed (47 passed, 0 failed, 0 crashed, 0 valgrind errors)
@end verbatim
This command is typically run by @code{users} to quickly verify that an
@command{ns-3} distribution has built correctly.
This command is typically run by ``users`` to quickly verify that an
|ns3| distribution has built correctly.
@c ========================================================================
@c Running a Script
@c ========================================================================
@node Running a Script
@section Running a Script
@cindex running a script with Waf
Running a Script
****************
We typically run scripts under the control of Waf. This allows the build
system to ensure that the shared library paths are set correctly and that
the libraries are available at run time. To run a program, simply use the
@code{--run} option in Waf. Let's run the @command{ns-3} equivalent of the
``--run`` option in Waf. Let's run the |ns3| equivalent of the
ubiquitous hello world program by typing the following:
@verbatim
::
./waf --run hello-simulator
@end verbatim
Waf first checks to make sure that the program is built correctly and
executes a build if required. Waf then executes the program, which
produces the following output.
@verbatim
::
Hello Simulator
@end verbatim
@emph{Congratulations. You are now an ns-3 user.}
*Congratulations. You are now an ns-3 user.*
@emph{What do I do if I don't see the output?}
*What do I do if I don't see the output?*
If you don't see @code{waf} messages indicating that the build was
completed successfully, but do not see the ``Hello Simulator'' output,
chances are that you have switched your build mode to ``optimized'' in
the ``Building with Waf'' section, but have missed the change back to
``debug'' mode. All of the console output used in this tutorial uses a
special @command{ns-3} logging component that is useful for printing
If you don't see ``waf`` messages indicating that the build was
completed successfully, but do not see the "Hello Simulator" output,
chances are that you have switched your build mode to "optimized" in
the "Building with Waf" section, but have missed the change back to
"debug" mode. All of the console output used in this tutorial uses a
special |ns3| logging component that is useful for printing
user messages to the console. Output from this component is
automatically disabled when you compile optimized code -- it is
``optimized out.'' If you don't see the ``Hello Simulator'' output,
"optimized out." If you don't see the "Hello Simulator" output,
type the following,
@verbatim
./waf -d debug configure
@end verbatim
::
to tell @code{waf} to build the debug versions of the @command{ns-3}
./waf -d debug configure
to tell ``waf`` to build the debug versions of the |ns3|
programs. You must still build the actual debug version of the code by
typing,
@verbatim
./waf
@end verbatim
::
Now, if you run the @code{hello-simulator} program, you should see the
./waf
Now, if you run the ``hello-simulator`` program, you should see the
expected output.
If you want to run programs under another tool such as gdb or valgrind,
see this @uref{http://www.nsnam.org/wiki/index.php/User_FAQ#How_to_run_NS-3_programs_under_another_tool,,wiki entry}.
see this `wiki entry
<http://www.nsnam.org/wiki/index.php/User_FAQ#How_to_run_NS-3_programs_under_another_tool>`_.

View File

@@ -0,0 +1,18 @@
.. only:: html or latex
Welcome to ns-3's tutorial!
================================
Contents:
.. toctree::
:maxdepth: 2
introduction
resources
getting-started
conceptual-overview
tweaking
building-topologies
tracing
conclusion

View File

@@ -0,0 +1,125 @@
.. include:: replace.txt
Introduction
------------
The |ns3| simulator is a discrete-event network simulator targeted
primarily for research and educational use. The
`ns-3 project
<http://www.nsnam.org>`_,
started in 2006, is an open-source project developing |ns3|.
Primary documentation for the |ns3| project is available in four
forms:
* `ns-3 Doxygen/Manual
<http://www.nsnam.org/doxygen/index.html>`_:
Documentation of the public APIs of the simulator
* Tutorial (this document)
* `Reference Manual
<http://www.nsnam.org/docs/manual.html>`_: Reference Manual
* `ns-3 wiki
<http://www.nsnam.org/wiki/index.php>`_
The purpose of this tutorial is to introduce new |ns3| users to the
system in a structured way. It is sometimes difficult for new users to
glean essential information from detailed manuals and to convert this
information into working simulations. In this tutorial, we will build
several example simulations, introducing and explaining key concepts and
features as we go.
As the tutorial unfolds, we will introduce the full |ns3| documentation
and provide pointers to source code for those interested in delving deeper
into the workings of the system.
A few key points are worth noting at the onset:
* Ns-3 is not an extension of `ns-2
<http://www.isi.edu/nsnam/ns>`_;
it is a new simulator. The two simulators are both written in C++ but
|ns3| is a new simulator that does not support the ns-2 APIs. Some
models from ns-2 have already been ported from ns-2 to |ns3|. The
project will continue to maintain ns-2 while |ns3| is being built,
and will study transition and integration mechanisms.
* |ns3| is open-source, and the project strives to maintain an
open environment for researchers to contribute and share their software.
For ns-2 Users
**************
For those familiar with ns-2, the most visible outward change when moving to
|ns3| is the choice of scripting language. Ns-2 is
scripted in OTcl and results of simulations can be visualized using the
Network Animator nam. It is not possible to run a simulation
in ns-2 purely from C++ (i.e., as a main() program without any OTcl).
Moreover, some components of ns-2 are written in C++ and others in OTcl.
In |ns3|, the simulator is written entirely in C++, with optional
Python bindings. Simulation scripts can therefore be written in C++
or in Python. The results of some simulations can be visualized by
nam, but new animators are under development. Since |ns3|
generates pcap packet trace files, other utilities can be used to
analyze traces as well.
In this tutorial, we will first concentrate on scripting
directly in C++ and interpreting results via trace files.
But there are similarities as well (both, for example, are based on C++
objects, and some code from ns-2 has already been ported to |ns3|).
We will try to highlight differences between ns-2 and |ns3|
as we proceed in this tutorial.
A question that we often hear is "Should I still use ns-2 or move to
|ns3|?" The answer is that it depends. |ns3| does not have
all of the models that ns-2 currently has, but on the other hand, |ns3|
does have new capabilities (such as handling multiple interfaces on nodes
correctly, use of IP addressing and more alignment with Internet
protocols and designs, more detailed 802.11 models, etc.). ns-2
models can usually be ported to |ns3| (a porting guide is under
development). There is active development on multiple fronts for
|ns3|. The |ns3| developers believe (and certain early users
have proven) that |ns3| is ready for active use, and should be an
attractive alternative for users looking to start new simulation projects.
Contributing
************
|ns3| is a research and educational simulator, by and for the
research community. It will rely on the ongoing contributions of the
community to develop new models, debug or maintain existing ones, and share
results. There are a few policies that we hope will encourage people to
contribute to |ns3| like they have for ns-2:
* Open source licensing based on GNU GPLv2 compatibility;
* `wiki
<http://www.nsnam.org/wiki/index.php>`_;
* `Contributed Code
<http://www.nsnam.org/wiki/index.php/Contributed_Code>`_ page, similar to ns-2's popular Contributed Code
`page
<http://nsnam.isi.edu/nsnam/index.php/Contributed_Code>`_;
* ``src/contrib`` directory (we will host your contributed code);
* Open `bug tracker
<http://www.nsnam.org/bugzilla>`_;
* |ns3| developers will gladly help potential contributors to get
started with the simulator (please contact `one of us
<http://www.nsnam.org/people.html>`_).
We realize that if you are reading this document, contributing back to
the project is probably not your foremost concern at this point, but
we want you to be aware that contributing is in the spirit of the project and
that even the act of dropping us a note about your early experience
with |ns3| (e.g. "this tutorial section was not clear..."),
reports of stale documentation, etc. are much appreciated.
Tutorial Organization
*********************
The tutorial assumes that new users might initially follow a path such as the
following:
* Try to download and build a copy;
* Try to run a few sample programs;
* Look at simulation output, and try to adjust it.
As a result, we have tried to organize the tutorial along the above
broad sequences of events.

View File

@@ -0,0 +1,3 @@
.. |ns3| replace:: *ns-3*
.. |ns2| replace:: *ns-2*

View File

@@ -0,0 +1,144 @@
.. include:: replace.txt
Resources
---------
The Web
*******
There are several important resources of which any |ns3| user must be
aware. The main web site is located at http://www.nsnam.org and
provides access to basic information about the |ns3| system. Detailed
documentation is available through the main web site at
http://www.nsnam.org/documents.html. You can also find documents
relating to the system architecture from this page.
There is a Wiki that complements the main |ns3| web site which you will
find at http://www.nsnam.org/wiki/. You will find user and developer
FAQs there, as well as troubleshooting guides, third-party contributed code,
papers, etc.
The source code may be found and browsed at http://code.nsnam.org/.
There you will find the current development tree in the repository named
``ns-3-dev``. Past releases and experimental repositories of the core
developers may also be found there.
Mercurial
*********
Complex software systems need some way to manage the organization and
changes to the underlying code and documentation. There are many ways to
perform this feat, and you may have heard of some of the systems that are
currently used to do this. The Concurrent Version System (CVS) is probably
the most well known.
The |ns3| project uses Mercurial as its source code management system.
Although you do not need to know much about Mercurial in order to complete
this tutorial, we recommend becoming familiar with Mercurial and using it
to access the source code. Mercurial has a web site at
http://www.selenic.com/mercurial/,
from which you can get binary or source releases of this Software
Configuration Management (SCM) system. Selenic (the developer of Mercurial)
also provides a tutorial at
http://www.selenic.com/mercurial/wiki/index.cgi/Tutorial/,
and a QuickStart guide at
http://www.selenic.com/mercurial/wiki/index.cgi/QuickStart/.
You can also find vital information about using Mercurial and |ns3|
on the main |ns3| web site.
Waf
***
Once you have source code downloaded to your local system, you will need
to compile that source to produce usable programs. Just as in the case of
source code management, there are many tools available to perform this
function. Probably the most well known of these tools is ``make``. Along
with being the most well known, ``make`` is probably the most difficult to
use in a very large and highly configurable system. Because of this, many
alternatives have been developed. Recently these systems have been developed
using the Python language.
The build system Waf is used on the |ns3| project. It is one
of the new generation of Python-based build systems. You will not need to
understand any Python to build the existing |ns3| system, and will
only have to understand a tiny and intuitively obvious subset of Python in
order to extend the system in most cases.
For those interested in the gory details of Waf, the main web site can be
found at http://code.google.com/p/waf/.
Development Environment
***********************
As mentioned above, scripting in |ns3| is done in C++ or Python.
As of ns-3.2, most of the |ns3| API is available in Python, but the
models are written in C++ in either case. A working
knowledge of C++ and object-oriented concepts is assumed in this document.
We will take some time to review some of the more advanced concepts or
possibly unfamiliar language features, idioms and design patterns as they
appear. We don't want this tutorial to devolve into a C++ tutorial, though,
so we do expect a basic command of the language. There are an almost
unimaginable number of sources of information on C++ available on the web or
in print.
If you are new to C++, you may want to find a tutorial- or cookbook-based
book or web site and work through at least the basic features of the language
before proceeding. For instance, see `this tutorial
<http://www.cplusplus.com/doc/tutorial/>`_.
The |ns3| system uses several components of the GNU "toolchain"
for development. A
software toolchain is the set of programming tools available in the given
environment. For a quick review of what is included in the GNU toolchain see,
http://en.wikipedia.org/wiki/GNU_toolchain. |ns3| uses gcc,
GNU binutils, and gdb. However, we do not use the GNU build system tools,
neither make nor autotools. We use Waf for these functions.
Typically an |ns3| author will work in Linux or a Linux-like
environment. For those running under Windows, there do exist environments
which simulate the Linux environment to various degrees. The |ns3|
project supports development in the Cygwin environment for
these users. See http://www.cygwin.com/
for details on downloading (MinGW is presently not officially supported,
although some of the project maintainers do work with it). Cygwin provides
many of the popular Linux system commands. It can, however, sometimes be
problematic due to the way it actually does its emulation, and sometimes
interactions with other Windows software can cause problems.
If you do use Cygwin or MinGW and use Logitech products, we will save you
quite a bit of heartburn right off the bat and encourage you to take a look
at the `MinGW FAQ
<http://oldwiki.mingw.org/index.php/FAQ>`_.
Search for "Logitech" and read the FAQ entry, "why does make often
crash creating a sh.exe.stackdump file when I try to compile my source code."
Believe it or not, the ``Logitech Process Monitor`` insinuates itself into
every DLL in the system when it is running. It can cause your Cygwin or
MinGW DLLs to die in mysterious ways and often prevents debuggers from
running. Beware of Logitech software when using Cygwin.
Another alternative to Cygwin is to install a virtual machine environment
such as VMware server and install a Linux virtual machine.
Socket Programming
******************
We will assume a basic facility with the Berkeley Sockets API in the examples
used in this tutorial. If you are new to sockets, we recommend reviewing the
API and some common usage cases. For a good overview of programming TCP/IP
sockets we recommend `TCP/IP Sockets in C, Donahoo and Calvert
<http://www.elsevier.com/wps/find/bookdescription.cws_home/717656/description#description>`_.
There is an associated web site that includes source for the examples in the
book, which you can find at:
http://cs.baylor.edu/~donahoo/practical/CSockets/.
If you understand the first four chapters of the book (or for those who do
not have access to a copy of the book, the echo clients and servers shown in
the website above) you will be in good shape to understand the tutorial.
There is a similar book on Multicast Sockets,
`Multicast Sockets, Makofske and Almeroth
<http://www.elsevier.com/wps/find/bookdescription.cws_home/700736/description#description>`_,
which covers material you may need to understand if you look at the multicast
examples in the distribution.

File diff suppressed because it is too large Load Diff

View File

@@ -1,156 +0,0 @@
/* Stylesheet for the ns-3 tutorial HTML pages.
 * Layout: absolutely positioned #header / #navbar / #main inside #container;
 * navbar entries are a styled definition list (dl/dt/dd).
 */

/* --- Base typography --- */
body {
  font-family: "Trebuchet MS", "Bitstream Vera Sans", verdana, lucida, arial, helvetica, sans-serif;
  background: white;
  color: black;
  font-size: 11pt;
}

h1, h2, h3, h4, h5, h6 {
  /* color: #990000; */  /* previous heading color, kept for reference;
                            was written as "# color: ...", which is not a
                            valid CSS comment */
  color: #009999;
}

pre {
  font-size: 10pt;
  background: #e0e0e0;
  color: black;
}

/* --- Links --- */
a:link, a:visited {
  font-weight: normal;
  text-decoration: none;
  color: #0047b9;
}

a:hover {
  font-weight: normal;
  text-decoration: underline;
  color: #0047b9;
}

img {
  border: 0px;
}

/* --- Tables and FAQ-style content in the main pane --- */
#main th {
  font-size: 12pt;
  background: #b0b0b0;
}

/* Alternating table-row backgrounds */
.odd {
  font-size: 12pt;
  background: white;
}

.even {
  font-size: 12pt;
  background: #e0e0e0;
}

.answer {
  font-size: large;
  font-weight: bold;
}

.answer p {
  font-size: 12pt;
  font-weight: normal;
}

.answer ul {
  font-size: 12pt;
  font-weight: normal;
}

/* --- Page layout --- */
#container {
  position: absolute;
  width: 100%;
  height: 100%;
  top: 0px;
}

#feedback {
  color: #b0b0b0;
  font-size: 9pt;
  font-style: italic;
}

#header {
  position: absolute;
  margin: 0px;
  top: 10px;
  height:96px;
  left: 175px;
  right: 10em;
  bottom: auto;
  background: white;
  clear: both;
}

#middle {
  position: absolute;
  left: 0;
  height: auto;
  width: 100%;
}

#main {
  position: absolute;
  top: 50px;
  left: 175px;
  right: 100px;
  background: white;
  padding: 0em 0em 0em 0em;
}

/* --- Left navigation bar --- */
#navbar {
  position: absolute;
  top: 75px;
  left: 0em;
  width: 146px;
  padding: 0px;
  margin: 0px;
  font-size: 10pt;
}

#navbar a:link, #navbar a:visited {
  font-weight: normal;
  text-decoration: none;
  color: #0047b9;
}

#navbar a:hover {
  font-weight: normal;
  text-decoration: underline;
  color: #0047b9;
}

#navbar dl {
  width: 146px;
  padding: 0;
  margin: 0 0 10px 0px;
  background: #99ffff url(images/box_bottom2.gif) no-repeat bottom left;
}

#navbar dt {
  padding: 6px 10px;
  font-size: 100%;
  font-weight: bold;
  background: #009999;
  margin: 0px;
  border-bottom: 1px solid #fff;
  color: white;
  background: #009999 url(images/box_top2.gif) no-repeat top left;
}

#navbar dd {
  font-size: 100%;
  margin: 0 0 0 0px;
  padding: 6px 10px;
  color: #0047b9;
}

/* Highlight the navbar entry for the current page */
dd#selected {
  background: #99ffff url(images/arrow.gif) no-repeat;
  background-position: 4px 10px;
}

View File

@@ -1,106 +0,0 @@
\input texinfo @c -*-texinfo-*-
@c %**start of header
@setfilename ns-3.info
@settitle ns-3 tutorial
@c @setchapternewpage odd
@c %**end of header
@ifinfo
Primary documentation for the @command{ns-3} project is available in
four forms:
@itemize @bullet
@item @uref{http://www.nsnam.org/doxygen/index.html,,ns-3 Doxygen/Manual}: Documentation of the public APIs of the simulator
@item Tutorial (this document)
@item @uref{http://www.nsnam.org/docs/manual.html,,Reference Manual}: Reference Manual
@item @uref{http://www.nsnam.org/wiki/index.php,, ns-3 wiki}
@end itemize
This document is written in GNU Texinfo and is to be maintained in revision
control on the @command{ns-3} code server. Both PDF and HTML versions should
be available on the server. Changes to the document should be discussed on
the ns-developers@@isi.edu mailing list.
@end ifinfo
@copying
This is an @command{ns-3} tutorial.
Primary documentation for the @command{ns-3} project is available in
four forms:
@itemize @bullet
@item @uref{http://www.nsnam.org/doxygen/index.html,,ns-3 Doxygen/Manual}: Documentation of the public APIs of the simulator
@item Tutorial (this document)
@item @uref{http://www.nsnam.org/docs/manual.html,,Reference Manual}: Reference Manual
@item @uref{http://www.nsnam.org/wiki/index.php,, ns-3 wiki}
@end itemize
This document is written in GNU Texinfo and is to be maintained in revision
control on the @command{ns-3} code server. Both PDF and HTML versions should
be available on the server. Changes to the document should be discussed on
the ns-developers@@isi.edu mailing list.
This software is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see @uref{http://www.gnu.org/licenses/}.
@end copying
@titlepage
@title ns-3 Tutorial
@author ns-3 project
@author feedback: ns-developers@@isi.edu
@today{}
@c @page
@vskip 0pt plus 1filll
@insertcopying
@end titlepage
@c So the toc is printed at the start.
@ifnottex
@anchor{Full Table of Contents}
@end ifnottex
@contents
@ifnottex
@node Top, Introduction, Full Table of Contents
@top ns-3 Tutorial (html version)
For a pdf version of this tutorial,
see @uref{http://www.nsnam.org/docs/tutorial.pdf}.
@insertcopying
@end ifnottex
@menu
* Introduction::
* Resources::
* Getting Started::
* Conceptual Overview::
* Tweaking ns-3::
* Building Topologies::
* The Tracing System::
* Closing Remarks::
* Index::
@end menu
@include introduction.texi
@include getting-started.texi
@include conceptual-overview.texi
@include tweaking.texi
@include building-topologies.texi
@include tracing.texi
@include conclusion.texi
@node Index
@unnumbered Index
@printindex cp
@bye