Run utils/trim-trailing-whitespace.py on codebase

This commit is contained in:
Tom Henderson
2022-06-05 21:01:11 -07:00
parent 7c47d8af08
commit b6a5ee8151
1385 changed files with 16008 additions and 16008 deletions

View File

@@ -23,7 +23,7 @@ Changes from ns-3.36 to ns-3.37
### Changes to existing API
* Adds support for channel paging to the **LrWpanPhy** (only placeholder, a single modulation/band is currently supported).
### Changes to build system
### Changed behavior

View File

@@ -1 +1 @@
int main(){}
int main(){}

View File

@@ -13,4 +13,4 @@ def lp64_to_ilp32(lp64path, ilp32path):
if __name__ == "__main__":
import sys
print(sys.argv)
exit(lp64_to_ilp32(sys.argv[1], sys.argv[2]))
exit(lp64_to_ilp32(sys.argv[1], sys.argv[2]))

View File

@@ -30,7 +30,7 @@ option is -d <debug level>. Valid debug levels (which are listed in
ns3 --help) are: "debug" or "optimized", with debug being default. It is
also possible to change the flags used for compilation with (e.g.):
CXXFLAGS="-O3" ./ns3 configure. By default, ns-3 is built as debug code,
with examples and tests disabled, and with python bindings enabled.
with examples and tests disabled, and with python bindings enabled.
[ Note: Unlike some other build tools, to change the build target,
the option must be supplied during the configure stage rather than

View File

@@ -2,6 +2,6 @@ Contributing to the ns-3 project
--------------------------------
ns-3 is a free, open source software project that welcomes contributions
from users worldwide. Please see the following web page for how to
contribute:
from users worldwide. Please see the following web page for how to
contribute:
http://www.nsnam.org/developers/contributing-code/

View File

@@ -20,12 +20,12 @@ def dump_pickles(out, dirname, filename, path):
out.write(' <page url="%s">\n' % path)
out.write(' <fragment>%s.frag</fragment>\n' % data['current_page_name'])
if data['prev'] is not None:
out.write(' <prev url="%s">%s</prev>\n' %
(os.path.normpath(os.path.join(path, data['prev']['link'])),
out.write(' <prev url="%s">%s</prev>\n' %
(os.path.normpath(os.path.join(path, data['prev']['link'])),
data['prev']['title']))
if data['next'] is not None:
out.write(' <next url="%s">%s</next>\n' %
(os.path.normpath(os.path.join(path, data['next']['link'])),
out.write(' <next url="%s">%s</next>\n' %
(os.path.normpath(os.path.join(path, data['next']['link'])),
data['next']['title']))
out.write(' </page>\n')
f.close()

View File

@@ -300,7 +300,7 @@ is described in the `Doxygen website <https://www.doxygen.nl/index.html>`_.
/**
* Private method doxygen is also recommended
*/
void MyPrivateMethod (void);
void MyPrivateMethod (void);
int m_myPrivateMemberVariable; ///< Brief description of member variable
};
@@ -415,7 +415,7 @@ Miscellaneous items
- ``NS_LOG_COMPONENT_DEFINE("log-component-name");`` statements should be
placed within namespace ns3 (for module code) and after the
``using namespace ns3;``. In examples.
``using namespace ns3;``. In examples.
``NS_OBJECT_ENSURE_REGISTERED()`` should also be placed within namespace ns3.
- Const reference syntax:
@@ -455,7 +455,7 @@ Miscellaneous items
This guidance does not apply to the use of references to implement operators.
- Expose class members through access functions, rather than direct access
to a public object. The access functions are typically named Get" and
to a public object. The access functions are typically named Get" and
"Set". For example, a member m_delayTime might have accessor functions
``GetDelayTime ()`` and ``SetDelayTime ()``.

View File

@@ -35,7 +35,7 @@ the `issue tracker <https://gitlab.com/nsnam/ns-3-dev/-/issues>`_ for
something that may be similar, and if nothing is found, please report
the new issue.
If a user wants to submit proposed new code for |ns3|, please
If a user wants to submit proposed new code for |ns3|, please
submit on the `merge request tracker <https://gitlab.com/nsnam/ns-3-dev/-/merge_requests>`_.
More details for each are provided below. Similarly, users who want to
@@ -54,7 +54,7 @@ determined which module your bug is related to, if it is inside the
official distribution (mainline), then create an issue, label it with the
name of the module, and provide as much information as possible.
First, perform a cursory search on the
First, perform a cursory search on the
`open issue list <https://gitlab.com/nsnam/ns-3-dev/-/issues>`_ to see if
the problem has already been reported. If it has and the issue is still
open, add a comment to the existing issue instead of opening a new one.
@@ -89,12 +89,12 @@ Here are some additional guidelines:
block formatting.
4. Describe the behavior you observed after following the steps and point out
what exactly is the problem with that behavior. Explain which behavior you
what exactly is the problem with that behavior. Explain which behavior you
expected to see instead and why.
5. If you're reporting that ns-3 crashed, include a crash report with a
stack trace from the operating system. On macOS, the crash report will
be available in Console.app under
be available in Console.app under
`"Diagnostic and usage information" > "User diagnostic reports"`. Include
the crash report in the issue in a code block, or a file attachment.
@@ -117,7 +117,7 @@ If you are new to public Git repositories, you may want to read
pull requests, the GitLab.com merge requests are very similar.
In brief, you will want to fork ns-3-dev into your own namespace (i.e.,
fork the repository into your personal GitLab.com account, via the user
fork the repository into your personal GitLab.com account, via the user
interface on GitLab.com), clone your fork of ns-3-dev to your local machine,
create a new feature branch that is based on the
current tip of ns-3-dev, push that new feature branch up to your fork,
@@ -129,7 +129,7 @@ Remember the documentation
==========================
If you add or change API to the simulator, please include `Doxygen <https://www.doxygen.nl>`_ changes as appropriate. Please scan the module documentation
(written in `Restructured Text <https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html>`_ in the `docs` directory) to check if an update is needed to align with the patch proposal.
(written in `Restructured Text <https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html>`_ in the `docs` directory) to check if an update is needed to align with the patch proposal.
Commit message format
=====================
@@ -155,11 +155,11 @@ more detail, you can add subsequent message lines below the first one, separated
Date: Wed May 19 16:34:01 2021 +0200
lte: Assign default values
Fixes crashing optimized/release builds with 'may be used uninitialized' error
3. The first line of the commit message should include the relevant module name or names, separated by a colon. Example:
3. The first line of the commit message should include the relevant module name or names, separated by a colon. Example:
.. code-block:: text
@@ -186,7 +186,7 @@ more detail, you can add subsequent message lines below the first one, separated
5. If the commit is from a merge request, that may also be added in a similar
way the same by saying 'merges !NNN'. The exclamation point differentiates
merge requests from issues (which use the number sign '#') on GitLab.com.
merge requests from issues (which use the number sign '#') on GitLab.com.
Example:
.. code-block:: text

View File

@@ -13,7 +13,7 @@ Submitting externally maintained code
This chapter mainly pertains to code that will be maintained in external
repositories (such as a personal or university research group repository,
possibly hosted on GitHub or GitLab.com),
but for which the contributor wishes to keep consistent and compatible
but for which the contributor wishes to keep consistent and compatible
with the |ns3| mainline.
If the contributor does not want to maintain the external code
@@ -226,7 +226,7 @@ Links to related projects
*************************
Some projects choose to maintain their own version of |ns3|, or maintain models
outside of the main tree of the code. In this case, the way to find out
about these is to look at the Related Projects page on the |ns3|
about these is to look at the Related Projects page on the |ns3|
`wiki <https://www.nsnam.org/wiki/Related_Projects>`_.
If you know of externally maintained code that the project does not know about,
@@ -241,15 +241,15 @@ app store so that there is a single place to discover them.
Unmaintained, contributed code
******************************
Anyone who wants to provide access to code that has been developed to
Anyone who wants to provide access to code that has been developed to
extend |ns3| but that will not be further maintained may list its
availability on our website. Furthermore, we can provide, in some
circumstances, storage of a compressed archive on a web server if needed.
This type of code contribution will
This type of code contribution will
not be listed in the app store, although popular extensions might be
adopted by a future contributor.
We ask anyone who wishes to do this to provide at least this information
We ask anyone who wishes to do this to provide at least this information
on our wiki:
* Authors,
@@ -259,6 +259,6 @@ on our wiki:
* Status (how it is maintained)
Please also make clear in your code the applicable software license.
The contribution will be stored on our `wiki <https://www.nsnam.org/wiki/Contributed_Code>`_. If you need web server
The contribution will be stored on our `wiki <https://www.nsnam.org/wiki/Contributed_Code>`_. If you need web server
space for your archive, please contact ``webmaster@nsnam.org``.

View File

@@ -38,7 +38,7 @@ original. The Software Freedom Law Center has published
`guidance <http://www.softwarefreedom.org/resources/2007/gpl-non-gpl-collaboration.html>`_ on handling this case.
Note that it is incumbent upon the submitter to make sure that the
licensing attribution is correct and that the code is suitable for |ns3|
licensing attribution is correct and that the code is suitable for |ns3|
inclusion. Do not include code (even snippets) from sources that have
incompatible licenses. Even if the code licenses are compatible, do not
copy someone else's code without attribution.
@@ -171,7 +171,7 @@ these guidelines:
main body of the code, for attribution purposes
An example of a substantial modification that led to extension of
the authors section of the header can be found in
the authors section of the header can be found in
``src/lte/model/lte-ue-phy.h``::
* Author: Giuseppe Piro <g.piro@poliba.it>
@@ -188,14 +188,14 @@ obtaining proper attribution and avoiding long headers.
Coding style
************
We ask that all contributors make their code conform to the
We ask that all contributors make their code conform to the
coding standard which is outlined in :ref:`Coding style`.
The project maintains a Python program called ``check-style.py`` found in
the ``utils/`` directory. This is a wrapper around the ``uncrustify``
utility with configuration set to the conventions used by |ns3|, and
can be used to quickly format new source code files proposed for the
mainline.
mainline.
Additionally, the project also maintains a Python program called
``trim_trailing_whitespace.py``, found in the ``utils/`` directory.
@@ -209,21 +209,21 @@ Patches are preferably submitted as a GitLab.com
Short patches can be attached to an issue report or sent to the mailing-lists,
but a Merge Request is the best way to submit.
The UNIX diff tool is the most common way of producing a patch: a patch is
a text-based representation of the difference between two text files or two
directories with text files in them. If you have two files,
``original.cc``, and, ``modified.cc``, you can generate a patch with the
command ``diff -u original.cc modified.cc``. If you wish to produce a patch
The UNIX diff tool is the most common way of producing a patch: a patch is
a text-based representation of the difference between two text files or two
directories with text files in them. If you have two files,
``original.cc``, and, ``modified.cc``, you can generate a patch with the
command ``diff -u original.cc modified.cc``. If you wish to produce a patch
between two directories, use the command ``diff -uprN original modified``.
Make sure you specify to the reviewer where the original files came from
and make sure that the resulting patch file does not contain unexpected
Make sure you specify to the reviewer where the original files came from
and make sure that the resulting patch file does not contain unexpected
content by performing a final inspection of the patch file yourself.
Patches such as this are sufficient for simple bug fixes or very simple
Patches such as this are sufficient for simple bug fixes or very simple
small features.
Git can be used to create a patch of what parts differ from the last
Git can be used to create a patch of what parts differ from the last
committed code; try::
$ git diff
@@ -247,7 +247,7 @@ Maintainers
Maintainers are the set of people who make decisions on code and documentation
changes. Maintainers are contributors who have demonstrated, over time,
knowledge and good judgment as pertains to contributions to |ns3|, and who
have expressed willingness to maintain some code. |ns3| is like other
have expressed willingness to maintain some code. |ns3| is like other
open source projects in terms of how people gradually become maintainers
as their involvement with the project deepens; maintainers are not newcomers
to the project.
@@ -255,14 +255,14 @@ to the project.
The list of maintainers for each module is found here:
https://www.nsnam.org/developers/maintainers/
Maintainers review code (bug fixes, new models) within scope of their
Maintainers review code (bug fixes, new models) within scope of their
maintenance responsibility. A maintainer of a module should "sign off"
(or approve of) changes to an |ns3| module before it is committed to
the main codebase. Note that we typically do not formalize the signing
off using Git's sign off feature, but instead, maintainers will indicate
their approval of the merge request using GitLab.com.
Note that some modules do not have active maintainers; these types of
Note that some modules do not have active maintainers; these types of
modules typically receive less maintenance attention than those with
active maintainers (and bugs may linger unattended).

View File

@@ -22,12 +22,12 @@ End users are encouraged to report issues and bugs, or to propose
software or documentation modifications, to be reviewed by and handled by
maintainers. End users can also help by reviewing code
proposals made by others. Some end users who contribute high quality
patches or code reviews over time may ask or be invited to become
patches or code reviews over time may ask or be invited to become
a maintainer of software within their areas of expertise. Finally, some
end users wish to disseminate their |ns3| work to the community, through
the addition of new features or modules to |ns3|.
A question often asked by newcomers is "How can I contribute to |ns3|?"
A question often asked by newcomers is "How can I contribute to |ns3|?"
or "How do I get started?".
This document summarizes the various ways and processes used to contribute
to |ns3|. Contribution by users is essential for a project maintained
@@ -37,8 +37,8 @@ contributors to please become familiar with conventions and processes
used by |ns3| so as to smooth the contribution process.
The very first step is to become familiar with |ns3| by reading the tutorial,
running some example programs, and then reading some of the code and
documentation to get a feel for things. From that point, there are a
running some example programs, and then reading some of the code and
documentation to get a feel for things. From that point, there are a
number of options to contribute:
* Contributing a small documentation correction
@@ -54,7 +54,7 @@ number of options to contribute:
how to contribute |ns3| code specifically, but the overall open source
project maintains various related codebases written in several other
languages, so if you are interested in contributing outside of |ns3|
C++ code, there are several possibilities:
C++ code, there are several possibilities:
* |ns3| provides Python bindings to most of its API, and maintains an
automated API scanning process that relies on other tools. We can use
@@ -64,7 +64,7 @@ C++ code, there are several possibilities:
* The `NetAnim <https://gitlab.com/nsnam/netanim>`_ animator is written in `Qt <https://www.qt.io/>`_ and has lacked a maintainer for several years.
* If you are interested in Linux kernel hacking, or use of applications in |ns3| such as open source routing daemons, we maintain the
`Direct Code Execution project <https://www.nsnam.org/docs/dce/manual/html/index.html>`_.
* If you are familiar with `Django <https://www.djangoproject.com/>`_, we have work to do on `our app store infrastructure <https://gitlab.com/nsnam/ns-3-AppStore>`_.
* If you are familiar with `Django <https://www.djangoproject.com/>`_, we have work to do on `our app store infrastructure <https://gitlab.com/nsnam/ns-3-AppStore>`_.
* Our `website <https://gitlab.com/nsnam/nsnam-web>`_ is written in `Jekyll <https://jekyllrb.com/>`_ and is in need of more work.
The remainder of this document is organized as follows.

View File

@@ -10,12 +10,12 @@
Submitting new models
---------------------
We actively encourage submission of new features to |ns3|.
We actively encourage submission of new features to |ns3|.
Independent submissions are essential for open source projects, and if
accepted into the mainline, you will also be credited as an author of
future versions of |ns3|. However,
future versions of |ns3|. However,
please keep in mind that there is already a large burden on the |ns3|
maintainers to manage the flow of incoming contributions and maintain new
maintainers to manage the flow of incoming contributions and maintain new
and existing code. The goal of this chapter is to outline the options for
new models in |ns3|, and how you can help to minimize the burden on maintainers
and thus minimize the average time-to-merge of your code.
@@ -43,7 +43,7 @@ The options for publishing new models are:
1. Propose a Merge Request for the |ns3| mainline and follow the guidelines below.
2. Organize your code as a "contributed module" or modules and maintain them in
2. Organize your code as a "contributed module" or modules and maintain them in
your own public Git repository. A page on the App Store can be made to
advertise this to users, and other tooling can be used to ensure that the
module stays compatible with the mainline.
@@ -59,7 +59,7 @@ The options for publishing new models are:
changes so that public forks can be avoided.
4. Archive your code somewhere, or publish in a Git repository, and link to
it from the |ns3| `Contributed Code <https://www.nsnam.org/wiki/Contributed_Code>`_
it from the |ns3| `Contributed Code <https://www.nsnam.org/wiki/Contributed_Code>`_
wiki page. This option requires the least amount of work from the
contributor, but visibility of the code to new |ns3| users will likely be
reduced. To follow this route, obtain a wiki account from the webmaster,
@@ -70,13 +70,13 @@ other options are described in the next chapter (:ref:`External`).
Upstreaming new models
**********************
The term "upstreaming" refers to the process whereby new code is
The term "upstreaming" refers to the process whereby new code is
contributed back to an upstream source (the main open source project)
whereby that project takes responsibility for further enhancement and
maintenance.
Making sure that each code submission fulfills as many items
as possible in the following checklist is the best way to ensure quick
Making sure that each code submission fulfills as many items
as possible in the following checklist is the best way to ensure quick
merging of your code.
In brief, we can summarize the guidelines as follows:
@@ -87,12 +87,12 @@ In brief, we can summarize the guidelines as follows:
engineering and consistency feedback that maintainers may provide
4. Write associated documentation, tests, and example programs
If you do not have the time to follow through the process to include your
If you do not have the time to follow through the process to include your
code in the main tree, please see the next chapter (:ref:`External`)
about contributing ns-3 code that is not maintained in the main tree.
The process can take a long time when submissions are large or when
contributors skip some of these steps. Therefore, best results are found
contributors skip some of these steps. Therefore, best results are found
when the following guidelines are followed:
* Ask for review of small chunks of code, rather than large patches. Split
@@ -215,7 +215,7 @@ documentation, test code, and example scripts.
Note that you may want to go through multiple phases of code reviews, and
all of this supporting material may not be needed at the first stage (e.g.
when you want some feedback on public API header declarations only, before
when you want some feedback on public API header declarations only, before
starting the implementation). However, when it times come to merge your code,
you should be prepared to provide these things, as fits your contribution
(maintainers will provide some guidance here).

View File

@@ -6,28 +6,28 @@
* \mainpage ns-3 Documentation
*
* \section intro-sec Introduction
* <a href="http://www.nsnam.org/">ns-3</a> documentation is maintained using
* <a href="http://www.nsnam.org/">ns-3</a> documentation is maintained using
* <a href="http://www.doxygen.org">Doxygen</a>.
* Doxygen is typically used for
* Doxygen is typically used for
* API documentation, and organizes such documentation across different
* modules. This project uses Doxygen for building the definitive
* maintained API documentation. Additional ns-3 project documentation
* maintained API documentation. Additional ns-3 project documentation
* can be found at the
* <a href="http://www.nsnam.org/documentation/latest">project web site</a>.
*
* \section install-sec Building the Documentation
*
*
* Building ns-3 Doxygen requires Doxygen version 1.8 at a minimum, but version 1.9 is recommended to minimize warnings.
*
*
* Type "./ns3 docs doxygen" or "./ns3 docs doxygen-no-build" to build the
* documentation. The doc/ directory contains
* configuration for Doxygen (doxygen.conf) and main.h. The Doxygen
* build process puts html files into the doc/html/ directory, and latex
* configuration for Doxygen (doxygen.conf) and main.h. The Doxygen
* build process puts html files into the doc/html/ directory, and latex
* filex into the doc/latex/ directory.
*
*
* \section module-sec Module overview
*
* The ns-3 library is split across many modules organized under the
* The ns-3 library is split across many modules organized under the
* <b><a href="modules.html">Modules</a></b> tab.
* - aodv
* - applications
@@ -78,10 +78,10 @@
/**
* \name Macros defined by the build system.
*
*
* These have to be visible for doxygen to document them,
* so we put them here in a file only seen by doxygen, not the compiler.
*
*
* @{
*/
/**

View File

@@ -58,9 +58,9 @@ SOURCES = \
${SRC}/stats/doc/adaptor.rst \
${SRC}/stats/doc/scope-and-limitations.rst \
# list all manual figure files that need to be copied to
# list all manual figure files that need to be copied to
# $SOURCETEMP/figures. For each figure to be included in all
# documentation formats (html, latex...) the following formats are supported:
# documentation formats (html, latex...) the following formats are supported:
# 1) a single .dia file (preferred option, because it can be edited)
# 2) a single .eps file
# 3) both a .pdf and .png file
@@ -170,7 +170,7 @@ help:
copy-sources: $(SOURCES)
@rm -rf $(SOURCETEMP)
@mkdir -p $(SOURCETEMP)
@mkdir -p $(SOURCETEMP)
@mkdir -p $(FIGURES)
@cp -r $(SOURCES) $(SOURCETEMP)
@cp -r $(SOURCEFIGS) $(FIGURES)

View File

@@ -1261,7 +1261,7 @@ The typical use-cases are:
As a matter of fact, some Objects might be created when the simulation starts.
Hence, ConfigStore will not "report" their attributes if invoked earlier in the code.
A typical workflow might involve running the simulation, calling ConfigStore
A typical workflow might involve running the simulation, calling ConfigStore
at the end of the simulation (after ``Simulator::Run ()`` and before ``Simulator::Destroy ()``)
This will show all the attributes in the Objects, both those with default values, and those
with values changed during the simulation execution.
@@ -1277,7 +1277,7 @@ ConfigStore GUI
There is a GTK-based front end for the ConfigStore. This allows users to use a
GUI to access and change variables.
Some screenshots are presented here. They are the result of using GtkConfig on
Some screenshots are presented here. They are the result of using GtkConfig on
``src/lte/examples/lena-dual-stripe.cc`` after ``Simulator::Run ()``.
.. _GtkConfig:

View File

@@ -48,7 +48,7 @@ independent compilation units in the simulator) and is not generalized; if in a
later usage scenario, B needs to talk to a completely different C object, the
source code for B needs to be changed to add a ``c_instance`` and so forth. It
is easy to see that this is a brute force mechanism of communication that can
lead to programming cruft in the models.
lead to programming cruft in the models.
This is not to say that objects should not know about one another if there is a
hard dependency between them, but that often the model can be made more flexible
@@ -182,14 +182,14 @@ calling function from the called class completely. This requirement led to the
development of the *Functor*.
A functor is the outgrowth of something invented in the 1960s called a closure.
It is basically just a packaged-up function call, possibly with some state.
It is basically just a packaged-up function call, possibly with some state.
A functor has two parts, a specific part and a generic part, related through
inheritance. The calling code (the code that executes the callback) will execute
a generic overloaded ``operator ()`` of a generic functor to cause the callback
to be called. The called code (the code that wants to be called back) will have
to provide a specialized implementation of the ``operator ()`` that performs the
class-specific work that caused the close-coupling problem above.
class-specific work that caused the close-coupling problem above.
With the specific functor and its overloaded ``operator ()`` created, the called
code then gives the specialized code to the module that will execute the
@@ -201,7 +201,7 @@ functor. This means that the calling module just needs to understand the
generic functor type. It is decoupled from the calling code completely.
The information one needs to make a specific functor is the object pointer and
the pointer-to-method address.
the pointer-to-method address.
The essence of what needs to happen is that the system declares a generic part
of the functor::
@@ -213,7 +213,7 @@ of the functor::
virtual int operator() (T arg) = 0;
};
The caller defines a specific part of the functor that really is just there to
The caller defines a specific part of the functor that really is just there to
implement the specific ``operator()`` method::
template <typename T, typename ARG>
@@ -256,11 +256,11 @@ Here is an example of the usage::
}
.. note:: The previous code is not real ns-3 code. It is simplistic example
code used only to illustrate the concepts involved and to help you understand
code used only to illustrate the concepts involved and to help you understand
the system more. Do not expect to find this code anywhere in the ns-3 tree.
Notice that there are two variables defined in the class above. The m_p
variable is the object pointer and m_pmi is the variable containing the
Notice that there are two variables defined in the class above. The m_p
variable is the object pointer and m_pmi is the variable containing the
address of the function to execute.
Notice that when ``operator()`` is called, it in turn calls the method provided
@@ -271,16 +271,16 @@ as a parameter::
void LibraryFunction (Functor functor);
The code that will talk to the model would build a specific functor and pass it to ``LibraryFunction``::
The code that will talk to the model would build a specific functor and pass it to ``LibraryFunction``::
MyClass myClass;
SpecificFunctor<MyClass, int> functor (&myclass, MyClass::MyMethod);
When ``LibraryFunction`` is done, it executes the callback using the
When ``LibraryFunction`` is done, it executes the callback using the
``operator()`` on the generic functor it was passed, and in this particular
case, provides the integer argument::
void
void
LibraryFunction (Functor functor)
{
// Execute the library function
@@ -291,11 +291,11 @@ Notice that ``LibraryFunction`` is completely decoupled from the specific
type of the client. The connection is made through the Functor polymorphism.
The Callback API in |ns3| implements object-oriented callbacks using
the functor mechanism. This callback API, being based on C++ templates, is
the functor mechanism. This callback API, being based on C++ templates, is
type-safe; that is, it performs static type checks to enforce proper signature
compatibility between callers and callees. It is therefore more type-safe to
use than traditional function pointers, but the syntax may look imposing at
first. This section is designed to walk you through the Callback system so
compatibility between callers and callees. It is therefore more type-safe to
use than traditional function pointers, but the syntax may look imposing at
first. This section is designed to walk you through the Callback system so
that you can be comfortable using it in |ns3|.
Using the Callback API
@@ -340,39 +340,39 @@ a ``this`` pointer. The function template ``Callback`` is essentially the
declaration of the variable containing the pointer-to-function. In the example
above, we explicitly showed a pointer to a function that returned an integer and
took a single integer as a parameter, The ``Callback`` template function is
a generic version of that -- it is used to declare the type of a callback.
a generic version of that -- it is used to declare the type of a callback.
.. note:: Readers unfamiliar with C++ templates may consult `<http://www.cplusplus.com/doc/tutorial/templates/>`_.
The ``Callback`` template requires one mandatory argument (the return type
of the function to be assigned to this callback) and up to five optional
The ``Callback`` template requires one mandatory argument (the return type
of the function to be assigned to this callback) and up to five optional
arguments, which each specify the type of the arguments (if your particular
callback function has more than five arguments, then this can be handled
by extending the callback implementation).
So in the above example, we have a declared a callback named "one" that will
eventually hold a function pointer. The signature of the function that it will
hold must return double and must support two double arguments. If one tries
to pass a function whose signature does not match the declared callback,
hold must return double and must support two double arguments. If one tries
to pass a function whose signature does not match the declared callback,
a compilation error will occur. Also, if one tries to assign to a callback
an incompatible one, compilation will succeed but a run-time
NS_FATAL_ERROR will be raised. The sample program
an incompatible one, compilation will succeed but a run-time
NS_FATAL_ERROR will be raised. The sample program
``src/core/examples/main-callback.cc`` demonstrates both of these error cases
at the end of the ``main()`` program.
Now, we need to tie together this callback instance and the actual target function
(CbOne). Notice above that CbOne has the same function signature types as the
callback-- this is important. We can pass in any such properly-typed function
(CbOne). Notice above that CbOne has the same function signature types as the
callback-- this is important. We can pass in any such properly-typed function
to this callback. Let's look at this more closely::
static double CbOne (double a, double b) {}
^ ^ ^
| | |
| | |
| | |
Callback<double, double, double> one;
You can only bind a function to a callback if they have the matching signature.
The first template argument is the return type, and the additional template
The first template argument is the return type, and the additional template
arguments are the types of the arguments of the function signature.
Now, let's bind our callback "one" to the function that matches its signature::
@@ -381,9 +381,9 @@ Now, let's bind our callback "one" to the function that matches its signature::
one = MakeCallback (&CbOne);
This call to ``MakeCallback`` is, in essence, creating one of the specialized
functors mentioned above. The variable declared using the ``Callback``
functors mentioned above. The variable declared using the ``Callback``
template function is going to be playing the part of the generic functor. The
assignment ``one = MakeCallback (&CbOne)`` is the cast that converts the
assignment ``one = MakeCallback (&CbOne)`` is the cast that converts the
specialized functor known to the callee to a generic functor known to the caller.
Then, later in the program, if the callback is needed, it can be used as follows::
@@ -394,18 +394,18 @@ Then, later in the program, if the callback is needed, it can be used as follows
double retOne;
retOne = one (10.0, 20.0);
The check for ``IsNull()`` ensures that the callback is not null -- that there
The check for ``IsNull()`` ensures that the callback is not null -- that there
is a function to call behind this callback. Then, ``one()`` executes the
generic ``operator()`` which is really overloaded with a specific implementation
of ``operator()`` and returns the same result as if ``CbOne()`` had been
of ``operator()`` and returns the same result as if ``CbOne()`` had been
called directly.
Using the Callback API with member functions
++++++++++++++++++++++++++++++++++++++++++++
Generally, you will not be calling static functions but instead public member
functions of an object. In this case, an extra argument is needed to the
MakeCallback function, to tell the system on which object the function should be
Generally, you will not be calling static functions but instead public member
functions of an object. In this case, an extra argument is needed to the
MakeCallback function, to tell the system on which object the function should be
invoked. Consider this example, also from main-callback.cc::
class MyCb {
@@ -429,7 +429,7 @@ invoked. Consider this example, also from main-callback.cc::
}
Here, we pass an additional object pointer to the ``MakeCallback<>`` function.
Recall from the background section above that ``operator()`` will use the pointer to
Recall from the background section above that ``operator()`` will use the pointer to
member syntax when it executes on an object::
virtual int operator() (ARG arg)
@@ -446,8 +446,8 @@ does precisely that. In this case, when ``two ()`` is invoked::
int result = two (1.0);
will result in a call to the ``CbTwo`` member function (method) on the object
pointed to by ``&cb``.
will result in a call to the ``CbTwo`` member function (method) on the object
pointed to by ``&cb``.
Building Null Callbacks
+++++++++++++++++++++++
@@ -466,21 +466,21 @@ crash at runtime.
Bound Callbacks
***************
A very useful extension to the functor concept is that of a Bound Callback.
Previously it was mentioned that closures were originally function calls
packaged up for later execution. Notice that in all of the Callback
descriptions above, there is no way to package up any parameters for use
later -- when the ``Callback`` is called via ``operator()``. All of
the parameters are provided by the calling function.
A very useful extension to the functor concept is that of a Bound Callback.
Previously it was mentioned that closures were originally function calls
packaged up for later execution. Notice that in all of the Callback
descriptions above, there is no way to package up any parameters for use
later -- when the ``Callback`` is called via ``operator()``. All of
the parameters are provided by the calling function.
What if it is desired to allow the client function (the one that provides the
callback) to provide some of the parameters? `Alexandrescu <http://erdani.com/book/main.html>`_ calls the process of
allowing a client to specify one of the parameters *"binding"*. One of the
allowing a client to specify one of the parameters *"binding"*. One of the
parameters of ``operator()`` has been bound (fixed) by the client.
Some of our pcap tracing code provides a nice example of this. There is a
function that needs to be called whenever a packet is received. This function
calls an object that actually writes the packet to disk in the pcap file
calls an object that actually writes the packet to disk in the pcap file
format. The signature of one of these functions will be::
static void DefaultSink (Ptr<PcapFileWrapper> file, Ptr<const Packet> p);
@@ -492,8 +492,8 @@ the calling code is just a call that looks like::
m_promiscSnifferTrace (m_currentPkt);
What we want to do is to *bind* the ``Ptr<PcapFileWriter> file`` to the
specific callback implementation when it is created and arrange for the
What we want to do is to *bind* the ``Ptr<PcapFileWriter> file`` to the
specific callback implementation when it is created and arrange for the
``operator()`` of the Callback to provide that parameter for free.
We provide the ``MakeBoundCallback`` template function for that purpose. It
@@ -585,9 +585,9 @@ itself. The actual Callback code is quite complicated and very template-intense
a deep understanding of the code is not required. If interested, expert users may
find the following useful.
The code was originally written based on the techniques described in
The code was originally written based on the techniques described in
`<http://www.codeproject.com/cpp/TTLFunction.asp>`_.
It was subsequently rewritten to follow the architecture outlined in
It was subsequently rewritten to follow the architecture outlined in
`Modern C++ Design, Generic Programming and Design Patterns Applied, Alexandrescu, chapter 5, Generalized Functors <http://www.moderncppdesign.com/book/main.html>`_.
This code uses:
@@ -605,6 +605,6 @@ This code uses:
value semantics.
This code most notably departs from the Alexandrescu implementation in that it
does not use type lists to specify and pass around the types of the callback
arguments. Of course, it also does not use copy-destruction semantics and
does not use type lists to specify and pass around the types of the callback
arguments. Of course, it also does not use copy-destruction semantics and
relies on a reference list rather than autoPtr to hold the pointer.

View File

@@ -24,7 +24,7 @@ import sys, os
# To change default code-block format in Latex to footnotesize (8pt)
# Tip from https://stackoverflow.com/questions/9899283/how-do-you-change-the-code-example-font-size-in-latex-pdf-output-with-sphinx/9955928
# Note: sizes are \footnotesize (8pt), \small (9pt), and \normalsize (10pt).
# Note: sizes are \footnotesize (8pt), \small (9pt), and \normalsize (10pt).
#from sphinx.highlighting import PygmentsBridge
#from pygments.formatters.latex import LatexFormatter
@@ -273,7 +273,7 @@ latex_elements = {
# (double backquotes) to either \footnotesize (8pt) or \small (9pt)
#
# See above to change the font size of verbatim code blocks
#
#
# 'preamble': '',
'preamble': u'''\\usepackage{amssymb}
\\definecolor{VerbatimBorderColor}{rgb}{1,1,1}

View File

@@ -14,7 +14,7 @@ The API documentation is generated from the source code itself,
using Doxygen_, to generate cross-linked web pages.
Both of these are important: the Sphinx chapters explain the *why*
and overview of using a model; the API documentation explains the
*how* details.
*how* details.
This chapter gives a quick overview of these
tools, emphasizing preferred usage and customizations for |ns3|.
@@ -64,7 +64,7 @@ all go in the ``src/foo/doc/`` directory. The docs are actually built
by a Sphinx Makefile. For especially involved
documentation, it may be helpful to have a local ``Makefile``
in the ``src/foo/doc/`` directory to
simplify building the documentation for this module
simplify building the documentation for this module
(`Antenna`_ is an example). Setting this up
is not particularly hard, but is beyond the scope of this chapter.
@@ -87,7 +87,7 @@ To add your chapter there, edit ``doc/models/source/index.rst``
.. toctree::
:maxdepth: 1
organization
animation
antenna
@@ -127,7 +127,7 @@ your image files. Again, please keep these in alphabetical order.
Building Sphinx Docs
====================
Building the Sphinx documentation is pretty simple.
Building the Sphinx documentation is pretty simple.
To build all the Sphinx documentation:
.. sourcecode:: bash
@@ -175,12 +175,12 @@ the basics here, instead focusing on preferred usage for |ns3|.
* Start documents with these two lines:
.. sourcecode:: rest
.. include:: replace.txt
.. highlight:: cpp
The first line enables some simple replacements. For example,
typing ``|ns3|`` renders as |ns3|.
The second sets the default source code highlighting language explicitly
@@ -189,12 +189,12 @@ the basics here, instead focusing on preferred usage for |ns3|.
see below.)
* Sections:
Sphinx is pretty liberal about marking section headings. By convention,
we prefer this hierarchy:
.. sourcecode:: rest
.. heading hierarchy:
------------- Chapter
************* Section (#.#)
@@ -202,7 +202,7 @@ the basics here, instead focusing on preferred usage for |ns3|.
############# Sub-subsection
* Syntax Highlighting:
To use the default syntax highlighter, simply start a sourcecode block:
+--------------------------------------+------------------------------------+
@@ -227,7 +227,7 @@ the basics here, instead focusing on preferred usage for |ns3|.
| | |
| $ ls | $ ls |
+--------------------------------------+------------------------------------+
* Shorthand Notations:
These shorthands are defined:
@@ -390,7 +390,7 @@ script:
.. sourcecode:: bash
$ doc/doxygen.warnings.report.sh
doxygen.warnings.report.sh:
Building and running print-introspected-doxygen...done.
Rebuilding doxygen (v1.8.10) docs with full errors...done.
@@ -545,7 +545,7 @@ usage for |ns3|.
*/
or in the corresponding ``.cc`` file::
/**
* \file
* \ingroup foo
@@ -570,7 +570,7 @@ usage for |ns3|.
* \param ale The size of a pint of ale, in Imperial ounces.
*/
typedef void (* BarCallback)(const int ale);
* Copy the ``Attribute`` help strings from the ``GetTypeId`` method to use
as the brief descriptions of associated members.
@@ -586,7 +586,7 @@ usage for |ns3|.
The allowed values of the direction token are ``[in]``, ``[out]``, and
``[in,out]`` (note the explicit square brackets), as discussed in the
Doxygen docs for ``\param``.
* Document template arguments with ``\tparam``, just as you use ``\param``
for function arguments.

View File

@@ -4,7 +4,7 @@
Enabling Subsets of |ns3| Modules
---------------------------------
As with most software projects, |ns3| is ever growing larger in terms of number of modules, lines of code, and memory footprint. Users, however, may only use a few of those modules at a time. For this reason, users may want to explicitly enable only the subset of the possible |ns3| modules that they actually need for their research.
As with most software projects, |ns3| is ever growing larger in terms of number of modules, lines of code, and memory footprint. Users, however, may only use a few of those modules at a time. For this reason, users may want to explicitly enable only the subset of the possible |ns3| modules that they actually need for their research.
This chapter discusses how to enable only the |ns3| modules that you are interested in using.
@@ -23,7 +23,7 @@ If the module has a test library and test libraries are being built, then
libns3-modulename-test.so
will be built, too. Other modules that the module depends on and their test libraries will also be built.
will be built, too. Other modules that the module depends on and their test libraries will also be built.
By default, all modules are built in |ns3|. There are two ways to enable a subset of these modules:
@@ -52,7 +52,7 @@ and the following libraries should be present:
Note the ``./ns3 clean`` step is done here only to make it more obvious which module libraries were built. You don't have to do ``./ns3 clean`` in order to enable subsets of modules.
Running test.py will cause only those tests that depend on module core to be run:
.. sourcecode:: text
24 of 24 tests passed (24 passed, 0 skipped, 0 failed, 0 crashed, 0 valgrind errors)
@@ -94,16 +94,16 @@ The .ns3rc file should now be in your top level |ns3| directory, and it contains
.. sourcecode:: python
#! /usr/bin/env python
# A list of the modules that will be enabled when ns-3 is run.
# Modules that depend on the listed modules will be enabled also.
#
# All modules can be enabled by choosing 'all_modules'.
modules_enabled = ['all_modules']
# Set this equal to true if you want examples to be run.
examples_enabled = False
# Set this equal to true if you want tests to be run.
tests_enabled = False
@@ -112,16 +112,16 @@ Use your favorite editor to modify the .ns3rc file to only enable the core modul
.. sourcecode:: python
#! /usr/bin/env python
# A list of the modules that will be enabled when ns-3 is run.
# Modules that depend on the listed modules will be enabled also.
#
# All modules can be enabled by choosing 'all_modules'.
modules_enabled = ['core']
# Set this equal to true if you want examples to be run.
examples_enabled = True
# Set this equal to true if you want tests to be run.
tests_enabled = True
@@ -143,7 +143,7 @@ and the following libraries should be present:
Note the ``./ns3 clean`` step is done here only to make it more obvious which module libraries were built. You don't have to do ``./ns3 clean`` in order to enable subsets of modules.
Running test.py will cause only those tests that depend on module core to be run:
.. sourcecode:: text
24 of 24 tests passed (24 passed, 0 skipped, 0 failed, 0 crashed, 0 valgrind errors)

View File

@@ -23,7 +23,7 @@ Enable/disable examples and tests using build.py
You can use build.py to enable/disable examples and tests when |ns3| is built for the first time.
By default, examples and tests are not built in |ns3|.
By default, examples and tests are not built in |ns3|.
From the ns-3-allinone directory, you can build |ns3| without any
examples or tests simply by doing: ::
@@ -31,7 +31,7 @@ examples or tests simply by doing: ::
$ ./build.py
Running test.py in the top level |ns3| directory now will cause no examples or tests to be run:
.. sourcecode:: text
0 of 0 tests passed (0 passed, 0 skipped, 0 failed, 0 crashed, 0 valgrind errors)
@@ -41,7 +41,7 @@ If you would like build |ns3| with examples and tests, then do the following fro
$ ./build.py --enable-examples --enable-tests
Running test.py in the top level |ns3| directory will cause all of the examples and tests to be run:
.. sourcecode:: text
170 of 170 tests passed (170 passed, 0 skipped, 0 failed, 0 crashed, 0 valgrind errors)
@@ -51,7 +51,7 @@ Enable/disable examples and tests using ns3
You can use ns3 to enable/disable examples and tests once |ns3| has been built.
By default, examples and tests are not built in |ns3|.
By default, examples and tests are not built in |ns3|.
From the top level |ns3| directory, you can build |ns3| without any
examples or tests simply by doing: ::
@@ -60,7 +60,7 @@ examples or tests simply by doing: ::
$ ./ns3 build
Running test.py now will cause no examples or tests to be run:
.. sourcecode:: text
0 of 0 tests passed (0 passed, 0 skipped, 0 failed, 0 crashed, 0 valgrind errors)
@@ -71,7 +71,7 @@ If you would like build |ns3| with examples and tests, then do the following fro
$ ./ns3 build
Running test.py will cause all of the examples and tests to be run:
.. sourcecode:: text
170 of 170 tests passed (170 passed, 0 skipped, 0 failed, 0 crashed, 0 valgrind errors)
@@ -101,16 +101,16 @@ The .ns3rc file should now be in your top level |ns3| directory, and it contains
.. sourcecode:: python
#! /usr/bin/env python
# A list of the modules that will be enabled when ns-3 is run.
# Modules that depend on the listed modules will be enabled also.
#
# All modules can be enabled by choosing 'all_modules'.
modules_enabled = ['all_modules']
# Set this equal to true if you want examples to be run.
examples_enabled = False
# Set this equal to true if you want tests to be run.
tests_enabled = False
@@ -121,7 +121,7 @@ examples or tests simply by doing: ::
$ ./ns3 build
Running test.py now will cause no examples or tests to be run:
.. sourcecode:: text
0 of 0 tests passed (0 passed, 0 skipped, 0 failed, 0 crashed, 0 valgrind errors)
@@ -133,16 +133,16 @@ examples_enabled and tests_enabled file to be True:
.. sourcecode:: python
#! /usr/bin/env python
# A list of the modules that will be enabled when ns-3 is run.
# Modules that depend on the listed modules will be enabled also.
#
# All modules can be enabled by choosing 'all_modules'.
modules_enabled = ['all_modules']
# Set this equal to true if you want examples to be run.
examples_enabled = True
# Set this equal to true if you want tests to be run.
tests_enabled = True
@@ -153,7 +153,7 @@ and tests simply by doing: ::
$ ./ns3 build
Running test.py will cause all of the examples and tests to be run:
.. sourcecode:: text
170 of 170 tests passed (170 passed, 0 skipped, 0 failed, 0 crashed, 0 valgrind errors)

View File

@@ -18,19 +18,19 @@ events in sequential time order. Once the completion of an event occurs,
the simulator will move to the next event (or will exit if there are no
more events in the event queue). If, for example, an event scheduled
for simulation time "100 seconds" is executed, and the next event is not
scheduled until "200 seconds", the simulator will immediately jump from
scheduled until "200 seconds", the simulator will immediately jump from
100 seconds to 200 seconds (of simulation time) to execute the next event.
This is what is meant by "discrete-event" simulator.
To make this all happen, the simulator needs a few things:
1) a simulator object that can access an event queue where events are
1) a simulator object that can access an event queue where events are
stored and that can manage the execution of events
2) a scheduler responsible for inserting and removing events from the queue
3) a way to represent simulation time
4) the events themselves
This chapter of the manual describes these fundamental objects
This chapter of the manual describes these fundamental objects
(simulator, scheduler, time, event) and how they are used.
Event
@@ -88,7 +88,7 @@ Notes:
support more arguments, please, file a bug report.
* Readers familiar with the term 'fully-bound functors' will recognize
the Simulator::Schedule methods as a way to automatically construct such
objects.
objects.
2) Common scheduling operations
@@ -139,7 +139,7 @@ node, its 'context' is set to 0xffffffff.
To associate a context to each event, the Schedule, and ScheduleNow
methods automatically reuse the context of the currently-executing event
as the context of the event scheduled for execution later.
as the context of the event scheduled for execution later.
In some cases, most notably when simulating the transmission of a packet
from a node to another, this behavior is undesirable since the expected
@@ -190,14 +190,14 @@ context.
Available Simulator Engines
===========================
|ns3| supplies two different types of basic simulator engine to manage
|ns3| supplies two different types of basic simulator engine to manage
event execution. These are derived from the abstract base class `SimulatorImpl`:
* `DefaultSimulatorImpl` This is a classic sequential discrete event
simulator engine which uses a single thread of execution. This engine
* `DefaultSimulatorImpl` This is a classic sequential discrete event
simulator engine which uses a single thread of execution. This engine
executes events as fast as possible.
* `DistributedSimulatorImpl` This is a classic YAWNS distributed ("parallel")
simulator engine. By labeling and instantiating your model components
* `DistributedSimulatorImpl` This is a classic YAWNS distributed ("parallel")
simulator engine. By labeling and instantiating your model components
appropriately this engine will execute the model in parallel across many
compute processes, yet in a time-synchronized way, as if the model had
executed sequentially. The two advantages are to execute models faster
@@ -209,7 +209,7 @@ event execution. These are derived from the abstract base class `SimulatorImpl`
instantiation of model components. This engine attempts to execute
events as fast as possible.
You can choose which simulator engine to use by setting a global variable,
You can choose which simulator engine to use by setting a global variable,
for example::
GlobalValue::Bind ("SimulatorImplementationType",
@@ -222,28 +222,28 @@ or by using a command line argument::
$ ./ns3 run "... -SimulatorImplementationType=ns3::DistributedSimulatorImpl"
In addition to the basic simulator engines there is a general facility used
to build "adapters" which provide small behavior modifications to one of
the core `SimulatorImpl` engines. The adapter base class is
to build "adapters" which provide small behavior modifications to one of
the core `SimulatorImpl` engines. The adapter base class is
`SimulatorAdapter`, itself derived from `SimulatorImpl`. `SimulatorAdapter`
uses the `PIMPL (pointer to implementation) <https://en.cppreference.com/w/cpp/language/pimpl>`_
idiom to forward all calls to the configured base simulator engine.
idiom to forward all calls to the configured base simulator engine.
This makes it easy to provide small customizations
just by overriding the specific Simulator calls needed, and allowing
`SimulatorAdapter` to handle the rest.
just by overriding the specific Simulator calls needed, and allowing
`SimulatorAdapter` to handle the rest.
There are few places where adapters are used currently:
* `RealtimeSimulatorImpl` This adapter attempts to execute in real time
by pacing the wall clock evolution. This pacing is "best effort",
* `RealtimeSimulatorImpl` This adapter attempts to execute in real time
by pacing the wall clock evolution. This pacing is "best effort",
meaning actual event execution may not occur exactly in sync, but
close to it. This engine is normally only used with the
close to it. This engine is normally only used with the
`DefaultSimulatorImpl`, but it can be used to keep a distributed
simulation synchronized with real time. See the :doc:`realtime` chapter.
* `VisualSimulatorImpl` This adapter starts a live visualization of the
* `VisualSimulatorImpl` This adapter starts a live visualization of the
running simulation, showing the network graph and each packet traversing
the links.
* `LocalTimeSimulatorImpl` This adapter enables attaching noisy local clocks
to `Nodes`, then scheduling events with respect to the local noisy clock,
to `Nodes`, then scheduling events with respect to the local noisy clock,
instead of relative to the true simulator time.
In addition to the PIMPL idiom of `SimulatorAdapter` there is a special
@@ -253,22 +253,22 @@ per-event customization hook::
One can use this to perform any housekeeping actions before the next event
actually executes.
The distinction between a core engine and an adapter is the following: there
The distinction between a core engine and an adapter is the following: there
can only ever be one core engine running, while there can be several adapters
chained up each providing a variation on the base engine execution.
chained up each providing a variation on the base engine execution.
For example one can use noisy local clocks with the real time adapter.
A single adapter can be added on top of the `DefaultSimulatorImpl` by the same
two methods above: binding the `"SimulatorImplementationType"` global value or
using the command line argument. To chain multiple adapters a different
using the command line argument. To chain multiple adapters a different
approach must be used; see the `SimulatorAdapter::AddAdapter()`
API documentation.
The simulator engine type can be set once, but must be set before the
first call to the `Simulator()` API. In practice, since some models have
to schedule their start up events when they are constructed, this means
generally you should set the engine type before instantiating any other
generally you should set the engine type before instantiating any other
model components.
The engine type can be changed after `Simulator::Destroy()` but before
@@ -279,22 +279,22 @@ multiple runs in a single |ns3| invocation.
Time
****
|ns3| internally represents simulation times and durations as
64-bit signed integers (with the sign bit used for negative durations).
|ns3| internally represents simulation times and durations as
64-bit signed integers (with the sign bit used for negative durations).
The time values are interpreted with respect to a "resolution" unit in the
customary SI units: fs, ps, ns, us, ms, s, min, h, d, y.
The unit defines the minimum Time value.
customary SI units: fs, ps, ns, us, ms, s, min, h, d, y.
The unit defines the minimum Time value.
It can be changed once before any calls to `Simulator::Run()`.
It is not stored with the 64-bit time value itself.
Times can be constructed from all standard numeric types
(using the configured default unit)
Times can be constructed from all standard numeric types
(using the configured default unit)
or with explicit units (as in `Time MicroSeconds (uint64_t value)`).
Times can be compared, tested for sign or equality to zero, rounded to
a given unit, converted to standard numeric types in specific units.
All basic arithmetic operations are supported
Times can be compared, tested for sign or equality to zero, rounded to
a given unit, converted to standard numeric types in specific units.
All basic arithmetic operations are supported
(addition, subtraction, multiplication or division
by a scalar (numeric value)). Times can be written to/read from IO streams.
by a scalar (numeric value)). Times can be written to/read from IO streams.
In the case of writing it is easy to choose the output unit, different
from the resolution unit.
@@ -306,24 +306,24 @@ The main job of the `Scheduler` classes is to maintain the priority queue of
future events. The scheduler can be set with a global variable,
similar to choosing the `SimulatorImpl`::
GlobalValue::Bind ("SchedulerType",
GlobalValue::Bind ("SchedulerType",
StringValue ("ns3::CalendarScheduler"));
The scheduler can be changed at any time via `Simulator::SetScheduler()`.
The default scheduler is `MapScheduler` which uses a `std::map<>` to
The default scheduler is `MapScheduler` which uses a `std::map<>` to
store events in time order.
Because event distributions vary by model there is no one
best strategy for the priority queue, so |ns3| has several options with
differing tradeoffs. The example `utils/bench-simulator.cc` can be used
to test the performance for a user-supplied event distribution.
differing tradeoffs. The example `utils/bench-simulator.cc` can be used
to test the performance for a user-supplied event distribution.
For modest execution times (less than an hour, say) the choice of priority
queue is usually not significant; configuring the build type to optimized
is much more important in reducing execution times.
The available scheduler types, and a summary of their time and space
complexity on `Insert()` and `RemoveNext()`, are listed in the
following table. See the individual Scheduler API pages for details on the
following table. See the individual Scheduler API pages for details on the
complexity of the other API calls.
+-----------------------+-------------------------------------+-------------+--------------+----------+--------------+

View File

@@ -1,6 +1,6 @@
.. include:: replace.txt
.. highlight:: cpp
Making Plots using the Gnuplot Class
------------------------------------
@@ -16,7 +16,7 @@ Creating Plots Using the Gnuplot Class
The following steps must be taken in order to create a plot using |ns3|'s Gnuplot class:
#. Modify your code so that it uses the Gnuplot class and its functions.
#. Modify your code so that it uses the Gnuplot class and its functions.
#. Run your code so that it creates a gnuplot control file.
#. Call gnuplot with the name of the gnuplot control file.
#. View the graphics file that was produced in your favorite graphics viewer.
@@ -42,38 +42,38 @@ This should produce the following gnuplot control files:
.. sourcecode:: text
plot-2d.plt
plot-2d.plt
plot-2d-with-error-bars.plt
plot-3d.plt
plot-3d.plt
In order to process these gnuplot control files, do the following:
.. sourcecode:: bash
$ gnuplot plot-2d.plt
$ gnuplot plot-2d.plt
$ gnuplot plot-2d-with-error-bars.plt
$ gnuplot plot-3d.plt
$ gnuplot plot-3d.plt
This should produce the following graphics files:
.. sourcecode:: text
plot-2d.png
plot-2d.png
plot-2d-with-error-bars.png
plot-3d.png
plot-3d.png
You can view these graphics files in your favorite graphics viewer. If you have gimp installed on your machine, for example, you can do this:
.. sourcecode:: bash
$ gimp plot-2d.png
$ gimp plot-2d.png
$ gimp plot-2d-with-error-bars.png
$ gimp plot-3d.png
$ gimp plot-3d.png
An Example 2-Dimensional Plot
*****************************
The following 2-Dimensional plot
The following 2-Dimensional plot
.. _plot-2d:
@@ -114,10 +114,10 @@ was created using the following code from gnuplot-example.cc: ::
for (x = -5.0; x <= +5.0; x += 1.0)
{
// Calculate the 2-D curve
//
//
// 2
// y = x .
//
//
y = x * x;
// Add this point.
@@ -135,7 +135,7 @@ was created using the following code from gnuplot-example.cc: ::
// Close the plot file.
plotFile.close ();
An Example 2-Dimensional Plot with Error Bars
*********************************************
@@ -185,10 +185,10 @@ was created using the following code from gnuplot-example.cc: ::
for (x = -5.0; x <= +5.0; x += 1.0)
{
// Calculate the 2-D curve
//
//
// 2
// y = x .
//
//
y = x * x;
// Make the uncertainty in the x direction be constant and make
@@ -217,7 +217,7 @@ was created using the following code from gnuplot-example.cc: ::
An Example 3-Dimensional Plot
*****************************
The following 3-Dimensional plot
The following 3-Dimensional plot
.. _plot-3d:
@@ -271,10 +271,10 @@ was created using the following code from gnuplot-example.cc: ::
for (y = -5.0; y <= +5.0; y += 1.0)
{
// Calculate the 3-D surface
//
//
// 2 2
// z = x * y .
//
//
z = x * x * y * y;
// Add this point.

View File

@@ -27,7 +27,7 @@ The simplest way to get a hash value of a data buffer or string is just::
char * buffer = ...
size_t buffer_size = ...
uint32_t buffer_hash = Hash32 ( buffer, buffer_size);
std::string s;
@@ -92,7 +92,7 @@ To add the hash function ``foo``, follow the ``hash-murmur3.h``/``.cc`` pattern:
``hash-murmur3.h`` is included.
* In your own code, instantiate a ``Hasher`` object via the constructor
``Hasher (Ptr<Hash::Function::Foo> ())``
If your hash function is a single function, e.g. ``hashf``, you don't
even need to create a new class derived from HashImplementation::

View File

@@ -4,7 +4,7 @@
How to write tests
------------------
A primary goal of the ns-3 project is to help users to improve the
A primary goal of the ns-3 project is to help users to improve the
validity and credibility of their results. There are many elements
to obtaining valid models and simulations, and testing is a major
component. If you contribute models or examples to ns-3, you may
@@ -26,7 +26,7 @@ Sample TestSuite skeleton
When starting from scratch (i.e. not adding a TestCase to an existing
TestSuite), these things need to be decided up front:
* What the test suite will be called
* What the test suite will be called
* What type of test it will be (Build Verification Test, Unit Test,
System Test, or Performance Test)
* Where the test code will live (either in an existing ns-3 module or
@@ -36,13 +36,13 @@ TestSuite), these things need to be decided up front:
A program called ``utils/create-module.py`` is a good starting point.
This program can be invoked such as ``create-module.py router`` for
a hypothetical new module called ``router``. Once you do this, you
will see a ``router`` directory, and a ``test/router-test-suite.cc``
will see a ``router`` directory, and a ``test/router-test-suite.cc``
test suite. This file can be a starting point for your initial test.
This is a working test suite, although the actual tests performed are
trivial. Copy it over to your module's test directory, and do a global
substitution of "Router" in that file for something pertaining to
the model that you want to test. You can also edit things such as a
more descriptive test case name.
more descriptive test case name.
You also need to add a block into your wscript to get this test to
compile:
@@ -82,7 +82,7 @@ Test macros
***********
There are a number of macros available for checking test program
output with expected output. These macros are defined in
output with expected output. These macros are defined in
``src/core/model/test.h``.
The main set of macros that are used include the following:
@@ -93,7 +93,7 @@ The main set of macros that are used include the following:
NS_TEST_ASSERT_MSG_NE(actual, limit, msg)
NS_TEST_ASSERT_MSG_LT(actual, limit, msg)
NS_TEST_ASSERT_MSG_GT(actual, limit, msg)
NS_TEST_ASSERT_MSG_EQ_TOL(actual, limit, tol, msg)
NS_TEST_ASSERT_MSG_EQ_TOL(actual, limit, tol, msg)
The first argument ``actual`` is the value under test, the second value
``limit`` is the expected value (or the value to test against), and the

View File

@@ -17,10 +17,10 @@ in your ``main()`` program or by the use of the ``NS_LOG`` environment variable.
Logging statements are not compiled into optimized builds of |ns3|. To use
logging, one must build the (default) debug build of |ns3|.
The project makes no guarantee about whether logging output will remain
The project makes no guarantee about whether logging output will remain
the same over time. Users are cautioned against building simulation output
frameworks on top of logging code, as the output and the way the output
is enabled may change over time.
is enabled may change over time.
Overview
********
@@ -61,10 +61,10 @@ This can be made more granular by selecting individual components:
$ NS_LOG="Ipv4L3Protocol" ./ns3 run first
The output can be further tailored with prefix options.
The second way to enable logging is to use explicit statements in your
program, such as in the ``first`` tutorial program::
int
main (int argc, char *argv[])
{
@@ -107,7 +107,7 @@ in a module, spanning different compilation units, but logically grouped
together, such as the |ns3| wifi code::
WifiHelper wifiHelper;
wifiHelper.EnableLogComponents ();
wifiHelper.EnableLogComponents ();
The ``NS_LOG`` log component wildcard \`*' will enable all components.
@@ -306,10 +306,10 @@ for all log components. These are all equivalent:
.. sourcecode:: bash
$ NS_LOG="***" ... $ NS_LOG="*=all|*" ... $ NS_LOG="*=*|all" ...
$ NS_LOG="***" ... $ NS_LOG="*=all|*" ... $ NS_LOG="*=*|all" ...
$ NS_LOG="*=**" ... $ NS_LOG="*=level_all|*" ... $ NS_LOG="*=*|prefix_all" ...
$ NS_LOG="*=*|*" ...
Be advised: even the trivial ``scratch-simulator`` produces over
46K lines of output with ``NS_LOG="***"``!
@@ -329,7 +329,7 @@ Adding logging to your code is very simple:
::
namespace ns3 {
NS_LOG_COMPONENT_DEFINE ("Ipv4L3Protocol");
...
@@ -400,8 +400,8 @@ Controlling timestamp precision
*******************************
Timestamps are printed out in units of seconds. When used with the default
|ns3| time resolution of nanoseconds, the default timestamp precision is 9
digits, with fixed format, to allow for 9 digits to be consistently printed
|ns3| time resolution of nanoseconds, the default timestamp precision is 9
digits, with fixed format, to allow for 9 digits to be consistently printed
to the right of the decimal point. Example:
::
@@ -417,11 +417,11 @@ or femtoseconds, the precision is expanded accordingly; e.g. for picosecond:
When the |ns3| simulation uses a time resolution lower than microseconds,
the default C++ precision is used.
An example program at ``src\core\examples\sample-log-time-format.cc``
demonstrates how to change the timestamp formatting.
The maximum useful precision is 20 decimal digits, since Time is signed 64
The maximum useful precision is 20 decimal digits, since Time is signed 64
bits.
Logging Macros
@@ -476,7 +476,7 @@ Guidelines
* Start every class method with ``NS_LOG_FUNCTION (this << args...);``
This enables easy function call tracing.
* Except: don't log operators or explicit copy constructors,
* Except: don't log operators or explicit copy constructors,
since these will cause infinite recursion and stack overflow.
* For methods without arguments use the same form:
@@ -502,11 +502,11 @@ Guidelines
* Use ``NS_LOG_LOGIC`` to trace important logic branches within a function.
* Test that your logging changes do not break the code.
Run some example programs with all log components turned on (e.g.
* Test that your logging changes do not break the code.
Run some example programs with all log components turned on (e.g.
``NS_LOG="***"``).
* Use an explicit cast for any variable of type uint8_t or int8_t,
* Use an explicit cast for any variable of type uint8_t or int8_t,
e.g., ``NS_LOG_LOGIC ("Variable i is " << static_cast<int> (i));``.
Without the cast, the integer is interpreted as a char, and the result
will be most likely not in line with the expectations.

View File

@@ -78,7 +78,7 @@ ListErrorModel, etc, such as is done in |ns2|.
You may be thinking at this point, "Why not make IsCorrupt() a virtual method?".
That is one approach; the other is to make the public non-virtual function
indirect through a private virtual function (this in C++ is known as the non
virtual interface idiom and is adopted in the |ns3| ErrorModel class).
virtual interface idiom and is adopted in the |ns3| ErrorModel class).
Next, should this device have any dependencies on IP or other protocols? We do
not want to create dependencies on Internet protocols (the error model should be
@@ -106,7 +106,7 @@ NetDevice level.
After some thinking and looking at existing |ns2| code, here is a sample API of
a base class and first subclass that could be posted for initial review::
class ErrorModel
class ErrorModel
{
public:
ErrorModel ();
@@ -153,14 +153,14 @@ Let's say that you are ready to start implementing; you have a fairly clear
picture of what you want to build, and you may have solicited some initial
review or suggestions from the list. One way to approach the next step
(implementation) is to create scaffolding and fill in the details as the design
matures.
matures.
This section walks through many of the steps you should consider to define
scaffolding, or a non-functional skeleton of what your model will eventually
implement. It is usually good practice to not wait to get these details
integrated at the end, but instead to plumb a skeleton of your model into the
system early and then add functions later once the API and integration seems
about right.
about right.
Note that you will want to modify a few things in the below presentation for
your model since if you follow the error model verbatim, the code you produce
@@ -185,7 +185,7 @@ particularly for ease of integrating with the build system.
In the case of the error model, it is very related to the packet class, so it
makes sense to implement this in the ``src/network/`` module where |ns3|
packets are implemented.
packets are implemented.
`cmake` and `CMakeLists.txt`
++++++++++++++++++++++++++++
@@ -197,7 +197,7 @@ add your files to the ``CMakeLists.txt`` file found in each directory.
Let's start with empty files error-model.h and error-model.cc, and add this to
``src/network/CMakeLists.txt``. It is really just a matter of adding the .cc file to the
rest of the source files, and the .h file to the list of the header files.
rest of the source files, and the .h file to the list of the header files.
Now, pop up to the top level directory and type "./test.py". You
shouldn't have broken anything by this operation.
@@ -290,7 +290,7 @@ from class Object.::
#include "ns3/object.h"
namespace ns3 {
class ErrorModel : public Object
{
public:
@@ -304,7 +304,7 @@ from class Object.::
{
public:
static TypeId GetTypeId (void);
RateErrorModel ();
virtual ~RateErrorModel ();
};
@@ -317,18 +317,18 @@ included without any path prefix. Therefore, if we were implementing ErrorModel
in ``src/core/model`` directory, we could have just said "``#include "object.h"``".
But we are in ``src/network/model``, so we must include it as "``#include
"ns3/object.h"``". Note also that this goes outside the namespace declaration.
Second, each class must implement a static public member function called
``GetTypeId (void)``.
``GetTypeId (void)``.
Third, it is a good idea to implement constructors and destructors rather than
to let the compiler generate them, and to make the destructor virtual. In C++,
note also that copy assignment operator and copy constructors are auto-generated
if they are not defined, so if you do not want those, you should implement those
as private members. This aspect of C++ is discussed in Scott Meyers' Effective
C++ book. item 45.
C++ book. item 45.
Let's now look at some corresponding skeletal implementation code in the .cc
Let's now look at some corresponding skeletal implementation code in the .cc
file.::
#include "error-model.h"
@@ -347,11 +347,11 @@ file.::
}
ErrorModel::ErrorModel ()
{
{
}
ErrorModel::~ErrorModel ()
{
{
}
NS_OBJECT_ENSURE_REGISTERED (RateErrorModel);
@@ -368,7 +368,7 @@ file.::
RateErrorModel::RateErrorModel ()
{
}
}
RateErrorModel::~RateErrorModel ()
{
@@ -378,7 +378,7 @@ What is the ``GetTypeId (void)`` function? This function does a few things. It
registers a unique string into the TypeId system. It establishes the hierarchy
of objects in the attribute system (via ``SetParent``). It also declares that
certain objects can be created via the object creation framework
(``AddConstructor``).
(``AddConstructor``).
The macro ``NS_OBJECT_ENSURE_REGISTERED (classname)`` is needed also once for
every class that defines a new GetTypeId method, and it does the actual
@@ -422,12 +422,12 @@ Add Basic Support in the Class
/* point-to-point-net-device.h */
class ErrorModel;
/**
* Error model for receive packet events
*/
Ptr<ErrorModel> m_receiveErrorModel;
Add Accessor
++++++++++++
@@ -438,7 +438,7 @@ Add Accessor
{
NS_LOG_FUNCTION (this << em);
m_receiveErrorModel = em;
}
}
.AddAttribute ("ReceiveErrorModel",
"The receiver error model used to simulate packet loss",
@@ -455,26 +455,26 @@ Plumb Into the System
{
NS_LOG_FUNCTION (this << packet);
uint16_t protocol = 0;
if (m_receiveErrorModel && m_receiveErrorModel->IsCorrupt (packet) )
{
//
//
// If we have an error model and it indicates that it is time to lose a
// corrupted packet, don't forward this packet up, let it go.
//
// corrupted packet, don't forward this packet up, let it go.
//
m_dropTrace (packet);
}
}
else
{
//
//
// Hit the receive trace hook, strip off the point-to-point protocol header
// and forward this packet up the protocol stack.
//
//
m_rxTrace (packet);
ProcessHeader(packet, protocol);
m_rxCallback (this, packet, protocol, GetRemote ());
if (!m_promiscCallback.IsNull ())
{ m_promiscCallback (this, packet, protocol, GetRemote (),
{ m_promiscCallback (this, packet, protocol, GetRemote (),
GetAddress (), NetDevice::PACKET_HOST);
}
}

View File

@@ -62,7 +62,7 @@ By default ``create-module.py`` creates the module skeleton in the
.. sourcecode:: bash
$ ./utils/create-module.py contrib/new-contrib
Let's assume we've created our new module in ``src``.
``cd`` into ``src/new-module``; you will find this directory layout:
@@ -113,10 +113,10 @@ like this (before editing):
.. sourcecode:: cmake
build_lib(
LIBNAME new-module
SOURCE_FILES helper/new-module-helper.cc
LIBNAME new-module
SOURCE_FILES helper/new-module-helper.cc
model/new-module.cc
HEADER_FILES helper/new-module-helper.h
HEADER_FILES helper/new-module-helper.h
model/new-module.h
LIBRARIES_TO_LINK ${libcore}
TEST_SOURCES test/new-module-test-suite.cc
@@ -129,12 +129,12 @@ should look like:
.. sourcecode:: cmake
build_lib(
LIBNAME new-module
SOURCE_FILES helper/new-module-helper.cc
LIBNAME new-module
SOURCE_FILES helper/new-module-helper.cc
model/new-module.cc
HEADER_FILES helper/new-module-helper.h
HEADER_FILES helper/new-module-helper.h
model/new-module.h
LIBRARIES_TO_LINK
LIBRARIES_TO_LINK
${libinternet}
${libmobility}
${libaodv}
@@ -147,7 +147,7 @@ is why we removed ``core``; the ``internet`` module in turn depends on
Your module will most likely have model source files. Initial skeletons
(which will compile successfully) are created in ``model/new-module.cc``
and ``model/new-module.h``.
and ``model/new-module.h``.
If your module will have helper source files, then they will go into
the ``helper/`` directory; again, initial skeletons are created
@@ -213,7 +213,7 @@ with the following:
Note: the ``source_files`` and ``header_files`` lists are not necessary.
They are used keep the ``build_lib`` macro readable for modules with many
source files.
source files.
The objects resulting from compiling these sources will be assembled
into a link library, which will be linked to any programs relying on this
@@ -257,7 +257,7 @@ If the list of headers is short, use the following instead:
build_lib(
LIBNAME spectrum
...
HEADER_FILES
HEADER_FILES
helper/adhoc-aloha-noack-ideal-phy-helper.h
helper/spectrum-analyzer-helper.h
...
@@ -266,7 +266,7 @@ If the list of headers is short, use the following instead:
model/wifi-spectrum-value-helper.h
...
)
Headers made public in this way will be accessible to users of your model
with include statements like
@@ -274,7 +274,7 @@ with include statements like
.. sourcecode:: cpp
#include "ns3/spectrum-model.h"
Headers used strictly internally in your implementation should not
be included here. They are still accessible to your implementation by
include statements like
@@ -307,7 +307,7 @@ The ``spectrum`` model tests are specified with the following stanza:
test/tv-spectrum-transmitter-test.cc
)
See :doc:`Tests <tests>` for more information on how to write test cases.
See :doc:`Tests <tests>` for more information on how to write test cases.
Step 6 - Declare Examples
*************************
@@ -325,7 +325,7 @@ The ``spectrum`` model defines it's first example in
build_lib_example(
NAME adhoc-aloha-ideal-phy
SOURCE_FILES adhoc-aloha-ideal-phy.cc
LIBRARIES_TO_LINK
LIBRARIES_TO_LINK
${libspectrum}
${libmobility}
${libinternet}
@@ -335,7 +335,7 @@ The ``spectrum`` model defines it's first example in
Note that the variable ``libraries_to_link`` is the list of modules that
the program being created depends on; again, don't forget to include
``new-module`` in the list. It's best practice to list only the direct
``new-module`` in the list. It's best practice to list only the direct
module dependencies, and let ``CMake`` deduce the full dependency tree.
Occasionally, for clarity, you may want to split the implementation
@@ -343,13 +343,13 @@ for your example among several source files. In this case, just
include those files as additional explicit sources of the example:
.. sourcecode:: cmake
build_lib_example(
NAME new-module-example
SOURCE_FILES new-module-example.cc
LIBRARIES_TO_LINK
${libspectrum}
${libmobility}
LIBRARIES_TO_LINK
${libspectrum}
${libmobility}
${libinternet}
${libapplications}
)
@@ -380,7 +380,7 @@ two lists of C++ and Python examples:
("adhoc-aloha-ideal-phy-with-microwave-oven", "True", "True"),
("adhoc-aloha-ideal-phy-matrix-propagation-loss-model", "True", "True"),
]
# A list of Python examples to run in order to ensure that they remain
# runnable over time. Each tuple in the list contains
#
@@ -447,6 +447,6 @@ Adding Python bindings to your module is optional.
If you want to include Python bindings (needed only if you want
to write Python ns-3 programs instead of C++ ns-3 programs), you
should scan your module to generate new bindings for the Python
API (covered elsewhere in this manual), and they will be used
should scan your module to generate new bindings for the Python
API (covered elsewhere in this manual), and they will be used
if NS3_PYTHON_BINDINGS is set to ON.

View File

@@ -24,9 +24,9 @@ not strictly in accordance with either.
Object-oriented behavior
************************
C++ objects, in general, provide common object-oriented capabilities
(abstraction, encapsulation, inheritance, and polymorphism) that are part
of classic object-oriented design. |ns3| objects make use of these
C++ objects, in general, provide common object-oriented capabilities
(abstraction, encapsulation, inheritance, and polymorphism) that are part
of classic object-oriented design. |ns3| objects make use of these
properties; for instance::
class Address
@@ -54,8 +54,8 @@ These base classes are:
* class :cpp:class:`ObjectBase`
* class :cpp:class:`SimpleRefCount`
It is not required that |ns3| objects inherit from these class, but
those that do get special properties. Classes deriving from
It is not required that |ns3| objects inherit from these class, but
those that do get special properties. Classes deriving from
class :cpp:class:`Object` get the following properties.
* the |ns3| type and attribute system (see :ref:`Attributes`)
@@ -87,14 +87,14 @@ obtained to an interface, the object's reference count is incremented by calling
object is deleted.
* When the client code obtains a pointer from the object itself through object
creation, or via GetObject, it does not have to increment the reference count.
creation, or via GetObject, it does not have to increment the reference count.
* When client code obtains a pointer from another source (e.g., copying a
pointer) it must call ``Ref()`` to increment the reference count.
* All users of the object pointer must call ``Unref()`` to release the
reference.
The burden for calling :cpp:func:`Unref()` is somewhat relieved by the use of
the reference counting smart pointer class described below.
the reference counting smart pointer class described below.
Users using a low-level API who wish to explicitly allocate
non-reference-counted objects on the heap, using operator new, are responsible
@@ -108,7 +108,7 @@ provides a smart pointer class :cpp:class:`Ptr` similar to
:cpp:class:`Boost::intrusive_ptr`. This smart-pointer class assumes that the
underlying type provides a pair of ``Ref`` and ``Unref`` methods that are
expected to increment and decrement the internal refcount of the object
instance.
instance.
This implementation allows you to manipulate the smart pointer as if it was a
normal pointer: you can compare it with zero, compare it against other pointers,
@@ -157,7 +157,7 @@ The |ns3| object aggregation system is motivated in strong part by a recognition
that a common use case for |ns2| has been the use of inheritance and
polymorphism to extend protocol models. For instance, specialized versions of
TCP such as RenoTcpAgent derive from (and override functions from) class
TcpAgent.
TcpAgent.
However, two problems that have arisen in the |ns2| model are downcasts and
"weak base class." Downcasting refers to the procedure of using a base class
@@ -174,7 +174,7 @@ problems. This design is based on elements of the `Component Object Model
<http://en.wikipedia.org/wiki/Component_Object_Model>`_ and `GNOME Bonobo
<http://en.wikipedia.org/wiki/Bonobo_(component_model)>`_ although full
binary-level compatibility of replaceable components is not supported and we
have tried to simplify the syntax and impact on model developers.
have tried to simplify the syntax and impact on model developers.
Examples
********
@@ -212,7 +212,7 @@ GetObject example
+++++++++++++++++
GetObject is a type-safe way to achieve a safe downcasting and to allow
interfaces to be found on an object.
interfaces to be found on an object.
Consider a node pointer ``m_node`` that points to a Node object that has an
implementation of IPv4 previously aggregated to it. The client code wishes to
@@ -243,7 +243,7 @@ Object factories
A common use case is to create lots of similarly configured objects. One can
repeatedly call :cpp:func:`CreateObject` but there is also a factory design
pattern in use in the |ns3| system. It is heavily used in the "helper" API.
pattern in use in the |ns3| system. It is heavily used in the "helper" API.
Class :cpp:class:`ObjectFactory` can be used to instantiate objects and to
configure the attributes on those objects::
@@ -254,7 +254,7 @@ configure the attributes on those objects::
The first method allows one to use the |ns3| TypeId system to specify the type
of objects created. The second allows one to set attributes on the objects to be
created, and the third allows one to create the objects themselves.
created, and the third allows one to create the objects themselves.
For example: ::
@@ -265,10 +265,10 @@ For example: ::
// subsequently created objects
factory.Set ("SystemLoss", DoubleValue (2.0));
// Create one such object
Ptr<Object> object = factory.Create ();
Ptr<Object> object = factory.Create ();
factory.Set ("SystemLoss", DoubleValue (3.0));
// Create another object with a different SystemLoss
Ptr<Object> object = factory.Create ();
Ptr<Object> object = factory.Create ();
Downcasting
***********

View File

@@ -13,12 +13,12 @@ and models are implemented in C++. |ns3| is built as a library which may be
statically or dynamically linked to a C++ main program that defines the
simulation topology and starts the simulator. |ns3| also exports nearly all
of its API to Python, allowing Python programs to import an "ns3" module in
much the same way as the |ns3| library is linked by executables in C++.
much the same way as the |ns3| library is linked by executables in C++.
.. _software-organization:
.. figure:: figures/software-organization.*
Software organization of |ns3|
The source code for |ns3| is mostly organized in the ``src`` directory and
@@ -26,9 +26,9 @@ can be described by the diagram in :ref:`software-organization`. We will
work our way from the bottom up; in general, modules only have dependencies
on modules beneath them in the figure.
We first describe the core of the simulator; those components that are
common across all protocol, hardware, and environmental models.
The simulation core is implemented in ``src/core``. Packets are
We first describe the core of the simulator; those components that are
common across all protocol, hardware, and environmental models.
The simulation core is implemented in ``src/core``. Packets are
fundamental objects in a network simulator
and are implemented in ``src/network``. These two simulation modules by
themselves are intended to comprise a generic simulation core that can be
@@ -36,8 +36,8 @@ used by different kinds of networks, not just Internet-based networks. The
above modules of |ns3| are independent of specific network and device
models, which are covered in subsequent parts of this manual.
In addition to the above |ns3| core, we introduce, also in the initial
portion of the manual, two other modules that supplement the core C++-based
In addition to the above |ns3| core, we introduce, also in the initial
portion of the manual, two other modules that supplement the core C++-based
API. |ns3| programs may access
all of the API directly or may make use of a so-called *helper API* that
provides convenient wrappers or encapsulation of low-level API calls. The
@@ -50,10 +50,10 @@ The remainder of the manual is focused on documenting the models and
supporting capabilities. The next part focuses on two fundamental objects in
|ns3|: the ``Node`` and ``NetDevice``. Two special NetDevice types are
designed to support network emulation use cases, and emulation is described
next. The following chapter is devoted to Internet-related models,
next. The following chapter is devoted to Internet-related models,
including the
sockets API used by Internet applications. The next chapter covers
applications, and the following chapter describes additional support for
sockets API used by Internet applications. The next chapter covers
applications, and the following chapter describes additional support for
simulation, such as animators and statistics.
The project maintains a separate manual devoted to testing and validation

View File

@@ -5,28 +5,28 @@ Profiling
---------
Memory profiling is essential to identify issues that
may cause memory corruption, which may lead to all sorts of
may cause memory corruption, which may lead to all sorts of
side-effects, such as crashing after many hours of simulation and
producing wrong results that invalidate the entire simulation.
It also can help tracking sources of excessive memory allocations,
the size of these allocations and memory usage during simulation.
These can affect simulation performance, or limit the complexity
It also can help tracking sources of excessive memory allocations,
the size of these allocations and memory usage during simulation.
These can affect simulation performance, or limit the complexity
and the number of concurrent simulations.
Performance profiling on the other hand is essential for
Performance profiling on the other hand is essential for
high-performance applications, as it allows for the identification
of bottlenecks and their mitigation.
Another type of profiling is related to system calls. They
can be used to debug issues and identify hotspots that
can be used to debug issues and identify hotspots that
may cause performance issues in specific conditions. Excessive
calls results in more context switches, which interrupt the
simulations, ultimately slowing them down.
calls results in more context switches, which interrupt the
simulations, ultimately slowing them down.
Other than profiling the simulations, which can highlight bottlenecks
in the simulator, we can also profile the compilation process.
This allows us to identify and fix bottlenecks, which speed up
in the simulator, we can also profile the compilation process.
This allows us to identify and fix bottlenecks, which speed up
build times.
@@ -40,22 +40,22 @@ Memory Profilers
.. _Bytehound : https://github.com/koute/bytehound
.. _gperftools : https://github.com/gperftools/gperftools
Memory profilers are tools that help identifying memory related
Memory profilers are tools that help identifying memory related
issues.
There are two well known tools for finding bugs such as uninitialized memory usage,
out-of-bound accesses, dereferencing null pointers and other memory-related bugs:
* `Valgrind`_
* Pros: very rich tooling, no need to recompile programs to profile the program.
* Cons: very slow and limited to Linux and MacOS.
* `Sanitizers`_
* Pros: sanitizers are distributed along with compilers, such as GCC, Clang and MSVC.
They are widely available, cross platform and faster than Valgrind.
* Cons: false positives, high memory usage, memory sanitizer is incompatible
with other sanitizers (e.g. address sanitizer), requiring two instrumented
* Cons: false positives, high memory usage, memory sanitizer is incompatible
with other sanitizers (e.g. address sanitizer), requiring two instrumented
compilations and two test runs. The memory sanitizer requires Clang.
There are also tools to count memory allocations, track memory usage and memory leaks,
@@ -92,11 +92,11 @@ used to profile programs at runtime and find issues related to undefined behavio
memory corruption (out-of-bound access, uninitialized memory use), leaks, race
conditions and others.
Sanitizers are shipped with most modern compilers and can be used by instructing the
Sanitizers are shipped with most modern compilers and can be used by instructing the
compiler to link the required libraries and instrument the code.
To build ns-3 with sanitizers, enable the ``NS3_SANITIZE`` option. This can be done
directly via CMake:
directly via CMake:
.. sourcecode:: console
@@ -158,27 +158,27 @@ Sanitizers were used to find issues in multiple occasions:
=>0x0ffd7197db70: 00 00 04 f9 f9 f9 f9[f9]00 00 00 00 00 00 00 00
Shadow byte legend (one shadow byte represents 8 application bytes):
Addressable: 00
Partially addressable: 01 02 03 04 05 06 07
Partially addressable: 01 02 03 04 05 06 07
Global redzone: f9
==51636==ABORTING
* The output above shows the type of error (``global-buffer-overflow``),
the stack-trace of where the bug happened (``LteAmc::GetDlTbSizeFromMcs``),
affected variables (``McsToItbsUl`` and ``TransportBlockSizeTable``),
* The output above shows the type of error (``global-buffer-overflow``),
the stack-trace of where the bug happened (``LteAmc::GetDlTbSizeFromMcs``),
affected variables (``McsToItbsUl`` and ``TransportBlockSizeTable``),
and a shadow bytes map, showing the wrong access between square brackets.
* The the global redzone (f9) shadow bytes are empty memory allocated between global variables (00s and 04s),
* The the global redzone (f9) shadow bytes are empty memory allocated between global variables (00s and 04s),
which are left there to be corrupted by the bugged program.
Any eventual corruption is then traced back to the source, without affecting the program execution.
* The adopted solution in merge request `MR703`_ was to fix one of the schedulers that could produce the index value of -1,
and updating the asserts to catch the illegal index value.
* A wrong downcast in the Wimax module:
* The pointer was casted incorrectly to U16TlvValue instead of U8TvlValue, which could have different sizes in memory
leading to the program reading the wrong memory address.
Reading the wrong memory address can result in unexpected or invalid values being read, which could change the
program flow and corrupt memory, producing wrong simulation results or crashing the program.
leading to the program reading the wrong memory address.
Reading the wrong memory address can result in unexpected or invalid values being read, which could change the
program flow and corrupt memory, producing wrong simulation results or crashing the program.
.. sourcecode:: console
~/ns-3-dev/src/wimax/model/service-flow.cc:159:86: runtime error: downcast of address 0x6020000148b0 which does not point to an object of type 'U16TlvValue'
@@ -210,8 +210,8 @@ along with stack traces, allowing developers to identify code responsible
for possible memory leaks and unnecessary allocations.
For the examples below we used the default configuration of ns-3,
with the output going to the ``build`` directory. The actual executable
for the ``wifi-he-network`` example is ``./build/examples/wireless/ns3-dev-wifi-he-network``, which is what is
with the output going to the ``build`` directory. The actual executable
for the ``wifi-he-network`` example is ``./build/examples/wireless/ns3-dev-wifi-he-network``, which is what is
executed by ``./ns3 run wifi-he-network``.
To collect information of a program (in this case the ``wifi-he-network``
@@ -225,13 +225,13 @@ If you prefer to use the ``ns3`` wrapper, try:
.. sourcecode:: console
~ns-3-dev/$ ./ns3 run "wifi-he-network --simulationTime=0.3 --frequency=5 --useRts=1 --minExpectedThroughput=6 --maxExpectedThroughput=745" --command-template "heaptrack %s" --no-build
~ns-3-dev/$ ./ns3 run "wifi-he-network --simulationTime=0.3 --frequency=5 --useRts=1 --minExpectedThroughput=6 --maxExpectedThroughput=745" --command-template "heaptrack %s" --no-build
In both cases, heaptrack will print to the terminal the output file:
.. sourcecode:: console
~ns-3-dev/$ ./ns3 run "wifi-he-network --simulationTime=0.3 --frequency=5 --useRts=1 --minExpectedThroughput=6 --maxExpectedThroughput=745" --command-template "heaptrack %s" --no-build
~ns-3-dev/$ ./ns3 run "wifi-he-network --simulationTime=0.3 --frequency=5 --useRts=1 --minExpectedThroughput=6 --maxExpectedThroughput=745" --command-template "heaptrack %s" --no-build
heaptrack output will be written to "~ns-3-dev/heaptrack.ns3-dev-wifi-he-network.210305.zst"
starting application, this might take some time...
MCS value Channel width GI Throughput
@@ -248,10 +248,10 @@ In both cases, heaptrack will print to the terminal the output file:
heaptrack --analyze "~/ns-3-dev/heaptrack.ns3-dev-wifi-he-network.210305.zst"
The output above shows a summary of the stats collected: ~149 million allocations,
The output above shows a summary of the stats collected: ~149 million allocations,
~21 million temporary allocations and ~10 thousand possible leaked allocations.
If ``heaptrack-gui`` is installed, running ``heaptrack`` will launch it. If it is not installed,
If ``heaptrack-gui`` is installed, running ``heaptrack`` will launch it. If it is not installed,
the command line interface will be used.
.. sourcecode:: console
@@ -364,7 +364,7 @@ Here is a short description of what each line of the last block of the output me
* Total memory leak refers to memory allocated but never freed. This includes static initialization,
so it is not uncommon to be different than 0KB. However this does not mean the program does not
have memory leaks. Other memory profilers such as Valgrind and memory sanitizers are better
suited to track down memory leaks.
suited to track down memory leaks.
Based on the stack trace, it is fairly easy to locate the corresponding code and act on it to
reduce the number of allocations.
@@ -383,7 +383,7 @@ but in a more interactive way.
.. image:: figures/heaptrack.png
Heaptrack was used in merge request `MR830`_ to track and reduce the number of allocations
in the ``wifi-he-network`` example mentioned above. About 29 million unnecessary allocations
in the ``wifi-he-network`` example mentioned above. About 29 million unnecessary allocations
were removed, which translates to a 20% reduction. This resulted in a 1.07x speedup of the
test suite with Valgrind (``./test.py -d -g``) and 1.02x speedup without it.
@@ -402,16 +402,16 @@ Performance Profilers
Performance profilers are programs that collect runtime information and help to
identify performance bottlenecks. In some cases, they can point out hotspots
and suggest solutions.
and suggest solutions.
There are many tools to profile your program, including:
There are many tools to profile your program, including:
* profilers from CPU manufacturers, such as `AMD uProf`_ and `Intel VTune`_
* profilers from the operating systems, such as Linux's `Perf`_ and `Windows Performance Toolkit`_
* `Perf`_ also has a few graphical user interfaces available, being `Hotspot`_ one of them
* instrumented compilation and auxiliary tools provided by compilers, such as `Gprof`_
* third-party tools, such as `Sysprof`_ and `Oprofile`_
* third-party tools, such as `Sysprof`_ and `Oprofile`_
An overview on how to use `Perf`_ with `Hotspot`_, `AMD uProf`_ and
`Intel VTune`_ is provided in the following sections.
@@ -422,29 +422,29 @@ Linux Perf and Hotspot GUI
++++++++++++++++++++++++++
`Perf`_ is the kernel tool to measure performance of the Linux kernel,
drivers and user-space applications.
drivers and user-space applications.
Perf tracks some performance events, being some of the most important for performance:
* cycles
* Clocks (time) spent running.
* cache-misses
* When either data or instructions were not in the L1/L2 caches, requiring
* When either data or instructions were not in the L1/L2 caches, requiring
a L3 or memory access.
* branch-misses
* How many branch instructions were mispredicted.
Mispredictions cause the CPU to stall and flush the pipeline,
Mispredictions cause the CPU to stall and flush the pipeline,
slowing down the program.
* stalled-cycles-frontend
* Cycles wasted by the processor waiting for the next instruction,
usually due to instruction cache miss or mispredictions.
Starves the CPU pipeline of instructions and slows down the program.
* stalled-cycles-backend
* Cycles wasted waiting for pipeline resources to finish their work.
Usually waiting for memory read/write, or executing long-latency instructions.
@@ -459,12 +459,12 @@ to the ``perf.data`` output file.
~/ns-3-dev$ ./ns3 run "wifi-he-network --simulationTime=0.3 --frequency=5 --useRts=1 --minExpectedThroughput=6 --maxExpectedThroughput=745" --command-template "perf record -o ./perf.data --call-graph dwarf --event cycles,cache-misses,branch-misses --sample-cpu %s" --no-build
`Hotspot`_ is a GUI for Perf, that makes performance profiling more
enjoyable and productive. It can parse the ``perf.data`` and show in
`Hotspot`_ is a GUI for Perf, that makes performance profiling more
enjoyable and productive. It can parse the ``perf.data`` and show in
a more friendly way.
To record the same perf.data from Hotspot directly, fill the fields
for working directory, path to the executable, arguments, perf
for working directory, path to the executable, arguments, perf
events to track and output directory for the ``perf.data``.
Then run to start recording.
@@ -475,7 +475,7 @@ image.
.. image:: figures/hotspot-cycles.png
The data is also presented in a tabular format in the bottom-up,
The data is also presented in a tabular format in the bottom-up,
top-down and caller/callee tabs (top left of the screen).
.. image:: figures/hotspot-top-down.png
@@ -490,14 +490,14 @@ Hotspot was used to identify performance bottlenecks in multiple occasions:
#. ``wifi-primary-channels`` test suite was extremely slow due to unnecessary RF processing.
The adopted solution was to replace the filtering step of the entire channel to just the desired
sub-band, and assuming sub-bands are uniformly sized, saving multiplications in the integral
used to compute the power of each sub-band. This resulted in a 6x speedup with
``./ns3 run "test-runner --fullness=TAKES_FOREVER --test-name=wifi-primary-channels"``.
used to compute the power of each sub-band. This resulted in a 6x speedup with
``./ns3 run "test-runner --fullness=TAKES_FOREVER --test-name=wifi-primary-channels"``.
Hotspot was used along with AMD uProf to track this and other bottlenecks in `issue 426`_.
#. ``WifiMacQueue::TtlExceeded`` dereferenced data out of cache when calling Simulator::Now().
The adopted solution was to move Simulator::Now() out of TtlExceeded and reuse the value
and inlining TtlExceeded. This resulted in a ~1.20x speedup with the test suite (``./test.py -d``).
Hotspot was used along with AMD uProf to track this and other bottlenecks in `issue 280`_
Hotspot was used along with AMD uProf to track this and other bottlenecks in `issue 280`_
and merge request `MR681`_.
#. MpduAggregator and MsduAggregator required an expensive attribute lookup to get the maximum sizes
@@ -510,9 +510,9 @@ Hotspot was used to identify performance bottlenecks in multiple occasions:
AMD uProf
+++++++++
`AMD uProf`_ works much like `Linux Perf and Hotspot GUI`_, but
`AMD uProf`_ works much like `Linux Perf and Hotspot GUI`_, but
is available in more platforms (Linux, Windows and BSD) using AMD
processors. Differently from Perf, it provides more performance
processors. Differently from Perf, it provides more performance
trackers for finer analysis.
To use it, open uProf then click to profile an application. If you
@@ -522,11 +522,11 @@ Configurations`` section.
.. image:: figures/uprof-start.png
Fill the fields with the application path, the arguments and
the working directory.
Fill the fields with the application path, the arguments and
the working directory.
You may need to add the LD_LIBRARY_PATH environment variable
(or PATH on Windows), pointing it to the library output
You may need to add the LD_LIBRARY_PATH environment variable
(or PATH on Windows), pointing it to the library output
directory (e.g. ``ns-3-dev/build/lib``).
Then click next:
@@ -537,29 +537,29 @@ Now select custom events and pick the events you want.
The recommended ones for performance profiling are:
* CYCLES_NOT_IN_HALT
* Clocks (time) spent running.
* RETIRED_INST
* How many instructions were completed.
* These do not count mispredictions, stalls, etc.
* Instructions per clock (IPC) = RETIRED_INST / CYCLES_NOT_IN_HALT
* RETIRED_BR_INST_MISP
* How many branch instructions were mispredicted.
  * Mispredictions cause the CPU to stall and flush the pipeline,
  * Mispredictions cause the CPU to stall and flush the pipeline,
slowing down the program.
* L2_CACHE_MISS.FROM_L1_IC_MISS
* L2 cache misses caused by instruction L1 cache misses.
* L2_CACHE_MISS.FROM_L1_IC_MISS
* L2 cache misses caused by instruction L1 cache misses.
* Results in L3/memory accesses due to missing instructions in L1/L2.
* L2_CACHE_MISS.FROM_L1_DC_MISS
* L2 cache misses caused by data L1 cache misses.
* L2_CACHE_MISS.FROM_L1_DC_MISS
* L2 cache misses caused by data L1 cache misses.
* Results in L3/memory accesses due to missing instructions in L1/L2
* MISALIGNED_LOADS
* Loads not aligned with processor words.
* Loads not aligned with processor words.
* Might result in additional cache and memory accesses.
.. image:: figures/uprof-select-events.png
@@ -569,14 +569,14 @@ Now click in advanced options to enable collection of the call stack.
.. image:: figures/uprof-collect-callstack.png
Then click ``Start Profile`` and wait for the program to end.
After it finishes you will be greeted with a hotspot summary screen,
but the ``Analyze`` tab (top of the screen) has sub-tabs with more
After it finishes you will be greeted with a hotspot summary screen,
but the ``Analyze`` tab (top of the screen) has sub-tabs with more
relevant information.
In the following image the metrics are shown per module, including the
In the following image the metrics are shown per module, including the
C library (libc.so.6) which provides the ``malloc`` and ``free`` functions.
Values can be shown in terms of samples or percentages for easier reading
and to decide where to optimize.
and to decide where to optimize.
.. image:: figures/uprof-stats.png
@@ -597,23 +597,23 @@ Here are a few cases where AMD uProf was used to identify performance bottleneck
#. ``wifi-primary-channels`` test suite was extremely slow due to unnecessary RF processing.
The adopted solution was to replace the filtering step of the entire channel to just the desired
sub-band, and assuming sub-bands are uniformly sized, saving multiplications in the integral
used to compute the power of each sub-band. This resulted in a 6x speedup with
``./ns3 run "test-runner --fullness=TAKES_FOREVER --test-name=wifi-primary-channels"``.
used to compute the power of each sub-band. This resulted in a 6x speedup with
``./ns3 run "test-runner --fullness=TAKES_FOREVER --test-name=wifi-primary-channels"``.
More details on: `issue 426`_ and merge request `MR677`_.
#. Continuing the work on ``wifi-primary-channels`` test suite, profiling showed an excessive
number of cache misses in ``InterferenceHelper::GetNextPosition``.
number of cache misses in ``InterferenceHelper::GetNextPosition``.
This function searches for an iterator on a map, which is very fast
if the map is small and fits in the cache, which was not the case. After reviewing the code,
it was noticed in most cases this call was unnecessary as the iterator was already known.
The adopted solution was to reuse the iterator whenever possible.
This resulted in a 1.78x speedup on top of the previous 6x with
The adopted solution was to reuse the iterator whenever possible.
This resulted in a 1.78x speedup on top of the previous 6x with
``./ns3 run "test-runner --fullness=TAKES_FOREVER --test-name=wifi-primary-channels"``.
More details on: `issue 426`_ and merge requests `MR677`_ and `MR680`_.
#. Position-Independent Code libraries (``-fPIC``) have an additional layer of indirection that increases
#. Position-Independent Code libraries (``-fPIC``) have an additional layer of indirection that increases
instruction cache misses. The adopted solution was to disable `semantic interposition`_ with flag
``-fno-semantic-interposition`` on GCC. This is the default setting on Clang. This results in
``-fno-semantic-interposition`` on GCC. This is the default setting on Clang. This results in
approximately 1.14x speedup with ``./test.py -d``. More details on: `MR777`_.
Note: all speedups above were measured on the same machine. Results may differ based on clock speeds,
@@ -622,39 +622,39 @@ cache sizes, number of cores, memory bandwidth and latency, storage throughput a
Intel VTune
+++++++++++
`Intel VTune`_ works much like `Linux Perf and Hotspot GUI`_, but
`Intel VTune`_ works much like `Linux Perf and Hotspot GUI`_, but
is available in more platforms (Linux, Windows and Mac) using Intel
processors. Differently from Perf, it provides more performance
processors. Differently from Perf, it provides more performance
trackers for finer analysis.
When you open the program, you will be greeted by the landing page
When you open the program, you will be greeted by the landing page
shown in the following image. To start a new profiling project, click
in the ``Configure Analysis`` button. If you already have a project,
right-click the entry and click to configure analysis to reuse the
in the ``Configure Analysis`` button. If you already have a project,
right-click the entry and click to configure analysis to reuse the
settings.
.. image:: figures/vtune-landing.png
A configuration page will open, where you can fill the fields with
the path to the program, arguments, and set working directory and
environment variables.
the path to the program, arguments, and set working directory and
environment variables.
Note: in this example on Windows using MinGW,
we need to define the ``PATH`` environment variable with the paths
to both ``~/ns-3-dev/build/lib`` and the MinGW binaries folder
we need to define the ``PATH`` environment variable with the paths
to both ``~/ns-3-dev/build/lib`` and the MinGW binaries folder
(``~/msys64/mingw64/bin``), which contains essential libraries.
On Linux-like systems you will need to define the
On Linux-like systems you will need to define the
``LD_LIBRARY_PATH`` environment variable instead of ``PATH``.
Clicking on the ``Performance Snapshot`` shows the different profiling
options.
options.
.. image:: figures/vtune-configure.png
If executed as is, a quicker profiling will be executed to
If executed as is, a quicker profiling will be executed to
determine what areas should be profiled with more details.
For the specific example, it is indicated that there are
microarchitectural bottlenecks and low parallelism
For the specific example, it is indicated that there are
microarchitectural bottlenecks and low parallelism
(not a surprise since ns-3 is single-threaded).
.. image:: figures/vtune-perf-snapshot.png
@@ -678,7 +678,7 @@ mispredictions), bad speculation, cache misses, unused load ports,
and more.
The stats for the wifi module are shown below. The retiring
metric indicates about 40% of dispatched instructions are
metric indicates about 40% of dispatched instructions are
executed. The diagram on the right shows the bottleneck is
in the front-end of the pipeline (red), due to high
instruction cache misses, translation lookaside buffer (TLB)
@@ -686,10 +686,10 @@ overhead and unknown branches (most likely callbacks).
.. image:: figures/vtune-uarch-wifi-stats.png
The stats for the core module are shown below.
The stats for the core module are shown below.
More specifically for the ns3::Object::DoGetObject function.
Metrics indicates about 63% of bad speculations.
The diagram on the right shows that there are bottlenecks
The diagram on the right shows that there are bottlenecks
both in the front-end and due to bad speculation (red).
.. image:: figures/vtune-uarch-core-stats.png
@@ -703,7 +703,7 @@ System calls profilers
.. _procmon : https://docs.microsoft.com/en-us/sysinternals/downloads/procmon
System call profilers collect information on which system
calls were made by a program, how long they took to be
calls were made by a program, how long they took to be
fulfilled and how many of them resulted in errors.
There are many system call profilers, including `dtrace`_, `strace`_ and `procmon`_.
@@ -714,8 +714,8 @@ Strace
++++++
The `strace`_ is a system calls (syscalls) profiler for Linux. It can filter
specific syscalls, or gather stats during the execution.
The `strace`_ is a system calls (syscalls) profiler for Linux. It can filter
specific syscalls, or gather stats during the execution.
To collect statistics, use ``strace -c``:
@@ -734,10 +734,10 @@ To collect statistics, use ``strace -c``:
------ ----------- ----------- --------- --------- ----------------
100.00 0.011515 8 1378 251 total
In the example above, the syscalls are listed on the right, after
In the example above, the syscalls are listed on the right, after
the time spent on each syscall, number of calls and errors.
The errors can occur for multiple reasons and may not
The errors can occur for multiple reasons and may not
be a problem. To check if they were problems, strace can log the
syscalls with ``strace -o calls.log``:
@@ -750,7 +750,7 @@ syscalls with ``strace -o calls.log``:
11 160 MHz 800 ns 524.459 Mbit/s
Looking at the ``calls.log`` file, we can see different sections. In the
Looking at the ``calls.log`` file, we can see different sections. In the
following section, the example is executed (``execve``), architecture is checked (``arch_prctl``),
memory is mapped for execution (``mmap``) and LD_PRELOAD use is checked.
@@ -772,14 +772,14 @@ Then the program searches for the wifi module library and fails multiple times (
openat(AT_FDCWD, "~/ns-3-dev/build/lib/x86_64/libns3-dev-wifi.so", O_RDONLY|O_CLOEXEC) = -1 ENOENT (No such file or directory)
newfstatat(AT_FDCWD, "~/ns-3-dev/build/lib/x86_64", 0x7ffff8d62c80, 0) = -1 ENOENT (No such file or directory)
The library is finally found and its header is read:
The library is finally found and its header is read:
.. sourcecode:: console
openat(AT_FDCWD, "~/ns-3-dev/build/lib/libns3-dev-wifi.so", O_RDONLY|O_CLOEXEC) = 3
read(3, "\177ELF\2\1\1\3\0\0\0\0\0\0\0\0\3\0>\0\1\0\0\0py\30\0\0\0\0\0"..., 832) = 832
Then other modules that wifi depends on are loaded, then execution of the program continues to the main
Then other modules that wifi depends on are loaded, then execution of the program continues to the main
function of the simulation.
Strace was used to track down issues found while running the ``lena-radio-link-failure`` example.
@@ -802,8 +802,8 @@ Its ``strace -c`` table was the following:
100,00 0,781554 1 411681 951 total
Notice the number of ``openat``, ``write``, ``close`` and ``lseek`` calls
are much more frequent than the other calls. These mean
``lena-radio-link-failure`` is opening, then seeking, then writing,
are much more frequent than the other calls. These mean
``lena-radio-link-failure`` is opening, then seeking, then writing,
then closing at least one file handler.
Using ``strace``, we can easily find the most frequently used file handlers.
@@ -835,21 +835,21 @@ Using ``strace``, we can easily find the most frequently used file handlers.
With the name of the files, we can look at the code that manipulates them.
The issue above was found in `MR777`_, where performance for some LTE examples
regressed for no apparent reason. The flame graph below, produced by `AMD uProf`_,
contains four large columns/"flames" in red, which
The issue above was found in `MR777`_, where performance for some LTE examples
regressed for no apparent reason. The flame graph below, produced by `AMD uProf`_,
contains four large columns/"flames" in red, which
correspond to the ``write``, ``openat``, ``close`` and ``lseek`` syscalls.
.. image:: figures/uprof-strace-lte.png
Upon closer inspection, these syscalls take a long time to complete due to
Upon closer inspection, these syscalls take a long time to complete due to
the underlying filesystem of the machine running the example (NTFS mount
using the ntfs-3g FUSE filesystem). In other words, the bottleneck only
exists when running the example in slow file systems
(e.g. FUSE and network file systems).
using the ntfs-3g FUSE filesystem). In other words, the bottleneck only
exists when running the example in slow file systems
(e.g. FUSE and network file systems).
The merge request `MR814`_ addressed the issue by keeping the files open
throughout the simulation. That alone resulted in a 1.75x speedup.
throughout the simulation. That alone resulted in a 1.75x speedup.
Compilation Profilers
@@ -859,7 +859,7 @@ Compilation profilers can help identifying which steps of the compilation
are slowing it down. These profilers are built into the compilers themselves,
only requiring third-party tools to consolidate the results.
The GCC feature is mentioned and exemplified, but is not the recommended
The GCC feature is mentioned and exemplified, but is not the recommended
compilation profiling method. For that, Clang is recommended.
GCC
@@ -899,14 +899,14 @@ output for a file is shown below. The line of ``---`` was inserted for clarity.
TOTAL : 0.67 0.20 0.88 78612 kB
In the table above, the first few lines show the five main compilations steps: ``setup``,
``parsing``, ``lang. deferred`` (C++ specific transformations),
``opt(imize) and generate (code)``, ``last asm`` (produce binary code).
``parsing``, ``lang. deferred`` (C++ specific transformations),
``opt(imize) and generate (code)``, ``last asm`` (produce binary code).
The lines below the ``---`` line show sub-steps of the five main compilation steps.
For this specific case, parsing global definitions (21%) and structures (16%),
For this specific case, parsing global definitions (21%) and structures (16%),
``template instantiation`` (16%) and generating the code in ``symout`` (10%).
Aggregating the data into a meaningful output to help focus where to improve is not that easy
Aggregating the data into a meaningful output to help focus where to improve is not that easy
and it is `not a priority`_ for GCC developers.
It is recommended to use the Clang alternative.
@@ -920,8 +920,8 @@ Clang can output very similar results with the ``-ftime-trace`` flag, but can al
it in a more meaningful way. With the help of the third-party tool `ClangBuildAnalyzer`_,
we can have really good insights on where to spend time trying to speed up the compilation.
Support for building with ``-ftime-trace``, compiling `ClangBuildAnalyzer`_ and producing a
report for the project have been baked into the CMake project of ns-3, and can be enabled
Support for building with ``-ftime-trace``, compiling `ClangBuildAnalyzer`_ and producing a
report for the project have been baked into the CMake project of ns-3, and can be enabled
with ``-DNS3_CLANG_TIMETRACE=ON``.
.. sourcecode:: console
@@ -1019,13 +1019,13 @@ The entire procedure looks like the following:
lte-test-rlc-um-transmitter.cc.o simulator.h event-id.h (560 ms)
...
done in 2.8s.
The output printed out contains a summary of time spent on parsing and on code generation, along
The output printed out contains a summary of time spent on parsing and on code generation, along
with multiple lists for different tracked categories. From the summary, it is clear that parsing times
are very high when compared to the optimization time (``-O3``). Skipping the others categories and going straight
to the expensive headers section, we can better understand why parsing times are so high, with some headers
to the expensive headers section, we can better understand why parsing times are so high, with some headers
adding as much as 5 minutes of CPU time to the parsing time.
.. _drastically speed up parsing times : https://gitlab.com/nsnam/ns-3-dev/-/merge_requests/731#note_687176503
@@ -1038,7 +1038,7 @@ compilation at the cost of increasing recompilation times.
CMake Profiler
**************
CMake has a built-in tracer that permits tracking hotspots in the CMake files slowing down the
CMake has a built-in tracer that permits tracking hotspots in the CMake files slowing down the
project configuration. To use the tracer, call cmake directly from a clean CMake cache directory:
.. sourcecode:: console
@@ -1055,14 +1055,14 @@ Or using the ns3 wrapper:
.. _Perfetto UI: https://ui.perfetto.dev/
A ``cmake_performance_trace.log`` file will be generated in the ns-3-dev directory.
A ``cmake_performance_trace.log`` file will be generated in the ns-3-dev directory.
The tracing results can be visualized using the ``about:tracing`` panel available
in Chromium-based browsers or a compatible trace viewer such as `Perfetto UI`_.
After opening the trace file, select the traced process and click on
any of the blocks to inspect the different stacks and find hotspots.
An auxiliary panel containing the function/macro name, arguments
and location can be shown, providing enough information to trace
An auxiliary panel containing the function/macro name, arguments
and location can be shown, providing enough information to trace
back the location of each specific call.
Just like in performance profilers, visual inspection makes it easier
@@ -1083,7 +1083,7 @@ up-to-date copies of headers in the output directory.
In `MR911`_, alternatives such as stub headers that include the original header
files, keeping them in their respective modules, and symlinking headers to the
output directory were used to reduce the configuration overhead.
output directory were used to reduce the configuration overhead.
Note: when testing I/O bottlenecks, you may want to drop filesystem caches,
otherwise the cache may hide the issues. In Linux, the caches can be cleared

View File

@@ -35,11 +35,11 @@ regenerated by an automated scanning process.
If a user is not interested in Python, no action is needed; by default, Python
bindings are disabled (and must be explicitly enabled at
CMake configure time). In this case, changes to the C++
API of a provided module will not cause the module to fail to compile.
API of a provided module will not cause the module to fail to compile.
The process for automatically generating Python bindings relies on a toolchain
involving a development installation of the Clang compiler, a program called
CastXML (https://github.com/CastXML/CastXML), and a program called
CastXML (https://github.com/CastXML/CastXML), and a program called
pygccxml (https://github.com/gccxml/pygccxml). The toolchain can be installed
using the ns-3 ``bake`` build tool.
@@ -67,42 +67,42 @@ Here is some example code that is written in Python and that runs |ns3|, which i
import ns.internet
import ns.network
import ns.point_to_point
ns.core.LogComponentEnable("UdpEchoClientApplication", ns.core.LOG_LEVEL_INFO)
ns.core.LogComponentEnable("UdpEchoServerApplication", ns.core.LOG_LEVEL_INFO)
nodes = ns.network.NodeContainer()
nodes.Create(2)
pointToPoint = ns.point_to_point.PointToPointHelper()
pointToPoint.SetDeviceAttribute("DataRate", ns.core.StringValue("5Mbps"))
pointToPoint.SetChannelAttribute("Delay", ns.core.StringValue("2ms"))
devices = pointToPoint.Install(nodes)
stack = ns.internet.InternetStackHelper()
stack.Install(nodes)
address = ns.internet.Ipv4AddressHelper()
address.SetBase(ns.network.Ipv4Address("10.1.1.0"), ns.network.Ipv4Mask("255.255.255.0"))
interfaces = address.Assign (devices);
echoServer = ns.applications.UdpEchoServerHelper(9)
serverApps = echoServer.Install(nodes.Get(1))
serverApps.Start(ns.core.Seconds(1.0))
serverApps.Stop(ns.core.Seconds(10.0))
echoClient = ns.applications.UdpEchoClientHelper(interfaces.GetAddress(1), 9)
echoClient.SetAttribute("MaxPackets", ns.core.UintegerValue(1))
echoClient.SetAttribute("Interval", ns.core.TimeValue(ns.core.Seconds (1.0)))
echoClient.SetAttribute("PacketSize", ns.core.UintegerValue(1024))
clientApps = echoClient.Install(nodes.Get(0))
clientApps.Start(ns.core.Seconds(2.0))
clientApps.Stop(ns.core.Seconds(10.0))
ns.core.Simulator.Run()
ns.core.Simulator.Destroy()
@@ -164,7 +164,7 @@ First of all, keep in mind that not 100% of the API is supported in Python. Som
#. some of the APIs involve pointers, which require knowledge of what kind of memory passing semantics (who owns what memory). Such knowledge is not part of the function signatures, and is either documented or sometimes not even documented. Annotations are needed to bind those functions;
#. Sometimes an unusual fundamental data type or C++ construct is used which is not yet supported by PyBindGen;
#. CastXML does not report template based classes unless they are instantiated.
#. CastXML does not report template based classes unless they are instantiated.
Most of the missing APIs can be wrapped, given enough time, patience, and expertise, and will likely be wrapped if bug reports are submitted. However, don't file a bug report saying "bindings are incomplete", because the project does not have maintainers to maintain every API.
@@ -232,7 +232,7 @@ There is one caveat: you must not allow the file object to be garbage collected
Working with Python Bindings
****************************
Python bindings are built on a module-by-module basis, and can be found in each module's ``bindings`` directory.
Python bindings are built on a module-by-module basis, and can be found in each module's ``bindings`` directory.
Overview
========
@@ -283,7 +283,7 @@ Process Overview
|ns3| has an automated process to regenerate Python bindings from the C++
header files. The process is only supported for Linux at the moment
because the project has not found a contributor yet to test and
document the capability on macOS. In short, the process currently
document the capability on macOS. In short, the process currently
requires the following steps on a Linux machine.
1. Prepare the system for scanning by installing the prerequisites,
@@ -332,7 +332,7 @@ Perform a configuration at the bake level:
$ ./bake.py configure -e ns-3-dev -e pygccxml
The output of ``./bake.py show`` should show something like this:
The output of ``./bake.py show`` should show something like this:
.. sourcecode:: bash
@@ -363,7 +363,7 @@ report as Missing. For Python bindings, the important
prerequisites are clang-dev, cmake, cxxfilt, llvm-dev, python3-dev,
and python3-setuptools. In the following process, the following programs
and libraries will be locally installed: castxml, pybindgen, pygccxml,
and |ns3|.
and |ns3|.
Note also that the `ns-3-allinone` target for bake will also include the
`pygccxml` and `ns-3-dev` targets (among other libraries) and can be
@@ -386,38 +386,38 @@ is present or missing on your system.
>> Searching for system dependency clang-dev - OK
>> Searching for system dependency qt - Problem
> Problem: Optional dependency, module "qt" not available
This may reduce the functionality of the final build.
This may reduce the functionality of the final build.
However, bake will continue since "qt" is not an essential dependency.
For more information call bake with -v or -vvv, for full verbose mode.
>> Searching for system dependency g++ - OK
>> Searching for system dependency cxxfilt - OK
>> Searching for system dependency setuptools - OK
>> Searching for system dependency python3-setuptools - OK
>> Searching for system dependency gi-cairo - Problem
> Problem: Optional dependency, module "gi-cairo" not available
This may reduce the functionality of the final build.
This may reduce the functionality of the final build.
However, bake will continue since "gi-cairo" is not an essential dependency.
For more information call bake with -v or -vvv, for full verbose mode.
>> Searching for system dependency gir-bindings - Problem
> Problem: Optional dependency, module "gir-bindings" not available
This may reduce the functionality of the final build.
This may reduce the functionality of the final build.
However, bake will continue since "gir-bindings" is not an essential dependency.
For more information call bake with -v or -vvv, for full verbose mode.
>> Searching for system dependency pygobject - Problem
> Problem: Optional dependency, module "pygobject" not available
This may reduce the functionality of the final build.
This may reduce the functionality of the final build.
However, bake will continue since "pygobject" is not an essential dependency.
For more information call bake with -v or -vvv, for full verbose mode.
>> Searching for system dependency pygraphviz - Problem
> Problem: Optional dependency, module "pygraphviz" not available
This may reduce the functionality of the final build.
This may reduce the functionality of the final build.
However, bake will continue since "pygraphviz" is not an essential dependency.
For more information call bake with -v or -vvv, for full verbose mode.
>> Searching for system dependency python3-dev - OK
>> Searching for system dependency cmake - OK
>> Downloading castxml - OK
@@ -425,7 +425,7 @@ is present or missing on your system.
>> Downloading pybindgen - OK
>> Downloading pygccxml - OK
>> Downloading ns-3-dev - OK
Build
#####
@@ -451,7 +451,7 @@ C++ code), it will report a failure instead:
::
>> Building ns-3-dev - Problem
> Error: Critical dependency, module "ns-3-dev" failed
> Error: Critical dependency, module "ns-3-dev" failed
For more information call Bake with --debug and/or -v, -vvv, for full verbose mode (bake --help)
At this point, it is recommended to change into the ns-3-dev directory and
@@ -483,15 +483,15 @@ data models, as explained here: https://www.ibm.com/support/knowledgecenter/en/
Linux uses the LP64 model, and MacOS (as well as 32-bit Linux) use the ILP32
model. Users will note that there are two versions of bindings files in
each ns-3 module directory; one with an ILP32.py suffix and one with
each ns-3 module directory; one with an ILP32.py suffix and one with
an LP64.py suffix. Only one is used on any given platform. The main
difference is in the representation of the 64 bit integer type as either
a 'long' (LP64) or 'long long' (ILP32).
a 'long' (LP64) or 'long long' (ILP32).
The process (only supported on Linux at present) generates the LP64
bindings using the toolchain and then copies the LP64 bindings to the
ILP32 bindings with some type substitutions automated by CMake scripts.
Rescanning a module
###################
@@ -532,7 +532,7 @@ Generating bindings on MacOS
In principle, this should work (and should generate the 32-bit bindings).
However, maintainers have not been available to complete this port to date.
We would welcome suggestions on how to enable scanning for MacOS.
We would welcome suggestions on how to enable scanning for MacOS.
Regenerating the Python bindings using gitlab-ci-local
======================================================
@@ -570,10 +570,10 @@ To allow an unprivileged user to use Docker, perform the following:
$ sudo chmod 666 /var/run/docker.sock
The following command will obtain a raw shell for an Ubuntu 20.04 image:
The following command will obtain a raw shell for an Ubuntu 20.04 image:
.. sourcecode:: bash
$ docker run -it ubuntu:20.04
However, if you prefer to work on your current directory from the container
@@ -620,7 +620,7 @@ The ``src/<module>/bindings`` directory may contain the following files, some of
* ``modulegen_customizations.py``: you may optionally add this file in order to customize the pybindgen code generation
* ``scan-header.h``: you may optionally add this file to customize what header file is scanned for the module. Basically this file is scanned instead of ns3/<module>-module.h. Typically, the first statement is #include "ns3/<module>-module.h", plus some other stuff to force template instantiations;
* ``module_helpers.cc``: you may add additional files, such as this, to be linked to python extension module. They will be automatically scanned;
* ``<module>.py``: if this file exists, it becomes the "frontend" python module for the ns3 module, and the extension module (.so file) becomes _<module>.so instead of <module>.so. The <module>.py file has to import all symbols from the module _<module> (this is more tricky than it sounds, see src/core/bindings/core.py for an example), and then can add some additional pure-python definitions.
* ``<module>.py``: if this file exists, it becomes the "frontend" python module for the ns3 module, and the extension module (.so file) becomes _<module>.so instead of <module>.so. The <module>.py file has to import all symbols from the module _<module> (this is more tricky than it sounds, see src/core/bindings/core.py for an example), and then can add some additional pure-python definitions.
Historical Information
**********************

View File

@@ -7,7 +7,7 @@ Random Variables
|ns3| contains a built-in pseudo-random number generator (PRNG). It is important
for serious users of the simulator to understand the functionality,
configuration, and usage of this PRNG, and to decide whether it is sufficient
for his or her research use.
for his or her research use.
Quick Overview
**************
@@ -17,7 +17,7 @@ Quick Overview
* by default, |ns3| simulations use a fixed seed; if there is any randomness in
the simulation, each run of the program will yield identical results unless
the seed and/or run number is changed.
the seed and/or run number is changed.
* in *ns-3.3* and earlier, |ns3| simulations used a random seed by default; this
marks a change in policy starting with *ns-3.4*.
@@ -46,24 +46,24 @@ Read further for more explanation about the random number facility for |ns3|.
Background
**********
Simulations use a lot of random numbers; one study
found that most network simulations spend as much as 50%
Simulations use a lot of random numbers; one study
found that most network simulations spend as much as 50%
of the CPU generating random numbers. Simulation users need
to be concerned with the quality of the (pseudo) random numbers and
the independence between different streams of random numbers.
the independence between different streams of random numbers.
Users need to be concerned with a few issues, such as:
* the seeding of the random number generator and whether a
* the seeding of the random number generator and whether a
simulation outcome is deterministic or not,
* how to acquire different streams of random numbers that are
independent from one another, and
* how to acquire different streams of random numbers that are
independent from one another, and
* how long it takes for streams to cycle
We will introduce a few terms here: a RNG provides a long sequence
of (pseudo) random numbers.
The length of this sequence is called the *cycle length*
or *period*, after which the RNG will repeat itself.
or *period*, after which the RNG will repeat itself.
This sequence can
be partitioned into disjoint *streams*. A stream of a
RNG is a contiguous subset or block of the RNG sequence.
@@ -73,10 +73,10 @@ RNG, then
the first stream might use the first N/2 values and the second
stream might produce the second N/2 values. An important property
here is that the two streams are uncorrelated. Likewise, each
stream can be partitioned disjointedly to a number of
stream can be partitioned disjointedly to a number of
uncorrelated *substreams*. The underlying RNG hopefully
produces a pseudo-random sequence of numbers with a very long
cycle length, and partitions this into streams and substreams in an
cycle length, and partitions this into streams and substreams in an
efficient manner.
|ns3| uses the same underlying random number generator as does |ns2|: the
@@ -85,7 +85,7 @@ http://www.iro.umontreal.ca/~lecuyer/myftp/papers/streams00.pdf. The MRG32k3a
generator provides :math:`1.8x10^{19}` independent streams of random numbers,
each of which consists of :math:`2.3x10^{15}` substreams. Each substream has a
period (*i.e.*, the number of random numbers before overlap) of
:math:`7.6x10^{22}`. The period of the entire generator is :math:`3.1x10^{57}`.
:math:`7.6x10^{22}`. The period of the entire generator is :math:`3.1x10^{57}`.
Class :cpp:class:`ns3::RandomVariableStream` is the public interface to this
@@ -109,10 +109,10 @@ Creating random variables
*************************
|ns3| supports a number of random variable objects from the base class
:cpp:class:`RandomVariableStream`. These objects derive from
:cpp:class:`RandomVariableStream`. These objects derive from
:cpp:class:`ns3::Object` and are handled by smart pointers.
The correct way to create these objects is to use the templated
The correct way to create these objects is to use the templated
`CreateObject<>` method, such as:
::
@@ -124,7 +124,7 @@ then you can access values by calling methods on the object such as:
::
myRandomNo = x->GetInteger ();
If you try to instead do something like this:
@@ -175,7 +175,7 @@ substream capability to produce multiple independent runs of the same
simulation.* In other words, the more statistically rigorous way to configure
multiple independent replications is to use a fixed seed and to advance the run
number. This implementation allows for a maximum of :math:`2.3x10^{15}`
independent replications using the substreams.
independent replications using the substreams.
For ease of use, it is not necessary to control the seed and run number from
within the program; the user can set the ``NS_GLOBAL_VALUE`` environment
@@ -209,7 +209,7 @@ base class provides a few methods for globally configuring the behavior
of the random number generator. Derived classes provide API for drawing random
variates from the particular distribution being supported.
Each RandomVariableStream created in the simulation is given a generator that is a
Each RandomVariableStream created in the simulation is given a generator that is a
new RNGStream from the underlying PRNG. Used in this manner, the L'Ecuyer
implementation allows for a maximum of :math:`1.8x10^{19}` random variables. Each
random variable in a single replication can produce up to :math:`7.6x10^{22}`
@@ -228,7 +228,7 @@ that access the next value in the substream.
* \return A floating point random value
*/
double GetValue (void) const;
/**
* \brief Returns a random integer from the underlying distribution
* \return Integer cast of ::GetValue()
@@ -242,7 +242,7 @@ Types of RandomVariables
************************
The following types of random variables are provided, and are documented in the
|ns3| Doxygen or by reading ``src/core/model/random-variable-stream.h``. Users
|ns3| Doxygen or by reading ``src/core/model/random-variable-stream.h``. Users
can also create their own custom random variables by deriving from class
:cpp:class:`RandomVariableStream`.
@@ -274,7 +274,7 @@ An example is in the propagation models for WifiNetDevice::
TypeId
RandomPropagationDelayModel::GetTypeId (void)
{
{
static TypeId tid = TypeId ("ns3::RandomPropagationDelayModel")
.SetParent<PropagationDelayModel> ()
.SetGroupName ("Propagation")
@@ -303,7 +303,7 @@ Setting the stream number
*************************
The underlying MRG32k3a generator provides 2^64 independent streams.
In ns-3, these are assigned sequentially starting from the first stream as
In ns-3, these are assigned sequentially starting from the first stream as
new RandomVariableStream instances make their first call to GetValue().
As a result of how these RandomVariableStream objects are assigned to
@@ -314,12 +314,12 @@ streams may (or may not) change.
As a concrete example, a user running a comparative study between routing
protocols may find that the act of changing one routing protocol for another
will notice that the underlying mobility pattern also changed.
will notice that the underlying mobility pattern also changed.
Starting with ns-3.15, some control has been provided to users to allow
users to optionally fix the assignment of selected RandomVariableStream
users to optionally fix the assignment of selected RandomVariableStream
objects to underlying streams. This is the ``Stream`` attribute, part
of the base class RandomVariableStream.
of the base class RandomVariableStream.
By partitioning the existing sequence of streams from before:
@@ -332,7 +332,7 @@ into two equal-sized sets:
.. sourcecode:: text
<-------------------------------------------------------------------------->
<-------------------------------------------------------------------------->
^ ^^ ^
| || |
stream 0 stream (2^63 - 1) stream 2^63 stream (2^64 - 1)
@@ -353,7 +353,7 @@ of -1 means that a value will be automatically allocated).
Publishing your results
***********************
When you publish simulation results, a key piece of configuration
When you publish simulation results, a key piece of configuration
information that you should always state is how you used the
random number generator.
@@ -374,8 +374,8 @@ Summary
Let's review what things you should do when creating a simulation.
* Decide whether you are running with a fixed seed or random seed; a fixed seed
is the default,
* Decide how you are going to manage independent replications, if applicable,
is the default,
* Decide how you are going to manage independent replications, if applicable,
* Convince yourself that you are not drawing more random values than the cycle
length, if you are running a very long simulation, and
* When you publish, follow the guidelines above about documenting your use of

View File

@@ -7,7 +7,7 @@ RealTime
|ns3| has been designed for integration into testbed and virtual machine
environments. To integrate with real network stacks and emit/consume packets, a
real-time scheduler is needed to try to lock the simulation clock with the
hardware clock. We describe here a component of this: the RealTime scheduler.
hardware clock. We describe here a component of this: the RealTime scheduler.
The purpose of the realtime scheduler is to cause the progression of the
simulation clock to occur synchronously with respect to some external time base.
@@ -41,7 +41,7 @@ executing events until it reaches a point where the next event is in the
possible for the simulation to consume more time than the wall clock time. The
other option "HardLimit" will cause the simulation to abort if the tolerance
threshold is exceeded. This attribute is
``ns3::RealTimeSimulatorImpl::HardLimit`` and the default is 0.1 seconds.
``ns3::RealTimeSimulatorImpl::HardLimit`` and the default is 0.1 seconds.
A different mode of operation is one in which simulated time is **not** frozen
during an event execution. This mode of realtime simulation was implemented but
@@ -55,7 +55,7 @@ Usage
*****
The usage of the realtime simulator is straightforward, from a scripting
perspective. Users just need to set the attribute
perspective. Users just need to set the attribute
``SimulatorImplementationType`` to the Realtime simulator, such as follows: ::
GlobalValue::Bind ("SimulatorImplementationType",
@@ -93,4 +93,4 @@ smaller than the minimum sleep time, so we busy-wait for the remainder of the
time. This means that the thread just sits in a for loop consuming cycles until
the desired time arrives. After the combination of sleep- and busy-waits, the
elapsed realtime (wall) clock should agree with the simulation time of the next
event and the simulation proceeds.
event and the simulation proceeds.

View File

@@ -7,33 +7,33 @@ Background
software testing.**
Writing defect-free software is a difficult proposition. There are many
dimensions to the problem and there is much confusion regarding what is
dimensions to the problem and there is much confusion regarding what is
meant by different terms in different contexts. We have found it worthwhile
to spend a little time reviewing the subject and defining some terms.
Software testing may be loosely defined as the process of executing a program
with the intent of finding errors. When one enters a discussion regarding
software testing, it quickly becomes apparent that there are many distinct
with the intent of finding errors. When one enters a discussion regarding
software testing, it quickly becomes apparent that there are many distinct
mind-sets with which one can approach the subject.
For example, one could break the process into broad functional categories
For example, one could break the process into broad functional categories
like ''correctness testing,'' ''performance testing,'' ''robustness testing''
and ''security testing.'' Another way to look at the problem is by life-cycle:
''requirements testing,'' ''design testing,'' ''acceptance testing,'' and
''requirements testing,'' ''design testing,'' ''acceptance testing,'' and
''maintenance testing.'' Yet another view is by the scope of the tested system.
In this case one may speak of ''unit testing,'' ''component testing,''
In this case one may speak of ''unit testing,'' ''component testing,''
''integration testing,'' and ''system testing.'' These terms are also not
standardized in any way, and so ''maintenance testing'' and ''regression
testing'' may be heard interchangeably. Additionally, these terms are often
misused.
There are also a number of different philosophical approaches to software
There are also a number of different philosophical approaches to software
testing. For example, some organizations advocate writing test programs
before actually implementing the desired software, yielding ''test-driven
before actually implementing the desired software, yielding ''test-driven
development.'' Some organizations advocate testing from a customer perspective
as soon as possible, following a parallel with the agile development process:
''test early and test often.'' This is sometimes called ''agile testing.'' It
seems that there is at least one approach to testing for every development
seems that there is at least one approach to testing for every development
methodology.
The |ns3| project is not in the business of advocating for any one of
@@ -42,7 +42,7 @@ the test process.
Like all major software products, |ns3| has a number of qualities that
must be present for the product to succeed. From a testing perspective, some
of these qualities that must be addressed are that |ns3| must be
of these qualities that must be addressed are that |ns3| must be
''correct,'' ''robust,'' ''performant'' and ''maintainable.'' Ideally there
should be metrics for each of these dimensions that are checked by the tests
to identify when the product fails to meet its expectations / requirements.
@@ -50,70 +50,70 @@ to identify when the product fails to meet its expectations / requirements.
Correctness
***********
The essential purpose of testing is to determine that a piece of software
behaves ''correctly.'' For |ns3| this means that if we simulate
something, the simulation should faithfully represent some physical entity or
The essential purpose of testing is to determine that a piece of software
behaves ''correctly.'' For |ns3| this means that if we simulate
something, the simulation should faithfully represent some physical entity or
process to a specified accuracy and precision.
It turns out that there are two perspectives from which one can view
correctness. Verifying that a particular model is implemented according
to its specification is generically called *verification*. The process of
deciding that the model is correct for its intended use is generically called
It turns out that there are two perspectives from which one can view
correctness. Verifying that a particular model is implemented according
to its specification is generically called *verification*. The process of
deciding that the model is correct for its intended use is generically called
*validation*.
Validation and Verification
***************************
A computer model is a mathematical or logical representation of something. It
can represent a vehicle, an elephant (see
A computer model is a mathematical or logical representation of something. It
can represent a vehicle, an elephant (see
`David Harel's talk about modeling an elephant at SIMUTools 2009 <http://simutools.org/2009/>`_), or a networking card. Models can also represent
processes such as global warming, freeway traffic flow or a specification of a
networking protocol. Models can be completely faithful representations of a
logical process specification, but they necessarily can never completely
simulate a physical object or process. In most cases, a number of
simplifications are made to the model to make simulation computationally
tractable.
processes such as global warming, freeway traffic flow or a specification of a
networking protocol. Models can be completely faithful representations of a
logical process specification, but they necessarily can never completely
simulate a physical object or process. In most cases, a number of
simplifications are made to the model to make simulation computationally
tractable.
Every model has a *target system* that it is attempting to simulate. The
Every model has a *target system* that it is attempting to simulate. The
first step in creating a simulation model is to identify this target system and
the level of detail and accuracy that the simulation is desired to reproduce.
In the case of a logical process, the target system may be identified as ''TCP
the level of detail and accuracy that the simulation is desired to reproduce.
In the case of a logical process, the target system may be identified as ''TCP
as defined by RFC 793.'' In this case, it will probably be desirable to create
a model that completely and faithfully reproduces RFC 793. In the case of a
physical process this will not be possible. If, for example, you would like to
simulate a wireless networking card, you may determine that you need, ''an
accurate MAC-level implementation of the 802.11 specification and [...] a
not-so-slow PHY-level model of the 802.11a specification.''
a model that completely and faithfully reproduces RFC 793. In the case of a
physical process this will not be possible. If, for example, you would like to
simulate a wireless networking card, you may determine that you need, ''an
accurate MAC-level implementation of the 802.11 specification and [...] a
not-so-slow PHY-level model of the 802.11a specification.''
Once this is done, one can develop an abstract model of the target system. This
is typically an exercise in managing the tradeoffs between complexity, resource
requirements and accuracy. The process of developing an abstract model has been
called *model qualification* in the literature. In the case of a TCP
protocol, this process results in a design for a collection of objects,
called *model qualification* in the literature. In the case of a TCP
protocol, this process results in a design for a collection of objects,
interactions and behaviors that will fully implement RFC 793 in |ns3|.
In the case of the wireless card, this process results in a number of tradeoffs
to allow the physical layer to be simulated and the design of a network device
and channel for ns-3, along with the desired objects, interactions and behaviors.
to allow the physical layer to be simulated and the design of a network device
and channel for ns-3, along with the desired objects, interactions and behaviors.
This abstract model is then developed into an |ns3| model that
This abstract model is then developed into an |ns3| model that
implements the abstract model as a computer program. The process of getting the
implementation to agree with the abstract model is called *model
verification* in the literature.
implementation to agree with the abstract model is called *model
verification* in the literature.
The process so far is open loop. What remains is to make a determination that a
given ns-3 model has some connection to some reality -- that a model is an
given ns-3 model has some connection to some reality -- that a model is an
accurate representation of a real system, whether a logical process or a physical
entity.
entity.
If one is going to use a simulation model to try and predict how some real
system is going to behave, there must be some reason to believe your results --
i.e., can one trust that an inference made from the model translates into a
If one is going to use a simulation model to try and predict how some real
system is going to behave, there must be some reason to believe your results --
i.e., can one trust that an inference made from the model translates into a
correct prediction for the real system. The process of getting the ns-3 model
behavior to agree with the desired target system behavior as defined by the model
qualification process is called *model validation* in the literature. In the
case of a TCP implementation, you may want to compare the behavior of your ns-3
TCP model to some reference implementation in order to validate your model. In
the case of a wireless physical layer simulation, you may want to compare the
case of a TCP implementation, you may want to compare the behavior of your ns-3
TCP model to some reference implementation in order to validate your model. In
the case of a wireless physical layer simulation, you may want to compare the
behavior of your model to that of real hardware in a controlled setting,
The |ns3| testing environment provides tools to allow for both model
@@ -122,58 +122,58 @@ validation and testing, and encourages the publication of validation results.
Robustness
**********
Robustness is the quality of being able to withstand stresses, or changes in
Robustness is the quality of being able to withstand stresses, or changes in
environments, inputs or calculations, etc. A system or design is ''robust''
if it can deal with such changes with minimal loss of functionality.
This kind of testing is usually done with a particular focus. For example, the
system as a whole can be run on many different system configurations to
This kind of testing is usually done with a particular focus. For example, the
system as a whole can be run on many different system configurations to
demonstrate that it can perform correctly in a large number of environments.
The system can be also be stressed by operating close to or beyond capacity by
The system can be also be stressed by operating close to or beyond capacity by
generating or simulating resource exhaustion of various kinds. This genre of
testing is called ''stress testing.''
The system and its components may be exposed to so-called ''clean tests'' that
demonstrate a positive result -- that is that the system operates correctly in
response to a large variation of expected configurations.
demonstrate a positive result -- that is that the system operates correctly in
response to a large variation of expected configurations.
The system and its components may also be exposed to ''dirty tests'' which
provide inputs outside the expected range. For example, if a module expects a
The system and its components may also be exposed to ''dirty tests'' which
provide inputs outside the expected range. For example, if a module expects a
zero-terminated string representation of an integer, a dirty test might provide
an unterminated string of random characters to verify that the system does not
crash as a result of this unexpected input. Unfortunately, detecting such
crash as a result of this unexpected input. Unfortunately, detecting such
''dirty'' input and taking preventive measures to ensure the system does not
fail catastrophically can require a huge amount of development overhead. In
order to reduce development time, a decision was taken early on in the project
to minimize the amount of parameter validation and error handling in the
to minimize the amount of parameter validation and error handling in the
|ns3| codebase. For this reason, we do not spend much time on dirty
testing -- it would just uncover the results of the design decision we know
we took.
We do want to demonstrate that |ns3| software does work across some set
of conditions. We borrow a couple of definitions to narrow this down a bit.
of conditions. We borrow a couple of definitions to narrow this down a bit.
The *domain of applicability* is a set of prescribed conditions for which
the model has been tested, compared against reality to the extent possible, and
judged suitable for use. The *range of accuracy* is an agreement between
the computerized model and reality within a domain of applicability.
the model has been tested, compared against reality to the extent possible, and
judged suitable for use. The *range of accuracy* is an agreement between
the computerized model and reality within a domain of applicability.
The |ns3| testing environment provides tools to allow for setting up
and running test environments over multiple systems (buildbot) and provides
The |ns3| testing environment provides tools to allow for setting up
and running test environments over multiple systems (buildbot) and provides
classes to encourage clean tests to verify the operation of the system over the
expected ''domain of applicability'' and ''range of accuracy.''
Performant
**********
Okay, ''performant'' isn't a real English word. It is, however, a very concise
neologism that is quite often used to describe what we want |ns3| to
Okay, ''performant'' isn't a real English word. It is, however, a very concise
neologism that is quite often used to describe what we want |ns3| to
be: powerful and fast enough to get the job done.
This is really about the broad subject of software performance testing. One of
the key things that is done is to compare two systems to find which performs
better (cf benchmarks). This is used to demonstrate that, for example,
|ns3| can perform a basic kind of simulation at least as fast as a
the key things that is done is to compare two systems to find which performs
better (cf benchmarks). This is used to demonstrate that, for example,
|ns3| can perform a basic kind of simulation at least as fast as a
competing tool, or can be used to identify parts of the system that perform
badly.
@@ -183,15 +183,15 @@ of tests.
Maintainability
***************
A software product must be maintainable. This is, again, a very broad
A software product must be maintainable. This is, again, a very broad
statement, but a testing framework can help with the task. Once a model has
been developed, validated and verified, we can repeatedly execute the suite
of tests for the entire system to ensure that it remains valid and verified
over its lifetime.
When a feature stops functioning as intended after some kind of change to the
system is integrated, it is called generically a *regression*.
Originally the
system is integrated, it is called generically a *regression*.
Originally the
term regression referred to a change that caused a previously fixed bug to
reappear, but the term has evolved to describe any kind of change that breaks
existing functionality. There are many kinds of regressions that may occur
existing bug that had no effect is suddenly exposed in the system. This may
be as simple as exercising a code path for the first time.
A *performance regression* is one that causes the performance requirements
of the system to be violated. For example, doing some work in a low level
of the system to be violated. For example, doing some work in a low level
function that may be repeated large numbers of times may suddenly render the
system unusable from certain perspectives.

View File

@@ -7,7 +7,7 @@ This chapter is concerned with the testing and validation of |ns3| software.
This chapter provides
* background about terminology and software testing
* a description of the ns-3 testing framework
* a guide to model developers or new model contributors for how to write tests
* background about terminology and software testing
* a description of the ns-3 testing framework
* a guide to model developers or new model contributors for how to write tests

View File

@@ -29,7 +29,7 @@ output, as in, ::
...
std::cout << "The value of x is " << x << std::endl;
...
}
}
This is workable in small environments, but as your simulations get more and
more complicated, you end up with more and more prints and the task of parsing
@@ -76,20 +76,20 @@ congestion window of a TCP model is a prime candidate for a trace source.
Trace sources are not useful by themselves; they must be connected to other
pieces of code that actually do something useful with the information provided
by the source. The entities that consume trace information are called trace
sinks. Trace sources are generators of events and trace sinks are consumers.
sinks. Trace sources are generators of events and trace sinks are consumers.
This explicit division allows for large numbers of trace sources to be scattered
around the system in places which model authors believe might be useful. Unless
a user connects a trace sink to one of these sources, nothing is output. This
arrangement allows relatively unsophisticated users to attach new types of sinks
to existing tracing sources, without requiring editing and recompiling the core
or models of the simulator.
or models of the simulator.
There can be zero or more consumers of trace events generated by a trace source.
One can think of a trace source as a kind of point-to-multipoint information
link.
link.
The "transport protocol" for this conceptual point-to-multipoint link is an
The "transport protocol" for this conceptual point-to-multipoint link is an
|ns3| ``Callback``.
Recall from the Callback Section that callback facility is a way to allow two
@@ -110,21 +110,21 @@ The Simplest Example
It will be useful to go walk a quick example just to reinforce what we've
said.::
#include "ns3/object.h"
#include "ns3/uinteger.h"
#include "ns3/traced-value.h""
#include "ns3/trace-source-accessor.h"
#include <iostream>
using namespace ns3;
The first thing to do is include the required files. As mentioned above, the
trace system makes heavy use of the Object and Attribute systems. The first two
includes bring in the declarations for those systems. The file,
``traced-value.h`` brings in the required declarations for tracing data that
obeys value semantics.
obeys value semantics.
In general, value semantics just means that you can pass the object around, not
an address. In order to use value semantics at all you have to have an object
@@ -150,7 +150,7 @@ made using those operators.::
;
return tid;
}
MyObject () {}
TracedValue<uint32_t> m_myInt;
};
@@ -179,7 +179,7 @@ function. This function will be called whenever one of the operators of the
main (int argc, char *argv[])
{
Ptr<MyObject> myObject = CreateObject<MyObject> ();
myObject->TraceConnectWithoutContext ("MyInteger", MakeCallback(&IntTrace));
myObject->m_myInt = 1234;
@@ -233,7 +233,7 @@ system (taken from ``examples/tcp-large-transfer.cc``)::
...
Config::ConnectWithoutContext (
"/NodeList/0/$ns3::TcpL4Protocol/SocketList/0/CongestionWindow",
"/NodeList/0/$ns3::TcpL4Protocol/SocketList/0/CongestionWindow",
MakeCallback (&CwndTracer));
This should look very familiar. It is the same thing as the previous example,
@@ -260,10 +260,10 @@ happens.
The leading "/" character in the path refers to a so-called namespace. One of the
predefined namespaces in the config system is "NodeList" which is a list of all of
the nodes in the simulation. Items in the list are referred to by indices into the
the nodes in the simulation. Items in the list are referred to by indices into the
list, so "/NodeList/0" refers to the zeroth node in the list of nodes created by
the simulation. This node is actually a ``Ptr<Node>`` and so is a subclass of
an :cpp:class:`ns3::Object`.
an :cpp:class:`ns3::Object`.
As described in the :ref:`Object-model` section, |ns3| supports an object
aggregation model. The next path segment begins with the "$" character which
@@ -311,7 +311,7 @@ There are three levels of interaction with the tracing system:
generated or use existing trace sources in different ways, without modifying
the core of the simulator;
* Advanced users can modify the simulator core to add new tracing sources and
sinks.
sinks.
Using Trace Helpers
*******************
@@ -339,7 +339,7 @@ example, you may want to specify that pcap tracing should be enabled on a
particular device on a specific node. This follows from the |ns3| device
conceptual model, and also the conceptual models of the various device helpers.
Following naturally from this, the files created follow a
<prefix>-<node>-<device> naming convention.
<prefix>-<node>-<device> naming convention.
Protocol helpers look at the problem of specifying which traces should be
enabled through a protocol and interface pair. This follows from the |ns3|
@@ -363,7 +363,7 @@ possible there are analogs for all methods in all classes.
We use an approach called a ``mixin`` to add tracing functionality to our helper
classes. A ``mixin`` is a class that provides functionality to that is
inherited by a subclass. Inheriting from a mixin is not considered a form of
specialization but is really a way to collect functionality.
specialization but is really a way to collect functionality.
Let's take a quick look at all four of these cases and their respective
``mixins``.
@@ -512,7 +512,7 @@ Finally, two of the methods shown above,::
have a default parameter called ``explicitFilename``. When set to true, this
parameter disables the automatic filename completion mechanism and allows you to
create an explicit filename. This option is only available in the methods which
enable pcap tracing on a single device.
enable pcap tracing on a single device.
For example, in order to arrange for a device helper to create a single
promiscuous pcap capture file of a specific name (``my-pcap-file.pcap``) on a
@@ -528,8 +528,8 @@ tells the helper to interpret the ``prefix`` parameter as a complete filename.
Ascii Tracing Device Helpers
++++++++++++++++++++++++++++
The behavior of the ASCII trace helper ``mixin`` is substantially similar to
the pcap version. Take a look at ``src/network/helper/trace-helper.h`` if you want to
The behavior of the ASCII trace helper ``mixin`` is substantially similar to
the pcap version. Take a look at ``src/network/helper/trace-helper.h`` if you want to
follow the discussion while looking at real code.
The class ``AsciiTraceHelperForDevice`` adds the high level functionality for
@@ -550,11 +550,11 @@ methods,::
void EnableAscii (Ptr<OutputStreamWrapper> stream, Ptr<NetDevice> nd);
will call the device implementation of ``EnableAsciiInternal`` directly,
providing either a valid prefix or stream. All other public ASCII tracing
providing either a valid prefix or stream. All other public ASCII tracing
methods will build on these low-level functions to provide additional user-level
functionality. What this means to the user is that all device helpers in the
functionality. What this means to the user is that all device helpers in the
system will have all of the ASCII trace methods available; and these methods
will all work in the same way across devices if the devices implement
will all work in the same way across devices if the devices implement
``EnablAsciiInternal`` correctly.
Ascii Tracing Device Helper Methods
@@ -623,9 +623,9 @@ user is completely specifying the file name, the string should include the ".tr"
for consistency.
You can enable ASCII tracing on a particular node/net-device pair by providing a
``std::string`` representing an object name service string to an
``std::string`` representing an object name service string to an
``EnablePcap`` method. The ``Ptr<NetDevice>`` is looked up from the name
string. Again, the ``<Node>`` is implicit since the named net device must
string. Again, the ``<Node>`` is implicit since the named net device must
belong to exactly one ``Node``. For example,::
Names::Add ("client" ...);
@@ -811,7 +811,7 @@ corresponding interface. For example,::
NodeContainer nodes;
...
NetDeviceContainer devices = deviceHelper.Install (nodes);
...
...
Ipv4AddressHelper ipv4;
ipv4.SetBase ("10.1.1.0", "255.255.255.0");
Ipv4InterfaceContainer interfaces = ipv4.Assign (devices);
@@ -865,7 +865,7 @@ would be "prefix-n21-i1.pcap".
You can always use the |ns3| object name service to make this more clear.
For example, if you use the object name service to assign the name "serverIpv4"
to the Ptr<Ipv4> on node 21, the resulting pcap trace file name will
to the Ptr<Ipv4> on node 21, the resulting pcap trace file name will
automatically become, "prefix-nserverIpv4-i1.pcap".
Ascii Tracing Protocol Helpers
@@ -884,7 +884,7 @@ The class ``AsciiTraceHelperForIpv4`` adds the high level functionality for
using ASCII tracing to a protocol helper. Each protocol that enables these
methods must implement a single virtual method inherited from this class.::
virtual void EnableAsciiIpv4Internal (Ptr<OutputStreamWrapper> stream, std::string prefix,
virtual void EnableAsciiIpv4Internal (Ptr<OutputStreamWrapper> stream, std::string prefix,
Ptr<Ipv4> ipv4, uint32_t interface) = 0;
The signature of this method reflects the protocol- and interface-centric view
@@ -936,7 +936,7 @@ pcap tracing. This is because, in addition to the pcap-style model where traces
from each unique protocol/interface pair are written to a unique file, we
support a model in which trace information for many protocol/interface pairs is
written to a common file. This means that the <prefix>-n<node id>-<interface>
file name generation mechanism is replaced by a mechanism to refer to a common
file name generation mechanism is replaced by a mechanism to refer to a common
file; and the number of API methods is doubled to allow all combinations.
Just as in pcap tracing, you can enable ASCII tracing on a particular
@@ -982,9 +982,9 @@ between protocol instances and nodes, For example,::
helper.EnableAsciiIpv4 ("prefix", "node1Ipv4", 1);
helper.EnableAsciiIpv4 ("prefix", "node2Ipv4", 1);
This would result in two files named "prefix-nnode1Ipv4-i1.tr" and
"prefix-nnode2Ipv4-i1.tr" with traces for each interface in the respective
trace file. Since all of the EnableAscii functions are overloaded to take a
This would result in two files named "prefix-nnode1Ipv4-i1.tr" and
"prefix-nnode2Ipv4-i1.tr" with traces for each interface in the respective
trace file. Since all of the EnableAscii functions are overloaded to take a
stream wrapper, you can use that form as well::
Names::Add ("node1Ipv4" ...);
@@ -1008,7 +1008,7 @@ one-to-one correspondence between each protocol and its node. For example,::
NodeContainer nodes;
...
NetDeviceContainer devices = deviceHelper.Install (nodes);
...
...
Ipv4AddressHelper ipv4;
ipv4.SetBase ("10.1.1.0", "255.255.255.0");
Ipv4InterfaceContainer interfaces = ipv4.Assign (devices);
@@ -1023,7 +1023,7 @@ traces into a single file is accomplished similarly to the examples above::
NodeContainer nodes;
...
NetDeviceContainer devices = deviceHelper.Install (nodes);
...
...
Ipv4AddressHelper ipv4;
ipv4.SetBase ("10.1.1.0", "255.255.255.0");
Ipv4InterfaceContainer interfaces = ipv4.Assign (devices);
@@ -1062,7 +1062,7 @@ associated protocol being the same type as that managed by the device helper.::
This would result in a number of ASCII trace files being created, one for
every interface in the system related to a protocol of the type managed by the
helper. All of these files will follow the <prefix>-n<node id>-i<interface.tr
convention. Combining all of the traces into a single file is accomplished
convention. Combining all of the traces into a single file is accomplished
similarly to the examples above.
Ascii Tracing Device Helper Filename Selection

View File

@@ -25,8 +25,8 @@ Here is an example of what might occur::
$ ./ns3 run tcp-point-to-point
Entering directory '/home/tomh/ns-3-nsc/build'
Compilation finished successfully
Command ['/home/tomh/ns-3-nsc/build/debug/examples/tcp-point-to-point'] exited with code -11
Compilation finished successfully
Command ['/home/tomh/ns-3-nsc/build/debug/examples/tcp-point-to-point'] exited with code -11
The error message says that the program terminated unsuccessfully, but it is
not clear from this information what might be wrong. To examine more
@@ -37,18 +37,18 @@ closely, try running it under the `gdb debugger
$ ./ns3 run tcp-point-to-point --gdb
Entering directory '/home/tomh/ns-3-nsc/build'
Compilation finished successfully
Compilation finished successfully
GNU gdb Red Hat Linux (6.3.0.0-1.134.fc5rh)
Copyright 2004 Free Software Foundation, Inc.
GDB is free software, covered by the GNU General Public License, and you are
welcome to change it and/or distribute copies of it under certain conditions.
Type "show copying" to see the conditions.
There is absolutely no warranty for GDB. Type "show warranty" for details.
This GDB was configured as "i386-redhat-linux-gnu"...Using host libthread_db
This GDB was configured as "i386-redhat-linux-gnu"...Using host libthread_db
library "/lib/libthread_db.so.1".
(gdb) run
Starting program: /home/tomh/ns-3-nsc/build/debug/examples/tcp-point-to-point
Starting program: /home/tomh/ns-3-nsc/build/debug/examples/tcp-point-to-point
Reading symbols from shared object read from target memory...done.
Loaded system supplied DSO at 0xf5c000
@@ -64,7 +64,7 @@ closely, try running it under the `gdb debugger
The program is running. Exit anyway? (y or n) y
Note first the way the program was invoked-- pass the command to run as an
argument to the command template "gdb %s".
argument to the command template "gdb %s".
This tells us that there was an attempt to dereference a null pointer
socketFactory.
@@ -78,7 +78,7 @@ Let's look around line 136 of tcp-point-to-point, as gdb suggests:
localSocket->Bind ();
The culprit here is that the return value of GetObject is not being checked and
may be null.
may be null.
Sometimes you may need to use the `valgrind memory checker
<http://valgrind.org>`_ for more subtle errors. Again, you invoke the use of

View File

@@ -69,7 +69,7 @@ This will output the following::
* HelloInterval: HELLO messages emission interval.
Bench-simulator
***************
@@ -95,18 +95,18 @@ Command-line Arguments
--prec: printed output precision [6]
You can change the Scheduler being benchmarked by passing
the appropriate flags, for example if you want to
the appropriate flags, for example if you want to
benchmark the CalendarScheduler pass `--cal` to the program.
The default total number of events, runs or population size
can be overridden by passing `--total=value`, `--runs=value`
and `--pop=value` respectively.
can be overridden by passing `--total=value`, `--runs=value`
and `--pop=value` respectively.
If you want to use event distribution which is stored in a file,
you can pass the file option by `--file=FILE_NAME`.
you can pass the file option by `--file=FILE_NAME`.
`--prec` can be used to change the output precision value and
`--debug` as the name suggests enables debugging.
`--debug` as the name suggests enables debugging.
Invocation
++++++++++
@@ -116,7 +116,7 @@ To run it, simply open the terminal and type
.. sourcecode:: bash
$ ./ns3 run bench-simulator
It will show something like this depending upon the scheduler being benchmarked::
ns3-dev-bench-simulator-debug:
@@ -138,7 +138,7 @@ Suppose we had to benchmark `CalendarScheduler` instead, we would have written
.. sourcecode:: bash
$ ./ns3 run "bench-simulator --cal"
And the output would look something like this::
ns3-dev-bench-simulator-debug:

View File

@@ -179,7 +179,7 @@ and we can see the edits with git diff:
@@ -1439,6 +1439,10 @@ TcpSocketBase::ReceivedAck (Ptr<Packet> packet, const TcpHeader& tcpHeader)
// There is a DupAck
++m_dupAckCount;
+ // I'm introducing a subtle bug!
+
+ m_tcb->m_cWnd = m_tcb->m_ssThresh;
@@ -250,7 +250,7 @@ Submit work for review
**********************
After you push your branch to origin, you can follow the instructions here https://docs.gitlab.com/ee/user/project/merge_requests/creating_merge_requests.html
to create a merge request.
to create a merge request.
GitLab CI (Continous Integration)
+++++++++++++++++++++++++++++++++
@@ -380,7 +380,7 @@ Make a commit of these files:
Next, make the following change to RELEASE_NOTES and commit it:
::
Availability
------------
-This release is not yet available.
@@ -390,7 +390,7 @@ Next, make the following change to RELEASE_NOTES and commit it:
$ git commit -m"Update availability in RELEASE_NOTES" RELEASE_NOTES
Finally, add a git annotated tag:
::
$ git tag -a 'ns-3.34' -m"ns-3.34 release"
@@ -466,7 +466,7 @@ are committed to ``master`` on ``nsnam/ns-3-dev.git`` as usual::
... (now fix a really important bug)
$ echo 'abc' >> a
$ git commit -m"Fix missing abc bug on file a" a
Now the tree looks like this::
@@ -543,10 +543,10 @@ We can next hand-edit these files to restore them to original state, so that::
The new log should show something like the below, with parallel git
history paths until the merge back again::
$ git log --graph --decorate --oneline --all
* 815ce6e (HEAD -> master) Merge branch 'ns-3.34.1-release'
|\
|\
| * 12a29ca (tag: ns-3.34.1) Update VERSION to 3.34.1
| * 21ebdbf Fix missing abc bug on file a
* | ee37d41 Fix missing abc bug on file a
@@ -554,8 +554,8 @@ history paths until the merge back again::
* | ba28d6d Add new feature
* | e50015a make some changes
* | fd075f6 Merge ns-3.34-release branch
|\ \
| |/
|\ \
| |/
| * 3fab3cf (tag: ns-3.34) Update availability in RELEASE_NOTES
| * c50aaf7 Update VERSION and documentation tags for ns-3.34 release
|/

View File

@@ -15,25 +15,25 @@ Working with gitlab-ci-local
.. _rootless mode : https://docs.docker.com/engine/security/rootless/
The ns-3 project repository is currently hosted in GitLab, which includes
`continuos integration (CI)`_ tools to automate build, tests, packaging and
distribution of software. The CI works based on jobs, that are defined
on YAML files.
`continuos integration (CI)`_ tools to automate build, tests, packaging and
distribution of software. The CI works based on jobs, that are defined
on YAML files.
The ns-3 GitLab CI files are located in ``ns-3-dev/utils/tests/``.
The ns-3 GitLab CI files are located in ``ns-3-dev/utils/tests/``.
The main GitLab CI file is ``gitlab-ci.yml``. The different jobs
are used to check if a multitude of compilers and package versions
are compatible with the current ns-3 build, which is why a build is
usually followed by a test run. Other CI jobs build and warn about
missing the documentation.
are compatible with the current ns-3 build, which is why a build is
usually followed by a test run. Other CI jobs build and warn about
missing the documentation.
The GitLab CI jobs are executed based on `pipelines`_ containing a
The GitLab CI jobs are executed based on `pipelines`_ containing a
sequence of job batches. Jobs within a batch can be executed in parallel.
These `pipelines`_ can be triggered manually, or scheduled to run automatically
per commit and/or based on a time period
(ns-3 has `daily and weekly pipelines`_ scheduled).
These `pipelines`_ can be triggered manually, or scheduled to run automatically
per commit and/or based on a time period
(ns-3 has `daily and weekly pipelines`_ scheduled).
The GitLab CI free tier is very slow, taking a lot of time to identify
issues during active merge request development.
issues during active merge request development.
Note: the free tier
now requires a credit card due to `crypto miners abuse`_.
@@ -42,18 +42,18 @@ now requires a credit card due to `crypto miners abuse`_.
configuration files locally, allowing for the debugging of CI settings
and pipelines without requiring pushes to test repositories or main
repositories that fill up the CI job queues with failed jobs due to
script errors.
script errors.
GitLab-CI-local relies on `Docker`_ to setup the environment to execute
the jobs.
the jobs.
Note: Docker is usually setup in root mode, requiring
Note: Docker is usually setup in root mode, requiring
frequent use of administrative permissions/sudo. However,
this is highly discouraged. You can configure Docker to run
in `rootless mode`_. From this point onwards, we assume Docker is configured
this is highly discouraged. You can configure Docker to run
in `rootless mode`_. From this point onwards, we assume Docker is configured
in `rootless mode`_.
After installing both `Docker`_ in `rootless mode`_ and `GitLab-CI-local`_,
After installing both `Docker`_ in `rootless mode`_ and `GitLab-CI-local`_,
the ns-3 jobs can be listed using the following command:
.. sourcecode:: bash
@@ -61,30 +61,30 @@ the ns-3 jobs can be listed using the following command:
~/ns-3-dev$ gitlab-ci-local --file ./utils/tests/gitlab-ci.yml --list
parsing and downloads finished in 226 ms
name description stage when allow_failure needs
weekly-build-ubuntu-18.04-debug build on_success false
...
weekly-build-ubuntu-18.04-debug build on_success false
weekly-build-clang-11-optimized build on_success false
pybindgen build on_success false
per-commit-compile-debug build on_success false
per-commit-compile-release build on_success false
per-commit-compile-optimized build on_success false
daily-test-debug test on_success false
daily-test-release test on_success false
daily-test-optimized test on_success false
daily-test-optimized-valgrind test on_success false
weekly-test-debug-valgrind test on_success false
weekly-test-release-valgrind test on_success false
weekly-test-optimized-valgrind test on_success false
weekly-test-takes-forever-optimized test on_success false
doxygen documentation on_success false
manual documentation on_success false
tutorial documentation on_success false
models documentation on_success false
...
To execute the ``per-commit-compile-release`` job, or any of the others listed above, use
the following command.
weekly-build-clang-11-optimized build on_success false
pybindgen build on_success false
per-commit-compile-debug build on_success false
per-commit-compile-release build on_success false
per-commit-compile-optimized build on_success false
daily-test-debug test on_success false
daily-test-release test on_success false
daily-test-optimized test on_success false
daily-test-optimized-valgrind test on_success false
weekly-test-debug-valgrind test on_success false
weekly-test-release-valgrind test on_success false
weekly-test-optimized-valgrind test on_success false
weekly-test-takes-forever-optimized test on_success false
doxygen documentation on_success false
manual documentation on_success false
tutorial documentation on_success false
models documentation on_success false
To execute the ``per-commit-compile-release`` job, or any of the others listed above, use
the following command.
.. sourcecode:: console
@@ -161,6 +161,6 @@ Then run the doxygen job again:
PASS doxygen
Artifacts built by the CI jobs will be stored in separate subfolders
based on the job name.
based on the job name.
``~/ns-3-dev/.gitlab-ci-local/artifacts/jobname``

View File

@@ -102,9 +102,9 @@ SOURCES = \
$(SRC)/sixlowpan/doc/sixlowpan.rst \
$(SRC)/lr-wpan/doc/lr-wpan.rst \
# list all model library figure files that need to be copied to
# list all model library figure files that need to be copied to
# $SOURCETEMP/figures. For each figure to be included in all
# documentation formats (html, latex...) the following formats are supported:
# documentation formats (html, latex...) the following formats are supported:
# 1) a single .dia file (preferred option, because it can be edited)
# 2) a single .eps file
# 3) both a .pdf and .png file
@@ -472,7 +472,7 @@ IMAGES_EPS = \
# rescale pdf figures as necessary
$(FIGURES)/testbed.pdf_width = 5in
$(FIGURES)/emulated-channel.pdf_width = 6in
$(FIGURES)/antenna-coordinate-system.pdf_width = 7cm
$(FIGURES)/antenna-coordinate-system.pdf_width = 7cm
$(FIGURES)/node.pdf_width = 5in
$(FIGURES)/packet.pdf_width = 4in
$(FIGURES)/buffer.pdf_width = 15cm
@@ -501,7 +501,7 @@ $(FIGURES)/fr-soft-frequency-reuse-scheme-v1.pdf_width = 8cm
$(FIGURES)/fr-soft-frequency-reuse-scheme-v2.pdf_width = 8cm
$(FIGURES)/fr-strict-frequency-reuse-scheme.pdf_width = 8cm
$(FIGURES)/ffr-distributed-scheme.pdf_width = 8cm
$(FIGURES)/lte-arch-enb-data.pdf_width = 6cm
$(FIGURES)/lte-arch-enb-data.pdf_width = 6cm
$(FIGURES)/lte-arch-enb-ctrl.pdf_width = 10cm
$(FIGURES)/lte-arch-ue-data.pdf_width = 6cm
$(FIGURES)/lte-arch-ue-ctrl.pdf_width = 10cm
@@ -561,7 +561,7 @@ BUILDDIR = build
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(SOURCETEMP)
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(SOURCETEMP)
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest
@@ -587,8 +587,8 @@ help:
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
copy-sources: $(SOURCES)
@mkdir -p $(SOURCETEMP)
@mkdir -p $(FIGURES)
@mkdir -p $(SOURCETEMP)
@mkdir -p $(FIGURES)
@cp -r -p $(SOURCES) $(SOURCETEMP)
@cp -r -p $(SOURCEFIGS) $(FIGURES)
@@ -601,7 +601,7 @@ frag: pickle
pushd $(BUILDDIR)/frag && ../../pickle-to-xml.py ../pickle/index.fpickle > navigation.xml && popd
cp -r $(BUILDDIR)/pickle/_images $(BUILDDIR)/frag
html: copy-sources $(IMAGES)
html: copy-sources $(IMAGES)
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

View File

@@ -24,7 +24,7 @@ import sys, os
# To change default code-block format in Latex to footnotesize (8pt)
# Tip from https://stackoverflow.com/questions/9899283/how-do-you-change-the-code-example-font-size-in-latex-pdf-output-with-sphinx/9955928
# Note: sizes are \footnotesize (8pt), \small (9pt), and \normalsize (10pt).
# Note: sizes are \footnotesize (8pt), \small (9pt), and \normalsize (10pt).
#from sphinx.highlighting import PygmentsBridge
#from pygments.formatters.latex import LatexFormatter
@@ -274,7 +274,7 @@ latex_elements = {
# (double backquotes) to either \footnotesize (8pt) or \small (9pt)
#
# See above to change the font size of verbatim code blocks
#
#
# 'preamble': '',
'preamble': u'''\\usepackage{amssymb}
\\definecolor{VerbatimBorderColor}{rgb}{1,1,1}

View File

@@ -24,7 +24,7 @@ One of the use-cases we want to support is that of a testbed. A concrete example
of an environment of this kind is the ORBIT testbed. ORBIT is a laboratory
emulator/field trial network arranged as a two dimensional grid of 400 802.11
radio nodes. We integrate with ORBIT by using their "imaging" process to load
and run |ns3| simulations on the ORBIT array. We can use our
and run |ns3| simulations on the ORBIT array. We can use our
``EmuFdNetDevice``
to drive the hardware in the testbed and we can accumulate results either using
the |ns3| tracing and logging functions, or the native ORBIT data gathering
@@ -36,7 +36,7 @@ A simulation of this kind is shown in the following figure:
.. _testbed:
.. figure:: figures/testbed.*
Example Implementation of Testbed Emulation.
You can see that there are separate hosts, each running a subset of a "global"

View File

@@ -3,12 +3,12 @@
ns-3 Model Library
==================
This is the *ns-3 Model Library* documentation. Primary documentation for the ns-3 project is
This is the *ns-3 Model Library* documentation. Primary documentation for the ns-3 project is
available in five forms:
* `ns-3 Doxygen <https://www.nsnam.org/doxygen/index.html>`_: Documentation of the public APIs of the simulator
* Tutorial, Manual, and Model Library *(this document)* for the `latest release <https://www.nsnam.org/documentation/latest/>`_ and `development tree <https://www.nsnam.org/documentation/development-tree/>`_
* `ns-3 wiki <https://www.nsnam.org/wiki>`_
* `ns-3 wiki <https://www.nsnam.org/wiki>`_
This document is written in `reStructuredText <http://docutils.sourceforge.net/rst.html>`_ for `Sphinx <http://sphinx.pocoo.org/>`_ and is maintained in the
``doc/models`` directory of ns-3's source code.

View File

@@ -12,22 +12,22 @@ It is important to distinguish between **modules** and **models**:
the modules (libraries) they need to conduct their simulation.
* |ns3| *models* are abstract representations of real-world objects,
protocols, devices, etc.
protocols, devices, etc.
An |ns3| module may consist of more than one model (for instance, the
:mod:`internet` module contains models for both TCP and UDP). In general,
ns-3 models do not span multiple software modules, however.
ns-3 models do not span multiple software modules, however.
This manual provides documentation about the models of |ns3|. It
This manual provides documentation about the models of |ns3|. It
complements two other sources of documentation concerning models:
* the model APIs are documented, from a programming perspective, using
`Doxygen <http://www.doxygen.org>`_. Doxygen for ns-3 models is available
`on the project web server <http://www.nsnam.org/docs/doxygen/index.html>`_.
`on the project web server <http://www.nsnam.org/docs/doxygen/index.html>`_.
* the |ns3| core is documented in the developer's manual. |ns3| models make
use of the facilities of the core, such as attributes, default values,
random numbers, test frameworks, etc. Consult the
use of the facilities of the core, such as attributes, default values,
random numbers, test frameworks, etc. Consult the
`main web site <http://www.nsnam.org>`_ to find copies of the manual.
Finally, additional documentation about various aspects of |ns3| may
@@ -35,7 +35,7 @@ exist on the `project wiki <http://www.nsnam.org/wiki>`_.
A sample outline of how to write model library documentation can be
found by executing the ``create-module.py`` program and looking at the
template created in the file ``new-module/doc/new-module.rst``.
template created in the file ``new-module/doc/new-module.rst``.
.. sourcecode:: bash
@@ -46,8 +46,8 @@ The remainder of this document is organized alphabetically by module name.
If you are new to |ns3|, you might first want to read below about the network
module, which contains some fundamental models for the simulator.
The packet model, models for different address formats, and abstract
base classes for objects such as nodes, net devices, channels, sockets, and
The packet model, models for different address formats, and abstract
base classes for objects such as nodes, net devices, channels, sockets, and
applications are discussed there.

View File

@@ -29,7 +29,7 @@
# If both a and b are true, we're building for public urls.
# (The newer update-docs script (through ns3) sets
# NS3_WWW_URLS=public explicitly.)
#
#
# The repo version is either a tag name or a commit (short) id.
#
# If we're building for nsnam.org, and at a tag, we use just
@@ -64,7 +64,7 @@ function usage
-n pretend we are on nsnam.org
-d pretend we are in the automated build directory
-t pretend we are at a repo tag
EOF
exit 1
}
@@ -172,24 +172,24 @@ if [ $PUBLIC -eq 1 ]; then
echo "// public urls" >> $outf
# Generate URL relative to server root
echo "var ns3_host = \"/\";" >> $outf
if [ $distance -eq 1 ]; then
# Like "http://www.nsnam.org/ns-3-14"
vers_href="https://www.nsnam.org/ns-3-${version#ns-3.}"
vers_href="<a href=\\\"$vers_href\\\">$version$dirty</a>"
echo "var ns3_version = \"Release $vers_href\";" >> $outf
echo "var ns3_release = \"docs/release/${version#ns-}/\";" >> $outf
else
vers_href="https://gitlab.com/nsnam/ns-3-dev/commits/$version"
version="<a href=\\\"$vers_href\\\">$version$dirty</a>"
echo "var ns3_version = \"ns-3-dev @ $version\";" >> $outf
echo "var ns3_release = \"docs/\";" >> $outf
fi
echo "var ns3_local = \"\";" >> $outf
echo "var ns3_doxy = \"doxygen/\";" >> $outf
else
repo=`basename $PWD`
echo "// ns3_version.js: automatically generated" > $outf

View File

@@ -11,15 +11,15 @@
{% set reldelim1 = '<span class="navelem">&nbsp;</span>' %}
{# set reldelim1 = ' @' #}
{%- block extrahead %}
{%- if theme_customstylesheet %}
<link rel="stylesheet" type="text/css"
href="{{ pathto('_static/'+theme_customstylesheet, 1) }}" />
{%- endif %}
{%- if theme_favicon %}
<link rel="icon" type="image/ico"
<link rel="icon" type="image/ico"
href="{{ pathto('_static/'+theme_favicon, 1) }}" />
{%- endif %}
@@ -27,9 +27,9 @@
<script type="text/javascript" src="_static/ns3_version.js"></script>
<script type="text/javascript">var ns3_builder="{{builder}}";</script>
<script type="text/javascript" src="_static/ns3_links.js"></script>
{% endblock %}
{% block header %}
<div id="titlearea">
<table cellspacing="0" cellpadding="0" width="100%">
@@ -56,11 +56,11 @@
>&nbsp;&nbsp;Home</a>
</li>
<li><span
onmouseover="mopen('mTuts')"
onmouseover="mopen('mTuts')"
onmouseout="mclosetime()"
>Tutorials &nbsp;&#x25BC;</span>
<div id="mTuts"
onmouseover="mcancelclosetime()"
<div id="mTuts"
onmouseover="mcancelclosetime()"
onmouseout="mclosetime()">
<a id="ns3_tut"
href="/docs/tutorial/html/index.html"
@@ -68,11 +68,11 @@
</div>
</li>
<li><span
onmouseover="mopen('mDocs')"
onmouseover="mopen('mDocs')"
onmouseout="mclosetime()"
>Documentation &nbsp;&#x25BC;</span>
<div id="mDocs"
onmouseover="mcancelclosetime()"
onmouseover="mcancelclosetime()"
onmouseout="mclosetime()">
<a id="ns3_man"
href="/docs/manual/html/index.html"
@@ -89,11 +89,11 @@
</div>
</li>
<li><span
onmouseover="mopen('mDev')"
onmouseover="mopen('mDev')"
onmouseout="mclosetime()"
>Development &nbsp;&#x25BC;</span>
<div id="mDev"
onmouseover="mcancelclosetime()"
onmouseover="mcancelclosetime()"
onmouseout="mclosetime()">
<a id="ns3_api"
href="/docs/doxygen/html/index.html"
@@ -119,12 +119,12 @@
<script type="text/javascript">ns3_write_links()</script>
</div>
{% endblock %}
{% block rootrellink %}
<li class="navelem"><a href="{{ theme_homepage }}">{{ theme_projectname }}</a><span class="navelem">&nbsp;</span></li>
{{ super() }}
{% endblock %}
{% if theme_collapsiblesidebar|tobool %}
{% set script_files = script_files + ['_static/sidebar.js'] %}
{% endif %}

View File

@@ -52,11 +52,11 @@ $extrastylesheet
>&nbsp;&nbsp;Home</a>
</li>
<li><span
onmouseover="mopen('mTuts')"
onmouseover="mopen('mTuts')"
onmouseout="mclosetime()"
>Tutorials &nbsp;&#x25BC;</span>
<div id="mTuts"
onmouseover="mcancelclosetime()"
<div id="mTuts"
onmouseover="mcancelclosetime()"
onmouseout="mclosetime()">
<a id="ns3_tut"
href="/docs/tutorial/html/index.html"
@@ -64,11 +64,11 @@ $extrastylesheet
</div>
</li>
<li><span
onmouseover="mopen('mDocs')"
onmouseover="mopen('mDocs')"
onmouseout="mclosetime()"
>Documentation &nbsp;&#x25BC;</span>
<div id="mDocs"
onmouseover="mcancelclosetime()"
onmouseover="mcancelclosetime()"
onmouseout="mclosetime()">
<a id="ns3_man"
href="/docs/manual/html/index.html"
@@ -85,11 +85,11 @@ $extrastylesheet
</div>
</li>
<li><span
onmouseover="mopen('mDev')"
onmouseover="mopen('mDev')"
onmouseout="mclosetime()"
>Development &nbsp;&#x25BC;</span>
<div id="mDev"
onmouseover="mcancelclosetime()"
onmouseover="mcancelclosetime()"
onmouseout="mclosetime()">
<a id="ns3_api"
href="/docs/doxygen/html/index.html"

View File

@@ -4,19 +4,19 @@
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//
// (1) Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// notice, this list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in
// the documentation and/or other materials provided with the
// distribution.
//
// distribution.
//
// (3) The name of the author may not be used to
// endorse or promote products derived from this software without
// specific prior written permission.
//
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@@ -27,8 +27,8 @@
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
// IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// POSSIBILITY OF SUCH DAMAGE.
//
var timeout = 250;
var closetimer = 0;
@@ -36,13 +36,13 @@ var ddmenuitem = 0;
// open hidden layer
function mopen(id)
{
{
// cancel close timer
mcancelclosetime();
// close old layer
if(ddmenuitem) ddmenuitem.style.visibility = 'hidden';
// get new layer and show it
ddmenuitem = document.getElementById(id);
ddmenuitem.style.visibility = 'visible';
@@ -71,4 +71,4 @@ function mcancelclosetime()
}
// close layer when click-out
document.onclick = mclose;
document.onclick = mclose;

View File

@@ -130,7 +130,7 @@ div.sphinxsidebar a {
/*
ns-3 Main-menu, based on WordPress site
Drop down menu based on Simple Javascript Drop-Down Menu v2.0
http://javascript-array.com/scripts/simple_drop_down_menu/
*/
@@ -168,7 +168,7 @@ div.sphinxsidebar a {
color:#ffffff;
float:left;
font-family: Aldo, Tahoma, Arial, sans-serif;
font-size: 14px;
font-size: 14px;
/* height:40px; */
margin:0px 0px 0px 0px;
/* overflow:hidden; */
@@ -184,7 +184,7 @@ div.sphinxsidebar a {
font-weight: normal; /* default anchors are bold */
text-decoration:none; /* default anchors are underlined */
}
#ns3-menu .menu ul li a:hover {
color:#cadc48;
text-decoration:none; /* don't add underline on hover */

View File

@@ -47,7 +47,7 @@ An example commit (July 14, 2021) to review is 9df8ef4b2.
new release 'ns-3.x' will not be yet available as a tagged release, so
the 'ns-3.x' module may need some indirection to fetch ns-3-dev in its place.
2.3) Check out a clean ns-3-dev somewhere using ns-3-allinone
2.3) Check out a clean ns-3-dev somewhere using ns-3-allinone
- git clone https://gitlab.com/nsnam/ns-3-allinone.git
- cd ns-3-allinone
- ./download.py
@@ -58,7 +58,7 @@ the 'ns-3.x' module may need some indirection to fetch ns-3-dev in its place.
- cd ..
- ./dist.py
This should yield a compressed tarfile, such as: ns-allinone-3.31.rc1.tar.bz2
This should yield a compressed tarfile, such as: ns-allinone-3.31.rc1.tar.bz2
Test this, and when satisfied, upload it to
www.nsnam.org:/var/www/html/releases/ (with apache:apache file ownership)
@@ -79,7 +79,7 @@ we will work off of a release branch.
At this point, you are ready for final packaging and repository/site work
We'll refer to the release number as "X" or "x" below.
We'll refer to the release number as "X" or "x" below.
creating the distribution tarball
---------------------------------
@@ -121,7 +121,7 @@ bake commit ba47854c (July 14, 2021).
- check that ns-3-allinone build.py works
- check that bake ns-3.x and ns-allinone-3.x targets work
5. upload "ns-allinone-3.x.tar.bz2" to the /var/www/html/releases/ directory on
5. upload "ns-allinone-3.x.tar.bz2" to the /var/www/html/releases/ directory on
the www.nsnam.org server
- scp ns-allinone-3.x.tar.bz2 www.nsnam.org:~
- ssh www.nsnam.org
@@ -148,8 +148,8 @@ preparing the documentation
----------------------------
1. If final release, build release documentation
- sudo bash; su nsnam; cd /home/nsnam/bin
- ./update-docs -c -R -r ns-3.x
- sudo bash; su nsnam; cd /home/nsnam/bin
- ./update-docs -c -R -r ns-3.x
2. Check if these new files are available on the website; check that the
headers all say 'ns-3.x release' in the version, and that all links work
@@ -169,7 +169,7 @@ https://www.nsnam.org/releases/ns-3-x.
release (there are two such pages, one under Releases and one under
Documentation)
4. Create a blog entry to announce release
4. Create a blog entry to announce release
ns-3 wiki edits
---------------
@@ -200,8 +200,8 @@ start collecting inputs for the ns-3.(x+1) release.
The project may decide to make incremental, bug-fix releases from
time to time, with a minor version number (e.g. ns-3.36.1). To do
this, changesets may be cherry-picked from ns-3-dev and added to
ns-3.x branch. Do not move over changesets that pertain to
adding new features, but documentation fixes and bug fixes are good
ns-3.x branch. Do not move over changesets that pertain to
adding new features, but documentation fixes and bug fixes are good
changesets to make available in a minor release. The same steps
above for making a release are generally followed; the documentation
in the manual about working with Git as a maintainer provides the

View File

@@ -20,12 +20,12 @@ def dump_pickles(out, dirname, filename, path):
out.write(' <page url="%s">\n' % path)
out.write(' <fragment>%s.frag</fragment>\n' % data['current_page_name'])
if data['prev'] is not None:
out.write(' <prev url="%s">%s</prev>\n' %
(os.path.normpath(os.path.join(path, data['prev']['link'])),
out.write(' <prev url="%s">%s</prev>\n' %
(os.path.normpath(os.path.join(path, data['prev']['link'])),
data['prev']['title']))
if data['next'] is not None:
out.write(' <next url="%s">%s</next>\n' %
(os.path.normpath(os.path.join(path, data['next']['link'])),
out.write(' <next url="%s">%s</next>\n' %
(os.path.normpath(os.path.join(path, data['next']['link'])),
data['next']['title']))
out.write(' </page>\n')
f.close()

View File

@@ -9,26 +9,26 @@ Building Topologies
Building a Bus Network Topology
*******************************
In this section we are going to expand our mastery of |ns3| network
In this section we are going to expand our mastery of |ns3| network
devices and channels to cover an example of a bus network. |ns3|
provides a net device and channel we call CSMA (Carrier Sense Multiple Access).
The |ns3| CSMA device models a simple network in the spirit of
Ethernet. A real Ethernet uses CSMA/CD (Carrier Sense Multiple Access with
Collision Detection) scheme with exponentially increasing backoff to contend
for the shared transmission medium. The |ns3| CSMA device and
The |ns3| CSMA device models a simple network in the spirit of
Ethernet. A real Ethernet uses CSMA/CD (Carrier Sense Multiple Access with
Collision Detection) scheme with exponentially increasing backoff to contend
for the shared transmission medium. The |ns3| CSMA device and
channel models only a subset of this.
Just as we have seen point-to-point topology helper objects when constructing
point-to-point topologies, we will see equivalent CSMA topology helpers in
this section. The appearance and operation of these helpers should look
this section. The appearance and operation of these helpers should look
quite familiar to you.
We provide an example script in our ``examples/tutorial`` directory. This script
builds on the ``first.cc`` script and adds a CSMA network to the
point-to-point simulation we've already considered. Go ahead and open
builds on the ``first.cc`` script and adds a CSMA network to the
point-to-point simulation we've already considered. Go ahead and open
``examples/tutorial/second.cc`` in your favorite editor. You will have already seen
enough |ns3| code to understand most of what is going on in this
enough |ns3| code to understand most of what is going on in this
example, but we will go over the entire script and examine some of the output.
Just as in the ``first.cc`` example (and in all ns-3 examples) the file
@@ -53,9 +53,9 @@ find a similar "drawing" in most of our examples.
In this case, you can see that we are going to extend our point-to-point
example (the link between the nodes n0 and n1 below) by hanging a bus network
off of the right side. Notice that this is the default network topology
off of the right side. Notice that this is the default network topology
since you can actually vary the number of nodes created on the LAN. If you
set nCsma to one, there will be a total of two nodes on the LAN (CSMA
set nCsma to one, there will be a total of two nodes on the LAN (CSMA
channel) --- one required node and one "extra" node. By default there are
three "extra" nodes as seen below:
@@ -73,9 +73,9 @@ Then the ns-3 namespace is ``used`` and a logging component is defined.
This is all just as it was in ``first.cc``, so there is nothing new yet.
::
using namespace ns3;
NS_LOG_COMPONENT_DEFINE ("SecondScriptExample");
The main program begins with a slightly different twist. We use a verbose
@@ -112,7 +112,7 @@ entirely comfortable with the following code at this point in the tutorial.
nCsma = nCsma == 0 ? 1 : nCsma;
The next step is to create two nodes that we will connect via the
The next step is to create two nodes that we will connect via the
point-to-point link. The ``NodeContainer`` is used to do this just as was
done in ``first.cc``.
@@ -123,7 +123,7 @@ done in ``first.cc``.
Next, we declare another ``NodeContainer`` to hold the nodes that will be
part of the bus (CSMA) network. First, we just instantiate the container
object itself.
object itself.
::
@@ -133,9 +133,9 @@ object itself.
The next line of code ``Gets`` the first node (as in having an index of one)
from the point-to-point node container and adds it to the container of nodes
that will get CSMA devices. The node in question is going to end up with a
point-to-point device *and* a CSMA device. We then create a number of
"extra" nodes that compose the remainder of the CSMA network. Since we
that will get CSMA devices. The node in question is going to end up with a
point-to-point device *and* a CSMA device. We then create a number of
"extra" nodes that compose the remainder of the CSMA network. Since we
already have one node in the CSMA network -- the one that will have both a
point-to-point and CSMA net device, the number of "extra" nodes means the
number nodes you desire in the CSMA section minus one.
@@ -154,20 +154,20 @@ the helper and a two millisecond delay on channels created by the helper.
NetDeviceContainer p2pDevices;
p2pDevices = pointToPoint.Install (p2pNodes);
We then instantiate a ``NetDeviceContainer`` to keep track of the
point-to-point net devices and we ``Install`` devices on the
We then instantiate a ``NetDeviceContainer`` to keep track of the
point-to-point net devices and we ``Install`` devices on the
point-to-point nodes.
We mentioned above that you were going to see a helper for CSMA devices and
channels, and the next lines introduce them. The ``CsmaHelper`` works just
like a ``PointToPointHelper``, but it creates and connects CSMA devices and
channels. In the case of a CSMA device and channel pair, notice that the data
rate is specified by a *channel* ``Attribute`` instead of a device
rate is specified by a *channel* ``Attribute`` instead of a device
``Attribute``. This is because a real CSMA network does not allow one to mix,
for example, 10Base-T and 100Base-T devices on a given channel. We first set
for example, 10Base-T and 100Base-T devices on a given channel. We first set
the data rate to 100 megabits per second, and then set the speed-of-light delay
of the channel to 6560 nano-seconds (arbitrarily chosen as 1 nanosecond per foot
over a 2000 meter segment). Notice that you can set an ``Attribute`` using
over a 2000 meter segment). Notice that you can set an ``Attribute`` using
its native data type.
::
@@ -180,8 +180,8 @@ its native data type.
csmaDevices = csma.Install (csmaNodes);
Just as we created a ``NetDeviceContainer`` to hold the devices created by
the ``PointToPointHelper`` we create a ``NetDeviceContainer`` to hold
the devices created by our ``CsmaHelper``. We call the ``Install``
the ``PointToPointHelper`` we create a ``NetDeviceContainer`` to hold
the devices created by our ``CsmaHelper``. We call the ``Install``
method of the ``CsmaHelper`` to install the devices into the nodes of the
``csmaNodes NodeContainer``.
@@ -196,11 +196,11 @@ stacks present. Just as in the ``first.cc`` script, we will use the
stack.Install (csmaNodes);
Recall that we took one of the nodes from the ``p2pNodes`` container and
added it to the ``csmaNodes`` container. Thus we only need to install
added it to the ``csmaNodes`` container. Thus we only need to install
the stacks on the remaining ``p2pNodes`` node, and all of the nodes in the
``csmaNodes`` container to cover all of the nodes in the simulation.
Just as in the ``first.cc`` example script, we are going to use the
Just as in the ``first.cc`` example script, we are going to use the
``Ipv4AddressHelper`` to assign IP addresses to our device interfaces.
First we use the network 10.1.1.0 to create the two addresses needed for our
two point-to-point devices.
@@ -215,11 +215,11 @@ two point-to-point devices.
Recall that we save the created interfaces in a container to make it easy to
pull out addressing information later for use in setting up the applications.
We now need to assign IP addresses to our CSMA device interfaces. The
We now need to assign IP addresses to our CSMA device interfaces. The
operation works just as it did for the point-to-point case, except we now
are performing the operation on a container that has a variable number of
CSMA devices --- remember we made the number of CSMA devices changeable by
command line argument. The CSMA devices will be associated with IP addresses
are performing the operation on a container that has a variable number of
CSMA devices --- remember we made the number of CSMA devices changeable by
command line argument. The CSMA devices will be associated with IP addresses
from network number 10.1.2.0 in this case, as seen below.
::
@@ -229,14 +229,14 @@ from network number 10.1.2.0 in this case, as seen below.
csmaInterfaces = address.Assign (csmaDevices);
Now we have a topology built, but we need applications. This section is
going to be fundamentally similar to the applications section of
``first.cc`` but we are going to instantiate the server on one of the
nodes that has a CSMA device and the client on the node having only a
going to be fundamentally similar to the applications section of
``first.cc`` but we are going to instantiate the server on one of the
nodes that has a CSMA device and the client on the node having only a
point-to-point device.
First, we set up the echo server. We create a ``UdpEchoServerHelper`` and
provide a required ``Attribute`` value to the constructor which is the server
port number. Recall that this port can be changed later using the
port number. Recall that this port can be changed later using the
``SetAttribute`` method if desired, but we require it to be provided to
the constructor.
@@ -248,21 +248,21 @@ the constructor.
serverApps.Start (Seconds (1.0));
serverApps.Stop (Seconds (10.0));
Recall that the ``csmaNodes NodeContainer`` contains one of the
nodes created for the point-to-point network and ``nCsma`` "extra" nodes.
Recall that the ``csmaNodes NodeContainer`` contains one of the
nodes created for the point-to-point network and ``nCsma`` "extra" nodes.
What we want to get at is the last of the "extra" nodes. The zeroth entry of
the ``csmaNodes`` container will be the point-to-point node. The easy
way to think of this, then, is if we create one "extra" CSMA node, then it
will be at index one of the ``csmaNodes`` container. By induction,
if we create ``nCsma`` "extra" nodes the last one will be at index
``nCsma``. You see this exhibited in the ``Get`` of the first line of
if we create ``nCsma`` "extra" nodes the last one will be at index
``nCsma``. You see this exhibited in the ``Get`` of the first line of
code.
The client application is set up exactly as we did in the ``first.cc``
example script. Again, we provide required ``Attributes`` to the
example script. Again, we provide required ``Attributes`` to the
``UdpEchoClientHelper`` in the constructor (in this case the remote address
and port). We tell the client to send packets to the server we just installed
on the last of the "extra" CSMA nodes. We install the client on the
on the last of the "extra" CSMA nodes. We install the client on the
leftmost point-to-point node seen in the topology illustration.
::
@@ -276,17 +276,17 @@ leftmost point-to-point node seen in the topology illustration.
clientApps.Start (Seconds (2.0));
clientApps.Stop (Seconds (10.0));
Since we have actually built an internetwork here, we need some form of
Since we have actually built an internetwork here, we need some form of
internetwork routing. |ns3| provides what we call global routing to
help you out. Global routing takes advantage of the fact that the entire
help you out. Global routing takes advantage of the fact that the entire
internetwork is accessible in the simulation and runs through the all of the
nodes created for the simulation --- it does the hard work of setting up routing
nodes created for the simulation --- it does the hard work of setting up routing
for you without having to configure routers.
Basically, what happens is that each node behaves as if it were an OSPF router
that communicates instantly and magically with all other routers behind the
scenes. Each node generates link advertisements and communicates them
directly to a global route manager which uses this global information to
scenes. Each node generates link advertisements and communicates them
directly to a global route manager which uses this global information to
construct the routing tables for each node. Setting up this form of routing
is a one-liner:
@@ -294,7 +294,7 @@ is a one-liner:
Ipv4GlobalRoutingHelper::PopulateRoutingTables ();
Next we enable pcap tracing. The first line of code to enable pcap tracing
Next we enable pcap tracing. The first line of code to enable pcap tracing
in the point-to-point helper should be familiar to you by now. The second
line enables pcap tracing in the CSMA helper and there is an extra parameter
you haven't encountered yet.
@@ -304,23 +304,23 @@ you haven't encountered yet.
pointToPoint.EnablePcapAll ("second");
csma.EnablePcap ("second", csmaDevices.Get (1), true);
The CSMA network is a multi-point-to-point network. This means that there
can (and are in this case) multiple endpoints on a shared medium. Each of
The CSMA network is a multi-point-to-point network. This means that there
can (and are in this case) multiple endpoints on a shared medium. Each of
these endpoints has a net device associated with it. There are two basic
alternatives to gathering trace information from such a network. One way
alternatives to gathering trace information from such a network. One way
is to create a trace file for each net device and store only the packets
that are emitted or consumed by that net device. Another way is to pick
that are emitted or consumed by that net device. Another way is to pick
one of the devices and place it in promiscuous mode. That single device
then "sniffs" the network for all packets and stores them in a single
pcap file. This is how ``tcpdump``, for example, works. That final
parameter tells the CSMA helper whether or not to arrange to capture
packets in promiscuous mode.
pcap file. This is how ``tcpdump``, for example, works. That final
parameter tells the CSMA helper whether or not to arrange to capture
packets in promiscuous mode.
In this example, we are going to select one of the devices on the CSMA
network and ask it to perform a promiscuous sniff of the network, thereby
emulating what ``tcpdump`` would do. If you were on a Linux machine
you might do something like ``tcpdump -i eth0`` to get the trace.
In this case, we specify the device using ``csmaDevices.Get(1)``,
you might do something like ``tcpdump -i eth0`` to get the trace.
In this case, we specify the device using ``csmaDevices.Get(1)``,
which selects the first device in the container. Setting the final
parameter to true enables promiscuous captures.
@@ -334,7 +334,7 @@ the ``first.cc`` example.
return 0;
}
In order to run this example, copy the ``second.cc`` example script into
In order to run this example, copy the ``second.cc`` example script into
the scratch directory and use ns3 to build just as you did with
the ``first.cc`` example. If you are in the top-level directory of the
repository you just type,
@@ -346,7 +346,7 @@ repository you just type,
Warning: We use the file ``second.cc`` as one of our regression tests to
verify that it works exactly as we think it should in order to make your
tutorial experience a positive one. This means that an executable named
tutorial experience a positive one. This means that an executable named
``second`` already exists in the project. To avoid any confusion
about what you are executing, please do the renaming to ``mysecond.cc``
suggested above.
@@ -360,7 +360,7 @@ run the program.
$ export NS_LOG=
$ ./ns3 run scratch/mysecond
Since we have set up the UDP echo applications to log just as we did in
Since we have set up the UDP echo applications to log just as we did in
``first.cc``, you will see similar output when you run the script.
.. sourcecode:: text
@@ -372,37 +372,37 @@ Since we have set up the UDP echo applications to log just as we did in
Received 1024 bytes from 10.1.1.1
Received 1024 bytes from 10.1.2.4
Recall that the first message, "``Sent 1024 bytes to 10.1.2.4``," is the
Recall that the first message, "``Sent 1024 bytes to 10.1.2.4``," is the
UDP echo client sending a packet to the server. In this case, the server
is on a different network (10.1.2.0). The second message, "``Received 1024
is on a different network (10.1.2.0). The second message, "``Received 1024
bytes from 10.1.1.1``," is from the UDP echo server, generated when it receives
the echo packet. The final message, "``Received 1024 bytes from 10.1.2.4``,"
is from the echo client, indicating that it has received its echo back from
the server.
If you now go and look in the top level directory, you will find three trace
If you now go and look in the top level directory, you will find three trace
files:
.. sourcecode:: text
second-0-0.pcap second-1-0.pcap second-2-0.pcap
Let's take a moment to look at the naming of these files. They all have the
Let's take a moment to look at the naming of these files. They all have the
same form, ``<name>-<node>-<device>.pcap``. For example, the first file
in the listing is ``second-0-0.pcap`` which is the pcap trace from node
zero, device zero. This is the point-to-point net device on node zero. The
in the listing is ``second-0-0.pcap`` which is the pcap trace from node
zero, device zero. This is the point-to-point net device on node zero. The
file ``second-1-0.pcap`` is the pcap trace for device zero on node one,
also a point-to-point net device; and the file ``second-2-0.pcap`` is the
pcap trace for device zero on node two.
If you refer back to the topology illustration at the start of the section,
If you refer back to the topology illustration at the start of the section,
you will see that node zero is the leftmost node of the point-to-point link
and node one is the node that has both a point-to-point device and a CSMA
and node one is the node that has both a point-to-point device and a CSMA
device. You will see that node two is the first "extra" node on the CSMA
network and its device zero was selected as the device to capture the
network and its device zero was selected as the device to capture the
promiscuous-mode trace.
Now, let's follow the echo packet through the internetwork. First, do a
Now, let's follow the echo packet through the internetwork. First, do a
tcpdump of the trace file for the leftmost point-to-point node --- node zero.
.. sourcecode:: bash
@@ -418,10 +418,10 @@ You should see the contents of the pcap file displayed:
2.017607 IP 10.1.2.4.9 > 10.1.1.1.49153: UDP, length 1024
The first line of the dump indicates that the link type is PPP (point-to-point)
which we expect. You then see the echo packet leaving node zero via the
which we expect. You then see the echo packet leaving node zero via the
device associated with IP address 10.1.1.1 headed for IP address
10.1.2.4 (the rightmost CSMA node). This packet will move over the
point-to-point link and be received by the point-to-point net device on node
10.1.2.4 (the rightmost CSMA node). This packet will move over the
point-to-point link and be received by the point-to-point net device on node
one. Let's take a look:
.. sourcecode:: bash
@@ -438,10 +438,10 @@ link:
2.013921 IP 10.1.2.4.9 > 10.1.1.1.49153: UDP, length 1024
Here we see that the link type is also PPP as we would expect. You see the
packet from IP address 10.1.1.1 (that was sent at 2.000000 seconds) headed
toward IP address 10.1.2.4 appear on this interface. Now, internally to this
node, the packet will be forwarded to the CSMA interface and we should see it
pop out on that device headed for its ultimate destination.
packet from IP address 10.1.1.1 (that was sent at 2.000000 seconds) headed
toward IP address 10.1.2.4 appear on this interface. Now, internally to this
node, the packet will be forwarded to the CSMA interface and we should see it
pop out on that device headed for its ultimate destination.
Remember that we selected node 2 as the promiscuous sniffer node for the CSMA
network so let's then look at second-2-0.pcap and see if its there.
@@ -468,7 +468,7 @@ Node one knows it needs to send the packet to IP address 10.1.2.4, but it
doesn't know the MAC address of the corresponding node. It broadcasts on the
CSMA network (ff:ff:ff:ff:ff:ff) asking for the device that has IP address
10.1.2.4. In this case, the rightmost node replies saying it is at MAC address
00:00:00:00:00:06. Note that node two is not directly involved in this
00:00:00:00:00:06. Note that node two is not directly involved in this
exchange, but is sniffing the network and reporting all of the traffic it sees.
This exchange is seen in the following lines,
@@ -479,7 +479,7 @@ This exchange is seen in the following lines,
2.007710 ARP, Reply 10.1.2.4 is-at 00:00:00:00:00:06, length 50
Then node one, device one goes ahead and sends the echo packet to the UDP echo
server at IP address 10.1.2.4.
server at IP address 10.1.2.4.
.. sourcecode:: text
@@ -535,7 +535,7 @@ and see that the echoed packet arrives back at the source at 2.017607 seconds,
Finally, recall that we added the ability to control the number of CSMA devices
in the simulation by command line argument. You can change this argument in
the same way as when we looked at changing the number of packets echoed in the
``first.cc`` example. Try running the program with the number of "extra"
``first.cc`` example. Try running the program with the number of "extra"
devices set to four:
.. sourcecode:: bash
@@ -559,12 +559,12 @@ nodes, which is 10.1.2.5 instead of the default case, 10.1.2.4.
It is possible that you may not be satisfied with a trace file generated by
a bystander in the CSMA network. You may really want to get a trace from
a single device and you may not be interested in any other traffic on the
a single device and you may not be interested in any other traffic on the
network. You can do this fairly easily.
Let's take a look at ``scratch/mysecond.cc`` and add that code enabling us
to be more specific. ``ns-3`` helpers provide methods that take a node
number and device number as parameters. Go ahead and replace the
number and device number as parameters. Go ahead and replace the
``EnablePcap`` calls with the calls below.
::
@@ -577,30 +577,30 @@ We know that we want to create a pcap file with the base name "second" and
we also know that the device of interest in both cases is going to be zero,
so those parameters are not really interesting.
In order to get the node number, you have two choices: first, nodes are
numbered in a monotonically increasing fashion starting from zero in the
order in which you created them. One way to get a node number is to figure
this number out "manually" by contemplating the order of node creation.
If you take a look at the network topology illustration at the beginning of
the file, we did this for you and you can see that the last CSMA node is
going to be node number ``nCsma + 1``. This approach can become
annoyingly difficult in larger simulations.
In order to get the node number, you have two choices: first, nodes are
numbered in a monotonically increasing fashion starting from zero in the
order in which you created them. One way to get a node number is to figure
this number out "manually" by contemplating the order of node creation.
If you take a look at the network topology illustration at the beginning of
the file, we did this for you and you can see that the last CSMA node is
going to be node number ``nCsma + 1``. This approach can become
annoyingly difficult in larger simulations.
An alternate way, which we use here, is to realize that the
``NodeContainers`` contain pointers to |ns3| ``Node`` Objects.
The ``Node`` Object has a method called ``GetId`` which will return that
node's ID, which is the node number we seek. Let's go take a look at the
Doxygen for the ``Node`` and locate that method, which is further down in
node's ID, which is the node number we seek. Let's go take a look at the
Doxygen for the ``Node`` and locate that method, which is further down in
the |ns3| core code than we've seen so far; but sometimes you have to
search diligently for useful things.
Go to the Doxygen documentation for your release (recall that you can find it
on the project web site). You can get to the ``Node`` documentation by
looking through at the "Classes" tab and scrolling down the "Class List"
looking through at the "Classes" tab and scrolling down the "Class List"
until you find ``ns3::Node``. Select ``ns3::Node`` and you will be taken
to the documentation for the ``Node`` class. If you now scroll down to the
``GetId`` method and select it, you will be taken to the detailed
documentation for the method. Using the ``GetId`` method can make
``GetId`` method and select it, you will be taken to the detailed
documentation for the method. Using the ``GetId`` method can make
determining node numbers much easier in complex topologies.
Let's clear the old trace files out of the top-level directory to avoid confusion
@@ -639,8 +639,8 @@ list the pcap files in the top level directory you will see,
The trace file ``second-0-0.pcap`` is the "leftmost" point-to-point device
which is the echo packet source. The file ``second-101-0.pcap`` corresponds
to the rightmost CSMA device which is where the echo server resides. You may
have noticed that the final parameter on the call to enable pcap tracing on the
to the rightmost CSMA device which is where the echo server resides. You may
have noticed that the final parameter on the call to enable pcap tracing on the
echo server node was false. This means that the trace gathered on that node
was in non-promiscuous mode.
@@ -684,22 +684,22 @@ Models, Attributes and Reality
******************************
This is a convenient place to make a small excursion and make an important
point. It may or may not be obvious to you, but whenever one is using a
point. It may or may not be obvious to you, but whenever one is using a
simulation, it is important to understand exactly what is being modeled and
what is not. It is tempting, for example, to think of the CSMA devices
and channels used in the previous section as if they were real Ethernet
devices; and to expect a simulation result to directly reflect what will
happen in a real Ethernet. This is not the case.
what is not. It is tempting, for example, to think of the CSMA devices
and channels used in the previous section as if they were real Ethernet
devices; and to expect a simulation result to directly reflect what will
happen in a real Ethernet. This is not the case.
A model is, by definition, an abstraction of reality. It is ultimately the
A model is, by definition, an abstraction of reality. It is ultimately the
responsibility of the simulation script author to determine the so-called
"range of accuracy" and "domain of applicability" of the simulation as
a whole, and therefore its constituent parts.
In some cases, like ``Csma``, it can be fairly easy to determine what is
*not* modeled. By reading the model description (``csma.h``) you
In some cases, like ``Csma``, it can be fairly easy to determine what is
*not* modeled. By reading the model description (``csma.h``) you
can find that there is no collision detection in the CSMA model and decide
on how applicable its use will be in your simulation or what caveats you
on how applicable its use will be in your simulation or what caveats you
may want to include with your results. In other cases, it can be quite easy
to configure behaviors that might not agree with any reality you can go out
and buy. It will prove worthwhile to spend some time investigating a few
@@ -708,30 +708,30 @@ in your simulations.
As you have seen, |ns3| provides ``Attributes`` which a user
can easily set to change model behavior. Consider two of the ``Attributes``
of the ``CsmaNetDevice``: ``Mtu`` and ``EncapsulationMode``.
The ``Mtu`` attribute indicates the Maximum Transmission Unit to the
of the ``CsmaNetDevice``: ``Mtu`` and ``EncapsulationMode``.
The ``Mtu`` attribute indicates the Maximum Transmission Unit to the
device. This is the size of the largest Protocol Data Unit (PDU) that the
device can send.
device can send.
The MTU defaults to 1500 bytes in the ``CsmaNetDevice``. This default
corresponds to a number found in RFC 894, "A Standard for the Transmission
of IP Datagrams over Ethernet Networks." The number is actually derived
from the maximum packet size for 10Base5 (full-spec Ethernet) networks --
1518 bytes. If you subtract the DIX encapsulation overhead for Ethernet
packets (18 bytes) you will end up with a maximum possible data size (MTU)
of IP Datagrams over Ethernet Networks." The number is actually derived
from the maximum packet size for 10Base5 (full-spec Ethernet) networks --
1518 bytes. If you subtract the DIX encapsulation overhead for Ethernet
packets (18 bytes) you will end up with a maximum possible data size (MTU)
of 1500 bytes. One can also find that the ``MTU`` for IEEE 802.3 networks
is 1492 bytes. This is because LLC/SNAP encapsulation adds an extra eight
is 1492 bytes. This is because LLC/SNAP encapsulation adds an extra eight
bytes of overhead to the packet. In both cases, the underlying hardware can
only send 1518 bytes, but the data size is different.
In order to set the encapsulation mode, the ``CsmaNetDevice`` provides
an ``Attribute`` called ``EncapsulationMode`` which can take on the
an ``Attribute`` called ``EncapsulationMode`` which can take on the
values ``Dix`` or ``Llc``. These correspond to Ethernet and LLC/SNAP
framing respectively.
If one leaves the ``Mtu`` at 1500 bytes and changes the encapsulation mode
to ``Llc``, the result will be a network that encapsulates 1500 byte PDUs
with LLC/SNAP framing resulting in packets of 1526 bytes, which would be
with LLC/SNAP framing resulting in packets of 1526 bytes, which would be
illegal in many networks, since they can transmit a maximum of 1518 bytes per
packet. This would most likely result in a simulation that quite subtly does
not reflect the reality you might be expecting.
@@ -740,8 +740,8 @@ Just to complicate the picture, there exist jumbo frames (1500 < MTU <= 9000 byt
and super-jumbo (MTU > 9000 bytes) frames that are not officially sanctioned
by IEEE but are available in some high-speed (Gigabit) networks and NICs. One
could leave the encapsulation mode set to ``Dix``, and set the ``Mtu``
``Attribute`` on a ``CsmaNetDevice`` to 64000 bytes -- even though an
associated ``CsmaChannel DataRate`` was set at 10 megabits per second.
``Attribute`` on a ``CsmaNetDevice`` to 64000 bytes -- even though an
associated ``CsmaChannel DataRate`` was set at 10 megabits per second.
This would essentially model an Ethernet switch made out of vampire-tapped
1980s-style 10Base5 networks that support super-jumbo datagrams. This is
certainly not something that was ever made, nor is likely to ever be made,
@@ -750,8 +750,8 @@ but it is quite easy for you to configure.
In the previous example, you used the command line to create a simulation that
had 100 ``Csma`` nodes. You could have just as easily created a simulation
with 500 nodes. If you were actually modeling that 10Base5 vampire-tap network,
the maximum length of a full-spec Ethernet cable is 500 meters, with a minimum
tap spacing of 2.5 meters. That means there could only be 200 taps on a
the maximum length of a full-spec Ethernet cable is 500 meters, with a minimum
tap spacing of 2.5 meters. That means there could only be 200 taps on a
real network. You could have quite easily built an illegal network in that
way as well. This may or may not result in a meaningful simulation depending
on what you are trying to model.
@@ -774,36 +774,36 @@ Building a Wireless Network Topology
************************************
In this section we are going to further expand our knowledge of |ns3|
network devices and channels to cover an example of a wireless network.
|ns3| provides a set of 802.11 models that attempt to provide an
accurate MAC-level implementation of the 802.11 specification and a
network devices and channels to cover an example of a wireless network.
|ns3| provides a set of 802.11 models that attempt to provide an
accurate MAC-level implementation of the 802.11 specification and a
"not-so-slow" PHY-level model of the 802.11a specification.
Just as we have seen both point-to-point and CSMA topology helper objects when
constructing point-to-point topologies, we will see equivalent ``Wifi``
topology helpers in this section. The appearance and operation of these
topology helpers in this section. The appearance and operation of these
helpers should look quite familiar to you.
We provide an example script in our ``examples/tutorial`` directory. This script
builds on the ``second.cc`` script and adds a Wi-Fi network. Go ahead and
open ``examples/tutorial/third.cc`` in your favorite editor. You will have already
seen enough |ns3| code to understand most of what is going on in
this example, but there are a few new things, so we will go over the entire
seen enough |ns3| code to understand most of what is going on in
this example, but there are a few new things, so we will go over the entire
script and examine some of the output.
Just as in the ``second.cc`` example (and in all |ns3| examples)
the file begins with an emacs mode line and some GPL boilerplate.
Take a look at the ASCII art (reproduced below) that shows the default network
topology constructed in the example. You can see that we are going to
topology constructed in the example. You can see that we are going to
further extend our example by hanging a wireless network off of the left side.
Notice that this is a default network topology since you can actually vary the
number of nodes created on the wired and wireless networks. Just as in the
``second.cc`` script case, if you change ``nCsma``, it will give you a
number of "extra" CSMA nodes. Similarly, you can set ``nWifi`` to
number of nodes created on the wired and wireless networks. Just as in the
``second.cc`` script case, if you change ``nCsma``, it will give you a
number of "extra" CSMA nodes. Similarly, you can set ``nWifi`` to
control how many ``STA`` (station) nodes are created in the simulation.
There will always be one ``AP`` (access point) node on the wireless
network. By default there are three "extra" CSMA nodes and three wireless
There will always be one ``AP`` (access point) node on the wireless
network. By default there are three "extra" CSMA nodes and three wireless
``STA`` nodes.
The code begins by loading module include files just as was done in the
@@ -836,9 +836,9 @@ The network topology illustration follows:
// ================
// LAN 10.1.2.0
You can see that we are adding a new network device to the node on the left
You can see that we are adding a new network device to the node on the left
side of the point-to-point link that becomes the access point for the wireless
network. A number of wireless STA nodes are created to fill out the new
network. A number of wireless STA nodes are created to fill out the new
10.1.3.0 network as shown on the left side of the illustration.
After the illustration, the ``ns-3`` namespace is ``used`` and a logging
@@ -847,11 +847,11 @@ component is defined. This should all be quite familiar by now.
::
using namespace ns3;
NS_LOG_COMPONENT_DEFINE ("ThirdScriptExample");
The main program begins just like ``second.cc`` by adding some command line
parameters for enabling or disabling logging components and for changing the
parameters for enabling or disabling logging components and for changing the
number of devices created.
::
@@ -874,16 +874,16 @@ number of devices created.
}
Just as in all of the previous examples, the next step is to create two nodes
that we will connect via the point-to-point link.
that we will connect via the point-to-point link.
::
NodeContainer p2pNodes;
p2pNodes.Create (2);
Next, we see an old friend. We instantiate a ``PointToPointHelper`` and
set the associated default ``Attributes`` so that we create a five megabit
per second transmitter on devices created using the helper and a two millisecond
Next, we see an old friend. We instantiate a ``PointToPointHelper`` and
set the associated default ``Attributes`` so that we create a five megabit
per second transmitter on devices created using the helper and a two millisecond
delay on channels created by the helper. We then ``Install`` the devices
on the nodes and the channel between them.
@@ -907,13 +907,13 @@ part of the bus (CSMA) network.
The next line of code ``Gets`` the first node (as in having an index of one)
from the point-to-point node container and adds it to the container of nodes
that will get CSMA devices. The node in question is going to end up with a
that will get CSMA devices. The node in question is going to end up with a
point-to-point device and a CSMA device. We then create a number of "extra"
nodes that compose the remainder of the CSMA network.
We then instantiate a ``CsmaHelper`` and set its ``Attributes`` as we did
in the previous example. We create a ``NetDeviceContainer`` to keep track of
the created CSMA net devices and then we ``Install`` CSMA devices on the
the created CSMA net devices and then we ``Install`` CSMA devices on the
selected nodes.
::
@@ -926,8 +926,8 @@ selected nodes.
csmaDevices = csma.Install (csmaNodes);
Next, we are going to create the nodes that will be part of the Wi-Fi network.
We are going to create a number of "station" nodes as specified by the
command line argument, and we are going to use the "leftmost" node of the
We are going to create a number of "station" nodes as specified by the
command line argument, and we are going to use the "leftmost" node of the
point-to-point link as the node for the access point.
::
@@ -962,7 +962,7 @@ Once the PHY helper is configured, we can focus on the MAC layer. The
WifiMacHelper object is used to set MAC parameters.
The second statement below creates an 802.11 service set identifier (SSID)
object that will be used to set the value of the "Ssid" ``Attribute`` of
the MAC layer implementation.
the MAC layer implementation.
::
@@ -978,7 +978,7 @@ a compatible rate control algorithm (IdealWifiManager).
WifiHelper wifi;
We are now ready to install Wi-Fi models on the nodes, using these four
We are now ready to install Wi-Fi models on the nodes, using these four
helper objects (YansWifiChannelHelper, YansWifiPhyHelper, WifiMacHelper,
WifiHelper) and the Ssid object created above. These helpers have
encapsulated a lot of default configuration,
@@ -1005,7 +1005,7 @@ set to false. This means that probe requests will not be sent by MACs
created by this helper, and stations will listen for AP beacons.
Once all the station-specific parameters are fully configured, both at the
MAC and PHY layers, we can invoke our now-familiar ``Install`` method to
MAC and PHY layers, we can invoke our now-familiar ``Install`` method to
create the Wi-Fi devices of these stations:
::
@@ -1013,9 +1013,9 @@ create the Wi-Fi devices of these stations:
NetDeviceContainer staDevices;
staDevices = wifi.Install (phy, mac, wifiStaNodes);
We have configured Wi-Fi for all of our STA nodes, and now we need to
We have configured Wi-Fi for all of our STA nodes, and now we need to
configure the AP (access point) node. We begin this process by changing
the default ``Attributes`` of the ``WifiMacHelper`` to reflect the
the default ``Attributes`` of the ``WifiMacHelper`` to reflect the
requirements of the AP.
::
@@ -1036,9 +1036,9 @@ The next lines create the single AP which shares the same set of PHY-level
apDevices = wifi.Install (phy, mac, wifiApNode);
Now, we are going to add mobility models. We want the STA nodes to be mobile,
wandering around inside a bounding box, and we want to make the AP node
wandering around inside a bounding box, and we want to make the AP node
stationary. We use the ``MobilityHelper`` to make this easy for us.
First, we instantiate a ``MobilityHelper`` object and set some
First, we instantiate a ``MobilityHelper`` object and set some
``Attributes`` controlling the "position allocator" functionality.
::
@@ -1054,12 +1054,12 @@ First, we instantiate a ``MobilityHelper`` object and set some
"LayoutType", StringValue ("RowFirst"));
This code tells the mobility helper to use a two-dimensional grid to initially
place the STA nodes. Feel free to explore the Doxygen for class
place the STA nodes. Feel free to explore the Doxygen for class
``ns3::GridPositionAllocator`` to see exactly what is being done.
We have arranged our nodes on an initial grid, but now we need to tell them
how to move. We choose the ``RandomWalk2dMobilityModel`` which has the
nodes move in a random direction at a random speed around inside a bounding
how to move. We choose the ``RandomWalk2dMobilityModel`` which has the
nodes move in a random direction at a random speed around inside a bounding
box.
::
@@ -1067,7 +1067,7 @@ box.
mobility.SetMobilityModel ("ns3::RandomWalk2dMobilityModel",
"Bounds", RectangleValue (Rectangle (-50, 50, -50, 50)));
We now tell the ``MobilityHelper`` to install the mobility models on the
We now tell the ``MobilityHelper`` to install the mobility models on the
STA nodes.
::
@@ -1075,7 +1075,7 @@ STA nodes.
mobility.Install (wifiStaNodes);
We want the access point to remain in a fixed position during the simulation.
We accomplish this by setting the mobility model for this node to be the
We accomplish this by setting the mobility model for this node to be the
``ns3::ConstantPositionMobilityModel``:
::
@@ -1083,8 +1083,8 @@ We accomplish this by setting the mobility model for this node to be the
mobility.SetMobilityModel ("ns3::ConstantPositionMobilityModel");
mobility.Install (wifiApNode);
We now have our nodes, devices and channels created, and mobility models
chosen for the Wi-Fi nodes, but we have no protocol stacks present. Just as
We now have our nodes, devices and channels created, and mobility models
chosen for the Wi-Fi nodes, but we have no protocol stacks present. Just as
we have done previously many times, we will use the ``InternetStackHelper``
to install these stacks.
@@ -1095,7 +1095,7 @@ to install these stacks.
stack.Install (wifiApNode);
stack.Install (wifiStaNodes);
Just as in the ``second.cc`` example script, we are going to use the
Just as in the ``second.cc`` example script, we are going to use the
``Ipv4AddressHelper`` to assign IP addresses to our device interfaces.
First we use the network 10.1.1.0 to create the two addresses needed for our
two point-to-point devices. Then we use network 10.1.2.0 to assign addresses
@@ -1156,7 +1156,7 @@ created will never "naturally" stop. This is because we asked the wireless
access point to generate beacons. It will generate beacons forever, and this
will result in simulator events being scheduled into the future indefinitely,
so we must tell the simulator to stop even though it may have beacon generation
events scheduled. The following line of code tells the simulator to stop so that
events scheduled. The following line of code tells the simulator to stop so that
we don't simulate beacons forever and enter what is essentially an endless
loop.
@@ -1173,9 +1173,9 @@ We create just enough tracing to cover all three networks:
csma.EnablePcap ("third", csmaDevices.Get (0), true);
These three lines of code will start pcap tracing on both of the point-to-point
nodes that serves as our backbone, will start a promiscuous (monitor) mode
trace on the Wi-Fi network, and will start a promiscuous trace on the CSMA
network. This will let us see all of the traffic with a minimum number of
nodes that serves as our backbone, will start a promiscuous (monitor) mode
trace on the Wi-Fi network, and will start a promiscuous trace on the CSMA
network. This will let us see all of the traffic with a minimum number of
trace files.
Finally, we actually run the simulation, clean up and then exit the program.
@@ -1197,7 +1197,7 @@ repository you would type,
$ cp examples/tutorial/third.cc scratch/mythird.cc
$ ./ns3 run scratch/mythird
Again, since we have set up the UDP echo applications just as we did in the
Again, since we have set up the UDP echo applications just as we did in the
``second.cc`` script, you will see similar output.
.. sourcecode:: text
@@ -1210,15 +1210,15 @@ Again, since we have set up the UDP echo applications just as we did in the
At time 2.01796s server sent 1024 bytes to 10.1.3.3 port 49153
At time 2.03364s client received 1024 bytes from 10.1.2.4 port 9
Recall that the first message, "``Sent 1024 bytes to 10.1.2.4``," is the
Recall that the first message, "``Sent 1024 bytes to 10.1.2.4``," is the
UDP echo client sending a packet to the server. In this case, the client
is on the wireless network (10.1.3.0). The second message,
"``Received 1024 bytes from 10.1.3.3``," is from the UDP echo server,
generated when it receives the echo packet. The final message,
is on the wireless network (10.1.3.0). The second message,
"``Received 1024 bytes from 10.1.3.3``," is from the UDP echo server,
generated when it receives the echo packet. The final message,
"``Received 1024 bytes from 10.1.2.4``," is from the echo client, indicating
that it has received its echo back from the server.
If you now go and look in the top level directory, you will find four trace
If you now go and look in the top level directory, you will find four trace
files from this simulation, two from node zero and two from node one:
.. sourcecode:: text
@@ -1226,7 +1226,7 @@ files from this simulation, two from node zero and two from node one:
third-0-0.pcap third-0-1.pcap third-1-0.pcap third-1-1.pcap
The file "third-0-0.pcap" corresponds to the point-to-point device on node
zero -- the left side of the "backbone". The file "third-1-0.pcap"
zero -- the left side of the "backbone". The file "third-1-0.pcap"
corresponds to the point-to-point device on node one -- the right side of the
"backbone". The file "third-0-1.pcap" will be the promiscuous (monitor
mode) trace from the Wi-Fi network and the file "third-1-1.pcap" will be the
@@ -1247,24 +1247,24 @@ You should see some wifi-looking contents you haven't seen here before:
reading from file third-0-1.pcap, link-type IEEE802_11 (802.11)
0.000025 Beacon (ns-3-ssid) [6.0* 9.0 12.0 18.0 24.0 36.0 48.0 54.0 Mbit] IBSS
0.000308 Assoc Request (ns-3-ssid) [6.0 9.0 12.0 18.0 24.0 36.0 48.0 54.0 Mbit]
0.000324 Acknowledgment RA:00:00:00:00:00:08
0.000324 Acknowledgment RA:00:00:00:00:00:08
0.000402 Assoc Response AID(0) :: Successful
0.000546 Acknowledgment RA:00:00:00:00:00:0a
0.000546 Acknowledgment RA:00:00:00:00:00:0a
0.000721 Assoc Request (ns-3-ssid) [6.0 9.0 12.0 18.0 24.0 36.0 48.0 54.0 Mbit]
0.000737 Acknowledgment RA:00:00:00:00:00:07
0.000737 Acknowledgment RA:00:00:00:00:00:07
0.000824 Assoc Response AID(0) :: Successful
0.000968 Acknowledgment RA:00:00:00:00:00:0a
0.000968 Acknowledgment RA:00:00:00:00:00:0a
0.001134 Assoc Request (ns-3-ssid) [6.0 9.0 12.0 18.0 24.0 36.0 48.0 54.0 Mbit]
0.001150 Acknowledgment RA:00:00:00:00:00:09
0.001150 Acknowledgment RA:00:00:00:00:00:09
0.001273 Assoc Response AID(0) :: Successful
0.001417 Acknowledgment RA:00:00:00:00:00:0a
0.001417 Acknowledgment RA:00:00:00:00:00:0a
0.102400 Beacon (ns-3-ssid) [6.0* 9.0 12.0 18.0 24.0 36.0 48.0 54.0 Mbit] IBSS
0.204800 Beacon (ns-3-ssid) [6.0* 9.0 12.0 18.0 24.0 36.0 48.0 54.0 Mbit] IBSS
0.307200 Beacon (ns-3-ssid) [6.0* 9.0 12.0 18.0 24.0 36.0 48.0 54.0 Mbit] IBSS
You can see that the link type is now 802.11 as you would expect. You can
You can see that the link type is now 802.11 as you would expect. You can
probably understand what is going on and find the IP echo request and response
packets in this trace. We leave it as an exercise to completely parse the
packets in this trace. We leave it as an exercise to completely parse the
trace dump.
Now, look at the pcap file of the left side of the point-to-point link,
@@ -1298,11 +1298,11 @@ Again, you should see some familiar looking contents:
2.011837 IP 10.1.3.3.49153 > 10.1.2.4.9: UDP, length 1024
2.023072 IP 10.1.2.4.9 > 10.1.3.3.49153: UDP, length 1024
This is also the echo packet going from left to right (from Wi-Fi to CSMA) and
This is also the echo packet going from left to right (from Wi-Fi to CSMA) and
back again across the point-to-point link with slightly different timings
as you might expect.
The echo server is on the CSMA network, let's look at the promiscuous trace
The echo server is on the CSMA network, let's look at the promiscuous trace
there:
.. sourcecode:: bash
@@ -1331,14 +1331,14 @@ into the ``MobilityModel`` course change trace source. This is just a sneak
peek into the detailed tracing section which is coming up, but this seems a very
nice place to get an example in.
As mentioned in the "Tweaking ns-3" section, the |ns3| tracing system
is divided into trace sources and trace sinks, and we provide functions to
connect the two. We will use the mobility model predefined course change
trace source to originate the trace events. We will need to write a trace
sink to connect to that source that will display some pretty information for
As mentioned in the "Tweaking ns-3" section, the |ns3| tracing system
is divided into trace sources and trace sinks, and we provide functions to
connect the two. We will use the mobility model predefined course change
trace source to originate the trace events. We will need to write a trace
sink to connect to that source that will display some pretty information for
us. Despite its reputation as being difficult, it's really quite simple.
Just before the main program of the ``scratch/mythird.cc`` script (i.e.,
just after the ``NS_LOG_COMPONENT_DEFINE`` statement), add the
just after the ``NS_LOG_COMPONENT_DEFINE`` statement), add the
following function:
::
@@ -1347,14 +1347,14 @@ following function:
CourseChange (std::string context, Ptr<const MobilityModel> model)
{
Vector position = model->GetPosition ();
NS_LOG_UNCOND (context <<
NS_LOG_UNCOND (context <<
" x = " << position.x << ", y = " << position.y);
}
This code just pulls the position information from the mobility model and
This code just pulls the position information from the mobility model and
unconditionally logs the x and y position of the node. We are
going to arrange for this function to be called every time the wireless
node with the echo client changes its position. We do this using the
node with the echo client changes its position. We do this using the
``Config::Connect`` function. Add the following lines of code to the
script just before the ``Simulator::Run`` call.
@@ -1368,9 +1368,9 @@ script just before the ``Simulator::Run`` call.
Config::Connect (oss.str (), MakeCallback (&CourseChange));
What we do here is to create a string containing the tracing namespace path
of the event to which we want to connect. First, we have to figure out which
of the event to which we want to connect. First, we have to figure out which
node it is we want using the ``GetId`` method as described earlier. In the
case of the default number of CSMA and wireless nodes, this turns out to be
case of the default number of CSMA and wireless nodes, this turns out to be
node seven and the tracing namespace path to the mobility model would look
like,
@@ -1378,19 +1378,19 @@ like,
/NodeList/7/$ns3::MobilityModel/CourseChange
Based on the discussion in the tracing section, you may infer that this trace
Based on the discussion in the tracing section, you may infer that this trace
path references the seventh node in the global NodeList. It specifies
what is called an aggregated object of type ``ns3::MobilityModel``. The
what is called an aggregated object of type ``ns3::MobilityModel``. The
dollar sign prefix implies that the MobilityModel is aggregated to node seven.
The last component of the path means that we are hooking into the
"CourseChange" event of that model.
The last component of the path means that we are hooking into the
"CourseChange" event of that model.
We make a connection between the trace source in node seven with our trace
sink by calling ``Config::Connect`` and passing this namespace path. Once
this is done, every course change event on node seven will be hooked into our
We make a connection between the trace source in node seven with our trace
sink by calling ``Config::Connect`` and passing this namespace path. Once
this is done, every course change event on node seven will be hooked into our
trace sink, which will in turn print out the new position.
If you now run the simulation, you will see the course changes displayed as
If you now run the simulation, you will see the course changes displayed as
they happen.
.. sourcecode:: text
@@ -1439,7 +1439,7 @@ The selection of queueing disciplines in |ns3| can have a large impact
on performance, and it is important for users to understand what is installed
by default and how to change the defaults and observe the performance.
Architecturally, |ns3| separates the device layer from the IP layers
Architecturally, |ns3| separates the device layer from the IP layers
or traffic control layers of an Internet host. Since recent releases
of |ns3|, outgoing packets traverse two queueing layers before reaching
the channel object. The first queueing layer encountered is what is

View File

@@ -21,18 +21,18 @@ networking, but have a specific meaning in |ns3|.
Node
++++
In Internet jargon, a computing device that connects to a network is called
a *host* or sometimes an *end system*. Because |ns3| is a
*network* simulator, not specifically an *Internet* simulator, we
a *host* or sometimes an *end system*. Because |ns3| is a
*network* simulator, not specifically an *Internet* simulator, we
intentionally do not use the term host since it is closely associated with
the Internet and its protocols. Instead, we use a more generic term also
used by other simulators that originates in Graph Theory --- the *node*.
In |ns3| the basic computing device abstraction is called the
node. This abstraction is represented in C++ by the class ``Node``. The
``Node`` class provides methods for managing the representations of
In |ns3| the basic computing device abstraction is called the
node. This abstraction is represented in C++ by the class ``Node``. The
``Node`` class provides methods for managing the representations of
computing devices in simulations.
You should think of a ``Node`` as a computer to which you will add
You should think of a ``Node`` as a computer to which you will add
functionality. One adds things like applications, protocol stacks and
peripheral cards with their associated drivers to enable the computer to do
useful work. We use the same basic model in |ns3|.
@@ -45,7 +45,7 @@ cycles, disk, network, etc., according to some computing model. System
software usually does not use those resources to complete tasks that directly
benefit a user. A user would typically run an *application* that acquires
and uses the resources controlled by the system software to accomplish some
goal.
goal.
Often, the line of separation between system and application software is made
at the privilege level change that happens in operating system traps.
@@ -56,41 +56,41 @@ perform tasks in the "real world," |ns3| applications run on
|ns3| ``Nodes`` to drive simulations in the simulated world.
In |ns3| the basic abstraction for a user program that generates some
activity to be simulated is the application. This abstraction is represented
in C++ by the class ``Application``. The ``Application`` class provides
methods for managing the representations of our version of user-level
activity to be simulated is the application. This abstraction is represented
in C++ by the class ``Application``. The ``Application`` class provides
methods for managing the representations of our version of user-level
applications in simulations. Developers are expected to specialize the
``Application`` class in the object-oriented programming sense to create new
applications. In this tutorial, we will use specializations of class
``Application`` called ``UdpEchoClientApplication`` and
``UdpEchoServerApplication``. As you might expect, these applications
compose a client/server application set used to generate and echo simulated
network packets.
applications. In this tutorial, we will use specializations of class
``Application`` called ``UdpEchoClientApplication`` and
``UdpEchoServerApplication``. As you might expect, these applications
compose a client/server application set used to generate and echo simulated
network packets.
Channel
+++++++
In the real world, one can connect a computer to a network. Often the media
over which data flows in these networks are called *channels*. When
you connect your Ethernet cable to the plug in the wall, you are connecting
you connect your Ethernet cable to the plug in the wall, you are connecting
your computer to an Ethernet communication channel. In the simulated world
of |ns3|, one connects a ``Node`` to an object representing a
communication channel. Here the basic communication subnetwork abstraction
is called the channel and is represented in C++ by the class ``Channel``.
communication channel. Here the basic communication subnetwork abstraction
is called the channel and is represented in C++ by the class ``Channel``.
The ``Channel`` class provides methods for managing communication
The ``Channel`` class provides methods for managing communication
subnetwork objects and connecting nodes to them. ``Channels`` may also be
specialized by developers in the object oriented programming sense. A
``Channel`` specialization may model something as simple as a wire. The
specialized ``Channel`` can also model things as complicated as a large
Ethernet switch, or three-dimensional space full of obstructions in the case
specialized by developers in the object oriented programming sense. A
``Channel`` specialization may model something as simple as a wire. The
specialized ``Channel`` can also model things as complicated as a large
Ethernet switch, or three-dimensional space full of obstructions in the case
of wireless networks.
We will use specialized versions of the ``Channel`` called
``CsmaChannel``, ``PointToPointChannel`` and ``WifiChannel`` in this
tutorial. The ``CsmaChannel``, for example, models a version of a
communication subnetwork that implements a *carrier sense multiple
access* communication medium. This gives us Ethernet-like functionality.
tutorial. The ``CsmaChannel``, for example, models a version of a
communication subnetwork that implements a *carrier sense multiple
access* communication medium. This gives us Ethernet-like functionality.
Net Device
++++++++++
@@ -98,52 +98,52 @@ It used to be the case that if you wanted to connect a computer to a network,
you had to buy a specific kind of network cable and a hardware device called
(in PC terminology) a *peripheral card* that needed to be installed in
your computer. If the peripheral card implemented some networking function,
they were called Network Interface Cards, or *NICs*. Today most
computers come with the network interface hardware built in and users don't
they were called Network Interface Cards, or *NICs*. Today most
computers come with the network interface hardware built in and users don't
see these building blocks.
A NIC will not work without a software driver to control the hardware. In
Unix (or Linux), a piece of peripheral hardware is classified as a
A NIC will not work without a software driver to control the hardware. In
Unix (or Linux), a piece of peripheral hardware is classified as a
*device*. Devices are controlled using *device drivers*, and network
devices (NICs) are controlled using *network device drivers*
collectively known as *net devices*. In Unix and Linux you refer
to these net devices by names such as *eth0*.
In |ns3| the *net device* abstraction covers both the software
driver and the simulated hardware. A net device is "installed" in a
``Node`` in order to enable the ``Node`` to communicate with other
In |ns3| the *net device* abstraction covers both the software
driver and the simulated hardware. A net device is "installed" in a
``Node`` in order to enable the ``Node`` to communicate with other
``Nodes`` in the simulation via ``Channels``. Just as in a real
computer, a ``Node`` may be connected to more than one ``Channel`` via
multiple ``NetDevices``.
The net device abstraction is represented in C++ by the class ``NetDevice``.
The ``NetDevice`` class provides methods for managing connections to
The ``NetDevice`` class provides methods for managing connections to
``Node`` and ``Channel`` objects; and may be specialized by developers
in the object-oriented programming sense. We will use the several specialized
versions of the ``NetDevice`` called ``CsmaNetDevice``,
``PointToPointNetDevice``, and ``WifiNetDevice`` in this tutorial.
Just as an Ethernet NIC is designed to work with an Ethernet network, the
``CsmaNetDevice`` is designed to work with a ``CsmaChannel``; the
``PointToPointNetDevice`` is designed to work with a
``PointToPointNetDevice`` is designed to work with a
``PointToPointChannel`` and a ``WifiNetDevice`` is designed to work with
a ``WifiChannel``.
Topology Helpers
++++++++++++++++
In a real network, you will find host computers with added (or built-in)
NICs. In |ns3| we would say that you will find ``Nodes`` with
attached ``NetDevices``. In a large simulated network you will need to
arrange many connections between ``Nodes``, ``NetDevices`` and
NICs. In |ns3| we would say that you will find ``Nodes`` with
attached ``NetDevices``. In a large simulated network you will need to
arrange many connections between ``Nodes``, ``NetDevices`` and
``Channels``.
Since connecting ``NetDevices`` to ``Nodes``, ``NetDevices``
to ``Channels``, assigning IP addresses, etc., are such common tasks
in |ns3|, we provide what we call *topology helpers* to make
this as easy as possible. For example, it may take many distinct
|ns3| core operations to create a NetDevice, add a MAC address,
in |ns3|, we provide what we call *topology helpers* to make
this as easy as possible. For example, it may take many distinct
|ns3| core operations to create a NetDevice, add a MAC address,
install that net device on a ``Node``, configure the node's protocol stack,
and then connect the ``NetDevice`` to a ``Channel``. Even more
operations would be required to connect multiple devices onto multipoint
operations would be required to connect multiple devices onto multipoint
channels and then to connect individual networks together into internetworks.
We provide topology helper objects that combine those many distinct operations
into an easy to use model for your convenience.
@@ -151,8 +151,8 @@ into an easy to use model for your convenience.
A First ns-3 Script
*******************
If you downloaded the system as was suggested above, you will have a release
of |ns3| in a directory called ``repos`` under your home
directory. Change into that release directory, and you should find a
of |ns3| in a directory called ``repos`` under your home
directory. Change into that release directory, and you should find a
directory structure something like the following:
.. sourcecode:: bash
@@ -160,10 +160,10 @@ directory structure something like the following:
AUTHORS doc RELEASE_NOTES.md utils
bindings examples scratch utils.py
CHANGES.html LICENSE src VERSION
contrib Makefile test.py
contrib Makefile test.py
CONTRIBUTING.md README.md testpy.supp
Change into the ``examples/tutorial`` directory. You should see a file named
Change into the ``examples/tutorial`` directory. You should see a file named
``first.cc`` located there. This is a script that will create a simple
point-to-point link between two nodes and echo a single packet between the
nodes. Let's take a look at that script line by line, so go ahead and open
@@ -172,29 +172,29 @@ nodes. Let's take a look at that script line by line, so go ahead and open
Boilerplate
+++++++++++
The first line in the file is an emacs mode line. This tells emacs about the
formatting conventions (coding style) we use in our source code.
formatting conventions (coding style) we use in our source code.
::
/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
This is always a somewhat controversial subject, so we might as well get it
out of the way immediately. The |ns3| project, like most large
projects, has adopted a coding style to which all contributed code must
adhere. If you want to contribute your code to the project, you will
eventually have to conform to the |ns3| coding standard as described
out of the way immediately. The |ns3| project, like most large
projects, has adopted a coding style to which all contributed code must
adhere. If you want to contribute your code to the project, you will
eventually have to conform to the |ns3| coding standard as described
in the file ``doc/codingstd.txt`` or shown on the project web page
`here
<http://www.nsnam.org/developers/contributing-code/coding-style/>`_.
We recommend that you, well, just get used to the look and feel of |ns3|
code and adopt this standard whenever you are working with our code. All of
the development team and contributors have done so with various amounts of
grumbling. The emacs mode line above makes it easier to get the formatting
code and adopt this standard whenever you are working with our code. All of
the development team and contributors have done so with various amounts of
grumbling. The emacs mode line above makes it easier to get the formatting
correct if you use the emacs editor.
The |ns3| simulator is licensed using the GNU General Public
License. You will see the appropriate GNU legalese at the head of every file
The |ns3| simulator is licensed using the GNU General Public
License. You will see the appropriate GNU legalese at the head of every file
in the |ns3| distribution. Often you will see a copyright notice for
one of the institutions involved in the |ns3| project above the GPL
text and an author listed below.
@@ -218,7 +218,7 @@ text and an author listed below.
Module Includes
+++++++++++++++
The code proper starts with a number of include statements.
The code proper starts with a number of include statements.
::
@@ -228,41 +228,41 @@ The code proper starts with a number of include statements.
#include "ns3/point-to-point-module.h"
#include "ns3/applications-module.h"
To help our high-level script users deal with the large number of include
files present in the system, we group includes according to relatively large
modules. We provide a single include file that will recursively load all of
To help our high-level script users deal with the large number of include
files present in the system, we group includes according to relatively large
modules. We provide a single include file that will recursively load all of
the include files used in each module. Rather than having to look up exactly
what header you need, and possibly have to get a number of dependencies right,
we give you the ability to load a group of files at a large granularity. This
is not the most efficient approach but it certainly makes writing scripts much
easier.
Each of the |ns3| include files is placed in a directory called
Each of the |ns3| include files is placed in a directory called
``ns3`` (under the build directory) during the build process to help avoid
include file name collisions. The ``ns3/core-module.h`` file corresponds
to the ns-3 module you will find in the directory ``src/core`` in your
include file name collisions. The ``ns3/core-module.h`` file corresponds
to the ns-3 module you will find in the directory ``src/core`` in your
downloaded release distribution. If you list this directory you will find a
large number of header files. When you do a build, ns3 will place public
header files in an ``ns3`` directory under the appropriate
``build/debug`` or ``build/optimized`` directory depending on your
header files in an ``ns3`` directory under the appropriate
``build/debug`` or ``build/optimized`` directory depending on your
configuration. CMake will also automatically generate a module include file to
load all of the public header files.
Since you are, of course, following this tutorial religiously, you will
Since you are, of course, following this tutorial religiously, you will
already have done a
.. sourcecode:: bash
$ ./ns3 configure -d debug --enable-examples --enable-tests
in order to configure the project to perform debug builds that include
in order to configure the project to perform debug builds that include
examples and tests. You will also have done a
.. sourcecode:: bash
$ ./ns3 build
to build the project. So now if you look in the directory
to build the project. So now if you look in the directory
``../../build/include/ns3`` you will find the four module include files shown
above. You can take a look at the contents of these files and find that they
do include all of the public include files in their respective modules.
@@ -275,16 +275,16 @@ The next line in the ``first.cc`` script is a namespace declaration.
using namespace ns3;
The |ns3| project is implemented in a C++ namespace called
The |ns3| project is implemented in a C++ namespace called
``ns3``. This groups all |ns3|-related declarations in a scope
outside the global namespace, which we hope will help with integration with
outside the global namespace, which we hope will help with integration with
other code. The C++ ``using`` statement introduces the |ns3|
namespace into the current (global) declarative region. This is a fancy way
of saying that after this declaration, you will not have to type ``ns3::``
scope resolution operator before all of the |ns3| code in order to use
it. If you are unfamiliar with namespaces, please consult almost any C++
it. If you are unfamiliar with namespaces, please consult almost any C++
tutorial and compare the ``ns3`` namespace and usage here with instances of
the ``std`` namespace and the ``using namespace std;`` statements you
the ``std`` namespace and the ``using namespace std;`` statements you
will often find in discussions of ``cout`` and streams.
Logging
@@ -296,10 +296,10 @@ The next line of the script is the following,
NS_LOG_COMPONENT_DEFINE ("FirstScriptExample");
We will use this statement as a convenient place to talk about our Doxygen
documentation system. If you look at the project web site,
documentation system. If you look at the project web site,
`ns-3 project
<http://www.nsnam.org>`_, you will find a link to "Documentation" in the navigation bar. If you select this link, you will be
taken to our documentation page. There
taken to our documentation page. There
is a link to "Latest Release" that will take you to the documentation
for the latest stable release of |ns3|.
If you select the "API Documentation" link, you will be
@@ -307,25 +307,25 @@ taken to the |ns3| API documentation page.
Along the left side, you will find a graphical representation of the structure
of the documentation. A good place to start is the ``NS-3 Modules``
"book" in the |ns3| navigation tree. If you expand ``Modules``
you will see a list of |ns3| module documentation. The concept of
"book" in the |ns3| navigation tree. If you expand ``Modules``
you will see a list of |ns3| module documentation. The concept of
module here ties directly into the module include files discussed above. The |ns3| logging subsystem is discussed in the :ref:`UsingLogging` section, so
we'll get to it later in this tutorial, but you can find out about the above
statement by looking at the ``Core`` module, then expanding the
statement by looking at the ``Core`` module, then expanding the
``Debugging tools`` book, and then selecting the ``Logging`` page. Click
on ``Logging``.
You should now be looking at the Doxygen documentation for the Logging module.
In the list of ``Macros``'s at the top of the page you will see the entry
for ``NS_LOG_COMPONENT_DEFINE``. Before jumping in, it would probably be
good to look for the "Detailed Description" of the logging module to get a
for ``NS_LOG_COMPONENT_DEFINE``. Before jumping in, it would probably be
good to look for the "Detailed Description" of the logging module to get a
feel for the overall operation. You can either scroll down or select the
"More..." link under the collaboration diagram to do this.
Once you have a general idea of what is going on, go ahead and take a look at
the specific ``NS_LOG_COMPONENT_DEFINE`` documentation. I won't duplicate
the documentation here, but to summarize, this line declares a logging
component called ``FirstScriptExample`` that allows you to enable and
the documentation here, but to summarize, this line declares a logging
component called ``FirstScriptExample`` that allows you to enable and
disable console message logging by reference to the name.
Main Function
@@ -339,8 +339,8 @@ The next lines of the script you will find are,
{
This is just the declaration of the main function of your program (script).
Just as in any C++ program, you need to define a main function that will be
the first function run. There is nothing at all special here. Your
Just as in any C++ program, you need to define a main function that will be
the first function run. There is nothing at all special here. Your
|ns3| script is just a C++ program.
The next line sets the time resolution to one nanosecond, which happens
@@ -367,12 +367,12 @@ are built into the Echo Client and Echo Server applications:
LogComponentEnable("UdpEchoServerApplication", LOG_LEVEL_INFO);
If you have read over the Logging component documentation you will have seen
that there are a number of levels of logging verbosity/detail that you can
that there are a number of levels of logging verbosity/detail that you can
enable on each component. These two lines of code enable debug logging at the
INFO level for echo clients and servers. This will result in the application
printing out messages as packets are sent and received during the simulation.
Now we will get directly to the business of creating a topology and running
Now we will get directly to the business of creating a topology and running
a simulation. We use the topology helper objects to make this job as
easy as possible.
@@ -380,9 +380,9 @@ Topology Helpers
++++++++++++++++
NodeContainer
~~~~~~~~~~~~~
The next two lines of code in our script will actually create the
|ns3| ``Node`` objects that will represent the computers in the
simulation.
The next two lines of code in our script will actually create the
|ns3| ``Node`` objects that will represent the computers in the
simulation.
::
@@ -390,11 +390,11 @@ simulation.
nodes.Create (2);
Let's find the documentation for the ``NodeContainer`` class before we
continue. Another way to get into the documentation for a given class is via
the ``Classes`` tab in the Doxygen pages. If you still have the Doxygen
handy, just scroll up to the top of the page and select the ``Classes``
tab. You should see a new set of tabs appear, one of which is
``Class List``. Under that tab you will see a list of all of the
continue. Another way to get into the documentation for a given class is via
the ``Classes`` tab in the Doxygen pages. If you still have the Doxygen
handy, just scroll up to the top of the page and select the ``Classes``
tab. You should see a new set of tabs appear, one of which is
``Class List``. Under that tab you will see a list of all of the
|ns3| classes. Scroll down, looking for ``ns3::NodeContainer``.
When you find the class, go ahead and select it to go to the documentation for
the class.
@@ -403,30 +403,30 @@ You may recall that one of our key abstractions is the ``Node``. This
represents a computer to which we are going to add things like protocol stacks,
applications and peripheral cards. The ``NodeContainer`` topology helper
provides a convenient way to create, manage and access any ``Node`` objects
that we create in order to run a simulation. The first line above just
that we create in order to run a simulation. The first line above just
declares a NodeContainer which we call ``nodes``. The second line calls the
``Create`` method on the ``nodes`` object and asks the container to
``Create`` method on the ``nodes`` object and asks the container to
create two nodes. As described in the Doxygen, the container calls down into
the |ns3| system proper to create two ``Node`` objects and stores
pointers to those objects internally.
The nodes as they stand in the script do nothing. The next step in
The nodes as they stand in the script do nothing. The next step in
constructing a topology is to connect our nodes together into a network.
The simplest form of network we support is a single point-to-point link
The simplest form of network we support is a single point-to-point link
between two nodes. We'll construct one of those links here.
PointToPointHelper
~~~~~~~~~~~~~~~~~~
We are constructing a point to point link, and, in a pattern which will become
quite familiar to you, we use a topology helper object to do the low-level
work required to put the link together. Recall that two of our key
work required to put the link together. Recall that two of our key
abstractions are the ``NetDevice`` and the ``Channel``. In the real
world, these terms correspond roughly to peripheral cards and network cables.
world, these terms correspond roughly to peripheral cards and network cables.
Typically these two things are intimately tied together and one cannot expect
to interchange, for example, Ethernet devices and wireless channels. Our
to interchange, for example, Ethernet devices and wireless channels. Our
Topology Helpers follow this intimate coupling and therefore you will use a
single ``PointToPointHelper`` to configure and connect |ns3|
``PointToPointNetDevice`` and ``PointToPointChannel`` objects in this
``PointToPointNetDevice`` and ``PointToPointChannel`` objects in this
script.
The next three lines in the script are,
@@ -443,7 +443,7 @@ The first line,
PointToPointHelper pointToPoint;
instantiates a ``PointToPointHelper`` object on the stack. From a
instantiates a ``PointToPointHelper`` object on the stack. From a
high-level perspective the next line,
::
@@ -451,20 +451,20 @@ high-level perspective the next line,
pointToPoint.SetDeviceAttribute ("DataRate", StringValue ("5Mbps"));
tells the ``PointToPointHelper`` object to use the value "5Mbps"
(five megabits per second) as the "DataRate" when it creates a
(five megabits per second) as the "DataRate" when it creates a
``PointToPointNetDevice`` object.
From a more detailed perspective, the string "DataRate" corresponds
to what we call an ``Attribute`` of the ``PointToPointNetDevice``.
If you look at the Doxygen for class ``ns3::PointToPointNetDevice`` and
If you look at the Doxygen for class ``ns3::PointToPointNetDevice`` and
find the documentation for the ``GetTypeId`` method, you will find a list
of ``Attributes`` defined for the device. Among these is the "DataRate"
``Attribute``. Most user-visible |ns3| objects have similar lists of
``Attribute``. Most user-visible |ns3| objects have similar lists of
``Attributes``. We use this mechanism to easily configure simulations without
recompiling as you will see in a following section.
Similar to the "DataRate" on the ``PointToPointNetDevice`` you will find a
"Delay" ``Attribute`` associated with the ``PointToPointChannel``. The
"Delay" ``Attribute`` associated with the ``PointToPointChannel``. The
final line,
::
@@ -472,20 +472,20 @@ final line,
pointToPoint.SetChannelAttribute ("Delay", StringValue ("2ms"));
tells the ``PointToPointHelper`` to use the value "2ms" (two milliseconds)
as the value of the propagation delay of every point to point channel it
as the value of the propagation delay of every point to point channel it
subsequently creates.
NetDeviceContainer
~~~~~~~~~~~~~~~~~~
At this point in the script, we have a ``NodeContainer`` that contains
two nodes. We have a ``PointToPointHelper`` that is primed and ready to
two nodes. We have a ``PointToPointHelper`` that is primed and ready to
make ``PointToPointNetDevices`` and wire ``PointToPointChannel`` objects
between them. Just as we used the ``NodeContainer`` topology helper object
to create the ``Nodes`` for our simulation, we will ask the
to create the ``Nodes`` for our simulation, we will ask the
``PointToPointHelper`` to do the work involved in creating, configuring and
installing our devices for us. We will need to have a list of all of the
NetDevice objects that are created, so we use a NetDeviceContainer to hold
them just as we used a NodeContainer to hold the nodes we created. The
installing our devices for us. We will need to have a list of all of the
NetDevice objects that are created, so we use a NetDeviceContainer to hold
them just as we used a NodeContainer to hold the nodes we created. The
following two lines of code,
::
@@ -493,22 +493,22 @@ following two lines of code,
NetDeviceContainer devices;
devices = pointToPoint.Install (nodes);
will finish configuring the devices and channel. The first line declares the
device container mentioned above and the second does the heavy lifting. The
``Install`` method of the ``PointToPointHelper`` takes a
``NodeContainer`` as a parameter. Internally, a ``NetDeviceContainer``
is created. For each node in the ``NodeContainer`` (there must be exactly
two for a point-to-point link) a ``PointToPointNetDevice`` is created and
saved in the device container. A ``PointToPointChannel`` is created and
will finish configuring the devices and channel. The first line declares the
device container mentioned above and the second does the heavy lifting. The
``Install`` method of the ``PointToPointHelper`` takes a
``NodeContainer`` as a parameter. Internally, a ``NetDeviceContainer``
is created. For each node in the ``NodeContainer`` (there must be exactly
two for a point-to-point link) a ``PointToPointNetDevice`` is created and
saved in the device container. A ``PointToPointChannel`` is created and
the two ``PointToPointNetDevices`` are attached. When objects are created
by the ``PointToPointHelper``, the ``Attributes`` previously set in the
helper are used to initialize the corresponding ``Attributes`` in the
by the ``PointToPointHelper``, the ``Attributes`` previously set in the
helper are used to initialize the corresponding ``Attributes`` in the
created objects.
After executing the ``pointToPoint.Install (nodes)`` call we will have
two nodes, each with an installed point-to-point net device and a single
point-to-point channel between them. Both devices will be configured to
transmit data at five megabits per second over the channel which has a two
point-to-point channel between them. Both devices will be configured to
transmit data at five megabits per second over the channel which has a two
millisecond transmission delay.
InternetStackHelper
@@ -529,10 +529,10 @@ the nodes in the node container.
Ipv4AddressHelper
~~~~~~~~~~~~~~~~~
Next we need to associate the devices on our nodes with IP addresses. We
Next we need to associate the devices on our nodes with IP addresses. We
provide a topology helper to manage the allocation of IP addresses. The only
user-visible API is to set the base IP address and network mask to use when
performing the actual address allocation (which is done at a lower level
performing the actual address allocation (which is done at a lower level
inside the helper).
The next two lines of code in our example script, ``first.cc``,
@@ -543,12 +543,12 @@ The next two lines of code in our example script, ``first.cc``,
address.SetBase ("10.1.1.0", "255.255.255.0");
declare an address helper object and tell it that it should begin allocating IP
addresses from the network 10.1.1.0 using the mask 255.255.255.0 to define
addresses from the network 10.1.1.0 using the mask 255.255.255.0 to define
the allocatable bits. By default the addresses allocated will start at one
and increase monotonically, so the first address allocated from this base will
be 10.1.1.1, followed by 10.1.1.2, etc. The low level |ns3| system
actually remembers all of the IP addresses allocated and will generate a
fatal error if you accidentally cause the same address to be generated twice
fatal error if you accidentally cause the same address to be generated twice
(which is a very hard to debug error, by the way).
The next line of code,
@@ -559,21 +559,21 @@ The next line of code,
performs the actual address assignment. In |ns3| we make the
association between an IP address and a device using an ``Ipv4Interface``
object. Just as we sometimes need a list of net devices created by a helper
object. Just as we sometimes need a list of net devices created by a helper
for future reference we sometimes need a list of ``Ipv4Interface`` objects.
The ``Ipv4InterfaceContainer`` provides this functionality.
Now we have a point-to-point network built, with stacks installed and IP
Now we have a point-to-point network built, with stacks installed and IP
addresses assigned. What we need at this point are applications to generate
traffic.
Applications
++++++++++++
Another one of the core abstractions of the ns-3 system is the
Another one of the core abstractions of the ns-3 system is the
``Application``. In this script we use two specializations of the core
|ns3| class ``Application`` called ``UdpEchoServerApplication``
and ``UdpEchoClientApplication``. Just as we have in our previous
explanations, we use helper objects to help configure and manage the
and ``UdpEchoClientApplication``. Just as we have in our previous
explanations, we use helper objects to help configure and manage the
underlying objects. Here, we use ``UdpEchoServerHelper`` and
``UdpEchoClientHelper`` objects to make our lives easier.
@@ -591,45 +591,45 @@ created.
serverApps.Start (Seconds (1.0));
serverApps.Stop (Seconds (10.0));
The first line of code in the above snippet declares the
The first line of code in the above snippet declares the
``UdpEchoServerHelper``. As usual, this isn't the application itself, it
is an object used to help us create the actual applications. One of our
is an object used to help us create the actual applications. One of our
conventions is to place *required* ``Attributes`` in the helper constructor.
In this case, the helper can't do anything useful unless it is provided with
a port number that the client also knows about. Rather than just picking one
and hoping it all works out, we require the port number as a parameter to the
a port number that the client also knows about. Rather than just picking one
and hoping it all works out, we require the port number as a parameter to the
constructor. The constructor, in turn, simply does a ``SetAttribute``
with the passed value. If you want, you can set the "Port" ``Attribute``
to another value later using ``SetAttribute``.
Similar to many other helper objects, the ``UdpEchoServerHelper`` object
Similar to many other helper objects, the ``UdpEchoServerHelper`` object
has an ``Install`` method. It is the execution of this method that actually
causes the underlying echo server application to be instantiated and attached
to a node. Interestingly, the ``Install`` method takes a
``NodeContainer`` as a parameter just as the other ``Install`` methods
we have seen. This is actually what is passed to the method even though it
we have seen. This is actually what is passed to the method even though it
doesn't look so in this case. There is a C++ *implicit conversion* at
work here that takes the result of ``nodes.Get (1)`` (which returns a smart
pointer to a node object --- ``Ptr<Node>``) and uses that in a constructor
for an unnamed ``NodeContainer`` that is then passed to ``Install``.
If you are ever at a loss to find a particular method signature in C++ code
that compiles and runs just fine, look for these kinds of implicit conversions.
that compiles and runs just fine, look for these kinds of implicit conversions.
We now see that ``echoServer.Install`` is going to install a
``UdpEchoServerApplication`` on the node found at index number one of the
``NodeContainer`` we used to manage our nodes. ``Install`` will return
a container that holds pointers to all of the applications (one in this case
since we passed a ``NodeContainer`` containing one node) created by the
a container that holds pointers to all of the applications (one in this case
since we passed a ``NodeContainer`` containing one node) created by the
helper.
Applications require a time to "start" generating traffic and may take an
optional time to "stop". We provide both. These times are set using the
``ApplicationContainer`` methods ``Start`` and ``Stop``. These
``ApplicationContainer`` methods ``Start`` and ``Stop``. These
methods take ``Time`` parameters. In this case, we use an *explicit*
C++ conversion sequence to take the C++ double 1.0 and convert it to an
C++ conversion sequence to take the C++ double 1.0 and convert it to an
|ns3| ``Time`` object using a ``Seconds`` cast. Be aware that
the conversion rules may be controlled by the model author, and C++ has its
own rules, so you can't always just assume that parameters will be happily
own rules, so you can't always just assume that parameters will be happily
converted for you. The two lines,
::
@@ -662,27 +662,27 @@ that is managed by an ``UdpEchoClientHelper``.
clientApps.Stop (Seconds (10.0));
For the echo client, however, we need to set five different ``Attributes``.
The first two ``Attributes`` are set during construction of the
The first two ``Attributes`` are set during construction of the
``UdpEchoClientHelper``. We pass parameters that are used (internally to
the helper) to set the "RemoteAddress" and "RemotePort" ``Attributes``
in accordance with our convention to make required ``Attributes`` parameters
in the helper constructors.
in the helper constructors.
Recall that we used an ``Ipv4InterfaceContainer`` to keep track of the IP
addresses we assigned to our devices. The zeroth interface in the
``interfaces`` container is going to correspond to the IP address of the
zeroth node in the ``nodes`` container. The first interface in the
``interfaces`` container corresponds to the IP address of the first node
Recall that we used an ``Ipv4InterfaceContainer`` to keep track of the IP
addresses we assigned to our devices. The zeroth interface in the
``interfaces`` container is going to correspond to the IP address of the
zeroth node in the ``nodes`` container. The first interface in the
``interfaces`` container corresponds to the IP address of the first node
in the ``nodes`` container. So, in the first line of code (from above), we
are creating the helper and telling it to set the remote address of the client
to be the IP address assigned to the node on which the server resides. We
to be the IP address assigned to the node on which the server resides. We
also tell it to arrange to send packets to port nine.
The "MaxPackets" ``Attribute`` tells the client the maximum number of
packets we allow it to send during the simulation. The "Interval"
The "MaxPackets" ``Attribute`` tells the client the maximum number of
packets we allow it to send during the simulation. The "Interval"
``Attribute`` tells the client how long to wait between packets, and the
"PacketSize" ``Attribute`` tells the client how large its packet payloads
should be. With this particular combination of ``Attributes``, we are
should be. With this particular combination of ``Attributes``, we are
telling the client to send one 1024-byte packet.
Just as in the case of the echo server, we tell the echo client to ``Start``
@@ -691,7 +691,7 @@ enabled (at two seconds into the simulation).
Simulator
+++++++++
What we need to do at this point is to actually run the simulation. This is
What we need to do at this point is to actually run the simulation. This is
done using the global function ``Simulator::Run``.
::
@@ -709,35 +709,35 @@ When we previously called the methods,
clientApps.Stop (Seconds (10.0));
we actually scheduled events in the simulator at 1.0 seconds, 2.0 seconds and
two events at 10.0 seconds. When ``Simulator::Run`` is called, the system
will begin looking through the list of scheduled events and executing them.
First it will run the event at 1.0 seconds, which will enable the echo server
application (this event may, in turn, schedule many other events). Then it
two events at 10.0 seconds. When ``Simulator::Run`` is called, the system
will begin looking through the list of scheduled events and executing them.
First it will run the event at 1.0 seconds, which will enable the echo server
application (this event may, in turn, schedule many other events). Then it
will run the event scheduled for t=2.0 seconds which will start the echo client
application. Again, this event may schedule many more events. The start event
implementation in the echo client application will begin the data transfer phase
of the simulation by sending a packet to the server.
The act of sending the packet to the server will trigger a chain of events
that will be automatically scheduled behind the scenes and which will perform
the mechanics of the packet echo according to the various timing parameters
that will be automatically scheduled behind the scenes and which will perform
the mechanics of the packet echo according to the various timing parameters
that we have set in the script.
Eventually, since we only send one packet (recall the ``MaxPackets``
``Attribute`` was set to one), the chain of events triggered by
that single client echo request will taper off and the simulation will go
Eventually, since we only send one packet (recall the ``MaxPackets``
``Attribute`` was set to one), the chain of events triggered by
that single client echo request will taper off and the simulation will go
idle. Once this happens, the remaining events will be the ``Stop`` events
for the server and the client. When these events are executed, there are
no further events to process and ``Simulator::Run`` returns. The simulation
is then complete.
All that remains is to clean up. This is done by calling the global function
``Simulator::Destroy``. As the helper functions (or low level
All that remains is to clean up. This is done by calling the global function
``Simulator::Destroy``. As the helper functions (or low level
|ns3| code) executed, they arranged it so that hooks were inserted in
the simulator to destroy all of the objects that were created. You did not
have to keep track of any of these objects yourself --- all you had to do
the simulator to destroy all of the objects that were created. You did not
have to keep track of any of these objects yourself --- all you had to do
was to call ``Simulator::Destroy`` and exit. The |ns3| system
took care of the hard part for you. The remaining lines of our first
took care of the hard part for you. The remaining lines of our first
|ns3| script, ``first.cc``, do just that:
::
@@ -755,21 +755,21 @@ events in the temporal order of simulation time. Events may cause future
events to be scheduled (for example, a timer may reschedule itself to
expire at the next interval).
The initial events are usually triggered by each object, e.g., IPv6 will
schedule Router Advertisements, Neighbor Solicitations, etc.,
The initial events are usually triggered by each object, e.g., IPv6 will
schedule Router Advertisements, Neighbor Solicitations, etc.,
an Application schedules the first packet sending event, etc.
When an event is processed, it may generate zero, one or more events.
As a simulation executes, events are consumed, but more events may (or may
not) be generated.
The simulation will stop automatically when no further events are in the
event queue, or when a special Stop event is found. The Stop event is
created through the
The simulation will stop automatically when no further events are in the
event queue, or when a special Stop event is found. The Stop event is
created through the
``Simulator::Stop (stopTime);`` function.
There is a typical case where ``Simulator::Stop`` is absolutely necessary
There is a typical case where ``Simulator::Stop`` is absolutely necessary
to stop the simulation: when there is a self-sustaining event.
Self-sustaining (or recurring) events are events that always reschedule
Self-sustaining (or recurring) events are events that always reschedule
themselves. As a consequence, they always keep the event queue non-empty.
There are many protocols and modules containing recurring events, e.g.:
@@ -778,17 +778,17 @@ There are many protocols and modules containing recurring events, e.g.:
* RIPng - periodic broadcast of routing tables update
* etc.
In these cases, ``Simulator::Stop`` is necessary to gracefully stop the
simulation. In addition, when |ns3| is in emulation mode, the
``RealtimeSimulator`` is used to keep the simulation clock aligned with
the machine clock, and ``Simulator::Stop`` is necessary to stop the
process.
In these cases, ``Simulator::Stop`` is necessary to gracefully stop the
simulation. In addition, when |ns3| is in emulation mode, the
``RealtimeSimulator`` is used to keep the simulation clock aligned with
the machine clock, and ``Simulator::Stop`` is necessary to stop the
process.
Many of the simulation programs in the tutorial do not explicitly call
``Simulator::Stop``, since the event queue will automatically run out
of events. However, these programs will also accept a call to
of events. However, these programs will also accept a call to
``Simulator::Stop``. For example, the following additional statement
in the first example program will schedule an explicit stop at 11 seconds:
in the first example program will schedule an explicit stop at 11 seconds:
::
@@ -799,20 +799,20 @@ in the first example program will schedule an explicit stop at 11 seconds:
}
The above will not actually change the behavior of this program, since
this particular simulation naturally ends after 10 seconds. But if you
were to change the stop time in the above statement from 11 seconds to 1
second, you would notice that the simulation stops before any output is
printed to the screen (since the output occurs around time 2 seconds of
this particular simulation naturally ends after 10 seconds. But if you
were to change the stop time in the above statement from 11 seconds to 1
second, you would notice that the simulation stops before any output is
printed to the screen (since the output occurs around time 2 seconds of
simulation time).
It is important to call ``Simulator::Stop`` *before* calling
It is important to call ``Simulator::Stop`` *before* calling
``Simulator::Run``; otherwise, ``Simulator::Run`` may never return control
to the main program to execute the stop!
Building Your Script
++++++++++++++++++++
We have made it trivial to build your simple scripts. All you have to do is
to drop your script into the scratch directory and it will automatically be
We have made it trivial to build your simple scripts. All you have to do is
to drop your script into the scratch directory and it will automatically be
built if you run ns3. Let's try it. Copy ``examples/tutorial/first.cc`` into
the ``scratch`` directory after changing back into the top level directory.
@@ -857,17 +857,17 @@ You should see some output:
Received 1024 bytes from 10.1.1.2
Here you see that the build system checks to make sure that the file has been
built and then runs it. You see the logging component on the echo client
indicate that it has sent one 1024 byte packet to the Echo Server on
built and then runs it. You see the logging component on the echo client
indicate that it has sent one 1024 byte packet to the Echo Server on
10.1.1.2. You also see the logging component on the echo server say that
it has received the 1024 bytes from 10.1.1.1. The echo server silently
echoes the packet and you see the echo client log that it has received its
it has received the 1024 bytes from 10.1.1.1. The echo server silently
echoes the packet and you see the echo client log that it has received its
packet back from the server.
Ns-3 Source Code
****************
Now that you have used some of the |ns3| helpers you may want to
Now that you have used some of the |ns3| helpers you may want to
have a look at some of the source code that implements that functionality.
The most recent code can be browsed on our web server at the following link:
https://gitlab.com/nsnam/ns-3-dev.git. There, you will see the Git/GitLab
@@ -877,7 +877,7 @@ At the top of the page, you will see a number of links,
.. sourcecode:: text
summary | shortlog | changelog | graph | tags | files
summary | shortlog | changelog | graph | tags | files
Go ahead and select the ``files`` link. This is what the top-level of
most of our *repositories* will look:
@@ -922,12 +922,12 @@ you click on ``first.cc`` you will find the code you just walked through.
The source code is mainly in the ``src`` directory. You can view source
code either by clicking on the directory name or by clicking on the ``files``
link to the right of the directory name. If you click on the ``src``
directory, you will be taken to the listing of the ``src`` subdirectories. If you
directory, you will be taken to the listing of the ``src`` subdirectories. If you
then click on ``core`` subdirectory, you will find a list of files. The first file
you will find (as of this writing) is ``abort.h``. If you click on the
``abort.h`` link, you will be sent to the source file for ``abort.h`` which
you will find (as of this writing) is ``abort.h``. If you click on the
``abort.h`` link, you will be sent to the source file for ``abort.h`` which
contains useful macros for exiting scripts if abnormal conditions are detected.
The source code for the helpers we have used in this chapter can be found in the
The source code for the helpers we have used in this chapter can be found in the
``src/applications/helper`` directory. Feel free to poke around in the directory tree to
get a feel for what is there and the style of |ns3| programs.

View File

@@ -7,8 +7,8 @@ Conclusion
Futures
*******
This document is intended as a living document. We hope and expect it to
grow over time to cover more and more of the nuts and bolts of |ns3|.
This document is intended as a living document. We hope and expect it to
grow over time to cover more and more of the nuts and bolts of |ns3|.
Writing manual and tutorial chapters is not something we all get excited about,
but it is very important to the project. If you are an expert in one of these
@@ -18,7 +18,7 @@ chapters; or any other chapter you may think is important.
Closing
*******
|ns3| is a large and complicated system. It is impossible to cover all
|ns3| is a large and complicated system. It is impossible to cover all
of the things you will need to know in one small tutorial. Readers
who want to learn more are encouraged to read the following additional
documentation:

View File

@@ -22,7 +22,7 @@
# To change default code-block format in Latex to footnotesize (8pt)
# Tip from https://stackoverflow.com/questions/9899283/how-do-you-change-the-code-example-font-size-in-latex-pdf-output-with-sphinx/9955928
# Note: sizes are \footnotesize (8pt), \small (9pt), and \normalsize (10pt).
# Note: sizes are \footnotesize (8pt), \small (9pt), and \normalsize (10pt).
#from sphinx.highlighting import PygmentsBridge
#from pygments.formatters.latex import LatexFormatter
@@ -271,7 +271,7 @@ latex_elements = {
# (double backquotes) to either \footnotesize (8pt) or \small (9pt)
#
# See above to change the font size of verbatim code blocks
#
#
# 'preamble': '',
'preamble': u'''\\usepackage{amssymb}
\\definecolor{VerbatimBorderColor}{rgb}{1,1,1}

View File

@@ -13,10 +13,10 @@ tutorial section is also a work-in-progress.
Motivation
**********
One of the main points of running simulations is to generate output data,
One of the main points of running simulations is to generate output data,
either for research purposes or simply to learn about the system.
In the previous chapter, we introduced the tracing subsystem and
the example ``sixth.cc``, from which PCAP or ASCII trace files are
the example ``sixth.cc``, from which PCAP or ASCII trace files are
generated. These traces are valuable for data analysis using a
variety of external tools, and for many users, such output data is
a preferred means of gathering data (for analysis by external tools).
@@ -30,7 +30,7 @@ including the following:
trace files is prohibitive or cumbersome, and
* the need for *online* data reduction or computation, during the course
of the simulation. A good example of this is to define a termination
condition for the simulation, to tell it when to stop when it has
condition for the simulation, to tell it when to stop when it has
received enough data to form a narrow-enough confidence interval around
the estimate of some parameter.
@@ -38,7 +38,7 @@ The |ns3| data collection framework is designed to provide these
additional capabilities beyond trace-based output. We recommend
that the reader interested in this topic consult the |ns3| Manual
for a more detailed treatment of this framework; here, we summarize
with an example program some of the developing capabilities.
with an example program some of the developing capabilities.
Example Code
************
@@ -47,14 +47,14 @@ The tutorial example ``examples/tutorial/seventh.cc`` resembles the
``sixth.cc`` example we previously reviewed, except for a few changes.
First, it has been enabled for IPv6 support with a command-line option:
::
::
CommandLine cmd;
cmd.AddValue ("useIpv6", "Use Ipv6", useV6);
cmd.Parse (argc, argv);
If the user specifies the ``useIpv6`` option, the program will be run
using IPv6 instead of IPv4. The ``help`` option, available on all |ns3|
using IPv6 instead of IPv4. The ``help`` option, available on all |ns3|
programs that support the CommandLine object as shown above, can
be invoked as follows (please note the use of double quotes):
@@ -67,10 +67,10 @@ which produces:
::
ns3-dev-seventh-debug [Program Arguments] [General Arguments]
Program Arguments:
--useIpv6: Use Ipv6 [false]
General Arguments:
--PrintGlobals: Print the list of globals.
--PrintGroups: Print the list of groups.
@@ -79,7 +79,7 @@ which produces:
--PrintAttributes=[typeid]: Print all attributes of typeid.
--PrintHelp: Print this help message.
This default (use of IPv4, since useIpv6 is false) can be changed by
This default (use of IPv4, since useIpv6 is false) can be changed by
toggling the boolean value as follows:
::
@@ -93,11 +93,11 @@ and have a look at the pcap generated, such as with ``tcpdump``:
tcpdump -r seventh.pcap -nn -tt
This has been a short digression into IPv6 support and the command line,
which was also introduced earlier in this tutorial. For a dedicated
example of command line usage, please see
which was also introduced earlier in this tutorial. For a dedicated
example of command line usage, please see
``src/core/examples/command-line-example.cc``.
Now back to data collection. In the ``examples/tutorial/`` directory,
Now back to data collection. In the ``examples/tutorial/`` directory,
type the following command: ``diff -u sixth.cc seventh.cc``, and examine
some of the new lines of this diff:
@@ -120,7 +120,7 @@ some of the new lines of this diff:
...
+ // Use GnuplotHelper to plot the packet byte count over time
+ GnuplotHelper plotHelper;
+
+
+ // Configure the plot. The first argument is the file name prefix
+ // for the output files generated. The second, third, and fourth
+ // arguments are, respectively, the plot title, x-axis, and y-axis labels
@@ -128,7 +128,7 @@ some of the new lines of this diff:
+ "Packet Byte Count vs. Time",
+ "Time (Seconds)",
+ "Packet Byte Count");
+
+
+ // Specify the probe type, trace source path (in configuration namespace), and
+ // probe output trace source ("OutputBytes") to plot. The fourth argument
+ // specifies the name of the data series label on the plot. The last
@@ -138,27 +138,27 @@ some of the new lines of this diff:
+ "OutputBytes",
+ "Packet Byte Count",
+ GnuplotAggregator::KEY_BELOW);
+
+
+ // Use FileHelper to write out the packet byte count over time
+ FileHelper fileHelper;
+
+
+ // Configure the file to be written, and the formatting of output data.
+ fileHelper.ConfigureFile ("seventh-packet-byte-count",
+ FileAggregator::FORMATTED);
+
+
+ // Set the labels for this formatted output file.
+ fileHelper.Set2dFormat ("Time (Seconds) = %.3e\tPacket Byte Count = %.0f");
+
+
+ // Specify the probe type, probe path (in configuration namespace), and
+ // probe output trace source ("OutputBytes") to write.
+ fileHelper.WriteProbe (probeType,
+ tracePath,
+ "OutputBytes");
+
+
Simulator::Stop (Seconds (20));
Simulator::Run ();
Simulator::Destroy ();
The careful reader will have noticed, when testing the IPv6 command
line attribute above, that ``seventh.cc`` had created a number of new output files:
@@ -185,20 +185,20 @@ GnuplotHelper
The GnuplotHelper is an |ns3| helper object aimed at the production of
``gnuplot`` plots with as few statements as possible, for common cases.
It hooks |ns3| trace sources with data types supported by the
data collection system. Not all |ns3| trace sources data types are
supported, but many of the common trace types are, including TracedValues
It hooks |ns3| trace sources with data types supported by the
data collection system. Not all |ns3| trace sources data types are
supported, but many of the common trace types are, including TracedValues
with plain old data (POD) types.
Let's look at the output produced by this helper:
::
seventh-packet-byte-count.dat
seventh-packet-byte-count.plt
seventh-packet-byte-count.sh
The first is a gnuplot data file with a series of space-delimited
The first is a gnuplot data file with a series of space-delimited
timestamps and packet byte counts. We'll cover how this particular
data output was configured below, but let's continue with the output
files. The file ``seventh-packet-byte-count.plt`` is a gnuplot plot file,
@@ -209,16 +209,16 @@ syntax can see that this will produce a formatted output PNG file named
to produce the desired PNG (which can be viewed in an image editor); that
is, the command:
::
::
sh seventh-packet-byte-count.sh
will yield ``seventh-packet-byte-count.png``. Why wasn't this PNG
produced in the first place? The answer is that by providing the
produced in the first place? The answer is that by providing the
plt file, the user can hand-configure the result if desired, before
producing the PNG.
The PNG image title states that this plot is a plot of
The PNG image title states that this plot is a plot of
"Packet Byte Count vs. Time", and that it is plotting the probed data
corresponding to the trace source path:
@@ -226,7 +226,7 @@ corresponding to the trace source path:
/NodeList/*/$ns3::Ipv6L3Protocol/Tx
Note the wild-card in the trace path. In summary, what this plot is
Note the wild-card in the trace path. In summary, what this plot is
capturing is the plot of packet bytes observed at the transmit trace
source of the Ipv6L3Protocol object; largely 596-byte TCP segments
in one direction, and 60-byte TCP acks in the other (two node
@@ -236,7 +236,7 @@ How was this configured? A few statements need to be provided. First,
the GnuplotHelper object must be declared and configured:
::
+ // Use GnuplotHelper to plot the packet byte count over time
+ GnuplotHelper plotHelper;
+
@@ -254,7 +254,7 @@ is the first argument, the plot title is the second, the x-axis label
the third, and the y-axis label the fourth argument.
The next step is to configure the data, and here is where the trace
source is hooked. First, note above in the program we declared a few
source is hooked. First, note above in the program we declared a few
variables for later use:
::
@@ -267,7 +267,7 @@ variables for later use:
We use them here:
::
+ // Specify the probe type, trace source path (in configuration namespace), and
+ // probe output trace source ("OutputBytes") to plot. The fourth argument
+ // specifies the name of the data series label on the plot. The last
@@ -315,7 +315,7 @@ The Ipv6PacketProbe exports, itself, some trace sources that extract
the data out of the probed Packet object:
::
TypeId
Ipv6PacketProbe::GetTypeId ()
{
@@ -332,14 +332,14 @@ the data out of the probed Packet object:
;
return tid;
}
The third argument of our PlotProbe statement specifies that we are
interested in the number of bytes in this packet; specifically, the
"OutputBytes" trace source of Ipv6PacketProbe.
Finally, the last two arguments of the statement provide the plot
legend for this data series ("Packet Byte Count"), and an optional
gnuplot formatting statement (GnuplotAggregator::KEY_BELOW) that we want
gnuplot formatting statement (GnuplotAggregator::KEY_BELOW) that we want
the plot key to be inserted below the plot. Other options include
NO_KEY, KEY_INSIDE, and KEY_ABOVE.
@@ -381,7 +381,7 @@ The following TraceSource types are supported by Probes as of this writing:
| Ptr<const Packet>, const Address& | ApplicationPacketProbe | OutputBytes | applications/model/application-packet-probe.h |
+------------------------------------------+------------------------+---------------+----------------------------------------------------+
As can be seen, only a few trace sources are supported, and they are all
As can be seen, only a few trace sources are supported, and they are all
oriented towards outputting the Packet size (in bytes). However,
most of the fundamental data types available as TracedValues can be
supported with these helpers.
@@ -394,7 +394,7 @@ example. The example program provides formatted output of the
same timestamped data, such as follows:
::
Time (Seconds) = 9.312e+00 Packet Byte Count = 596
Time (Seconds) = 9.312e+00 Packet Byte Count = 564
@@ -405,7 +405,7 @@ be seen in the filenames. Let's look at the code piece-by-piece:
+ // Use FileHelper to write out the packet byte count over time
+ FileHelper fileHelper;
+
+
+ // Configure the file to be written, and the formatting of output data.
+ fileHelper.ConfigureFile ("seventh-packet-byte-count",
+ FileAggregator::FORMATTED);
@@ -414,11 +414,11 @@ The file helper file prefix is the first argument, and a format specifier
is next.
Some other options for formatting include SPACE_SEPARATED, COMMA_SEPARATED,
and TAB_SEPARATED. Users are able to change the formatting (if
FORMATTED is specified) with a format string such as follows:
FORMATTED is specified) with a format string such as follows:
::
+
+
+ // Set the labels for this formatted output file.
+ fileHelper.Set2dFormat ("Time (Seconds) = %.3e\tPacket Byte Count = %.0f");
@@ -428,17 +428,17 @@ trace source "OutputBytes" is hooked:
::
+
+
+ // Specify the probe type, trace source path (in configuration namespace), and
+ // probe output trace source ("OutputBytes") to write.
+ fileHelper.WriteProbe (probeType,
+ tracePath,
+ "OutputBytes");
+
+
The wildcard fields in this trace source specifier match two trace sources.
Unlike the GnuplotHelper example, in which two data series were overlaid
on the same plot, here, two separate files are written to disk.
on the same plot, here, two separate files are written to disk.
Summary
*******
@@ -447,5 +447,5 @@ Data collection support is new as of ns-3.18, and basic support for
providing time series output has been added. The basic pattern described
above may be replicated within the scope of support of the existing
probes and trace sources. More capabilities including statistics
processing will be added in future releases.
processing will be added in future releases.

View File

@@ -16,17 +16,17 @@ Overview
|ns3| is built as a system of software libraries that work together.
User programs can be written that links with (or imports from) these
libraries. User programs are written in either the C++ or Python
libraries. User programs are written in either the C++ or Python
programming languages.
|ns3| is distributed as source code, meaning that the target system
needs to have a software development environment to build the libraries
first, then build the user program. |ns3| could in principle be
first, then build the user program. |ns3| could in principle be
distributed as pre-built libraries for selected systems, and in the
future it may be distributed that way, but at present, many users
actually do their work by editing |ns3| itself, so having the source
code around to rebuild the libraries is useful. If someone would like
to undertake the job of making pre-built libraries and packages for
code around to rebuild the libraries is useful. If someone would like
to undertake the job of making pre-built libraries and packages for
operating systems, please contact the ns-developers mailing list.
In the following, we'll look at three ways of downloading and building
@@ -49,11 +49,11 @@ a non-privileged user account is recommended.
Prerequisites
*************
The entire set of available |ns3| libraries has a number of dependencies
The entire set of available |ns3| libraries has a number of dependencies
on third-party libraries, but most of |ns3| can be built and used with
support for a few common (often installed by default) components: a
C++ compiler, an installation of Python, a source code editor (such as vim,
emacs, or Eclipse) and, if using the development repositories, an
emacs, or Eclipse) and, if using the development repositories, an
installation of Git source code control system. Most beginning users
need not concern themselves if their configuration reports some missing
optional features of |ns3|, but for those wishing a full installation,
@@ -62,13 +62,13 @@ and tips. One such page is the "Installation" page, with install instructions
for various systems, available at
https://www.nsnam.org/wiki/Installation.
The "Prerequisites" section of this wiki page explains which packages are
required to support common |ns3| options, and also provides the
The "Prerequisites" section of this wiki page explains which packages are
required to support common |ns3| options, and also provides the
commands used to install them for common Linux or macOS variants.
You may want to take this opportunity to explore the |ns3| wiki
a bit, or the main web site at https://www.nsnam.org, since there is a
wealth of information there.
You may want to take this opportunity to explore the |ns3| wiki
a bit, or the main web site at https://www.nsnam.org, since there is a
wealth of information there.
As of the most recent |ns3| release (ns-3.36), the following tools
are needed to get started with |ns3|:
@@ -104,7 +104,7 @@ of the parent directories contains a space in the directory name:
Downloading a release of ns-3 as a source archive
+++++++++++++++++++++++++++++++++++++++++++++++++
This option is for the new user who wishes to download and experiment with
This option is for the new user who wishes to download and experiment with
the most recently released and packaged version of |ns3|.
|ns3| publishes its releases as compressed source archives, sometimes
referred to as a tarball.
@@ -114,9 +114,9 @@ The process for downloading |ns3| via tarball is simple; you just
have to pick a release, download it and uncompress it.
Let's assume that you, as a user, wish to build |ns3| in a local
directory called ``workspace``.
If you adopt the ``workspace`` directory approach, you can
get a copy of a release by typing the following into your Linux shell
directory called ``workspace``.
If you adopt the ``workspace`` directory approach, you can
get a copy of a release by typing the following into your Linux shell
(substitute the appropriate version numbers, of course)
.. sourcecode:: console
@@ -130,8 +130,8 @@ get a copy of a release by typing the following into your Linux shell
Notice the use above of the ``wget`` utility, which is a command-line
tool to fetch objects from the web; if you do not have this installed,
you can use a browser for this step.
Following these steps, if you change into the directory
Following these steps, if you change into the directory
``ns-allinone-3.36``, you should see a number of files and directories
.. sourcecode:: text
@@ -152,7 +152,7 @@ at https://gitlab.com/nsnam/. The group name ``nsnam`` organizes the
various repositories used by the open source project.
The simplest way to get started using Git repositories is to fork or clone
the ``ns-3-allinone`` environment. This is a set of scripts that manages the
the ``ns-3-allinone`` environment. This is a set of scripts that manages the
downloading and building of the most commonly used subsystems of |ns3|
for you. If you are new to Git, the terminology of ``fork`` and ``clone``
may be foreign to you; if so, we recommend that you simply ``clone``
@@ -193,7 +193,7 @@ release number:
After this step, the additional repositories of |ns3|, bake, pybindgen,
and netanim will be downloaded to the ``ns-3-allinone`` directory.
Downloading ns-3 Using Bake
+++++++++++++++++++++++++++
@@ -205,7 +205,7 @@ ns-3-allinone is called ``bake``.
Bake is a tool for coordinated software building from multiple repositories,
developed for the |ns3| project. Bake can be used to fetch development
versions of the |ns3| software, and to download and build extensions to the
versions of the |ns3| software, and to download and build extensions to the
base |ns3| distribution, such as the Direct Code Execution environment,
Network Simulation Cradle, ability to create new Python bindings, and
various |ns3| "apps". If you envision that your |ns3| installation may
@@ -218,10 +218,10 @@ will allow one to download any software that was current at the
time of the release. That is, for example, the version of Bake that
is distributed with the ``ns-3.30`` release can be used to fetch components
for that |ns3| release or earlier, but can't be used to fetch components
for later releases (unless the ``bakeconf.xml`` package description file
for later releases (unless the ``bakeconf.xml`` package description file
is updated).
You can also get the most recent copy of ``bake`` by typing the
You can also get the most recent copy of ``bake`` by typing the
following into your Linux shell (assuming you have installed Git)::
$ cd
@@ -229,7 +229,7 @@ following into your Linux shell (assuming you have installed Git)::
$ cd workspace
$ git clone https://gitlab.com/nsnam/bake.git
As the git command executes, you should see something like the
As the git command executes, you should see something like the
following displayed:
.. sourcecode:: console
@@ -242,7 +242,7 @@ following displayed:
Receiving objects: 100% (2086/2086), 2.68 MiB | 3.82 MiB/s, done.
Resolving deltas: 100% (1404/1404), done.
After the clone command completes, you should have a directory called
After the clone command completes, you should have a directory called
``bake``, the contents of which should look something like the following:
.. sourcecode:: console
@@ -266,21 +266,21 @@ There are a few configuration targets available:
4. ``ns-3-allinone``: similar to the released version of the allinone
module, but for development code.
The current development snapshot (unreleased) of |ns3| may be found
at https://gitlab.com/nsnam/ns-3-dev.git. The
The current development snapshot (unreleased) of |ns3| may be found
at https://gitlab.com/nsnam/ns-3-dev.git. The
developers attempt to keep these repositories in consistent, working states but
they are in a development area with unreleased code present, so you may want
they are in a development area with unreleased code present, so you may want
to consider staying with an official release if you do not need newly-
introduced features.
You can find the latest version of the
code either by inspection of the repository list or by going to the
code either by inspection of the repository list or by going to the
`"ns-3 Releases"
<https://www.nsnam.org/releases>`_
web page and clicking on the latest release link. We'll proceed in
this tutorial example with ``ns-3.36``.
We are now going to use the bake tool to pull down the various pieces of
We are now going to use the bake tool to pull down the various pieces of
|ns3| you will be using. First, we'll say a word about running bake.
Bake works by downloading source packages into a source directory,
@@ -290,9 +290,9 @@ outside of the directory it was downloaded into, it is advisable
to put bake into your path, such as follows (Linux bash shell example).
First, change into the 'bake' directory, and then set the following
environment variables:
.. sourcecode:: console
$ export BAKE_HOME=`pwd`
$ export PATH=$PATH:$BAKE_HOME:$BAKE_HOME/build/bin
$ export PYTHONPATH=$PYTHONPATH:$BAKE_HOME:$BAKE_HOME/build/lib
@@ -333,8 +333,8 @@ You should see something like the following:
In particular, download tools such as Git and Mercurial
are our principal concerns at this point, since they allow us to fetch
the code. Please install missing tools at this stage, in the usual
way for your system (if you are able to), or contact your system
administrator as needed to install these tools. You can also
way for your system (if you are able to), or contact your system
administrator as needed to install these tools. You can also
Next, try to download the software:
@@ -393,15 +393,15 @@ described above; not from downloading via git or bake.
When working from a released tarball, a convenience script available as
part of ``ns-3-allinone`` can orchestrate a simple build of components.
This program is called ``build.py``. This
This program is called ``build.py``. This
program will get the project configured for you
in the most commonly useful way. However, please note that more advanced
configuration and work with |ns3| will typically involve using the
native |ns3| build system, CMake, to be introduced later in this tutorial.
If you downloaded
using a tarball you should have a directory called something like
``ns-allinone-3.36`` under your ``~/workspace`` directory.
using a tarball you should have a directory called something like
``ns-allinone-3.36`` under your ``~/workspace`` directory.
Type the following:
.. sourcecode:: console
@@ -424,7 +424,7 @@ Building with bake
++++++++++++++++++
If you used bake above to fetch source code from project repositories, you
may continue to use it to build |ns3|. Type:
may continue to use it to build |ns3|. Type:
.. sourcecode:: console
@@ -440,12 +440,12 @@ and you should see something like:
There may be failures to build all components, but the build will proceed
anyway if the component is optional. For example, a recent portability issue
has been that castxml may not build via the bake build tool on all
has been that castxml may not build via the bake build tool on all
platforms; in this case, the line will show something like::
>> Building castxml - Problem
> Problem: Optional dependency, module "castxml" failed
This may reduce the functionality of the final build.
This may reduce the functionality of the final build.
However, bake will continue since "castxml" is not an essential dependency.
For more information call bake with -v or -vvv, for full verbose mode.
@@ -475,18 +475,18 @@ CMake needs to be installed before building |ns3|.
So, to proceed, please change your working directory to
the |ns3| directory that you have initially built.
It's not
It's not
strictly required at this point, but it will be valuable to take a slight
detour and look at how to make changes to the configuration of the project.
Probably the most useful configuration change you can make will be to
Probably the most useful configuration change you can make will be to
build the optimized version of the code. The project will be configured
by default using the ``default`` build profile, which is an optimized
build with debug information (CMAKE_BUILD_TYPE=relwithdebinfo) version.
build with debug information (CMAKE_BUILD_TYPE=relwithdebinfo) version.
Let's tell the project to make an optimized build.
To maintain a similar interface for command-line users, we include a
wrapper script for CMake, |ns3|. To tell |ns3| that it should do optimized
builds that include the examples and tests, you will need to execute the
builds that include the examples and tests, you will need to execute the
following commands:
.. sourcecode:: console
@@ -495,11 +495,11 @@ following commands:
$ ./ns3 configure --build-profile=optimized --enable-examples --enable-tests
This runs CMake out of the local directory (which is provided as a convenience
for you). The first command to clean out the previous build is not
for you). The first command to clean out the previous build is not
typically strictly necessary but is good practice (but see `Build Profiles`_,
below); it will remove the
previously built libraries and object files found in directory ``build/``.
When the project is reconfigured and the build system checks for various
previously built libraries and object files found in directory ``build/``.
When the project is reconfigured and the build system checks for various
dependencies, you should see
output that looks similar to the following:
@@ -651,8 +651,8 @@ output that looks similar to the following:
Tap Bridge : ON
Tap FdNetDevice : ON
Tests : ON
Modules configured to be built:
antenna aodv applications
bridge buildings config-store
@@ -667,13 +667,13 @@ output that looks similar to the following:
tap-bridge test topology-read
traffic-control uan virtual-net-device
wave wifi wimax
Modules that cannot be built:
brite click mpi
openflow visualizer
-- Configuring done
-- Generating done
-- Build files have been written to: /mnt/dev/tools/source/ns-3-dev/cmake-cache
@@ -686,8 +686,8 @@ Note the last part of the above output. Some |ns3| options are not enabled by
default or require support from the underlying system to work properly (``OFF (not requested)``).
Other options might depend on third-party libraries, which if not found will be disabled
(``OFF(missing dependency)``).
If this library were not found, the corresponding |ns3| feature
would not be enabled and a message would be displayed. Note further that there is
If this library were not found, the corresponding |ns3| feature
would not be enabled and a message would be displayed. Note further that there is
a feature to use the program ``sudo`` to set the suid bit of certain programs.
This is not enabled by default and so this feature is reported as "not enabled."
Finally, to reprint this summary of which optional features are enabled, use
@@ -700,7 +700,7 @@ Now go ahead and switch back to the debug build that includes the examples and t
$ ./ns3 clean
$ ./ns3 configure --build-profile=debug --enable-examples --enable-tests
The build system is now configured and you can build the debug versions of
The build system is now configured and you can build the debug versions of
the |ns3| programs by simply typing:
.. sourcecode:: console
@@ -737,7 +737,7 @@ options through to ns3, so instead of the above, the following will work:
.. sourcecode:: console
$ ./build.py -- --disable-python
$ ./build.py -- --disable-python
as it generates the underlying command ``./ns3 configure --disable-python``.
@@ -762,7 +762,7 @@ on Fedora 28, when Gtk2+ is installed, will result in an error such as::
void (*__gtk_reserved1);
In releases starting with ns-3.28.1, an option is available in CMake to work
around these issues. The option disables the inclusion of the '-Werror'
around these issues. The option disables the inclusion of the '-Werror'
flag to g++ and clang++. The option is '--disable-werror' and must be
used at configure time; e.g.:
@@ -774,9 +774,9 @@ Configure vs. Build
===================
Some CMake commands are only meaningful during the configure phase and some commands are valid
in the build phase. For example, if you wanted to use the emulation
in the build phase. For example, if you wanted to use the emulation
features of |ns3|, you might want to enable setting the suid bit using
sudo as described above. This turns out to be a configuration-time command, and so
sudo as described above. This turns out to be a configuration-time command, and so
you could reconfigure using the following command that also includes the examples and tests.
.. sourcecode:: console
@@ -829,10 +829,10 @@ The build profile controls the use of logging, assertions, and compiler optimiza
| Flags | | | | ``-march=native`` |
| | | | | ``-mtune=native`` |
+----------+---------------------------------+-----------------------------+-------------------------------+---------------------------------+
As you can see, logging and assertions are only configured
by default in debug builds, although they can be selectively enabled
in other build profiles by using the ``--enable-logs`` and
in other build profiles by using the ``--enable-logs`` and
``--enable-asserts`` flags during CMake configuration time.
Recommended practice is to develop your scenario in debug mode, then
conduct repetitive runs (for statistics or changing parameters) in
@@ -932,7 +932,7 @@ into ``/usr/local/bin``, libraries into ``/usr/local/lib``, and headers
into ``/usr/local/include``. Superuser privileges are typically needed
to install to the default prefix, so the typical command would be
``sudo ./ns3 install``. When running programs with ns3, ns3 will
first prefer to use shared libraries in the build directory, then
first prefer to use shared libraries in the build directory, then
will look for libraries in the library path configured in the local
environment. So when installing libraries to the system, it is good
practice to check that the intended libraries are being used.
@@ -950,7 +950,7 @@ the project if ns3 will be used to install things at a different prefix.
In summary, it is not necessary to call ``./ns3 install`` to use |ns3|.
Most users will not need this command since ns3 will pick up the
current libraries from the ``build`` directory, but some users may find
current libraries from the ``build`` directory, but some users may find
it useful if their use case involves working with programs outside
of the |ns3| directory.
@@ -968,8 +968,8 @@ remember where you are, and invoke ns3 like this:
but that gets tedious, and error prone, and there are better solutions.
One common way when using a text-based editor such as emacs or vim is to
open two terminal sessions and use one to build |ns3| and one to
One common way when using a text-based editor such as emacs or vim is to
open two terminal sessions and use one to build |ns3| and one to
edit source code.
If you only have the tarball, an environment variable can help:
@@ -1255,7 +1255,7 @@ source files to/from the CMakeLists.txt files, adding a new module or dependenci
Testing ns-3
************
You can run the unit tests of the |ns3| distribution by running the
You can run the unit tests of the |ns3| distribution by running the
``./test.py`` script:
.. sourcecode:: console
@@ -1314,7 +1314,7 @@ executing each test, which will actually look something like:
739 of 742 tests passed (739 passed, 3 skipped, 0 failed, 0 crashed, 0 valgrind errors)
This command is typically run by users to quickly verify that an
This command is typically run by users to quickly verify that an
|ns3| distribution has built correctly. (Note the order of the ``PASS: ...``
lines can vary, which is okay. What's important is that the summary line at
the end report that all tests passed; none failed or crashed.)
@@ -1348,13 +1348,13 @@ Congratulations! You are now an ns-3 user!
**What do I do if I don't see the output?**
If you see ns3 messages indicating that the build was
completed successfully, but do not see the "Hello Simulator" output,
chances are that you have switched your build mode to ``optimized`` in
completed successfully, but do not see the "Hello Simulator" output,
chances are that you have switched your build mode to ``optimized`` in
the `Building with the ns3 CMake wrapper`_ section, but have missed the change back to
``debug`` mode. All of the console output used in this tutorial uses a
special |ns3| logging component that is useful for printing
user messages to the console. Output from this component is
automatically disabled when you compile optimized code -- it is
``debug`` mode. All of the console output used in this tutorial uses a
special |ns3| logging component that is useful for printing
user messages to the console. Output from this component is
automatically disabled when you compile optimized code -- it is
"optimized out." If you don't see the "Hello Simulator" output,
type the following:
@@ -1363,14 +1363,14 @@ type the following:
$ ./ns3 configure --build-profile=debug --enable-examples --enable-tests
to tell ns3 to build the debug versions of the |ns3|
programs that includes the examples and tests. You must still build
programs that includes the examples and tests. You must still build
the actual debug version of the code by typing
.. sourcecode:: console
$ ./ns3
Now, if you run the ``hello-simulator`` program, you should see the
Now, if you run the ``hello-simulator`` program, you should see the
expected output.
Program Arguments
@@ -1387,7 +1387,7 @@ for ``<args>``. The ``--command-template`` argument to ns3 is
basically a recipe for constructing the actual command line ns3 should use
to execute the program. ns3 checks that the build is complete,
sets the shared library paths, then invokes the executable
using the provided command line template,
using the provided command line template,
inserting the program name for the ``%s`` placeholder.
If you find the above to be syntactically complicated, a simpler variant
@@ -1466,7 +1466,7 @@ Running without Building
As of the ns-3.30 release, a new ns3 option was introduced to allow the
running of programs while skipping the build step. This can reduce the time
to run programs when, for example, running the same program repeatedly
through a shell script, or when demonstrating program execution.
through a shell script, or when demonstrating program execution.
The option ``--no-build`` modifies the ``run`` option,
skipping the build steps of the program and required ns-3 libraries.
@@ -1478,8 +1478,8 @@ Build version
+++++++++++++
As of the ns-3.32 release, a new ns3 configure option ``--enable-build-version``
was introduced which inspects the local ns3 git repository during builds and adds
version metadata to the core module.
was introduced which inspects the local ns3 git repository during builds and adds
version metadata to the core module.
This configuration option has the following prerequisites:
@@ -1493,9 +1493,9 @@ or
If these prerequisites are not met, the configuration will fail.
When these prerequisites are met and ns-3 is configured with the
When these prerequisites are met and ns-3 is configured with the
``--enable-build-version`` option, the ns3 command ``--check-version`` can be
used to query the local git repository and display the current version metadata.
used to query the local git repository and display the current version metadata.
.. sourcecode:: console
@@ -1513,10 +1513,10 @@ an error message indicating that the option is disabled will be displayed instea
.. sourcecode:: text
Build version support is not enabled, reconfigure with --enable-build-version flag
Build version support is not enabled, reconfigure with --enable-build-version flag
The build information is generated by examining the current state of the git
repository. The output of ``--check-version`` will change whenever the state
The build information is generated by examining the current state of the git
repository. The output of ``--check-version`` will change whenever the state
of the active branch changes.
The output of ``--check-version`` has the following format:
@@ -1525,39 +1525,39 @@ The output of ``--check-version`` has the following format:
<version_tag>[+closest_tag][+distance_from_tag]@<commit_hash>[-tree_state]-<profile>
version_tag
version_tag
version_tag contains the version of the ns-3 code. The version tag is
defined as a git tag with the format ns-3*. If multiple git tags match the
format, the tag on the active branch which is closest to the current commit
is chosen.
is chosen.
closest_tag
closest_tag is similar to version_tag except it is the first tag found,
regardless of format. The closest tag is not included in the output when
closest_tag and version_tag have the same value.
closest_tag and version_tag have the same value.
distance_from_tag
distance_from_tag contains the number of commits between the current commit
and closest_tag. distance_from_tag is not included in the output when the
distance_from_tag contains the number of commits between the current commit
and closest_tag. distance_from_tag is not included in the output when the
value is 0 (i.e. when closest_tag points to the current commit)
commit_hash
commit_hash is the hash of the commit at the tip of the active branch. The
value is 'g' followed by the first 7 characters of the commit hash. The 'g'
prefix is used to indicate that this is a git hash.
prefix is used to indicate that this is a git hash.
tree_state
tree_state indicates the state of the working tree. When the working tree
has uncommitted changes this field has the value 'dirty'. The tree state is
not included in the version output when the working tree is clean (e.g. when
there are no uncommitted changes).
has uncommitted changes this field has the value 'dirty'. The tree state is
not included in the version output when the working tree is clean (e.g. when
there are no uncommitted changes).
profile
The build profile specified in the ``--build-profile`` option passed to
The build profile specified in the ``--build-profile`` option passed to
``ns3 configure``
A new class, named Version, has been added to the core module. The Version class
contains functions to retrieve individual fields of the build version as well
A new class, named Version, has been added to the core module. The Version class
contains functions to retrieve individual fields of the build version as well
as functions to print the full build version like ``--check-version``.
The ``build-version-example`` application provides an example of how to use
the Version class to retrieve the various build version fields. See the
@@ -1572,7 +1572,7 @@ the core module when the ``--enable-build-version`` option is configured.
build-version-example:
Program Version (according to CommandLine): ns-3.33+249@g80e0dd0-dirty-debug
Version fields:
LongVersion: ns-3.33+249@g80e0dd0-dirty-debug
ShortVersion: ns-3.33+*
@@ -1581,14 +1581,14 @@ the core module when the ``--enable-build-version`` option is configured.
Major: 3
Minor: 33
Patch: 0
ReleaseCandidate:
ReleaseCandidate:
ClosestAncestorTag: ns-3.33
TagDistance: 249
CommitHash: g80e0dd0
BuildProfile: debug
WorkingTree: dirty
The CommandLine class has also been updated to support the ``--version``
The CommandLine class has also been updated to support the ``--version``
option which will print the full build version and exit.
.. sourcecode:: text

View File

@@ -5,20 +5,20 @@
Introduction
------------
The |ns3| simulator is a discrete-event network simulator targeted
primarily for research and educational use. The
The |ns3| simulator is a discrete-event network simulator targeted
primarily for research and educational use. The
`ns-3 project
<http://www.nsnam.org>`_,
<http://www.nsnam.org>`_,
started in 2006, is an open-source project developing |ns3|.
The purpose of this tutorial is to introduce new |ns3| users to the
The purpose of this tutorial is to introduce new |ns3| users to the
system in a structured way. It is sometimes difficult for new users to
glean essential information from detailed manuals and to convert this
information into working simulations. In this tutorial, we will build
information into working simulations. In this tutorial, we will build
several example simulations, introducing and explaining key concepts and
features as we go.
As the tutorial unfolds, we will introduce the full |ns3| documentation
As the tutorial unfolds, we will introduce the full |ns3| documentation
and provide pointers to source code for those interested in delving deeper
into the workings of the system.
@@ -27,11 +27,11 @@ diving right in without too much documentation.
A few key points are worth noting at the onset:
* |ns3| is open-source, and the project strives to maintain an
open environment for researchers to contribute and share their software.
* |ns3| is open-source, and the project strives to maintain an
open environment for researchers to contribute and share their software.
* |ns3| is not a backwards-compatible extension of `ns-2
<http://www.isi.edu/nsnam/ns>`_;
it is a new simulator. The two simulators are both written in C++ but
<http://www.isi.edu/nsnam/ns>`_;
it is a new simulator. The two simulators are both written in C++ but
|ns3| is a new simulator that does not support the |ns2| APIs.
@@ -45,7 +45,7 @@ simulation engine for users to conduct simulation experiments. Some of the
reasons to use |ns3| include to perform studies that are more difficult
or not possible to perform with real systems, to study system behavior in
a highly controlled, reproducible environment, and to learn about how
networks work. Users will note that the available model set in |ns3|
networks work. Users will note that the available model set in |ns3|
focuses on modeling how Internet protocols and networks work, but
|ns3| is not limited to Internet systems; several users are using
|ns3| to model non-Internet-based systems.
@@ -55,28 +55,28 @@ a few distinguishing features of |ns3| in contrast to other tools.
* |ns3| is designed as a set of libraries that can be combined together
and also with other external software libraries. While some simulation
platforms provide users with a single, integrated graphical user
interface environment in which all tasks are carried out, |ns3| is
platforms provide users with a single, integrated graphical user
interface environment in which all tasks are carried out, |ns3| is
more modular in this regard. Several external animators and
data analysis and visualization tools can be used with |ns3|. However,
users should expect to work at the command line and with C++ and/or
Python software development tools.
Python software development tools.
* |ns3| is primarily used on Linux or macOS systems, although support exists
for BSD systems and also for Windows frameworks that can build Linux code,
such as Windows Subsystem for Linux, or Cygwin. Native Windows
Visual Studio is not presently supported although a developer is working
such as Windows Subsystem for Linux, or Cygwin. Native Windows
Visual Studio is not presently supported although a developer is working
on future support. Windows users may also use a Linux virtual machine.
* |ns3| is not an officially supported software product of any company.
Support for |ns3| is done on a best-effort basis on the
Support for |ns3| is done on a best-effort basis on the
ns-3-users forum (ns-3-users@googlegroups.com).
For ns-2 Users
**************
For those familiar with |ns2| (a popular tool that preceded |ns3|),
the most visible outward change when moving to
|ns3| is the choice of scripting language. Programs in |ns2| are
scripted in OTcl and results of simulations can be visualized using the
For those familiar with |ns2| (a popular tool that preceded |ns3|),
the most visible outward change when moving to
|ns3| is the choice of scripting language. Programs in |ns2| are
scripted in OTcl and results of simulations can be visualized using the
Network Animator nam. It is not possible to run a simulation
in |ns2| purely from C++ (i.e., as a main() program without any OTcl).
Moreover, some components of |ns2| are written in C++ and others in OTcl.
@@ -86,10 +86,10 @@ or in Python. New animators and visualizers are available and under
current development. Since |ns3|
generates pcap packet trace files, other utilities can be used to
analyze traces as well.
In this tutorial, we will first concentrate on scripting
directly in C++ and interpreting results via trace files.
In this tutorial, we will first concentrate on scripting
directly in C++ and interpreting results via trace files.
But there are similarities as well (both, for example, are based on C++
But there are similarities as well (both, for example, are based on C++
objects, and some code from |ns2| has already been ported to |ns3|).
We will try to highlight differences between |ns2| and |ns3|
as we proceed in this tutorial.
@@ -101,7 +101,7 @@ of |ns2|, or based on a specific simulation model that is only available
in |ns2|), a user will be more productive with |ns3| for the following
reasons:
* |ns3| is actively maintained with an active, responsive users mailing
* |ns3| is actively maintained with an active, responsive users mailing
list, while |ns2| is only lightly maintained and has not seen
significant development in its main code tree for over a decade.
* |ns3| provides features not available in |ns2|, such as an implementation
@@ -115,16 +115,16 @@ reasons:
If in doubt, a good guideline would be to look at both simulators (as
well as other simulators), and in particular the models available
for your research, but keep in mind that your experience may be better
in using the tool that is being actively developed and
in using the tool that is being actively developed and
maintained (|ns3|).
Contributing
************
|ns3| is a research and educational simulator, by and for the
research community. It will rely on the ongoing contributions of the
community to develop new models, debug or maintain existing ones, and share
results. There are a few policies that we hope will encourage people to
|ns3| is a research and educational simulator, by and for the
research community. It will rely on the ongoing contributions of the
community to develop new models, debug or maintain existing ones, and share
results. There are a few policies that we hope will encourage people to
contribute to |ns3| like they have for |ns2|:
* Open source licensing based on GNU GPLv2 compatibility
@@ -133,16 +133,16 @@ contribute to |ns3| like they have for |ns2|:
* `Contributed Code
<https://www.nsnam.org/wiki/Contributed_Code>`_ page, similar to |ns2|'s popular Contributed Code
`page
<http://nsnam.isi.edu/nsnam/index.php/Contributed_Code>`_
<http://nsnam.isi.edu/nsnam/index.php/Contributed_Code>`_
* Use of GitLab.com including issue tracker
<https://www.gitlab.com/nsnam>`_
We realize that if you are reading this document, contributing back to
We realize that if you are reading this document, contributing back to
the project is probably not your foremost concern at this point, but
we want you to be aware that contributing is in the spirit of the project and
that even the act of dropping us a note about your early experience
with |ns3| (e.g. "this tutorial section was not clear..."),
reports of stale documentation or comments in the code, etc. are much
that even the act of dropping us a note about your early experience
with |ns3| (e.g. "this tutorial section was not clear..."),
reports of stale documentation or comments in the code, etc. are much
appreciated. The preferred way to submit patches is either to fork
our project on GitLab.com and generate a Merge Request, or to open
an issue on our issue tracker and append a patch.

View File

@@ -17,10 +17,10 @@ Brief Summary
It is written directly in C++, not in a high-level modeling language;
simulation events are simply C++ function calls, organized by a scheduler.
An |ns3| user will obtain the |ns3| source code (see below),
compile it into shared (or static) libraries, and link the libraries to
An |ns3| user will obtain the |ns3| source code (see below),
compile it into shared (or static) libraries, and link the libraries to
`main()` programs that he or she authors. The `main()` program is where
the specific simulation scenario configuration is performed and where the
the specific simulation scenario configuration is performed and where the
simulator is run and stopped. Several example programs are provided, which
can be modified or copied to create new simulation scenarios. Users also
often edit the |ns3| library code (and rebuild the libraries) to change
@@ -38,9 +38,9 @@ a build-system (e.g. make, ninja, Xcode).
We focus in this chapter only on getting |ns3| up and running on a system
supported by a recent C++ compiler and Python runtime support.
For Linux, use either g++ or clang++ compilers. For macOS, use clang++
For Linux, use either g++ or clang++ compilers. For macOS, use clang++
(available in Xcode or Xcode Command Line Tools). For Windows, we recommend
to either use a Linux virtual machine, or the Windows Subsystem for Linux.
to either use a Linux virtual machine, or the Windows Subsystem for Linux.
Downloading ns-3
****************
@@ -127,7 +127,7 @@ Once complete, you can run the unit tests to check your build:
All tests should either PASS or be SKIPped. At this point, you have a
working |ns3| simulator. From here, you can start to
run programs (look in the examples directory). To run the first tutorial
program, whose source code is located at `examples/tutorial/first.cc`,
program, whose source code is located at `examples/tutorial/first.cc`,
use ns3 to run it (by doing so, the |ns3| shared libraries are found
automatically):

View File

@@ -7,18 +7,18 @@ The Web
*******
There are several important resources of which any |ns3| user must be
aware. The main web site is located at https://www.nsnam.org and
provides access to basic information about the |ns3| system. Detailed
aware. The main web site is located at https://www.nsnam.org and
provides access to basic information about the |ns3| system. Detailed
documentation is available through the main web site at
https://www.nsnam.org/documentation/. You can also find documents
https://www.nsnam.org/documentation/. You can also find documents
relating to the system architecture from this page.
There is a Wiki that complements the main |ns3| web site which you will
find at https://www.nsnam.org/wiki/. You will find user and developer
FAQs there, as well as troubleshooting guides, third-party contributed code,
papers, etc.
find at https://www.nsnam.org/wiki/. You will find user and developer
FAQs there, as well as troubleshooting guides, third-party contributed code,
papers, etc.
The source code may be found and browsed at GitLab.com:
The source code may be found and browsed at GitLab.com:
https://gitlab.com/nsnam/.
There you will find the current development tree in the repository named
``ns-3-dev``. Past releases and experimental repositories of the core
@@ -28,22 +28,22 @@ http://code.nsnam.org.
Git
***
Complex software systems need some way to manage the organization and
Complex software systems need some way to manage the organization and
changes to the underlying code and documentation. There are many ways to
perform this feat, and you may have heard of some of the systems that are
currently used to do this. Until recently, the |ns3| project used Mercurial
as its source code management system, but in December 2018, switched to
using Git. Although you do not need to know much about Git in order to
complete this tutorial, we recommend becoming familiar with Git and using it
complete this tutorial, we recommend becoming familiar with Git and using it
to access the source code. GitLab.com provides resources to get started
at: https://docs.gitlab.com/ee/gitlab-basics/.
CMake
*****
Once you have source code downloaded to your local system, you will need
Once you have source code downloaded to your local system, you will need
to compile that source to produce usable programs. Just as in the case of
source code management, there are many tools available to perform this
source code management, there are many tools available to perform this
function. Probably the most well known of these tools is ``make``. Along
with being the most well known, ``make`` is probably the most difficult to
use in a very large and highly configurable system. Because of this, many
@@ -59,11 +59,11 @@ Development Environment
***********************
As mentioned above, scripting in |ns3| is done in C++ or Python.
Most of the |ns3| API is available in Python, but the
models are written in C++ in either case. A working
Most of the |ns3| API is available in Python, but the
models are written in C++ in either case. A working
knowledge of C++ and object-oriented concepts is assumed in this document.
We will take some time to review some of the more advanced concepts or
possibly unfamiliar language features, idioms and design patterns as they
We will take some time to review some of the more advanced concepts or
possibly unfamiliar language features, idioms and design patterns as they
appear. We don't want this tutorial to devolve into a C++ tutorial, though,
so we do expect a basic command of the language. There are a wide
number of sources of information on C++ available on the web or
@@ -74,12 +74,12 @@ book or web site and work through at least the basic features of the language
before proceeding. For instance, `this tutorial
<http://www.cplusplus.com/doc/tutorial/>`_.
On Linux, the |ns3| system uses several components of the GNU "toolchain"
for development. A
software toolchain is the set of programming tools available in the given
On Linux, the |ns3| system uses several components of the GNU "toolchain"
for development. A
software toolchain is the set of programming tools available in the given
environment. For a quick review of what is included in the GNU toolchain see,
http://en.wikipedia.org/wiki/GNU_toolchain. |ns3| uses gcc,
GNU binutils, and gdb. However, we do not use the GNU build system tools,
http://en.wikipedia.org/wiki/GNU_toolchain. |ns3| uses gcc,
GNU binutils, and gdb. However, we do not use the GNU build system tools,
neither make directly. We use CMake for these functions.
On macOS, the toolchain used is Xcode. |ns3| users on a Mac are strongly
@@ -87,11 +87,11 @@ encouraged to install Xcode and the command-line tools packages from the
Apple App Store, and to look at the |ns3| installation wiki for more
information (https://www.nsnam.org/wiki/Installation).
Typically an |ns3| author will work in Linux or a Unix-like environment.
For those running under Windows, there do exist environments
which simulate the Linux environment to various degrees. The |ns3|
project has in the past (but not presently) supported development in the Cygwin environment for
these users. See http://www.cygwin.com/
Typically an |ns3| author will work in Linux or a Unix-like environment.
For those running under Windows, there do exist environments
which simulate the Linux environment to various degrees. The |ns3|
project has in the past (but not presently) supported development in the Cygwin environment for
these users. See http://www.cygwin.com/
for details on downloading, and visit the |ns3| wiki for more information
about Cygwin and |ns3|. MinGW is presently not officially supported.
Another alternative to Cygwin is to install a virtual machine environment
@@ -111,10 +111,10 @@ book, which you can find at:
http://cs.baylor.edu/~donahoo/practical/CSockets/.
If you understand the first four chapters of the book (or for those who do
not have access to a copy of the book, the echo clients and servers shown in
not have access to a copy of the book, the echo clients and servers shown in
the website above) you will be in good shape to understand the tutorial.
There is a similar book on Multicast Sockets,
`Multicast Sockets, Makofske and Almeroth
<https://www.elsevier.com/books/multicast-sockets/makofske/978-1-55860-846-7>`_.
that covers material you may need to understand if you look at the multicast
that covers material you may need to understand if you look at the multicast
examples in the distribution.

View File

@@ -4,7 +4,7 @@
:format: html latex
.. Mimic doxygen formatting for parameter names
.. raw:: html
<style>.param {font-weight:bold; color:#602020;}</style>
@@ -77,7 +77,7 @@ standard output, as in::
...
std::cout << "The value of x is " << x << std::endl;
...
}
}
Nobody is going to prevent you from going deep into the core of |ns3|
and adding print statements. This is insanely easy to do and, after
@@ -107,7 +107,7 @@ other people as a patch to the existing core.
Let's pick a random example. If you wanted to add more logging to the
|ns3| TCP socket (``tcp-socket-base.cc``) you could just add a new
message down in the implementation. Notice that in
``TcpSocketBase::ProcessEstablished ()`` there is no log message for the
``TcpSocketBase::ProcessEstablished ()`` there is no log message for the
reception of a SYN+ACK in ESTABLISHED state.
You could simply add one, changing the code. Here is the original::
@@ -158,7 +158,7 @@ files to disk and process them down to a few lines whenever you want
to do anything.
Since there are no guarantees in |ns3| about the stability of
``NS_LOG`` output, you may also discover that pieces of log output
``NS_LOG`` output, you may also discover that pieces of log output
which you depend on disappear or change between releases. If you depend
on the structure of the output, you may find other messages being
added or deleted which may affect your parsing code.
@@ -328,14 +328,14 @@ the tutorial directory as ``fourth.cc``. Let's walk through it::
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "ns3/object.h"
#include "ns3/uinteger.h"
#include "ns3/traced-value.h"
#include "ns3/trace-source-accessor.h"
#include <iostream>
using namespace ns3;
Most of this code should be quite familiar to you. As mentioned
@@ -376,7 +376,7 @@ simple Object we can work with.
;
return tid;
}
MyObject () {}
TracedValue<int32_t> m_myInt;
};
@@ -431,7 +431,7 @@ code to connect the source to the sink, which happens in ``main``::
{
Ptr<MyObject> myObject = CreateObject<MyObject> ();
myObject->TraceConnectWithoutContext ("MyInteger", MakeCallback(&IntTrace));
myObject->m_myInt = 1234;
}
@@ -522,7 +522,7 @@ more clear to you what this function is doing::
CourseChange (std::string context, Ptr<const MobilityModel> model)
{
Vector position = model->GetPosition ();
NS_LOG_UNCOND (context <<
NS_LOG_UNCOND (context <<
" x = " << position.x << ", y = " << position.y);
}
@@ -628,7 +628,7 @@ for "CourseChange" in your favorite editor. You should find
MakeTraceSourceAccessor (&MobilityModel::m_courseChangeTrace),
"ns3::MobilityModel::CourseChangeCallback")
which should look very familiar at this point.
which should look very familiar at this point.
If you look for the corresponding declaration of the underlying traced
variable in ``mobility-model.h`` you will find
@@ -740,7 +740,7 @@ an entry for
::
CourseChange: The value of the position and/or velocity vector changed
CourseChange: The value of the position and/or velocity vector changed
You should recognize this as the trace source we used in the
``third.cc`` example. Perusing this list will be helpful.
@@ -772,7 +772,7 @@ in the "All TraceSources" list and you want to figure out how to
connect to it. You know that you are using (again, from the
``third.cc`` example) an ``ns3::RandomWalk2dMobilityModel``. So
either click on the class name in the "All TraceSources" list, or find
``ns3::RandomWalk2dMobilityModel`` in the "Class List". Either way
``ns3::RandomWalk2dMobilityModel`` in the "Class List". Either way
you should now be looking at the "ns3::RandomWalk2dMobilityModel Class
Reference" page.
@@ -808,7 +808,7 @@ Look further down in the "Detailed Description" section for the list
of trace sources. You will find
No TraceSources are defined for this type.
**TraceSources defined in parent class ``ns3::MobilityModel``**
* **CourseChange**: The value of the position and/or velocity vector
@@ -843,11 +843,11 @@ and you may find your answer along with working code. For example, in
this case, ``src/mobility/examples/main-random-topology.cc`` has
something just waiting for you to use::
Config::Connect ("/NodeList/*/$ns3::MobilityModel/CourseChange",
Config::Connect ("/NodeList/*/$ns3::MobilityModel/CourseChange",
MakeCallback (&CourseChange));
We'll return to this example in a moment.
We'll return to this example in a moment.
Callback Signatures
+++++++++++++++++++
@@ -871,7 +871,7 @@ The callback signature is given as a link to the relevant ``typedef``,
where we find
``typedef void (* CourseChangeCallback)(std::string context, Ptr<const MobilityModel> * model);``
**TracedCallback** signature for course change notifications.
If the callback is connected using ``ConnectWithoutContext`` omit the
@@ -1040,11 +1040,11 @@ Just after this comment, you will find
::
template<typename T1 = empty, typename T2 = empty,
template<typename T1 = empty, typename T2 = empty,
typename T3 = empty, typename T4 = empty,
typename T5 = empty, typename T6 = empty,
typename T7 = empty, typename T8 = empty>
class TracedCallback
class TracedCallback
{
...
@@ -1063,11 +1063,11 @@ tracing system is in the ``Connect`` and ``ConnectWithoutContext``
functions. If you scroll down, you will see a
``ConnectWithoutContext`` method here::
template<typename T1, typename T2,
template<typename T1, typename T2,
typename T3, typename T4,
typename T5, typename T6,
typename T7, typename T8>
void
void
TracedCallback<T1,T2,T3,T4,T5,T6,T7,T8>::ConnectWithoutContext ...
{
Callback<void,T1,T2,T3,T4,T5,T6,T7,T8> cb;
@@ -1081,7 +1081,7 @@ instantiated for the declaration above, the compiler will replace
::
void
void
TracedCallback<Ptr<const MobilityModel>::ConnectWithoutContext ... cb
{
Callback<void, Ptr<const MobilityModel> > cb;
@@ -1304,8 +1304,8 @@ usual, ``grep`` is your friend:
$ find . -name '*.cc' | xargs grep CongestionWindow
This will point out a couple of promising candidates:
``examples/tcp/tcp-large-transfer.cc`` and
This will point out a couple of promising candidates:
``examples/tcp/tcp-large-transfer.cc`` and
``src/test/ns3tcp/ns3tcp-cwnd-test-suite.cc``.
We haven't visited any of the test code yet, so let's take a look
@@ -1316,7 +1316,7 @@ and search for "CongestionWindow". You will find,
::
ns3TcpSocket->TraceConnectWithoutContext ("CongestionWindow",
ns3TcpSocket->TraceConnectWithoutContext ("CongestionWindow",
MakeCallback (&Ns3TcpCwndTestCase1::CwndChange, this));
This should look very familiar to you. We mentioned above that if we
@@ -1405,16 +1405,16 @@ see some familiar looking code::
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <fstream>
#include "ns3/core-module.h"
#include "ns3/network-module.h"
#include "ns3/internet-module.h"
#include "ns3/point-to-point-module.h"
#include "ns3/applications-module.h"
using namespace ns3;
NS_LOG_COMPONENT_DEFINE ("FifthScriptExample");
This has all been covered, so we won't rehash it. The next lines of
@@ -1469,20 +1469,20 @@ time.
class MyApp : public Application
{
public:
MyApp ();
virtual ~MyApp();
void Setup (Ptr<Socket> socket, Address address, uint32_t packetSize,
void Setup (Ptr<Socket> socket, Address address, uint32_t packetSize,
uint32_t nPackets, DataRate dataRate);
private:
virtual void StartApplication (void);
virtual void StopApplication (void);
void ScheduleTx (void);
void SendPacket (void);
Ptr<Socket> m_socket;
Address m_peer;
uint32_t m_packetSize;
@@ -1669,7 +1669,7 @@ course::
m_packetsSent (0)
{
}
MyApp::~MyApp()
{
m_socket = 0;
@@ -1681,7 +1681,7 @@ this ``Application`` in the first place.
::
void
MyApp::Setup (Ptr<Socket> socket, Address address, uint32_t packetSize,
MyApp::Setup (Ptr<Socket> socket, Address address, uint32_t packetSize,
uint32_t nPackets, DataRate dataRate)
{
m_socket = socket;
@@ -1690,7 +1690,7 @@ this ``Application`` in the first place.
m_nPackets = nPackets;
m_dataRate = dataRate;
}
This code should be pretty self-explanatory. We are just initializing
member variables. The important one from the perspective of tracing
is the ``Ptr<Socket> socket`` which we needed to provide to the
@@ -1734,12 +1734,12 @@ creating simulation events.
MyApp::StopApplication (void)
{
m_running = false;
if (m_sendEvent.IsRunning ())
{
Simulator::Cancel (m_sendEvent);
}
if (m_socket)
{
m_socket->Close ();
@@ -1769,7 +1769,7 @@ chain of events that describes the ``Application`` behavior.
{
Ptr<Packet> packet = Create<Packet> (m_packetSize);
m_socket->Send (packet);
if (++m_packetsSent < m_nPackets)
{
ScheduleTx ();
@@ -1865,11 +1865,11 @@ The following code should be very familiar to you by now::
{
NodeContainer nodes;
nodes.Create (2);
PointToPointHelper pointToPoint;
pointToPoint.SetDeviceAttribute ("DataRate", StringValue ("5Mbps"));
pointToPoint.SetChannelAttribute ("Delay", StringValue ("2ms"));
NetDeviceContainer devices;
devices = pointToPoint.Install (nodes);
@@ -1886,7 +1886,7 @@ congestion window.
|ns3| provides ``ErrorModel`` objects which can be attached to
``Channels``. We are using the ``RateErrorModel`` which allows us to
introduce errors
into a ``Channel`` at a given *rate*.
into a ``Channel`` at a given *rate*.
::
@@ -1921,7 +1921,7 @@ is commonly used in |ns3| for that purpose.
uint16_t sinkPort = 8080;
Address sinkAddress (InetSocketAddress(interfaces.GetAddress (1), sinkPort));
PacketSinkHelper packetSinkHelper ("ns3::TcpSocketFactory",
PacketSinkHelper packetSinkHelper ("ns3::TcpSocketFactory",
InetSocketAddress (Ipv4Address::GetAny (), sinkPort));
ApplicationContainer sinkApps = packetSinkHelper.Install (nodes.Get (1));
sinkApps.Start (Seconds (0.));
@@ -1931,7 +1931,7 @@ This should all be familiar, with the exception of,
::
PacketSinkHelper packetSinkHelper ("ns3::TcpSocketFactory",
PacketSinkHelper packetSinkHelper ("ns3::TcpSocketFactory",
InetSocketAddress (Ipv4Address::GetAny (), sinkPort));
This code instantiates a ``PacketSinkHelper`` and tells it to create
@@ -1952,9 +1952,9 @@ trace source.
::
Ptr<Socket> ns3TcpSocket = Socket::CreateSocket (nodes.Get (0),
Ptr<Socket> ns3TcpSocket = Socket::CreateSocket (nodes.Get (0),
TcpSocketFactory::GetTypeId ());
ns3TcpSocket->TraceConnectWithoutContext ("CongestionWindow",
ns3TcpSocket->TraceConnectWithoutContext ("CongestionWindow",
MakeCallback (&CwndChange));
The first statement calls the static member function
@@ -2127,7 +2127,7 @@ information to a stream representing a file.
NS_LOG_UNCOND (Simulator::Now ().GetSeconds () << "\t" << newCwnd);
*stream->GetStream () << Simulator::Now ().GetSeconds () << "\t" << oldCwnd << "\t" << newCwnd << std::endl;
}
static void
RxDrop (Ptr<PcapFileWrapper> file, Ptr<const Packet> p)
{
@@ -2350,7 +2350,7 @@ and we did this in only 18 lines of code::
}
...
PcapHelper pcapHelper;
Ptr<PcapFileWrapper> file = pcapHelper.CreateFile ("sixth.pcap", "w", PcapHelper::DLT_PPP);
devices.Get (1)->TraceConnectWithoutContext("PhyRxDrop", MakeBoundCallback (&RxDrop, file));
@@ -2364,7 +2364,7 @@ previous sections, primarily :ref:`BuildingTopologies`, we have seen
several varieties of the trace helper methods designed for use inside
other (device) helpers.
Perhaps you will recall seeing some of these variations:
Perhaps you will recall seeing some of these variations:
::
@@ -2619,8 +2619,8 @@ inherited from the ASCII trace ``mixin``.
::
virtual void EnableAsciiInternal (Ptr<OutputStreamWrapper> stream,
std::string prefix,
virtual void EnableAsciiInternal (Ptr<OutputStreamWrapper> stream,
std::string prefix,
Ptr<NetDevice> nd,
bool explicitFilename) = 0;
@@ -2778,7 +2778,7 @@ but to summarize ...
This would result in a number of ASCII trace files being created,
each of which follows the ``<prefix>-<node id>-<device id>.tr``
convention.
Combining all of the traces into a single file is accomplished
similarly to the examples above::
@@ -2889,8 +2889,8 @@ class ``Object``, and methods that share the same signature.
::
virtual void EnablePcapIpv4Internal (std::string prefix,
Ptr<Ipv4> ipv4,
virtual void EnablePcapIpv4Internal (std::string prefix,
Ptr<Ipv4> ipv4,
uint32_t interface,
bool explicitFilename) = 0;
@@ -2967,7 +2967,7 @@ summarize ...
NodeContainer nodes;
...
NetDeviceContainer devices = deviceHelper.Install (nodes);
...
...
Ipv4AddressHelper ipv4;
ipv4.SetBase ("10.1.1.0", "255.255.255.0");
Ipv4InterfaceContainer interfaces = ipv4.Assign (devices);
@@ -3061,9 +3061,9 @@ method inherited from this class.
::
virtual void EnableAsciiIpv4Internal (Ptr<OutputStreamWrapper> stream,
std::string prefix,
Ptr<Ipv4> ipv4,
virtual void EnableAsciiIpv4Internal (Ptr<OutputStreamWrapper> stream,
std::string prefix,
Ptr<Ipv4> ipv4,
uint32_t interface,
bool explicitFilename) = 0;
@@ -3204,7 +3204,7 @@ but to summarize ...
NodeContainer nodes;
...
NetDeviceContainer devices = deviceHelper.Install (nodes);
...
...
Ipv4AddressHelper ipv4;
ipv4.SetBase ("10.1.1.0", "255.255.255.0");
Ipv4InterfaceContainer interfaces = ipv4.Assign (devices);
@@ -3220,7 +3220,7 @@ but to summarize ...
NodeContainer nodes;
...
NetDeviceContainer devices = deviceHelper.Install (nodes);
...
...
Ipv4AddressHelper ipv4;
ipv4.SetBase ("10.1.1.0", "255.255.255.0");
Ipv4InterfaceContainer interfaces = ipv4.Assign (devices);

View File

@@ -11,34 +11,34 @@ Using the Logging Module
************************
We have already taken a brief look at the |ns3| logging module while
going over the ``first.cc`` script. We will now take a closer look and
going over the ``first.cc`` script. We will now take a closer look and
see what kind of use-cases the logging subsystem was designed to cover.
Logging Overview
++++++++++++++++
Many large systems support some kind of message logging facility, and
|ns3| is not an exception. In some cases, only error messages are
Many large systems support some kind of message logging facility, and
|ns3| is not an exception. In some cases, only error messages are
logged to the "operator console" (which is typically ``stderr`` in Unix-
based systems). In other systems, warning messages may be output as well as
more detailed informational messages. In some cases, logging facilities are
based systems). In other systems, warning messages may be output as well as
more detailed informational messages. In some cases, logging facilities are
used to output debug messages which can quickly turn the output into a blur.
|ns3| takes the view that all of these verbosity levels are useful
|ns3| takes the view that all of these verbosity levels are useful
and we provide a selectable, multi-level approach to message logging. Logging
can be disabled completely, enabled on a component-by-component basis, or
enabled globally; and it provides selectable verbosity levels. The
enabled globally; and it provides selectable verbosity levels. The
|ns3| log module provides a straightforward, relatively easy to use
way to get useful information out of your simulation.
You should understand that we do provide a general purpose mechanism ---
tracing --- to get data out of your models which should be preferred for
You should understand that we do provide a general purpose mechanism ---
tracing --- to get data out of your models which should be preferred for
simulation output (see the tutorial section Using the Tracing System for
more details on our tracing system). Logging should be preferred for
debugging information, warnings, error messages, or any time you want to
more details on our tracing system). Logging should be preferred for
debugging information, warnings, error messages, or any time you want to
easily get a quick message out of your scripts or models.
There are currently seven levels of log messages of increasing verbosity
defined in the system.
defined in the system.
* LOG_ERROR --- Log error messages (associated macro: NS_LOG_ERROR);
* LOG_WARN --- Log warning messages (associated macro: NS_LOG_WARN);
@@ -59,7 +59,7 @@ consequence of this, LOG_ERROR and LOG_LEVEL_ERROR and also LOG_ALL
and LOG_LEVEL_ALL are functionally equivalent.) For example,
enabling LOG_INFO will only enable messages provided by NS_LOG_INFO macro,
while enabling LOG_LEVEL_INFO will also enable messages provided by
NS_LOG_DEBUG, NS_LOG_WARN and NS_LOG_ERROR macros.
NS_LOG_DEBUG, NS_LOG_WARN and NS_LOG_ERROR macros.
We also provide an unconditional logging macro that is always displayed,
irrespective of logging levels or component selection.
@@ -67,20 +67,20 @@ irrespective of logging levels or component selection.
* NS_LOG_UNCOND -- Log the associated message unconditionally (no associated
log level).
Each level can be requested singly or cumulatively; and logging can be set
up using a shell environment variable (NS_LOG) or by logging system function
call. As was seen earlier in the tutorial, the logging system has Doxygen
documentation and now would be a good time to peruse the Logging Module
Each level can be requested singly or cumulatively; and logging can be set
up using a shell environment variable (NS_LOG) or by logging system function
call. As was seen earlier in the tutorial, the logging system has Doxygen
documentation and now would be a good time to peruse the Logging Module
documentation if you have not done so.
Now that you have read the documentation in great detail, let's use some of
that knowledge to get some interesting information out of the
that knowledge to get some interesting information out of the
``scratch/myfirst.cc`` example script you have already built.
Enabling Logging
++++++++++++++++
Let's use the NS_LOG environment variable to turn on some more logging, but
first, just to get our bearings, go ahead and run the last script just as you
first, just to get our bearings, go ahead and run the last script just as you
did previously,
.. sourcecode:: bash
@@ -100,14 +100,14 @@ program
Received 1024 bytes from 10.1.1.2
It turns out that the "Sent" and "Received" messages you see above are
actually logging messages from the ``UdpEchoClientApplication`` and
``UdpEchoServerApplication``. We can ask the client application, for
example, to print more information by setting its logging level via the
NS_LOG environment variable.
actually logging messages from the ``UdpEchoClientApplication`` and
``UdpEchoServerApplication``. We can ask the client application, for
example, to print more information by setting its logging level via the
NS_LOG environment variable.
I am going to assume from here on that you are using an sh-like shell that uses
the "VARIABLE=value" syntax. If you are using a csh-like shell, then you
will have to convert my examples to the "setenv VARIABLE value" syntax
I am going to assume from here on that you are using an sh-like shell that uses
the "VARIABLE=value" syntax. If you are using a csh-like shell, then you
will have to convert my examples to the "setenv VARIABLE value" syntax
required by those shells.
Right now, the UDP echo client application is responding to the following line
@@ -117,7 +117,7 @@ of code in ``scratch/myfirst.cc``,
LogComponentEnable("UdpEchoClientApplication", LOG_LEVEL_INFO);
This line of code enables the ``LOG_LEVEL_INFO`` level of logging. When
This line of code enables the ``LOG_LEVEL_INFO`` level of logging. When
we pass a logging level flag, we are actually enabling the given level and
all lower levels. In this case, we have enabled ``NS_LOG_INFO``,
``NS_LOG_DEBUG``, ``NS_LOG_WARN`` and ``NS_LOG_ERROR``. We can
@@ -137,7 +137,7 @@ This sets the shell environment variable ``NS_LOG`` to the string,
The left hand side of the assignment is the name of the logging component we
want to set, and the right hand side is the flag we want to use. In this case,
we are going to turn on all of the debugging levels for the application. If
you run the script with NS_LOG set this way, the |ns3| logging
you run the script with NS_LOG set this way, the |ns3| logging
system will pick up the change and you should see the following output:
.. sourcecode:: bash
@@ -169,15 +169,15 @@ is left to the individual model developer. In the case of the echo
applications, a good deal of log output is available.
You can now see a log of the function calls that were made to the application.
If you look closely you will notice a single colon between the string
``UdpEchoClientApplication`` and the method name where you might have
expected a C++ scope operator (``::``). This is intentional.
If you look closely you will notice a single colon between the string
``UdpEchoClientApplication`` and the method name where you might have
expected a C++ scope operator (``::``). This is intentional.
The name is not actually a class name, it is a logging component name. When
there is a one-to-one correspondence between a source file and a class, this
will generally be the class name but you should understand that it is not
The name is not actually a class name, it is a logging component name. When
there is a one-to-one correspondence between a source file and a class, this
will generally be the class name but you should understand that it is not
actually a class name, and there is a single colon there instead of a double
colon to remind you in a relatively subtle way to conceptually separate the
colon to remind you in a relatively subtle way to conceptually separate the
logging component name from the class name.
It turns out that in some cases, it can be hard to determine which method
@@ -193,7 +193,7 @@ from. You can resolve this by OR'ing the ``prefix_func`` level into the
Note that the quotes are required since the vertical bar we use to indicate an
OR operation is also a Unix pipe connector.
Now, if you run the script you will see that the logging system makes sure
Now, if you run the script you will see that the logging system makes sure
that every message from the given log component is prefixed with the component
name.
@@ -217,8 +217,8 @@ name.
You can now see all of the messages coming from the UDP echo client application
are identified as such. The message "Received 1024 bytes from 10.1.1.2" is
now clearly identified as coming from the echo client application. The
remaining message must be coming from the UDP echo server application. We
now clearly identified as coming from the echo client application. The
remaining message must be coming from the UDP echo server application. We
can enable that component by entering a colon separated list of components in
the NS_LOG environment variable.
@@ -293,20 +293,20 @@ you should see the following output:
UdpEchoClientApplication:~UdpEchoClient()
UdpEchoServerApplication:~UdpEchoServer()
You can see that the constructor for the UdpEchoServer was called at a
simulation time of 0 seconds. This is actually happening before the
You can see that the constructor for the UdpEchoServer was called at a
simulation time of 0 seconds. This is actually happening before the
simulation starts, but the time is displayed as zero seconds. The same is true
for the UdpEchoClient constructor message.
Recall that the ``scratch/first.cc`` script started the echo server
application at one second into the simulation. You can now see that the
Recall that the ``scratch/first.cc`` script started the echo server
application at one second into the simulation. You can now see that the
``StartApplication`` method of the server is, in fact, called at one second.
You can also see that the echo client application is started at a simulation
You can also see that the echo client application is started at a simulation
time of two seconds as we requested in the script.
You can now follow the progress of the simulation from the
``ScheduleTransmit`` call in the client that calls ``Send`` to the
``HandleRead`` callback in the echo server application. Note that the
You can now follow the progress of the simulation from the
``ScheduleTransmit`` call in the client that calls ``Send`` to the
``HandleRead`` callback in the echo server application. Note that the
elapsed time for the packet to be sent across the point-to-point link is 3.69
milliseconds. You see the echo server logging a message telling you that it
has echoed the packet and then, after another channel delay, you see the echo
@@ -314,37 +314,37 @@ client receive the echoed packet in its ``HandleRead`` method.
There is a lot that is happening under the covers in this simulation that you
are not seeing as well. You can very easily follow the entire process by
turning on all of the logging components in the system. Try setting the
turning on all of the logging components in the system. Try setting the
``NS_LOG`` variable to the following,
.. sourcecode:: bash
$ export 'NS_LOG=*=level_all|prefix_func|prefix_time'
The asterisk above is the logging component wildcard. This will turn on all
of the logging in all of the components used in the simulation. I won't
The asterisk above is the logging component wildcard. This will turn on all
of the logging in all of the components used in the simulation. I won't
reproduce the output here (as of this writing it produces 1265 lines of output
for the single packet echo) but you can redirect this information into a file
for the single packet echo) but you can redirect this information into a file
and look through it with your favorite editor if you like,
.. sourcecode:: bash
$ ./ns3 run scratch/myfirst > log.out 2>&1
I personally use this extremely verbose version of logging when I am presented
with a problem and I have no idea where things are going wrong. I can follow the
progress of the code quite easily without having to set breakpoints and step
I personally use this extremely verbose version of logging when I am presented
with a problem and I have no idea where things are going wrong. I can follow the
progress of the code quite easily without having to set breakpoints and step
through code in a debugger. I can just edit up the output in my favorite editor
and search around for things I expect, and see things happening that I don't
and search around for things I expect, and see things happening that I don't
expect. When I have a general idea about what is going wrong, I transition into
a debugger for a fine-grained examination of the problem. This kind of output
a debugger for a fine-grained examination of the problem. This kind of output
can be especially useful when your script does something completely unexpected.
If you are stepping using a debugger you may miss an unexpected excursion
If you are stepping using a debugger you may miss an unexpected excursion
completely. Logging the excursion makes it quickly visible.
Adding Logging to your Code
+++++++++++++++++++++++++++
You can add new logging to your simulations by making calls to the log
You can add new logging to your simulations by making calls to the log
component via several macros. Let's do so in the ``myfirst.cc`` script we
have in the ``scratch`` directory.
@@ -356,9 +356,9 @@ Recall that we have defined a logging component in that script:
You now know that you can enable all of the logging for this component by
setting the ``NS_LOG`` environment variable to the various levels. Let's
go ahead and add some logging to the script. The macro used to add an
informational level log message is ``NS_LOG_INFO``. Go ahead and add one
(just before we start creating the nodes) that tells you that the script is
go ahead and add some logging to the script. The macro used to add an
informational level log message is ``NS_LOG_INFO``. Go ahead and add one
(just before we start creating the nodes) that tells you that the script is
"Creating Topology." This is done as in this code snippet,
Open ``scratch/myfirst.cc`` in your favorite editor and add the line,
@@ -382,16 +382,16 @@ off the torrent of logging we previously enabled:
$ ./ns3
$ export NS_LOG=
Now, if you run the script,
Now, if you run the script,
.. sourcecode:: bash
$ ./ns3 run scratch/myfirst
you will ``not`` see your new message since its associated logging
you will ``not`` see your new message since its associated logging
component (``FirstScriptExample``) has not been enabled. In order to see your
message you will have to enable the ``FirstScriptExample`` logging component
with a level greater than or equal to ``NS_LOG_INFO``. If you just want to
with a level greater than or equal to ``NS_LOG_INFO``. If you just want to
see this particular level of logging, you can enable it by,
.. sourcecode:: bash
@@ -419,7 +419,7 @@ Using Command Line Arguments
Overriding Default Attributes
+++++++++++++++++++++++++++++
Another way you can change how |ns3| scripts behave without editing
and building is via *command line arguments.* We provide a mechanism to
and building is via *command line arguments.* We provide a mechanism to
parse command line arguments and automatically set local and global variables
based on those arguments.
@@ -432,7 +432,7 @@ in the following code,
int
main (int argc, char *argv[])
{
...
...
CommandLine cmd;
cmd.Parse (argc, argv);
@@ -441,9 +441,9 @@ in the following code,
}
This simple two line snippet is actually very useful by itself. It opens the
door to the |ns3| global variable and ``Attribute`` systems. Go
door to the |ns3| global variable and ``Attribute`` systems. Go
ahead and add that two lines of code to the ``scratch/myfirst.cc`` script at
the start of ``main``. Go ahead and build the script and run it, but ask
the start of ``main``. Go ahead and build the script and run it, but ask
the script for help in the following way,
.. sourcecode:: bash
@@ -451,7 +451,7 @@ the script for help in the following way,
$ ./ns3 run "scratch/myfirst --PrintHelp"
This will ask ns3 to run the ``scratch/myfirst`` script and pass the command
line argument ``--PrintHelp`` to the script. The quotes are required to
line argument ``--PrintHelp`` to the script. The quotes are required to
sort out which program gets which argument. The command line parser will
now see the ``--PrintHelp`` argument and respond with,
@@ -470,7 +470,7 @@ now see the ``--PrintHelp`` argument and respond with,
--PrintGlobals: Print the list of globals.
Let's focus on the ``--PrintAttributes`` option. We have already hinted
at the |ns3| ``Attribute`` system while walking through the
at the |ns3| ``Attribute`` system while walking through the
``first.cc`` script. We looked at the following lines of code,
::
@@ -479,7 +479,7 @@ at the |ns3| ``Attribute`` system while walking through the
pointToPoint.SetDeviceAttribute ("DataRate", StringValue ("5Mbps"));
pointToPoint.SetChannelAttribute ("Delay", StringValue ("2ms"));
and mentioned that ``DataRate`` was actually an ``Attribute`` of the
and mentioned that ``DataRate`` was actually an ``Attribute`` of the
``PointToPointNetDevice``. Let's use the command line argument parser
to take a look at the ``Attributes`` of the PointToPointNetDevice. The help
listing says that we should provide a ``TypeId``. This corresponds to the
@@ -500,12 +500,12 @@ Among the ``Attributes`` you will see listed is,
This is the default value that will be used when a ``PointToPointNetDevice``
is created in the system. We overrode this default with the ``Attribute``
setting in the ``PointToPointHelper`` above. Let's use the default values
for the point-to-point devices and channels by deleting the
``SetDeviceAttribute`` call and the ``SetChannelAttribute`` call from
setting in the ``PointToPointHelper`` above. Let's use the default values
for the point-to-point devices and channels by deleting the
``SetDeviceAttribute`` call and the ``SetChannelAttribute`` call from
the ``myfirst.cc`` we have in the scratch directory.
Your script should now just declare the ``PointToPointHelper`` and not do
Your script should now just declare the ``PointToPointHelper`` and not do
any ``set`` operations as in the following example,
::
@@ -523,7 +523,7 @@ any ``set`` operations as in the following example,
...
Go ahead and build the new script with ns3 (``./ns3``) and let's go back
and enable some logging from the UDP echo server application and turn on the
and enable some logging from the UDP echo server application and turn on the
time prefix.
.. sourcecode:: bash
@@ -555,10 +555,10 @@ was received by the echo server, it was at 2.00369 seconds.
2.00369s UdpEchoServerApplication:HandleRead(): Received 1024 bytes from 10.1.1.1
Now it is receiving the packet at 2.25732 seconds. This is because we just dropped
the data rate of the ``PointToPointNetDevice`` down to its default of
the data rate of the ``PointToPointNetDevice`` down to its default of
32768 bits per second from five megabits per second.
If we were to provide a new ``DataRate`` using the command line, we could
If we were to provide a new ``DataRate`` using the command line, we could
speed our simulation up again. We do this in the following way, according to
the formula implied by the help item:
@@ -566,10 +566,10 @@ the formula implied by the help item:
$ ./ns3 run "scratch/myfirst --ns3::PointToPointNetDevice::DataRate=5Mbps"
This will set the default value of the ``DataRate`` ``Attribute`` back to
This will set the default value of the ``DataRate`` ``Attribute`` back to
five megabits per second. Are you surprised by the result? It turns out that
in order to get the original behavior of the script back, we will have to set
the speed-of-light delay of the channel as well. We can ask the command line
in order to get the original behavior of the script back, we will have to set
the speed-of-light delay of the channel as well. We can ask the command line
system to print out the ``Attributes`` of the channel just like we did for
the net device:
@@ -611,25 +611,25 @@ in which case we recover the timing we had when we explicitly set the
UdpEchoServerApplication:DoDispose()
UdpEchoServerApplication:~UdpEchoServer()
Note that the packet is again received by the server at 2.00369 seconds. We
Note that the packet is again received by the server at 2.00369 seconds. We
could actually set any of the ``Attributes`` used in the script in this way.
In particular we could set the ``UdpEchoClient Attribute MaxPackets``
In particular we could set the ``UdpEchoClient Attribute MaxPackets``
to some other value than one.
How would you go about that? Give it a try. Remember you have to comment
out the place we override the default ``Attribute`` and explicitly set
``MaxPackets`` in the script. Then you have to rebuild the script. You
How would you go about that? Give it a try. Remember you have to comment
out the place we override the default ``Attribute`` and explicitly set
``MaxPackets`` in the script. Then you have to rebuild the script. You
will also have to find the syntax for actually setting the new default attribute
value using the command line help facility. Once you have this figured out
you should be able to control the number of packets echoed from the command
line. Since we're nice folks, we'll tell you that your command line should
value using the command line help facility. Once you have this figured out
you should be able to control the number of packets echoed from the command
line. Since we're nice folks, we'll tell you that your command line should
end up looking something like,
.. sourcecode:: bash
$ ./ns3 run "scratch/myfirst
--ns3::PointToPointNetDevice::DataRate=5Mbps
--ns3::PointToPointChannel::Delay=2ms
--ns3::PointToPointNetDevice::DataRate=5Mbps
--ns3::PointToPointChannel::Delay=2ms
--ns3::UdpEchoClient::MaxPackets=2"
A natural question to arise at this point is how to learn about the existence
@@ -651,8 +651,8 @@ a feature for this. If we ask for command line help we should see:
If you select the "PrintGroups" argument, you should see a list of all
registered TypeId groups. The group names are aligned with the module names
in the source directory (although with a leading capital letter). Printing
out all of the information at once would be too much, so a further filter
in the source directory (although with a leading capital letter). Printing
out all of the information at once would be too much, so a further filter
is available to print information on a per-group basis. So, focusing
again on the point-to-point module:
@@ -677,11 +677,11 @@ Hooking Your Own Values
You can also add your own hooks to the command line system. This is done
quite simply by using the ``AddValue`` method to the command line parser.
Let's use this facility to specify the number of packets to echo in a
Let's use this facility to specify the number of packets to echo in a
completely different way. Let's add a local variable called ``nPackets``
to the ``main`` function. We'll initialize it to one to match our previous
to the ``main`` function. We'll initialize it to one to match our previous
default behavior. To allow the command line parser to change this value, we
need to hook the value into the parser. We do this by adding a call to
need to hook the value into the parser. We do this by adding a call to
``AddValue``. Go ahead and change the ``scratch/myfirst.cc`` script to
start with the following code,
@@ -706,7 +706,7 @@ instead of the constant ``1`` as is shown below.
echoClient.SetAttribute ("MaxPackets", UintegerValue (nPackets));
Now if you run the script and provide the ``--PrintHelp`` argument, you
Now if you run the script and provide the ``--PrintHelp`` argument, you
should see your new ``User Argument`` listed in the help display.
Try,
@@ -759,11 +759,11 @@ You should now see
You have now echoed two packets. Pretty easy, isn't it?
You can see that if you are an |ns3| user, you can use the command
You can see that if you are an |ns3| user, you can use the command
line argument system to control global values and ``Attributes``. If you are
a model author, you can add new ``Attributes`` to your ``Objects`` and
a model author, you can add new ``Attributes`` to your ``Objects`` and
they will automatically be available for setting by your users through the
command line system. If you are a script author, you can add new variables to
command line system. If you are a script author, you can add new variables to
your scripts and hook them into the command line system quite painlessly.
.. _UsingTracingSystem:
@@ -771,10 +771,10 @@ your scripts and hook them into the command line system quite painlessly.
Using the Tracing System
************************
The whole point of simulation is to generate output for further study, and
the |ns3| tracing system is a primary mechanism for this. Since
|ns3| is a C++ program, standard facilities for generating output
from C++ programs could be used:
The whole point of simulation is to generate output for further study, and
the |ns3| tracing system is a primary mechanism for this. Since
|ns3| is a C++ program, standard facilities for generating output
from C++ programs could be used:
::
@@ -785,43 +785,43 @@ from C++ programs could be used:
...
std::cout << "The value of x is " << x << std::endl;
...
}
}
You could even use the logging module to add a little structure to your
You could even use the logging module to add a little structure to your
solution. There are many well-known problems generated by such approaches
and so we have provided a generic event tracing subsystem to address the
and so we have provided a generic event tracing subsystem to address the
issues we thought were important.
The basic goals of the |ns3| tracing system are:
* For basic tasks, the tracing system should allow the user to generate
* For basic tasks, the tracing system should allow the user to generate
standard tracing for popular tracing sources, and to customize which objects
generate the tracing;
* Intermediate users must be able to extend the tracing system to modify
the output format generated, or to insert new tracing sources, without
the output format generated, or to insert new tracing sources, without
modifying the core of the simulator;
* Advanced users can modify the simulator core to add new tracing sources
and sinks.
The |ns3| tracing system is built on the concepts of independent
The |ns3| tracing system is built on the concepts of independent
tracing sources and tracing sinks, and a uniform mechanism for connecting
sources to sinks. Trace sources are entities that can signal events that
happen in a simulation and provide access to interesting underlying data.
happen in a simulation and provide access to interesting underlying data.
For example, a trace source could indicate when a packet is received by a net
device and provide access to the packet contents for interested trace sinks.
Trace sources are not useful by themselves, they must be "connected" to
other pieces of code that actually do something useful with the information
other pieces of code that actually do something useful with the information
provided by the sink. Trace sinks are consumers of the events and data
provided by the trace sources. For example, one could create a trace sink
that would (when connected to the trace source of the previous example) print
provided by the trace sources. For example, one could create a trace sink
that would (when connected to the trace source of the previous example) print
out interesting parts of the received packet.
The rationale for this explicit division is to allow users to attach new
types of sinks to existing tracing sources, without requiring editing and
recompilation of the core of the simulator. Thus, in the example above,
a user could define a new tracing sink in her script and attach it to an
existing tracing source defined in the simulation core by editing only the
types of sinks to existing tracing sources, without requiring editing and
recompilation of the core of the simulator. Thus, in the example above,
a user could define a new tracing sink in her script and attach it to an
existing tracing source defined in the simulation core by editing only the
user script.
In this tutorial, we will walk through some pre-defined sources and sinks and
@@ -832,14 +832,14 @@ extending the tracing namespace and creating new tracing sources.
ASCII Tracing
+++++++++++++
|ns3| provides helper functionality that wraps the low-level tracing
system to help you with the details involved in configuring some easily
system to help you with the details involved in configuring some easily
understood packet traces. If you enable this functionality, you will see
output in a ASCII files --- thus the name. For those familiar with
output in a ASCII files --- thus the name. For those familiar with
|ns2| output, this type of trace is analogous to the ``out.tr``
generated by many scripts.
Let's just jump right in and add some ASCII tracing output to our
``scratch/myfirst.cc`` script. Right before the call to
Let's just jump right in and add some ASCII tracing output to our
``scratch/myfirst.cc`` script. Right before the call to
``Simulator::Run ()``, add the following lines of code:
::
@@ -847,23 +847,23 @@ Let's just jump right in and add some ASCII tracing output to our
AsciiTraceHelper ascii;
pointToPoint.EnableAsciiAll (ascii.CreateFileStream ("myfirst.tr"));
Like in many other |ns3| idioms, this code uses a helper object to
help create ASCII traces. The second line contains two nested method calls.
Like in many other |ns3| idioms, this code uses a helper object to
help create ASCII traces. The second line contains two nested method calls.
The "inside" method, ``CreateFileStream()`` uses an unnamed object idiom
to create a file stream object on the stack (without an object name) and pass
it down to the called method. We'll go into this more in the future, but all
you have to know at this point is that you are creating an object representing
a file named "myfirst.tr" and passing it into ``ns-3``. You are telling
``ns-3`` to deal with the lifetime issues of the created object and also to
deal with problems caused by a little-known (intentional) limitation of C++
a file named "myfirst.tr" and passing it into ``ns-3``. You are telling
``ns-3`` to deal with the lifetime issues of the created object and also to
deal with problems caused by a little-known (intentional) limitation of C++
ofstream objects relating to copy constructors.
The outside call, to ``EnableAsciiAll()``, tells the helper that you
want to enable ASCII tracing on all point-to-point devices in your simulation;
and you want the (provided) trace sinks to write out information about packet
The outside call, to ``EnableAsciiAll()``, tells the helper that you
want to enable ASCII tracing on all point-to-point devices in your simulation;
and you want the (provided) trace sinks to write out information about packet
movement in ASCII format.
For those familiar with |ns2|, the traced events are equivalent to
For those familiar with |ns2|, the traced events are equivalent to
the popular trace points that log "+", "-", "d", and "r" events.
You can now build the script and run it from the command line:
@@ -873,15 +873,15 @@ You can now build the script and run it from the command line:
$ ./ns3 run scratch/myfirst
Just as you have seen many times before, you will see some messages from ns3 and then
"'build' finished successfully" with some number of messages from
the running program.
"'build' finished successfully" with some number of messages from
the running program.
When it ran, the program will have created a file named ``myfirst.tr``.
When it ran, the program will have created a file named ``myfirst.tr``.
Because of the way that ns3 works, the file is not created in the local
directory, it is created at the top-level directory of the repository by
default. If you want to control where the traces are saved you can use the
directory, it is created at the top-level directory of the repository by
default. If you want to control where the traces are saved you can use the
``--cwd`` option of ns3 to specify this. We have not done so, thus we
need to change into the top level directory of our repo and take a look at
need to change into the top level directory of our repo and take a look at
the ASCII trace file ``myfirst.tr`` in your favorite editor.
Parsing Ascii Traces
@@ -891,10 +891,10 @@ to notice is that there are a number of distinct lines in this file. It may
be difficult to see this clearly unless you widen your window considerably.
Each line in the file corresponds to a *trace event*. In this case
we are tracing events on the *transmit queue* present in every
point-to-point net device in the simulation. The transmit queue is a queue
we are tracing events on the *transmit queue* present in every
point-to-point net device in the simulation. The transmit queue is a queue
through which every packet destined for a point-to-point channel must pass.
Note that each line in the trace file begins with a lone character (has a
Note that each line in the trace file begins with a lone character (has a
space after it). This character will have the following meaning:
* ``+``: An enqueue operation occurred on the device queue;
@@ -902,60 +902,60 @@ space after it). This character will have the following meaning:
* ``d``: A packet was dropped, typically because the queue was full;
* ``r``: A packet was received by the net device.
Let's take a more detailed view of the first line in the trace file. I'll
Let's take a more detailed view of the first line in the trace file. I'll
break it down into sections (indented for clarity) with a reference
number on the left side:
.. sourcecode:: text
:linenos:
+
2
/NodeList/0/DeviceList/0/$ns3::PointToPointNetDevice/TxQueue/Enqueue
+
2
/NodeList/0/DeviceList/0/$ns3::PointToPointNetDevice/TxQueue/Enqueue
ns3::PppHeader (
Point-to-Point Protocol: IP (0x0021))
Point-to-Point Protocol: IP (0x0021))
ns3::Ipv4Header (
tos 0x0 ttl 64 id 0 protocol 17 offset 0 flags [none]
tos 0x0 ttl 64 id 0 protocol 17 offset 0 flags [none]
length: 1052 10.1.1.1 > 10.1.1.2)
ns3::UdpHeader (
length: 1032 49153 > 9)
length: 1032 49153 > 9)
Payload (size=1024)
The first section of this expanded trace event (reference number 0) is the
The first section of this expanded trace event (reference number 0) is the
operation. We have a ``+`` character, so this corresponds to an
*enqueue* operation on the transmit queue. The second section (reference 1)
is the simulation time expressed in seconds. You may recall that we asked the
is the simulation time expressed in seconds. You may recall that we asked the
``UdpEchoClientApplication`` to start sending packets at two seconds. Here
we see confirmation that this is, indeed, happening.
The next section of the example trace (reference 2) tell us which trace source
originated this event (expressed in the tracing namespace). You can think
of the tracing namespace somewhat like you would a filesystem namespace. The
of the tracing namespace somewhat like you would a filesystem namespace. The
root of the namespace is the ``NodeList``. This corresponds to a container
managed in the |ns3| core code that contains all of the nodes that are
created in a script. Just as a filesystem may have directories under the
root, we may have node numbers in the ``NodeList``. The string
created in a script. Just as a filesystem may have directories under the
root, we may have node numbers in the ``NodeList``. The string
``/NodeList/0`` therefore refers to the zeroth node in the ``NodeList``
which we typically think of as "node 0". In each node there is a list of
which we typically think of as "node 0". In each node there is a list of
devices that have been installed. This list appears next in the namespace.
You can see that this trace event comes from ``DeviceList/0`` which is the
zeroth device installed in the node.
You can see that this trace event comes from ``DeviceList/0`` which is the
zeroth device installed in the node.
The next string, ``$ns3::PointToPointNetDevice`` tells you what kind of
The next string, ``$ns3::PointToPointNetDevice`` tells you what kind of
device is in the zeroth position of the device list for node zero.
Recall that the operation ``+`` found at reference 00 meant that an enqueue
operation happened on the transmit queue of the device. This is reflected in
Recall that the operation ``+`` found at reference 00 meant that an enqueue
operation happened on the transmit queue of the device. This is reflected in
the final segments of the "trace path" which are ``TxQueue/Enqueue``.
The remaining sections in the trace should be fairly intuitive. References 3-4
indicate that the packet is encapsulated in the point-to-point protocol.
indicate that the packet is encapsulated in the point-to-point protocol.
References 5-7 show that the packet has an IP version four header and has
originated from IP address 10.1.1.1 and is destined for 10.1.1.2. References
8-9 show that this packet has a UDP header and, finally, reference 10 shows
that the payload is the expected 1024 bytes.
The next line in the trace file shows the same packet being dequeued from the
transmit queue on the same node.
transmit queue on the same node.
The Third line in the trace file shows the packet being received by the net
device on the node with the echo server. I have reproduced that event below.
@@ -963,55 +963,55 @@ device on the node with the echo server. I have reproduced that event below.
.. sourcecode:: text
:linenos:
r
2.25732
/NodeList/1/DeviceList/0/$ns3::PointToPointNetDevice/MacRx
r
2.25732
/NodeList/1/DeviceList/0/$ns3::PointToPointNetDevice/MacRx
ns3::Ipv4Header (
tos 0x0 ttl 64 id 0 protocol 17 offset 0 flags [none]
length: 1052 10.1.1.1 > 10.1.1.2)
ns3::UdpHeader (
length: 1032 49153 > 9)
length: 1032 49153 > 9)
Payload (size=1024)
Notice that the trace operation is now ``r`` and the simulation time has
increased to 2.25732 seconds. If you have been following the tutorial steps
closely this means that you have left the ``DataRate`` of the net devices
and the channel ``Delay`` set to their default values. This time should
and the channel ``Delay`` set to their default values. This time should
be familiar as you have seen it before in a previous section.
The trace source namespace entry (reference 02) has changed to reflect that
this event is coming from node 1 (``/NodeList/1``) and the packet reception
trace source (``/MacRx``). It should be quite easy for you to follow the
progress of the packet through the topology by looking at the rest of the
trace source (``/MacRx``). It should be quite easy for you to follow the
progress of the packet through the topology by looking at the rest of the
traces in the file.
PCAP Tracing
++++++++++++
The |ns3| device helpers can also be used to create trace files in the
``.pcap`` format. The acronym pcap (usually written in lower case) stands
for packet capture, and is actually an API that includes the
for packet capture, and is actually an API that includes the
definition of a ``.pcap`` file format. The most popular program that can
read and display this format is Wireshark (formerly called Ethereal).
However, there are many traffic trace analyzers that use this packet format.
We encourage users to exploit the many tools available for analyzing pcap
traces. In this tutorial, we concentrate on viewing pcap traces with tcpdump.
The code used to enable pcap tracing is a one-liner.
The code used to enable pcap tracing is a one-liner.
::
pointToPoint.EnablePcapAll ("myfirst");
Go ahead and insert this line of code after the ASCII tracing code we just
Go ahead and insert this line of code after the ASCII tracing code we just
added to ``scratch/myfirst.cc``. Notice that we only passed the string
"myfirst," and not "myfirst.pcap" or something similar. This is because the
parameter is a prefix, not a complete file name. The helper will actually
create a trace file for every point-to-point device in the simulation. The
"myfirst," and not "myfirst.pcap" or something similar. This is because the
parameter is a prefix, not a complete file name. The helper will actually
create a trace file for every point-to-point device in the simulation. The
file names will be built using the prefix, the node number, the device number
and a ".pcap" suffix.
In our example script, we will eventually see files named "myfirst-0-0.pcap"
and "myfirst-1-0.pcap" which are the pcap traces for node 0-device 0 and
In our example script, we will eventually see files named "myfirst-0-0.pcap"
and "myfirst-1-0.pcap" which are the pcap traces for node 0-device 0 and
node 1-device 0, respectively.
Once you have added the line of code to enable pcap tracing, you can run the
@@ -1022,14 +1022,14 @@ script in the usual way:
$ ./ns3 run scratch/myfirst
If you look at the top level directory of your distribution, you should now
see three log files: ``myfirst.tr`` is the ASCII trace file we have
see three log files: ``myfirst.tr`` is the ASCII trace file we have
previously examined. ``myfirst-0-0.pcap`` and ``myfirst-1-0.pcap``
are the new pcap files we just generated.
are the new pcap files we just generated.
Reading output with tcpdump
~~~~~~~~~~~~~~~~~~~~~~~~~~~
The easiest thing to do at this point will be to use ``tcpdump`` to look
at the ``pcap`` files.
at the ``pcap`` files.
.. sourcecode:: bash
@@ -1043,11 +1043,11 @@ at the ``pcap`` files.
2.257324 IP 10.1.1.1.49153 > 10.1.1.2.9: UDP, length 1024
2.257324 IP 10.1.1.2.9 > 10.1.1.1.49153: UDP, length 1024
You can see in the dump of ``myfirst-0-0.pcap`` (the client device) that the
You can see in the dump of ``myfirst-0-0.pcap`` (the client device) that the
echo packet is sent at 2 seconds into the simulation. If you look at the
second dump (``myfirst-1-0.pcap``) you can see that packet being received
at 2.257324 seconds. You see the packet being echoed back at 2.257324 seconds
in the second dump, and finally, you see the packet being received back at
in the second dump, and finally, you see the packet being received back at
the client in the first dump at 2.514648 seconds.
Reading output with Wireshark

View File

@@ -46,7 +46,7 @@ do for [i=0:299] {
unset ylabel
unset xtics
unset ytics
plot 'example-output.txt' using 1:1:1 with labels offset -10, 0, 'example-output.txt' using 1:1:6 with labels offset 10, 0
plot 'example-output.txt' using 1:1:1 with labels offset -10, 0, 'example-output.txt' using 1:1:6 with labels offset 10, 0
unset object 101
unset multiplot
}
@@ -54,8 +54,8 @@ do for [i=0:299] {
reset
set terminal png
set output 'snr.png'
set xlabel 'Time [s]'
set ylabel 'SNR [dB]'
set xlabel 'Time [s]'
set ylabel 'SNR [dB]'
set xtics
set ytics
set grid

View File

@@ -177,7 +177,7 @@ main (int argc, char *argv[])
YansWifiChannelHelper wifiChannel;
wifiChannel.SetPropagationDelay ("ns3::ConstantSpeedPropagationDelayModel");
wifiChannel.AddPropagationLoss ("ns3::FriisPropagationLossModel");
// create wifi channel
Ptr<YansWifiChannel> wifiChannelPtr = wifiChannel.Create ();
wifiPhy.SetChannel (wifiChannelPtr);

View File

@@ -169,7 +169,7 @@ main (int argc, char *argv[])
double interval = 1; // seconds
double startTime = 0.0; // seconds
double distanceToRx = 100.0; // meters
// Energy Harvester variables
double harvestingUpdateInterval = 1; // seconds

View File

@@ -30,8 +30,8 @@
// - FTP/TCP flow from n0 to n3, starting at time 1.2 to time 1.35 sec.
// - UDP packet size of 210 bytes, with per-packet interval 0.00375 sec.
// (i.e., DataRate of 448,000 bps)
// - DropTail queues
// - Tracing of queues and packet receptions to file
// - DropTail queues
// - Tracing of queues and packet receptions to file
// "simple-error-model.tr"
#include <fstream>
@@ -46,12 +46,12 @@ using namespace ns3;
NS_LOG_COMPONENT_DEFINE ("SimpleErrorModelExample");
int
int
main (int argc, char *argv[])
{
// Users may find it convenient to turn on explicit debugging
// for selected modules; the below lines suggest how to do this
#if 0
#if 0
LogComponentEnable ("SimplePointToPointExample", LOG_LEVEL_INFO);
#endif
@@ -134,14 +134,14 @@ main (int argc, char *argv[])
apps.Stop (Seconds (10.0));
// Create a similar flow from n3 to n1, starting at time 1.1 seconds
onoff.SetAttribute ("Remote",
onoff.SetAttribute ("Remote",
AddressValue (InetSocketAddress (i1i2.GetAddress (0), port)));
apps = onoff.Install (c.Get (3));
apps.Start (Seconds (1.1));
apps.Stop (Seconds (10.0));
// Create a packet sink to receive these packets
sink.SetAttribute ("Local",
sink.SetAttribute ("Local",
AddressValue (InetSocketAddress (Ipv4Address::GetAny (), port)));
apps = sink.Install (c.Get (1));
apps.Start (Seconds (1.1));
@@ -152,7 +152,7 @@ main (int argc, char *argv[])
//
// Create an ErrorModel based on the implementation (constructor)
// specified by the default TypeId
ObjectFactory factory;
factory.SetTypeId (errorModelType);
Ptr<ErrorModel> em = factory.Create<ErrorModel> ();

View File

@@ -11,7 +11,7 @@ cpp_examples = [
("icmpv6-redirect", "True", "True"),
("ping6", "True", "True"),
("radvd", "True", "True"),
("radvd-two-prefix", "True", "True"),
("radvd-two-prefix", "True", "True"),
("test-ipv6", "True", "True"),
]

View File

@@ -103,7 +103,7 @@ int main (int argc, char** argv)
Ping6Helper ping6;
ping6.SetLocal (i1.GetAddress (0, 1));
ping6.SetRemote (i2.GetAddress (1, 1));
ping6.SetRemote (i2.GetAddress (1, 1));
ping6.SetAttribute ("MaxPackets", UintegerValue (maxPacketCount));
ping6.SetAttribute ("Interval", TimeValue (interPacketInterval));

View File

@@ -100,7 +100,7 @@ int main (int argc, char** argv)
Ping6Helper ping6;
ping6.SetLocal (i1.GetAddress (0, 1));
ping6.SetRemote (i2.GetAddress (1, 1));
ping6.SetRemote (i2.GetAddress (1, 1));
ping6.SetAttribute ("MaxPackets", UintegerValue (maxPacketCount));
ping6.SetAttribute ("Interval", TimeValue (interPacketInterval));

View File

@@ -88,7 +88,7 @@ int main (int argc, char **argv)
CsmaHelper csma;
csma.SetChannelAttribute ("DataRate", DataRateValue (5000000));
csma.SetChannelAttribute ("Delay", TimeValue (MilliSeconds (2)));
NetDeviceContainer ndc1 = csma.Install (net1);
NetDeviceContainer ndc1 = csma.Install (net1);
NetDeviceContainer ndc2 = csma.Install (net2);
NS_LOG_INFO ("Assign IPv6 Addresses.");

View File

@@ -26,7 +26,7 @@
// LAN
//
// - ICMPv6 echo request flows from n0 to n1 and back with ICMPv6 echo reply
// - DropTail queues
// - DropTail queues
// - Tracing of queues and packet receptions to file "ping6.tr"
#include <fstream>
@@ -90,7 +90,7 @@ int main (int argc, char **argv)
Ping6Helper ping6;
/*
ping6.SetLocal (i.GetAddress (0, 1));
ping6.SetLocal (i.GetAddress (0, 1));
ping6.SetRemote (i.GetAddress (1, 1));
*/
ping6.SetIfIndex (i.GetInterfaceIndex (0));

View File

@@ -147,7 +147,7 @@ int main (int argc, char** argv)
NetDeviceContainer tmp4;
tmp4.Add (d2.Get (1)); /* n1 */
Ipv6InterfaceContainer iic2 = ipv6.AssignWithoutAddress (tmp4);
Ipv6InterfaceContainer iic2 = ipv6.AssignWithoutAddress (tmp4);
iic2.Add (iicr2);
/* radvd configuration */

View File

@@ -108,7 +108,7 @@ int main (int argc, char** argv)
NetDeviceContainer tmp4;
tmp4.Add (d2.Get (1)); /* n1 */
Ipv6InterfaceContainer iic2 = ipv6.AssignWithoutAddress (tmp4);
Ipv6InterfaceContainer iic2 = ipv6.AssignWithoutAddress (tmp4);
iic2.Add (iicr2);
/* radvd configuration */

View File

@@ -29,7 +29,7 @@ using namespace ns3;
NS_LOG_COMPONENT_DEFINE ("TestIpv6");
int
int
main (int argc, char *argv[])
{
LogComponentEnable ("TestIpv6", LOG_LEVEL_ALL);

View File

@@ -26,7 +26,7 @@
// WSN (802.15.4)
//
// - ICMPv6 echo request flows from n0 to n1 and back with ICMPv6 echo reply
// - DropTail queues
// - DropTail queues
// - Tracing of queues and packet receptions to file "wsn-ping6.tr"
//
// This example is based on the "ping6.cc" example.

View File

@@ -24,4 +24,4 @@
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0

View File

@@ -21,14 +21,14 @@
*
* James P.G. Sterbenz <jpgs@ittc.ku.edu>, director
* ResiliNets Research Group http://wiki.ittc.ku.edu/resilinets
* Information and Telecommunication Technology Center
* Information and Telecommunication Technology Center
* and
* Department of Electrical Engineering and Computer Science
* The University of Kansas
* Lawrence, KS USA
*
* Work supported in part by NSF FIND (Future Internet Design) Program
* under grant CNS-0626918 (Postmodern Internet Architecture) and
* under grant CNS-0626918 (Postmodern Internet Architecture) and
* by NSF grant CNS-1050226 (Multilayer Network Resilience Analysis and Experimentation on GENI)
*
* This program reads an upper triangular adjacency matrix (e.g. adjacency_matrix.txt) and
@@ -100,7 +100,7 @@ int main (int argc, char *argv[])
CommandLine cmd (__FILE__);
cmd.Parse (argc, argv);
// ---------- End of Simulation Variables ----------------------------------
// ---------- Read Adjacency Matrix ----------------------------------------

View File

@@ -24,4 +24,4 @@
105.95 40.13
106 40.71
107.41 42.1
108.94 42.36
108.94 42.36

View File

@@ -35,14 +35,14 @@ NS_LOG_COMPONENT_DEFINE ("ObjectNamesExample");
uint32_t bytesReceived = 0;
void
void
RxEvent (std::string context, Ptr<const Packet> packet)
{
std::cout << Simulator::Now ().GetSeconds () << "s " << context << " packet size " << packet->GetSize () << std::endl;
bytesReceived += packet->GetSize ();
}
int
int
main (int argc, char *argv[])
{
bool outputValidated = true;
@@ -73,7 +73,7 @@ main (int argc, char *argv[])
// It is possible to rename a node that has been previously named. This is
// useful in automatic name generation. You can automatically generate node
// names such as, "node-0", "node-1", etc., and then go back and change
// the name of some distinguished node to another value -- "access-point"
// the name of some distinguished node to another value -- "access-point"
// for example. We illustrate this by just changing the client's name.
// As is typical of the object name service, you can either provide or elide
// the "/Names" prefix as you choose.
@@ -101,9 +101,9 @@ main (int argc, char *argv[])
//
// You can use the object names that you've assigned in calls to the Config
// system to set Object Attributes. For example, you can set the Mtu
// Attribute of a Csma devices using the object naming service. Note that
// in this case, the "/Names" prefix is always required since the _Config_
// system to set Object Attributes. For example, you can set the Mtu
// Attribute of a Csma devices using the object naming service. Note that
// in this case, the "/Names" prefix is always required since the _Config_
// system always expects to see a fully qualified path name.
//
@@ -111,7 +111,7 @@ main (int argc, char *argv[])
UintegerValue val;
csmaNetDevice->GetAttribute ("Mtu", val);
std::cout << "MTU on device 0 before configuration is " << val.Get () << std::endl;
Config::Set ("/Names/client/eth0/Mtu", UintegerValue (1234));
// Check the attribute again
@@ -130,7 +130,7 @@ main (int argc, char *argv[])
// to get to the server node, and then continue seamlessly adding named objects
// in the path. This is not nearly as readable as the previous version, but it
// illustrates how you can mix and match object names and Attribute names.
// Note that the config path now begins with a path in the "/NodeList"
// Note that the config path now begins with a path in the "/NodeList"
// namespace.
//
Config::Set ("/NodeList/1/eth0/Mtu", UintegerValue (1234));
@@ -167,13 +167,13 @@ main (int argc, char *argv[])
//
// Use the Config system to connect a trace source using the object name
// service to specify the path. Note that in this case, the "/Names"
// prefix is always required since the _Config_ system always expects to
// see a fully qualified path name
// prefix is always required since the _Config_ system always expects to
// see a fully qualified path name
//
Config::Connect ("/Names/client/eth0/MacRx", MakeCallback (&RxEvent));
//
// Set up some pcap tracing on the CSMA devices. The names of the trace
// Set up some pcap tracing on the CSMA devices. The names of the trace
// files will automatically correspond to the object names if present.
// In this case, you will find trace files called:
//
@@ -209,7 +209,7 @@ main (int argc, char *argv[])
if (outputValidated == false)
{
std::cerr << "Program internal checking failed; returning with error" << std::endl;
std::cerr << "Program internal checking failed; returning with error" << std::endl;
return (1);
}
}

View File

@@ -22,7 +22,7 @@
// LAN
//
// - UDP flows from n0 to n1 and back
// - DropTail queues
// - DropTail queues
// - Tracing of queues and packet receptions to file "udp-echo.tr"
#include <fstream>
@@ -35,7 +35,7 @@ using namespace ns3;
NS_LOG_COMPONENT_DEFINE ("RealtimeUdpEchoExample");
int
int
main (int argc, char *argv[])
{
//
@@ -49,7 +49,7 @@ main (int argc, char *argv[])
// But since this is a realtime script, don't allow the user to mess with
// that.
//
GlobalValue::Bind ("SimulatorImplementationType",
GlobalValue::Bind ("SimulatorImplementationType",
StringValue ("ns3::RealtimeSimulatorImpl"));
//

View File

@@ -20,7 +20,7 @@
# LAN
#
# - UDP flows from n0 to n1 and back
# - DropTail queues
# - DropTail queues
# - Tracing of queues and packet receptions to file "udp-echo.tr"
import ns.applications

View File

@@ -29,7 +29,7 @@
// \ p-p
// \ (shared csma/cd)
// n2 -------------------------n3
// / | |
// / | |
// / p-p n4 n5 ---------- n6
// n1 p-p
// | |
@@ -55,7 +55,7 @@
// on the n1/n6 p2p link)
// At time 12s, bring the n1 interface down between n1 and n6. Packets
// will be diverted to the alternate path
// At time 14s, re-enable the n1/n6 interface to up. This will change
// At time 14s, re-enable the n1/n6 interface to up. This will change
// routing back to n1-n6 since the interface up notification will cause
// a new local interface route, at higher priority than global routing
// At time 16s, stop the second flow.
@@ -79,7 +79,7 @@ using namespace ns3;
NS_LOG_COMPONENT_DEFINE ("DynamicGlobalRoutingExample");
int
int
main (int argc, char *argv[])
{
// The below value configures the default behavior of global routing.
@@ -193,7 +193,7 @@ main (int argc, char *argv[])
p2p.EnablePcapAll ("dynamic-global-routing");
csma.EnablePcapAll ("dynamic-global-routing", false);
Ptr<Node> n1 = c.Get (1);
Ptr<Ipv4> ipv41 = n1->GetObject<Ipv4> ();
// The first ifIndex is 0 for loopback, then the first p2p is numbered 1,
@@ -214,7 +214,7 @@ main (int argc, char *argv[])
Simulator::Schedule (Seconds (12),&Ipv4::SetDown,ipv41, ipv4ifIndex1);
Simulator::Schedule (Seconds (14),&Ipv4::SetUp,ipv41, ipv4ifIndex1);
// Trace routing tables
// Trace routing tables
Ipv4GlobalRoutingHelper g;
Ptr<OutputStreamWrapper> routingStream = Create<OutputStreamWrapper> ("dynamic-global-routing.routes", std::ios::out);
g.PrintRoutingTableAllAt (Seconds (12), routingStream);

View File

@@ -44,7 +44,7 @@ using std::cout;
NS_LOG_COMPONENT_DEFINE ("GlobalRouterInjectionTest");
int
int
main (int argc, char *argv[])
{
@@ -114,7 +114,7 @@ main (int argc, char *argv[])
ipv4C->AddAddress (ifIndexC, ifInAddrC);
ipv4C->SetMetric (ifIndexC, 1);
ipv4C->SetUp (ifIndexC);
// Create router nodes, initialize routing database and set up the routing
// tables in the nodes.
@@ -136,7 +136,7 @@ main (int argc, char *argv[])
// Create the OnOff application to send UDP datagrams of size
// 210 bytes at a rate of 448 Kb/s
uint16_t port = 9; // Discard port (RFC 863)
OnOffHelper onoff ("ns3::UdpSocketFactory",
OnOffHelper onoff ("ns3::UdpSocketFactory",
Address (InetSocketAddress (ifInAddrC.GetLocal (), port)));
onoff.SetConstantRate (DataRate (6000));
ApplicationContainer apps = onoff.Install (nA);

View File

@@ -714,7 +714,7 @@ main (int argc, char *argv[])
// ======================================================================
// Print routing tables at T=0.1
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// NOTE: Node 0 and Node 13 must have non-empty tables (except for local
// NOTE: Node 0 and Node 13 must have non-empty tables (except for local
// loopback and local LAN) if routing is operating correctly.
// ----------------------------------------------------------------------
NS_LOG_INFO ("Set up to print routing tables at T=0.1s");

View File

@@ -36,7 +36,7 @@ using namespace ns3;
NS_LOG_COMPONENT_DEFINE ("GlobalRouterSlash32Test");
int
int
main (int argc, char *argv[])
{
@@ -99,7 +99,7 @@ main (int argc, char *argv[])
ipv4C->AddAddress (ifIndexC, ifInAddrC);
ipv4C->SetMetric (ifIndexC, 1);
ipv4C->SetUp (ifIndexC);
// Create router nodes, initialize routing database and set up the routing
// tables in the nodes.
Ipv4GlobalRoutingHelper::PopulateRoutingTables ();
@@ -107,7 +107,7 @@ main (int argc, char *argv[])
// Create the OnOff application to send UDP datagrams of size
// 210 bytes at a rate of 448 Kb/s
uint16_t port = 9; // Discard port (RFC 863)
OnOffHelper onoff ("ns3::UdpSocketFactory",
OnOffHelper onoff ("ns3::UdpSocketFactory",
Address (InetSocketAddress (ifInAddrC.GetLocal (), port)));
onoff.SetConstantRate (DataRate (6000));
ApplicationContainer apps = onoff.Install (nA);

View File

@@ -85,7 +85,7 @@ NS_LOG_COMPONENT_DEFINE ("manet-routing-compare");
/**
* Routing experiment class.
*
*
* It handles the creation and run of an experiment.
*/
class RoutingExperiment

View File

@@ -24,7 +24,7 @@
// \ p-p
// \ (shared csma/cd)
// n2 -------------------------n3
// / | |
// / | |
// / p-p n4 n5 ---------- n6
// n1 p-p
//
@@ -47,7 +47,7 @@ using namespace ns3;
NS_LOG_COMPONENT_DEFINE ("MixedGlobalRoutingExample");
int
int
main (int argc, char *argv[])
{
Config::SetDefault ("ns3::OnOffApplication::PacketSize", UintegerValue (210));

View File

@@ -29,7 +29,7 @@
//
// this is a modification of simple-global-routing to allow for
// a single hop but higher-cost path between n1 and n3
//
//
// - Tracing of queues and packet receptions to file "simple-rerouting.tr"
#include <iostream>
@@ -48,12 +48,12 @@ using namespace ns3;
NS_LOG_COMPONENT_DEFINE ("SimpleAlternateRoutingExample");
int
int
main (int argc, char *argv[])
{
// Users may find it convenient to turn on explicit debugging
// for selected modules; the below lines suggest how to do this
#if 0
#if 0
LogComponentEnable ("GlobalRoutingHelper", LOG_LOGIC);
LogComponentEnable ("GlobalRouter", LOG_LOGIC);
#endif
@@ -66,15 +66,15 @@ main (int argc, char *argv[])
// n1 and n3 to take the 2-hop route through n2
CommandLine cmd (__FILE__);
//
// Additionally, we plumb this metric into the default value / command
// line argument system as well, for exemplary purposes. This means
// that it can be resettable at the command-line to the program,
//
// Additionally, we plumb this metric into the default value / command
// line argument system as well, for exemplary purposes. This means
// that it can be resettable at the command-line to the program,
// rather than recompiling
// e.g. ns3 --run "simple-alternate-routing --AlternateCost=5"
uint16_t sampleMetric = 1;
cmd.AddValue ("AlternateCost",
"This metric is used in the example script between n3 and n1 ",
"This metric is used in the example script between n3 and n1 ",
sampleMetric);
// Allow the user to override any of the defaults and the above
@@ -110,7 +110,7 @@ main (int argc, char *argv[])
InternetStackHelper internet;
internet.Install (c);
// Later, we add IP addresses. The middle two octets correspond to
// Later, we add IP addresses. The middle two octets correspond to
// the channel number.
NS_LOG_INFO ("Assign IP Addresses.");
Ipv4AddressHelper ipv4;
@@ -133,7 +133,7 @@ main (int argc, char *argv[])
// tables in the nodes.
Ipv4GlobalRoutingHelper::PopulateRoutingTables ();
// Create the OnOff application to send UDP datagrams
// Create the OnOff application to send UDP datagrams
NS_LOG_INFO ("Create Application.");
uint16_t port = 9; // Discard port (RFC 863)

View File

@@ -31,7 +31,7 @@
// - FTP/TCP flow from n0 to n3, starting at time 1.2 to time 1.35 sec.
// - UDP packet size of 210 bytes, with per-packet interval 0.00375 sec.
// (i.e., DataRate of 448,000 bps)
// - DropTail queues
// - DropTail queues
// - Tracing of queues and packet receptions to file "simple-global-routing.tr"
#include <iostream>
@@ -51,16 +51,16 @@ using namespace ns3;
NS_LOG_COMPONENT_DEFINE ("SimpleGlobalRoutingExample");
int
int
main (int argc, char *argv[])
{
// Users may find it convenient to turn on explicit debugging
// for selected modules; the below lines suggest how to do this
#if 0
#if 0
LogComponentEnable ("SimpleGlobalRoutingExample", LOG_LEVEL_INFO);
#endif
// Set up some default values for the simulation. Use the
// Set up some default values for the simulation. Use the
Config::SetDefault ("ns3::OnOffApplication::PacketSize", UintegerValue (210));
Config::SetDefault ("ns3::OnOffApplication::DataRate", StringValue ("448kb/s"));
@@ -118,7 +118,7 @@ main (int argc, char *argv[])
// 210 bytes at a rate of 448 Kb/s
NS_LOG_INFO ("Create Applications.");
uint16_t port = 9; // Discard port (RFC 863)
OnOffHelper onoff ("ns3::UdpSocketFactory",
OnOffHelper onoff ("ns3::UdpSocketFactory",
Address (InetSocketAddress (i3i2.GetAddress (0), port)));
onoff.SetConstantRate (DataRate ("448kb/s"));
ApplicationContainer apps = onoff.Install (c.Get (0));
@@ -133,7 +133,7 @@ main (int argc, char *argv[])
apps.Stop (Seconds (10.0));
// Create a similar flow from n3 to n1, starting at time 1.1 seconds
onoff.SetAttribute ("Remote",
onoff.SetAttribute ("Remote",
AddressValue (InetSocketAddress (i1i2.GetAddress (0), port)));
apps = onoff.Install (c.Get (3));
apps.Start (Seconds (1.1));

View File

@@ -72,7 +72,7 @@ using namespace ns3;
*
* This example demonstrates configuration of
* static routing to realize broadcast-like
* flooding of packets from node A
* flooding of packets from node A
* across the illustrated topology.
*/
int
@@ -163,7 +163,7 @@ main (int argc, char *argv[])
simplechannel->BlackList (Names::Find <SimpleNetDevice> ("E/dev"), Names::Find <SimpleNetDevice> ("C/dev"));
// ensure some time progress between re-transmissions
simplechannel->SetAttribute ("Delay", TimeValue (MilliSeconds (1)));
// sinks
PacketSinkHelper sinkHelper ("ns3::UdpSocketFactory", InetSocketAddress (Ipv4Address::GetAny (), 9));
auto sinks = sinkHelper.Install ("B");
@@ -188,14 +188,14 @@ main (int argc, char *argv[])
// run simulation
Simulator::Run ();
std::cout << "Node A sent " << 10 * 1024 << " bytes" << std::endl;
for (auto end = sinks.End (),
iter = sinks.Begin (); iter != end; ++iter)
{
auto node = (*iter)->GetNode ();
auto sink = (*iter)->GetObject <PacketSink> ();
std::cout << "Node " << Names::FindName (node)
std::cout << "Node " << Names::FindName (node)
<< " received " << sink->GetTotalRx () << " bytes" << std::endl;
}

View File

@@ -93,7 +93,7 @@ public:
int main (int argc, char** argv)
{
#if 0
#if 0
LogComponentEnable ("Ipv6L3Protocol", LOG_LEVEL_ALL);
LogComponentEnable ("Icmpv6L4Protocol", LOG_LEVEL_ALL);
LogComponentEnable ("Ipv6StaticRouting", LOG_LEVEL_ALL);
@@ -146,7 +146,7 @@ int main (int argc, char** argv)
Ping6Helper ping6;
ping6.SetLocal (i1.GetAddress (0, 1));
ping6.SetRemote (i2.GetAddress (1, 1));
ping6.SetRemote (i2.GetAddress (1, 1));
ping6.SetAttribute ("MaxPackets", UintegerValue (maxPacketCount));
ping6.SetAttribute ("Interval", TimeValue (interPacketInterval));

View File

@@ -20,7 +20,7 @@
#
# Network topology:
#
#
# n0 r n1
# | _ |
# ====|_|====
@@ -79,7 +79,7 @@ def main(argv):
i2.SetForwarding(0, True);
i2.SetDefaultRouteInAllNodes(0);
# Create a Ping6 application to send ICMPv6 echo request from n0 to n1 via r
# Create a Ping6 application to send ICMPv6 echo request from n0 to n1 via r
print ("Application")
packetSize = 1024;
maxPacketCount = 5;
@@ -87,7 +87,7 @@ def main(argv):
ping6 = ns.internet_apps.Ping6Helper();
ping6.SetLocal(i1.GetAddress(0, 1));
ping6.SetRemote(i2.GetAddress(1, 1));
ping6.SetRemote(i2.GetAddress(1, 1));
ping6.SetAttribute("MaxPackets", ns.core.UintegerValue(maxPacketCount));
ping6.SetAttribute("Interval", ns.core.TimeValue(interPacketInterval));
@@ -102,7 +102,7 @@ def main(argv):
csma.EnableAsciiAll(ascii.CreateFileStream("simple-routing-ping6.tr"))
csma.EnablePcapAll("simple-routing-ping6", True)
# Run Simulation
# Run Simulation
ns.core.Simulator.Run()
ns.core.Simulator.Destroy()

View File

@@ -36,7 +36,7 @@ using namespace ns3;
NS_LOG_COMPONENT_DEFINE ("StaticRoutingSlash32Test");
int
int
main (int argc, char *argv[])
{
@@ -100,7 +100,7 @@ main (int argc, char *argv[])
ipv4C->AddAddress (ifIndexC, ifInAddrC);
ipv4C->SetMetric (ifIndexC, 1);
ipv4C->SetUp (ifIndexC);
Ipv4StaticRoutingHelper ipv4RoutingHelper;
// Create static routes from A to C
Ptr<Ipv4StaticRouting> staticRoutingA = ipv4RoutingHelper.GetStaticRouting (ipv4A);
@@ -112,7 +112,7 @@ main (int argc, char *argv[])
// Create the OnOff application to send UDP datagrams of size
// 210 bytes at a rate of 448 Kb/s
uint16_t port = 9; // Discard port (RFC 863)
OnOffHelper onoff ("ns3::UdpSocketFactory",
OnOffHelper onoff ("ns3::UdpSocketFactory",
Address (InetSocketAddress (ifInAddrC.GetLocal (), port)));
onoff.SetConstantRate (DataRate (6000));
ApplicationContainer apps = onoff.Install (nA);

View File

@@ -20,7 +20,7 @@
Destination host (10.20.1.2)
|
| 10.20.1.0/24
DSTRTR
DSTRTR
10.10.1.0/24 / \ 10.10.2.0/24
/ \
Rtr1 Rtr2
@@ -51,7 +51,7 @@ void BindSock (Ptr<Socket> sock, Ptr<NetDevice> netdev);
void srcSocketRecv (Ptr<Socket> socket);
void dstSocketRecv (Ptr<Socket> socket);
int
int
main (int argc, char *argv[])
{
@@ -121,7 +121,7 @@ main (int argc, char *argv[])
staticRoutingRtr1->AddHostRouteTo (Ipv4Address ("10.20.1.2"), Ipv4Address ("10.10.1.2"), 2);
staticRoutingRtr2->AddHostRouteTo (Ipv4Address ("10.20.1.2"), Ipv4Address ("10.10.2.2"), 2);
// Two routes to same destination - setting separate metrics.
// Two routes to same destination - setting separate metrics.
// You can switch these to see how traffic gets diverted via different routes
staticRoutingSrc->AddHostRouteTo (Ipv4Address ("10.20.1.2"), Ipv4Address ("10.1.1.2"), 1,5);
staticRoutingSrc->AddHostRouteTo (Ipv4Address ("10.20.1.2"), Ipv4Address ("10.1.2.2"), 2,10);
@@ -163,9 +163,9 @@ main (int argc, char *argv[])
// Fourth again as normal (goes via Rtr1)
Simulator::Schedule (Seconds (3.0),&BindSock, srcSocket, Ptr<NetDevice>(0));
Simulator::Schedule (Seconds (3.1),&SendStuff, srcSocket, dstaddr, dstport);
// If you uncomment what's below, it results in ASSERT failing since you can't
// If you uncomment what's below, it results in ASSERT failing since you can't
// bind to a socket not existing on a node
// Simulator::Schedule(Seconds(4.0),&BindSock, srcSocket, dDstRtrdDst.Get(0));
// Simulator::Schedule(Seconds(4.0),&BindSock, srcSocket, dDstRtrdDst.Get(0));
Simulator::Run ();
Simulator::Destroy ();
@@ -197,7 +197,7 @@ srcSocketRecv (Ptr<Socket> socket)
if (socket->GetBoundNetDevice ())
{
NS_LOG_INFO ("Socket was bound");
}
}
else
{
NS_LOG_INFO ("Socket was not bound");

View File

@@ -20,7 +20,7 @@
Destination host (10.20.1.2)
|
| 10.20.1.0/24
DSTRTR
DSTRTR
10.10.1.0/24 / \ 10.10.2.0/24
/ \
Rtr1 Rtr2
@@ -61,7 +61,7 @@ void BindSock (Ptr<Socket> sock, Ptr<NetDevice> netdev);
void srcSocketRecv (Ptr<Socket> socket);
void dstSocketRecv (Ptr<Socket> socket);
int
int
main (int argc, char *argv[])
{
@@ -131,7 +131,7 @@ main (int argc, char *argv[])
staticRoutingRtr1->AddHostRouteTo (Ipv4Address ("10.20.1.2"), Ipv4Address ("10.10.1.2"), 2);
staticRoutingRtr2->AddHostRouteTo (Ipv4Address ("10.20.1.2"), Ipv4Address ("10.10.2.2"), 2);
// Two routes to same destination - setting separate metrics.
// Two routes to same destination - setting separate metrics.
// You can switch these to see how traffic gets diverted via different routes
staticRoutingSrc->AddHostRouteTo (Ipv4Address ("10.20.1.2"), Ipv4Address ("10.1.1.2"), 1,5);
staticRoutingSrc->AddHostRouteTo (Ipv4Address ("10.20.1.2"), Ipv4Address ("10.1.2.2"), 2,10);
@@ -180,9 +180,9 @@ main (int argc, char *argv[])
// Fourth again as normal (goes via Rtr1)
Simulator::Schedule (Seconds (3.0),&BindSock, srcSocket4, Ptr<NetDevice>(0));
Simulator::Schedule (Seconds (3.1),&StartFlow, srcSocket4, dstaddr, dstport);
// If you uncomment what's below, it results in ASSERT failing since you can't
// If you uncomment what's below, it results in ASSERT failing since you can't
// bind to a socket not existing on a node
// Simulator::Schedule(Seconds(4.0),&BindSock, srcSocket, dDstRtrdDst.Get(0));
// Simulator::Schedule(Seconds(4.0),&BindSock, srcSocket, dDstRtrdDst.Get(0));
Simulator::Run ();
Simulator::Destroy ();

View File

@@ -50,13 +50,13 @@ void ReceivePacket (Ptr<Socket> socket)
}
}
static void SendPacket (Ptr<Socket> socket, uint32_t pktSize,
static void SendPacket (Ptr<Socket> socket, uint32_t pktSize,
uint32_t pktCount, Time pktInterval )
{
if (pktCount > 0)
{
socket->Send (Create<Packet> (pktSize));
Simulator::Schedule (pktInterval, &SendPacket,
Simulator::Schedule (pktInterval, &SendPacket,
socket, pktSize,pktCount - 1, pktInterval);
}
else
@@ -65,7 +65,7 @@ static void SendPacket (Ptr<Socket> socket, uint32_t pktSize,
}
}
int
int
main (int argc, char *argv[])
{
//
@@ -114,7 +114,7 @@ main (int argc, char *argv[])
ipv4.SetBase ("10.1.1.0", "255.255.255.0");
Ipv4InterfaceContainer i = ipv4.Assign (d);
serverAddress = Address(i.GetAddress (1));
NS_LOG_INFO ("Create sockets.");
//Receiver socket on n1
TypeId tid = TypeId::LookupByName ("ns3::UdpSocketFactory");
@@ -148,7 +148,7 @@ main (int argc, char *argv[])
//Schedule SendPacket
Time interPacketInterval = Seconds (packetInterval);
Simulator::ScheduleWithContext (source->GetNode ()->GetId (),
Seconds (1.0), &SendPacket,
Seconds (1.0), &SendPacket,
source, packetSize, packetCount, interPacketInterval);
NS_LOG_INFO ("Run Simulation.");

View File

@@ -50,13 +50,13 @@ void ReceivePacket (Ptr<Socket> socket)
}
}
static void SendPacket (Ptr<Socket> socket, uint32_t pktSize,
static void SendPacket (Ptr<Socket> socket, uint32_t pktSize,
uint32_t pktCount, Time pktInterval )
{
if (pktCount > 0)
{
socket->Send (Create<Packet> (pktSize));
Simulator::Schedule (pktInterval, &SendPacket,
Simulator::Schedule (pktInterval, &SendPacket,
socket, pktSize,pktCount - 1, pktInterval);
}
else
@@ -65,7 +65,7 @@ static void SendPacket (Ptr<Socket> socket, uint32_t pktSize,
}
}
int
int
main (int argc, char *argv[])
{
//
@@ -114,7 +114,7 @@ main (int argc, char *argv[])
ipv6.SetBase ("2001:0000:f00d:cafe::", Ipv6Prefix (64));
Ipv6InterfaceContainer i6 = ipv6.Assign (d);
serverAddress = Address(i6.GetAddress (1,1));
NS_LOG_INFO ("Create sockets.");
//Receiver socket on n1
TypeId tid = TypeId::LookupByName ("ns3::UdpSocketFactory");
@@ -147,7 +147,7 @@ main (int argc, char *argv[])
//Schedule SendPacket
Time interPacketInterval = Seconds (packetInterval);
Simulator::ScheduleWithContext (source->GetNode ()->GetId (),
Seconds (1.0), &SendPacket,
Seconds (1.0), &SendPacket,
source, packetSize, packetCount, interPacketInterval);
NS_LOG_INFO ("Run Simulation.");

View File

@@ -200,7 +200,7 @@ Receiver::StartApplication ()
Ptr<SocketFactory> socketFactory = GetNode ()->GetObject<SocketFactory>
(UdpSocketFactory::GetTypeId ());
m_socket = socketFactory->CreateSocket ();
InetSocketAddress local =
InetSocketAddress local =
InetSocketAddress (Ipv4Address::GetAny (), m_port);
m_socket->Bind (local);
}
@@ -275,7 +275,7 @@ Receiver::Receive (Ptr<Socket> socket)
//----------------------------------------------------------------------
//-- TimestampTag
//------------------------------------------------------
TypeId
TypeId
TimestampTag::GetTypeId (void)
{
static TypeId tid = TypeId ("TimestampTag")
@@ -289,24 +289,24 @@ TimestampTag::GetTypeId (void)
;
return tid;
}
TypeId
TypeId
TimestampTag::GetInstanceTypeId (void) const
{
return GetTypeId ();
}
uint32_t
uint32_t
TimestampTag::GetSerializedSize (void) const
{
return 8;
}
void
void
TimestampTag::Serialize (TagBuffer i) const
{
int64_t t = m_timestamp.GetNanoSeconds ();
i.Write ((const uint8_t *)&t, 8);
}
void
void
TimestampTag::Deserialize (TagBuffer i)
{
int64_t t;
@@ -325,7 +325,7 @@ TimestampTag::GetTimestamp (void) const
return m_timestamp;
}
void
void
TimestampTag::Print (std::ostream &os) const
{
os << "t=" << m_timestamp;

View File

@@ -126,8 +126,8 @@ private:
/**
* Timestamp tag - it carries when the packet has been sent.
*
* It would have been more realistic to include this info in
*
* It would have been more realistic to include this info in
* a header. Here we show how to avoid the extra overhead in
* a simulation.
*/

Some files were not shown because too many files have changed in this diff Show More