Compare commits

...

76 Commits

Author SHA1 Message Date
menglei.zhang
c92e91fa1d bug fix
Some checks failed
CI / Ubuntu (push) Has been cancelled
CI / Codecov (push) Has been cancelled
CI / Windows_MinGW (push) Has been cancelled
CI / Mac_OS_X (macos-14) (push) Has been cancelled
CI / Mac_OS_X (macos-latest) (push) Has been cancelled
CI / Windows_Visual_Studio (push) Has been cancelled
2025-11-04 23:15:13 +08:00
menglei.zhang
4f5774be5b Initialize m_savedNodeList during Partition 2025-11-04 23:15:13 +08:00
menglei.zhang
3bac85dcab save the global node list 2025-11-04 23:15:13 +08:00
menglei.zhang
0ec074a511 Replace system_clock with steady_clock. 2025-11-04 23:15:13 +08:00
F5
d3b620b1c8 Merge tag 'ns-3.46.1' into unison
ns-3.46.1 release
2025-10-26 21:24:06 +08:00
F5
136a6e22bf Merge tag 'ns-3.46' into unison
ns-3.46 release
2025-10-26 20:52:40 +08:00
Tom Henderson
51387bce7e Update availability in RELEASE_NOTES.md 2025-10-16 15:12:37 -07:00
Tom Henderson
1aca85afa5 Update VERSION and documentation tags for ns-3.46.1 release 2025-10-16 15:09:40 -07:00
Tom Henderson
5867e38d32 doc: Update RELEASE_NOTES.md and CHANGES.md for ns-3.46.1 2025-10-16 08:18:02 -07:00
Gabriel Ferreira
afb92d3733 core: Add missing algorithm header 2025-10-16 14:26:10 +02:00
F5
bd4fa08a7b mtp, mpi: Modify examples 2025-05-01 06:06:02 +08:00
F5
23a0788c02 mtp, mpi: Adjust doxygen format 2025-04-29 17:15:41 +08:00
F5
c39089e746 Merge tag 'ns-3.44' into unison
ns-3.44 release
2025-04-29 17:12:30 +08:00
F5
95baf579f3 examples: Keep mtp examples up to date 2024-10-20 16:15:32 +08:00
F5
a1d007d173 Merge tag 'ns-3.43' into unison
ns-3.43 release
2024-10-20 15:23:35 +08:00
F5
a43bb999d8 Merge tag 'ns-3.42' into unison
ns-3.42 release
2024-09-07 16:01:00 +08:00
F5
1f5b064f6e Update paper information in README 2024-04-30 22:20:43 +08:00
F5
c556528ea4 mpi, mtp: Use override syntax for HybridSimulatorImpl 2024-04-21 10:30:29 +08:00
F5
1d5c3830e8 mtp: Keep examples up to date and change their reflogs correspondingly 2024-03-26 14:47:15 +08:00
F5
6046f4f889 mtp: Use override syntax instead of virtual 2024-03-26 14:46:30 +08:00
F5
08db4ac6d7 Merge tag 'ns-3.41' into unison
ns-3.41 release
2024-03-26 10:54:49 +08:00
yyq15280
7340502ecc network: Fix a memory leak issue in byte-tag-list 2024-03-19 18:47:06 +08:00
F5
b969f6eac6 mtp: Update testcases since clang-tidy modifications influence rng 2023-11-22 23:47:18 +08:00
F5
f20ef44e11 mtp, mpi: Add docstings for the atomic counter 2023-11-22 23:40:29 +08:00
F5
48fe0d92ab mtp, mpi: Add docstings for each method 2023-11-22 23:40:19 +08:00
F5
86b9cfb981 mpi: Keep original event UID after automatic partition 2023-11-22 16:29:41 +08:00
F5
4b5f831aa3 mtp: Add tests for the hybrid simulator 2023-11-22 16:15:28 +08:00
F5
0f243283a0 network: Fix clang-tidy warnings 2023-11-22 16:12:35 +08:00
F5
c4479366ef nix-vector-routing: Fix clang-tidy warnings 2023-11-22 16:12:19 +08:00
F5
c4783e254b flow-monitor: Fix clang-tidy warnings 2023-11-22 16:12:05 +08:00
F5
2f84697f6b mtp, mpi: Fix clang-tidy warnings 2023-11-22 16:11:42 +08:00
F5
b3fdcbc6bd mtp, mpi: Add copyright and licence for source files 2023-11-22 14:24:27 +08:00
F5
ba1398822a doc: Add documentations for the mtp module 2023-11-22 14:13:25 +08:00
F5
26f7c97791 mtp: Keep original event UID after automatic partition 2023-11-22 13:17:33 +08:00
F5
2f30ba37c4 mtp: Add example testing 2023-11-22 13:07:23 +08:00
F5
b551804f91 mpi: Fix code style warning of the hybrid simulator 2023-11-20 23:12:30 +08:00
F5
d70b17337a mtp, mpi: Add fat-tree examples 2023-11-20 23:11:33 +08:00
F5
b69f13f5d2 mtp, mpi: Modify simple examples 2023-11-20 21:48:37 +08:00
F5
b3f09bcee2 Merge tag 'ns-3.40' into unison
ns-3.40 release
2023-11-20 21:18:22 +08:00
F5
5241eb7c99 mtp: Keep the examples up to date 2023-11-14 22:11:17 +08:00
F5
d581156f70 mtp: Mark the test suite as TODO 2023-11-14 21:40:39 +08:00
F5
f620881ed6 docs: Modify README 2023-11-14 21:09:25 +08:00
F5
2600c62fa6 Merge tag 'ns-3.39' into unison
ns-3.39 release
2023-11-14 20:52:26 +08:00
F5
ac6d1f3920 mtp: Modify examples to keep them update 2023-11-14 20:40:40 +08:00
F5
55d6b56a89 internet: Revert # of available ports to pass the test 2023-11-14 20:40:10 +08:00
F5
0df9cb6264 Merge tag 'ns-3.38' into unison
ns-3.38 release
2023-11-14 15:58:35 +08:00
F5
daba77e6c1 network: Make packet metadata thread-safe 2023-11-14 15:36:42 +08:00
F5
4d20e01482 mtp: Remove the legacy first line 2023-11-14 15:36:14 +08:00
F5
b06d6278f9 mtp: Change the code style of examples 2023-11-14 15:35:16 +08:00
F5
07400b582e Merge tag 'ns-3.37' into unison
ns-3.37 release
2023-11-11 21:44:45 +08:00
F5
48577b4659 mtp: Fix deprecation usage since ns-3.37 2023-11-11 21:43:02 +08:00
F5
0d4617a5c6 Merge commit 'e25ff96' into unison 2023-11-11 21:33:08 +08:00
F5
e605065a43 Apply clang-format to modified code for ease of merging 2023-11-11 21:17:27 +08:00
F5
ec09348f8b Merge commit 'ebb5969' into unison 2023-11-11 21:16:15 +08:00
F5
5fbd46107f doc: Add the DOI badge for the evaluated artifact 2023-11-08 14:41:04 +08:00
F5
6f020aef5a mtp: Fix index out-of-bound if no links are in the simulated topology 2023-11-02 12:32:17 +08:00
FujiZ
41cde0d0c7 docs(readme): fix a typo in README 2023-10-15 11:01:51 +08:00
F5
843c579402 flow-monitor: Fix deadlocks when using the hybrid simulator 2023-09-29 17:28:27 +08:00
F5
379f255d63 Update README.md for UNISON 2023-09-22 11:55:42 +08:00
F5
556959fa43 mtp: Add examples 2023-09-15 16:04:12 +08:00
F5
cce2a28a0a build: Add --enable-mtp option 2023-09-15 16:04:12 +08:00
F5
af331aed29 mpi, mtp: Add hybrid simulation support 2023-09-15 16:04:12 +08:00
F5
f818faabcd mtp: Add multithreaded parallel simulation support 2023-09-15 16:03:58 +08:00
F5
6764518fff flow-monitor: Make flow-monitor thread-safe 2022-12-14 21:08:47 +08:00
F5
f657cd0e2a nix-vector-routing: Make nix-vector routing thread-safe 2022-12-14 21:08:47 +08:00
F5
7dcc9828ab tcp: Make TCP options thread-safe 2022-12-14 21:08:47 +08:00
F5
428ac6727d tag: Make sure packet tags are registered when using MPI irecv 2022-12-14 21:08:47 +08:00
F5
753def9f7e internet: Add per-flow ECMP routing 2022-12-14 21:08:47 +08:00
F5
787a3093b2 internet: Increase available ephemeral port range to support heavy DC traffic 2022-12-14 21:08:47 +08:00
F5
2076423b1e internet: Fix global routing when using the multithreaded simulator 2022-10-29 22:04:11 +08:00
F5
91428b851c network: Make simple channel supporting automatic partition 2022-10-29 22:04:11 +08:00
F5
89c128b1a2 network: Enable modifing system IDs for automatic partition 2022-10-25 18:59:38 +08:00
F5
5110ade9ed network: Make packets thread-safe 2022-10-25 18:59:00 +08:00
F5
2825400936 core: Make hash functions thread-safe 2022-10-25 18:59:00 +08:00
F5
34b8e39172 core: Make aggregated objects thread-safe 2022-10-25 18:58:54 +08:00
F5
ba81313570 core: Make reference counting thread-safe 2022-10-25 18:46:29 +08:00
88 changed files with 10477 additions and 307 deletions

View File

@@ -12,15 +12,12 @@ Note that users who upgrade the simulator across versions, or who work directly
This file is a best-effort approach to solving this issue; we will do our best but can guarantee that there will be things that fall through the cracks, unfortunately. If you, as a user, can suggest improvements to this file based on your experience, please contribute a patch or drop us a note on ns-developers mailing list.
## Changes from ns-3.46 to ns-3-dev
## Changes from ns-3.46 to ns-3.46.1
### New API
### Changes to existing API
### Changes to build system
### Changed behavior
The ns-3.46.1 release contains some small build system fixes discovered after the ns-3.46 release, and two
new module documentation chapters (see [RELEASE_NOTES.md](RELEASE_NOTES.md)). There are no API
changes, changes to how the build system works, or changed behavior of the models, compared with
the ns-3.46 release.
## Changes from ns-3.45 to ns-3.46

View File

@@ -72,6 +72,7 @@ option(NS3_MONOLIB
"Build a single shared ns-3 library and link it against executables" OFF
)
option(NS3_MPI "Build with MPI support" OFF)
option(NS3_MTP "Build with Multithreaded simulation support" OFF)
option(NS3_NATIVE_OPTIMIZATIONS "Build with -march=native -mtune=native" OFF)
option(
NS3_NINJA_TRACING

471
README.md
View File

@@ -1,283 +1,282 @@
# The Network Simulator, Version 3
# Unison for ns-3
[![codecov](https://codecov.io/gh/nsnam/ns-3-dev-git/branch/master/graph/badge.svg)](https://codecov.io/gh/nsnam/ns-3-dev-git/branch/master/)
[![Gitlab CI](https://gitlab.com/nsnam/ns-3-dev/badges/master/pipeline.svg)](https://gitlab.com/nsnam/ns-3-dev/-/pipelines)
[![Github CI](https://github.com/nsnam/ns-3-dev-git/actions/workflows/per_commit.yml/badge.svg)](https://github.com/nsnam/ns-3-dev-git/actions)
[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.10077300.svg)](https://doi.org/10.5281/zenodo.10077300)
[![CI](https://github.com/NASA-NJU/UNISON-for-ns-3/actions/workflows/per_commit.yml/badge.svg)](https://github.com/NASA-NJU/UNISON-for-ns-3/actions/workflows/per_commit.yml)
[![Latest Release](https://gitlab.com/nsnam/ns-3-dev/-/badges/release.svg)](https://gitlab.com/nsnam/ns-3-dev/-/releases)
A fast and user-transparent parallel simulator implementation for ns-3.
## License
With fine-grained partition and load-adaptive scheduling, Unison allows users to easily simulate models with multithreaded parallelization without further configurations.
Meanwhile, cache misses are reduced by fine-grained partition, and the mutual waiting time among threads is minimized by load-adaptive scheduling, resulting in efficient parallelization.
More information about Unison can be found in our [EuroSys '24 paper](https://dl.acm.org/doi/10.1145/3627703.3629574).
This software is licensed under the terms of the GNU General Public License v2.0 only (GPL-2.0-only).
See the LICENSE file for more details.
Supported ns-3 version: >= 3.36.1.
We are trying to keep Unison updated with the latest version of ns-3.
You can find each unison-enabled ns-3 version via `unison-*` tags.
## Table of Contents
## Getting Started
* [Overview](#overview-an-open-source-project)
* [Software overview](#software-overview)
* [Getting ns-3](#getting-ns-3)
* [Building ns-3](#building-ns-3)
* [Testing ns-3](#testing-ns-3)
* [Running ns-3](#running-ns-3)
* [ns-3 Documentation](#ns-3-documentation)
* [Working with the Development Version of ns-3](#working-with-the-development-version-of-ns-3)
* [Contributing to ns-3](#contributing-to-ns-3)
* [Reporting Issues](#reporting-issues)
* [Asking Questions](#asking-questions)
* [ns-3 App Store](#ns-3-app-store)
> **NOTE**: Much more substantial information about ns-3 can be found at
<https://www.nsnam.org>
## Overview: An Open Source Project
ns-3 is a free open source project aiming to build a discrete-event
network simulator targeted for simulation research and education.
This is a collaborative project; we hope that
the missing pieces of the models we have not yet implemented
will be contributed by the community in an open collaboration
process. If you would like to contribute to ns-3, please check
the [Contributing to ns-3](#contributing-to-ns-3) section below.
This README excerpts some details from a more extensive
tutorial that is maintained at:
<https://www.nsnam.org/documentation/latest/>
## Software overview
From a software perspective, ns-3 consists of a number of C++
libraries organized around different topics and technologies.
Programs that actually run simulations can be written in
either C++ or Python; the use of Python is enabled by
[runtime C++/Python bindings](https://cppyy.readthedocs.io/en/latest/). Simulation programs will
typically link or import the ns `core` library and any additional
libraries that they need. ns-3 requires a modern C++ compiler
installation (g++ or clang++) and the [CMake](https://cmake.org) build system.
Most ns-3 programs are single-threaded; there is some limited
support for parallelization using the [MPI](https://www.nsnam.org/docs/models/html/distributed.html) framework.
ns-3 can also run in a real-time emulation mode by binding to an
Ethernet device on the host machine and generating and consuming
packets on an actual network. The ns-3 APIs are documented
using [Doxygen](https://www.doxygen.nl).
The code for the framework and the default models provided
by ns-3 is built as a set of libraries. The libraries maintained
by the open source project can be found in the `src` directory.
Users may extend ns-3 by adding libraries to the build;
third-party libraries can be found on the [ns-3 App Store](https://www.nsnam.org)
or elsewhere in public Git repositories, and are usually added to the `contrib` directory.
## Getting ns-3
ns-3 can be obtained by either downloading a released source
archive, or by cloning the project's
[Git repository](https://gitlab.com/nsnam/ns-3-dev.git).
Starting with ns-3 release version 3.45, there are two versions
of source archives that are published with each release:
1. ns-3.##.tar.bz2
1. ns-allinone-3.##.tar.bz2
The first archive is simply a compressed archive of the same code
that one can obtain by checking out the release tagged code from
the ns-3-dev Git repository. The second archive consists of
ns-3 plus additional contributed modules that are maintained outside
of the main ns-3 open source project but that have been reviewed
by maintainers and lightly tested for compatibility with the
release. The contributed modules included in the `allinone` release
will change over time as new third-party libraries emerge while others
may lose compatibility with the ns-3 mainline (e.g., if they become
unmaintained).
## Building ns-3
As mentioned above, ns-3 uses the CMake build system, but
the project maintains a customized wrapper around CMake
called the `ns3` tool. This tool provides a
[Waf-like](https://waf.io) API
to the underlying CMake build manager.
To build the set of default libraries and the example
programs included in this package, you need to use the
`ns3` tool. This tool provides a Waf-like API to the
underlying CMake build manager.
Detailed information on how to use `ns3` is included in the
[quick start guide](doc/installation/source/quick-start.rst).
Before building ns-3, you must configure it.
This step allows the configuration of the build options,
such as whether to enable the examples, tests and more.
To configure ns-3 with examples and tests enabled,
run the following command on the ns-3 main directory:
The quickest way to get started is to type the command
```shell
./ns3 configure --enable-examples --enable-tests
./ns3 configure --enable-mtp --enable-examples
```
Then, build ns-3 by running the following command:
> The build profile is set to default (which uses `-O2 -g` compiler flags) in this case.
> If you want to get `-O3` optimized build and discard all log outputs, please add `-d optimized` arguments.
The `--enable-mtp` option will enable multi-threaded parallelization.
You can verify Unison is enabled by checking whether `Multithreaded Simulation : ON` appears in the optional feature list.
Now, let's build and run a DCTCP example with default sequential simulation and parallel simulation (using 4 threads) respectively:
```shell
./ns3 build
./ns3 build dctcp-example dctcp-example-mtp
time ./ns3 run dctcp-example
time ./ns3 run dctcp-example-mtp
```
By default, the build artifacts will be stored in the `build/` directory.
The simulation should finish in 4-5 minutes for `dctcp-example` and 1-2 minutes for `dctcp-example-mtp`, depending on your hardware and your build profile.
The output in `*.dat` should be in accordance with the comments in the source file.
### Supported Platforms
The speedup of Unison is more significant for larger topologies and traffic volumes.
If you are interested in using it to simulate topologies like fat-tree, BCube and 2D-torus, please refer to [Running Evaluations](#running-evaluations).
The current codebase is expected to build and run on the
set of platforms listed in the [release notes](RELEASE_NOTES.md)
file.
## Speedup Your Existing Code
Other platforms may or may not work: we welcome patches to
improve the portability of the code to these other platforms.
## Testing ns-3
ns-3 contains test suites to validate the models and detect regressions.
To run the test suite, run the following command on the ns-3 main directory:
To understand how Unison affects your model code, let's find the differences between two versions of the source files of the above example:
```shell
./test.py
diff examples/tcp/dctcp-example.cc examples/mtp/dctcp-example-mtp.cc
```
More information about ns-3 tests is available in the
[test framework](doc/manual/source/test-framework.rst) section of the manual.
It turns out that to bring Unison to the existing model code, all you need to do is to include the `ns3/mtp-interface.h` header file and add the following line at the beginning of the `main` function:
## Running ns-3
On recent Linux systems, once you have built ns-3 (with examples
enabled), it should be easy to run the sample programs with the
following command, such as:
```shell
./ns3 run simple-global-routing
```c++
MtpInterface::Enable(numberOfThreads);
```
That program should generate a `simple-global-routing.tr` text
trace file and a set of `simple-global-routing-xx-xx.pcap` binary
PCAP trace files, which can be read by `tcpdump -n -tt -r filename.pcap`.
The program source can be found in the `examples/routing` directory.
The parameter `numberOfThreads` is optional.
If it is omitted, the number of threads is automatically chosen and will not exceed the maximum number of available hardware threads on your system.
If you want to enable Unison for distributed simulation on existing MPI programs for further speedup, place the above line before MPI initialization and do not explicitly specify the simulator implementation in your code.
For such hybrid simulation with MPI, the `--enable-mpi` option is also required when configuring ns-3.
## Running ns-3 from Python
Unison resolved a lot of thread-safety issues with ns-3's architecture.
You don't need to consider these issues on your own for most of the time, except if you have custom global statistics other than the built-in flow-monitor.
In the latter case, if multiple nodes can access your global statistics, you can replace them with atomic variables via `std::atomic<>`.
When collecting tracing data such as Pcap, it is strongly recommended to create separate output files for each node instead of a single trace file.
For complex custom data structures, you can create critical sections by adding
If you do not plan to modify ns-3 upstream modules, you can get
a pre-built version of the ns-3 python bindings. It is recommended
to create a python virtual environment to isolate different application
packages from system-wide packages (installable via the OS package managers).
```shell
python3 -m venv ns3env
source ./ns3env/bin/activate
pip install ns3
```c++
MtpInterface::CriticalSection cs;
```
If you do not have `pip`, check their documents
on [how to install it](https://pip.pypa.io/en/stable/installation/).
at the beginning of your methods.
After installing the `ns3` package, you can then create your simulation python script.
Below is a trivial demo script to get you started.
## Examples
```python
from ns import ns
In addition to the DCTCP example, you can find other adapted examples in `examples/mtp`.
Meanwhile, Unison also supports manual partition, and you can find a minimal example in `src/mtp/examples/simple-mtp.cc`.
For hybrid simulation with MPI, you can find a minimal example in `src/mpi/examples/simple-hybrid.cc`.
ns.LogComponentEnable("Simulator", ns.LOG_LEVEL_ALL)
We also provide three detailed fat-tree examples for Unison, traditional MPI parallel simulation and hybrid simulation:
ns.Simulator.Stop(ns.Seconds(10))
ns.Simulator.Run()
ns.Simulator.Destroy()
| Name | Location | Required configuration flags | Running commands |
| - | - | - | - |
| fat-tree-mtp | src/mtp/examples/fat-tree-mtp.cc | `--enable-mtp --enable-examples` without `--enable-mpi` | `./ns3 run "fat-tree-mtp --thread=4"` |
| fat-tree-mpi | src/mpi/examples/fat-tree-mpi.cc | `--enable-mpi --enable-examples` without `--enable-mtp` | `./ns3 run fat-tree-mpi --command-template "mpirun -np 4 %s"` |
| fat-tree-hybrid | src/mpi/examples/fat-tree-hybrid.cc | `--enable-mtp --enable-mpi --enable-examples` | `./ns3 run fat-tree-hybrid --command-template "mpirun -np 2 %s --thread=2"` |
Feel free to explore these examples, compare code changes and adjust the `-np` and `--thread` arguments.
## Running Evaluations
To evaluate Unison, please switch to [unison-evaluations](https://github.com/NASA-NJU/Unison-for-ns-3/tree/unison-evaluations) branch, which is based on ns-3.36.1.
In this branch, you can find various topology models in the `scratch` folder.
There are a lot of parameters you can set for each topology.
We provided a utility script `exp.py` to compare these simulators and parameters.
We also provided `process.py` to convert these raw experiment data to CSV files suitable for plotting.
Please see the [README in that branch](https://github.com/NASA-NJU/Unison-for-ns-3/tree/unison-evaluations) for more details.
The evaluated artifact (based on ns-3.36.1) is persistently indexed by DOI [10.5281/zenodo.10077300](https://doi.org/10.5281/zenodo.10077300).
## Module Documentation
### 1. Overview
Unison for ns-3 is mainly implemented in the `mtp` module (located at `src/mtp/*`), which stands for multi-threaded parallelization.
This module contains three parts: A parallel simulator implementation `multithreaded-simulator-impl`, an interface to users `mtp-interface`, and `logical-process` to represent LPs in terms of parallel simulation.
All LPs and threads are stored in the `mtp-interface`.
It controls the simulation progress, schedules LPs to threads and manages the lifecycles of LPs and threads.
The interface also provides some methods and options for users to tweak the simulation.
Each LP's logic is implemented in `logical-process`. It contains most of the methods of the default sequential simulator plus some auxiliary methods for parallel simulation.
The simulator implementation `multithreaded-simulator-impl` is a derived class from the base simulator.
It converts calls to the base simulator into calls to logical processes based on the context of the current thread.
It also provides a partition method for automatic fine-grained topology partition.
For distributed simulation with MPI, we added `hybrid-simulator-impl` in the `mpi` module (located at `src/mpi/model/hybrid-simulator-impl*`).
This simulator uses both `mtp-interface` and `mpi-interface` to coordinate local LPs and global MPI communications.
We also modified the module to make it locally thread-safe.
### 2. Modifications to ns-3 Architecture
In addition to the `mtp` and `mpi` modules, we also modified the following part of the ns-3 architecture to make it thread-safe, also with some bug fixing for ns-3.
You can find the modifications to each unison-enabled ns-3 version via `git diff unison-* ns-*`.
Modifications to the build system to provide `--enable-mtp` option to enable/disable Unison:
```
ns3 | 2 +
CMakeLists.txt | 1 +
build-support/custom-modules/ns3-configtable.cmake | 3 +
build-support/macros-and-definitions.cmake | 10 +
```
The simulation will take a while to start, while the bindings are loaded.
The script above will print the logging messages for the called commands.
Modifications to the `core` module to make reference counting thread-safe:
Use `help(ns)` to check the prototypes for all functions defined in the
ns3 namespace. To get more useful results, query specific classes of
interest and their functions e.g., `help(ns.Simulator)`.
Smart pointers `Ptr<>` can be differentiated from objects by checking if
`__deref__` is listed in `dir(variable)`. To dereference the pointer,
use `variable.__deref__()`.
Most ns-3 simulations are written in C++ and the documentation is
oriented towards C++ users. The ns-3 tutorial programs (`first.cc`,
`second.cc`, etc.) have Python equivalents, if you are looking for
some initial guidance on how to use the Python API. The Python
API may not be as full-featured as the C++ API, and an API guide
for what C++ APIs are supported or not from Python do not currently exist.
The project is looking for additional Python maintainers to improve
the support for future Python users.
## ns-3 Documentation
Once you have verified that your build of ns-3 works by running
the `simple-global-routing` example as outlined in the [running ns-3](#running-ns-3)
section, it is quite likely that you will want to get started on reading
some ns-3 documentation.
All of that documentation should always be available from
the ns-3 website: <https://www.nsnam.org/documentation/>.
This documentation includes:
* a tutorial
* a reference manual
* models in the ns-3 model library
* a wiki for user-contributed tips: <https://www.nsnam.org/wiki/>
* API documentation generated using doxygen: this is
a reference manual, most likely not very well suited
as introductory text:
<https://www.nsnam.org/doxygen/index.html>
## Working with the Development Version of ns-3
If you want to download and use the development version of ns-3, you
need to use the tool `git`. A quick and dirty cheat sheet is included
in the manual, but reading through the Git
tutorials found in the Internet is usually a good idea if you are not
familiar with it.
If you have successfully installed Git, you can get
a copy of the development version with the following command:
```shell
git clone https://gitlab.com/nsnam/ns-3-dev.git
```
src/core/CMakeLists.txt | 1 +
src/core/model/atomic-counter.h | 50 +
src/core/model/hash.h | 16 +
src/core/model/object.cc | 2 +
src/core/model/simple-ref-count.h | 11 +-
```
However, we recommend to follow the GitLab guidelines for starters,
that includes creating a GitLab account, forking the ns-3-dev project
under the new account's name, and then cloning the forked repository.
You can find more information in the [manual](https://www.nsnam.org/docs/manual/html/working-with-git.html).
Modifications to the `network` module to make packets thread-safe:
## Contributing to ns-3
```
src/network/model/buffer.cc | 15 +-
src/network/model/buffer.h | 7 +
src/network/model/byte-tag-list.cc | 14 +-
src/network/model/node.cc | 7 +
src/network/model/node.h | 7 +
src/network/model/packet-metadata.cc | 26 +-
src/network/model/packet-metadata.h | 14 +-
src/network/model/packet-tag-list.h | 11 +-
src/network/model/socket.cc | 6 +
```
The process of contributing to the ns-3 project varies with
the people involved, the amount of time they can invest
and the type of model they want to work on, but the current
process that the project tries to follow is described in the
[contributing code](https://www.nsnam.org/developers/contributing-code/)
website and in the [CONTRIBUTING.md](CONTRIBUTING.md) file.
Modifications to the `internet` module to make it thread-safe and add per-flow ECMP routing:
## Reporting Issues
```
src/internet/model/global-route-manager-impl.cc | 2 +
src/internet/model/ipv4-global-routing.cc | 32 +-
src/internet/model/ipv4-global-routing.h | 8 +-
src/internet/model/ipv4-packet-info-tag.cc | 2 +
src/internet/model/ipv6-packet-info-tag.cc | 2 +
src/internet/model/tcp-option.cc | 2 +-
```
If you would like to report an issue, you can open a new issue in the
[GitLab issue tracker](https://gitlab.com/nsnam/ns-3-dev/-/issues).
Before creating a new issue, please check if the problem that you are facing
was already reported and contribute to the discussion, if necessary.
Modifications to the `flow-monitor` module to make it thread-safe:
## Asking Questions
```
src/flow-monitor/model/flow-monitor.cc | 48 +
src/flow-monitor/model/flow-monitor.h | 4 +
src/flow-monitor/model/ipv4-flow-classifier.cc | 12 +
src/flow-monitor/model/ipv4-flow-classifier.h | 5 +
src/flow-monitor/model/ipv4-flow-probe.cc | 2 +
src/flow-monitor/model/ipv6-flow-classifier.cc | 12 +
src/flow-monitor/model/ipv6-flow-classifier.h | 5 +
src/flow-monitor/model/ipv6-flow-probe.cc | 2 +
```
ns-3 has an official [ns-3-users message board](https://groups.google.com/g/ns-3-users)
where the community asks questions and share helpful advice.
Additionally, ns-3 has the [ns-3 Zulip chat](https://ns-3.zulipchat.com/), used to discuss
development issues and questions among maintainers and the community.
Modifications to the `nix-vector-routing` module to make it thread-safe:
Please use the above resources to ask questions about ns-3, rather than creating issues.
```
src/nix-vector-routing/model/nix-vector-routing.cc | 92 ++
src/nix-vector-routing/model/nix-vector-routing.h | 8 +
```
## ns-3 App Store
Modifications to the `mpi` module to make it thread-safe with the hybrid simulator:
The official [ns-3 App Store](https://apps.nsnam.org/) is a centralized directory
listing third-party modules for ns-3 available on the Internet.
```
src/mpi/model/granted-time-window-mpi-interface.cc | 25 +
src/mpi/model/granted-time-window-mpi-interface.h | 7 +
src/mpi/model/mpi-interface.cc | 3 +-
```
More information on how to submit an ns-3 module to the ns-3 App Store is available
in the [ns-3 App Store documentation](https://www.nsnam.org/docs/contributing/html/external.html).
### 3. Logging
The reason behind Unison's fast speed is that it divides the network into multiple logical processes (LPs) with fine granularity and schedules them dynamically.
To get to know more details of such workflow, you can enable the following log component:
```c++
LogComponentEnable("LogicalProcess", LOG_LEVEL_INFO);
LogComponentEnable("MultithreadedSimulatorImpl", LOG_LEVEL_INFO);
```
### 4. Advanced Options
These options can be modified at the beginning of the `main` function using the native config syntax of ns-3.
You can also change the default maximum number of threads by setting
```c++
Config::SetDefault("ns3::MultithreadedSimulatorImpl::MaxThreads", UintegerValue(8));
Config::SetDefault("ns3::HybridSimulatorImpl::MaxThreads", UintegerValue(8));
```
The automatic partition will cut off stateless links whose delay is above the threshold.
The threshold is automatically calculated based on the delay of every link.
If you are not satisfied with the partition results, you can set a custom threshold by setting
```c++
Config::SetDefault("ns3::MultithreadedSimulatorImpl::MinLookahead", TimeValue(NanoSeconds(500)));
Config::SetDefault("ns3::HybridSimulatorImpl::MinLookahead", TimeValue(NanoSeconds(500)));
```
The scheduling method determines the priority (estimated completion time of the next round) of each logical process.
There are five available options:
- `ByExecutionTime`: LPs with a higher execution time of the last round will have higher priority.
- `ByPendingEventCount`: LPs with more pending events of this round will have higher priority.
- `ByEventCount`: LPs with a higher total number of processed events will have higher priority.
- `BySimulationTime`: LPs with larger current clock time will have higher priority.
- `None`: Do not schedule. The partition's priority is based on their ID.
Many experiments show that the first one usually leads to better performance.
However, you can still choose one according to your taste by setting
```c++
GlobalValue::Bind("PartitionSchedulingMethod", StringValue("ByExecutionTime"));
```
By default, the scheduling period is 2 when the number of partitions is less than 16, 3 when it is less than 256, 4 when it is less than 4096, and so on, since more partitions lead to higher scheduling costs.
You can also set how frequently scheduling occurs by setting
```c++
GlobalValue::Bind("PartitionSchedulingPeriod", UintegerValue(4));
```
## Links
If you find the code useful, please consider citing [our paper](https://dl.acm.org/doi/10.1145/3627703.3629574).
```bibtex
@inproceedings{10.1145/3627703.3629574,
author = {Bai, Songyuan and Zheng, Hao and Tian, Chen and Wang, Xiaoliang and Liu, Chang and Jin, Xin and Xiao, Fu and Xiang, Qiao and Dou, Wanchun and Chen, Guihai},
title = {Unison: A Parallel-Efficient and User-Transparent Network Simulation Kernel},
year = {2024},
isbn = {9798400704376},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
url = {https://doi.org/10.1145/3627703.3629574},
doi = {10.1145/3627703.3629574},
abstract = {Discrete-event simulation (DES) is a prevalent tool for evaluating network designs. Although DES offers full fidelity and generality, its slow performance limits its application. To speed up DES, many network simulators employ parallel discrete-event simulation (PDES). However, adapting existing network simulation models to PDES requires complex reconfigurations and often yields limited performance improvement. In this paper, we address this gap by proposing a parallel-efficient and user-transparent network simulation kernel, Unison, that adopts fine-grained partition and load-adaptive scheduling optimized for network scenarios. We prototype Unison based on ns-3. Existing network simulation models of ns-3 can be seamlessly transitioned to Unison. Testbed experiments on commodity servers demonstrate that Unison can achieve a 40\texttimes{} speedup over DES using 24 CPU cores, and a 10\texttimes{} speedup compared with existing PDES algorithms under the same CPU cores.},
booktitle = {Proceedings of the Nineteenth European Conference on Computer Systems},
pages = {115--131},
numpages = {17},
keywords = {Data center networks, Network simulation, Parallel discrete-event simulation},
location = {<conf-loc>, <city>Athens</city>, <country>Greece</country>, </conf-loc>},
series = {EuroSys '24}
}
```
Below are some links that may also be helpful to you:
- [ns-3 Tutorial](https://www.nsnam.org/docs/tutorial/html/index.html)
- [ns-3 Model Library](https://www.nsnam.org/docs/models/html/index.html)
- [ns-3 Manual](https://www.nsnam.org/docs/manual/html/index.html)

View File

@@ -12,7 +12,13 @@ a [GitLab.com issue tracker](https://gitlab.com/nsnam/ns-3-dev/-/issues) number,
and references prefixed by '!' refer to a
[GitLab.com merge request](https://gitlab.com/nsnam/ns-3-dev/-/merge_requests) number.
## Release 3-dev
## Release 3.46.1
ns-3.46.1 is a small update to ns-3.46 to fix build issues discovered after release.
There should be no model behavior or API changes compared with ns-3.46.
This release is available from:
<https://www.nsnam.org/release/ns-3.46.1.tar.bz2>
### Supported platforms
@@ -36,8 +42,14 @@ been tested on Linux. As of this release, the latest known version to work with
### New user-visible features
- (doc) New module documentation for mobility and propagation modules
### Bugs fixed
- (build) The ns3 script was not compatible with Python 3.14
- (build) A missing header include in test.cc was breaking the g++-12 build
- (build) Fixed OpenMPI-based build on Alpine Linux
## Release 3.46
This release is available from:

View File

@@ -1 +1 @@
3-dev
3.46.1

View File

@@ -166,6 +166,9 @@ macro(write_configtable)
string(APPEND out "MPI Support : ")
check_on_or_off("NS3_MPI" "MPI_FOUND")
string(APPEND out "Multithreaded Simulation : ")
check_on_or_off("${NS3_MTP}" "ON")
string(APPEND out "ns-3 Click Integration : ")
check_on_or_off("ON" "NS3_CLICK")

View File

@@ -841,6 +841,12 @@ macro(process_options)
endif()
endif()
set(ENABLE_MTP FALSE)
if(${NS3_MTP})
add_definitions(-DNS3_MTP)
set(ENABLE_MTP TRUE)
endif()
# Use upstream boost package config with CMake 3.30 and above
if(POLICY CMP0167)
cmake_policy(SET CMP0167 NEW)
@@ -1187,6 +1193,10 @@ macro(process_options)
list(REMOVE_ITEM libs_to_build mpi)
endif()
if(NOT ${ENABLE_MTP})
list(REMOVE_ITEM libs_to_build mtp)
endif()
if(NOT ${ENABLE_VISUALIZER})
list(REMOVE_ITEM libs_to_build visualizer)
endif()

View File

@@ -49,9 +49,9 @@ copyright = "2015, ns-3 project"
# built documents.
#
# The short X.Y version.
version = "ns-3-dev"
version = "ns-3.46.1"
# The full version, including alpha/beta/rc tags.
release = "ns-3-dev"
release = "ns-3.46.1"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.

View File

@@ -49,9 +49,9 @@ copyright = "2018, ns-3 project"
# built documents.
#
# The short X.Y version.
version = "ns-3-dev"
version = "ns-3.46.1"
# The full version, including alpha/beta/rc tags.
release = "ns-3-dev"
release = "ns-3.46.1"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.

View File

@@ -74,9 +74,9 @@ copyright = "2006-2019, ns-3 project"
# built documents.
#
# The short X.Y version.
version = "ns-3-dev"
version = "ns-3.46.1"
# The full version, including alpha/beta/rc tags.
release = "ns-3-dev"
release = "ns-3.46.1"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.

View File

@@ -74,9 +74,9 @@ copyright = "2006-2019, ns-3 project"
# built documents.
#
# The short X.Y version.
version = "ns-3-dev"
version = "ns-3.46.1"
# The full version, including alpha/beta/rc tags.
release = "ns-3-dev"
release = "ns-3.46.1"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.

View File

@@ -71,9 +71,9 @@ copyright = "2006-2019, ns-3 project"
# built documents.
#
# The short X.Y version.
version = "ns-3-dev"
version = "ns-3.46.1"
# The full version, including alpha/beta/rc tags.
release = "ns-3-dev"
release = "ns-3.46.1"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.

120
examples/mtp/CMakeLists.txt Normal file
View File

@@ -0,0 +1,120 @@
# MTP (multithreaded) variants of stock ns-3 examples.
# Each example links the ${libmtp} module in addition to the libraries used
# by its single-threaded counterpart. The whole set is only registered when
# MTP support was enabled at configure time (ENABLE_MTP).
if(${ENABLE_MTP})
build_example(
NAME dctcp-example-mtp
SOURCE_FILES dctcp-example-mtp.cc
LIBRARIES_TO_LINK
${libcore}
${libnetwork}
${libinternet}
${libpoint-to-point}
${libapplications}
${libtraffic-control}
${libmtp}
)
build_example(
NAME dynamic-global-routing-mtp
SOURCE_FILES dynamic-global-routing-mtp.cc
LIBRARIES_TO_LINK
${libpoint-to-point}
${libcsma}
${libinternet}
${libapplications}
${libmtp}
)
build_example(
NAME queue-discs-benchmark-mtp
SOURCE_FILES queue-discs-benchmark-mtp.cc
LIBRARIES_TO_LINK
${libinternet}
${libpoint-to-point}
${libapplications}
${libinternet-apps}
${libtraffic-control}
${libflow-monitor}
${libmtp}
)
build_example(
NAME ripng-simple-network-mtp
SOURCE_FILES ripng-simple-network-mtp.cc
LIBRARIES_TO_LINK
${libcsma}
${libinternet}
${libpoint-to-point}
${libinternet-apps}
${libmtp}
)
build_example(
NAME simple-multicast-flooding-mtp
SOURCE_FILES simple-multicast-flooding-mtp.cc
LIBRARIES_TO_LINK
${libcore}
${libnetwork}
${libapplications}
${libinternet}
${libmtp}
)
build_example(
NAME socket-bound-tcp-static-routing-mtp
SOURCE_FILES socket-bound-tcp-static-routing-mtp.cc
LIBRARIES_TO_LINK
${libnetwork}
${libcsma}
${libpoint-to-point}
${libinternet}
${libapplications}
${libmtp}
)
build_example(
NAME tcp-bbr-example-mtp
SOURCE_FILES tcp-bbr-example-mtp.cc
LIBRARIES_TO_LINK
${libpoint-to-point}
${libinternet}
${libapplications}
${libtraffic-control}
${libnetwork}
${libinternet-apps}
${libflow-monitor}
${libmtp}
)
build_example(
NAME tcp-pacing-mtp
SOURCE_FILES tcp-pacing-mtp.cc
LIBRARIES_TO_LINK
${libpoint-to-point}
${libinternet}
${libapplications}
${libflow-monitor}
${libmtp}
)
build_example(
NAME tcp-star-server-mtp
SOURCE_FILES tcp-star-server-mtp.cc
LIBRARIES_TO_LINK
${libpoint-to-point}
${libapplications}
${libinternet}
${libmtp}
)
build_example(
NAME tcp-validation-mtp
SOURCE_FILES tcp-validation-mtp.cc
LIBRARIES_TO_LINK
${libpoint-to-point}
${libinternet}
${libapplications}
${libtraffic-control}
${libnetwork}
${libinternet-apps}
${libflow-monitor}
${libmtp}
)
endif()

View File

@@ -0,0 +1,593 @@
/*
* Copyright (c) 2017-20 NITK Surathkal
* Copyright (c) 2020 Tom Henderson (better alignment with experiment)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Authors: Shravya K.S. <shravya.ks0@gmail.com>
* Apoorva Bhargava <apoorvabhargava13@gmail.com>
* Shikha Bakshi <shikhabakshi912@gmail.com>
* Mohit P. Tahiliani <tahiliani@nitk.edu.in>
* Tom Henderson <tomh@tomh.org>
*/
// The network topology used in this example is based on Fig. 17 described in
// Mohammad Alizadeh, Albert Greenberg, David A. Maltz, Jitendra Padhye,
// Parveen Patel, Balaji Prabhakar, Sudipta Sengupta, and Murari Sridharan.
// "Data Center TCP (DCTCP)." In ACM SIGCOMM Computer Communication Review,
// Vol. 40, No. 4, pp. 63-74. ACM, 2010.
// The topology is roughly as follows
//
// S1 S3
// | | (1 Gbps)
// T1 ------- T2 -- R1
// | | (1 Gbps)
// S2 R2
//
// The link between switch T1 and T2 is 10 Gbps. All other
// links are 1 Gbps. In the SIGCOMM paper, there is a Scorpion switch
// between T1 and T2, but it doesn't contribute another bottleneck.
//
// S1 and S3 each have 10 senders sending to receiver R1 (20 total)
// S2 (20 senders) sends traffic to R2 (20 receivers)
//
// This sets up two bottlenecks: 1) T1 -> T2 interface (30 senders
// using the 10 Gbps link) and 2) T2 -> R1 (20 senders using 1 Gbps link)
//
// RED queues configured for ECN marking are used at the bottlenecks.
//
// Figure 17 published results are that each sender in S1 gets 46 Mbps
// and each in S3 gets 54 Mbps, while each S2 sender gets 475 Mbps, and
// that these are within 10% of their fair-share throughputs (Jain index
// of 0.99).
//
// This program runs the program by default for five seconds. The first
// second is devoted to flow startup (all 40 TCP flows are stagger started
// during this period). There is a three second convergence time where
// no measurement data is taken, and then there is a one second measurement
// interval to gather raw throughput for each flow. These time intervals
// can be changed at the command line.
//
// The program outputs six files. The first three:
// * dctcp-example-s1-r1-throughput.dat
// * dctcp-example-s2-r2-throughput.dat
// * dctcp-example-s3-r1-throughput.dat
// provide per-flow throughputs (in Mb/s) for each of the forty flows, summed
// over the measurement window. The fourth file,
// * dctcp-example-fairness.dat
// provides average throughputs for the three flow paths, and computes
// Jain's fairness index for each flow group (i.e. across each group of
// 10, 20, and 10 flows). It also sums the throughputs across each bottleneck.
// The fifth and sixth:
// * dctcp-example-t1-length.dat
// * dctcp-example-t2-length.dat
// report on the bottleneck queue length (in packets and microseconds
// of delay) at 10 ms intervals during the measurement window.
//
// By default, the throughput averages are 23 Mbps for S1 senders, 471 Mbps
// for S2 senders, and 74 Mbps for S3 senders, and the Jain index is greater
// than 0.99 for each group of flows. The average queue delay is about 1ms
// for the T2->R2 bottleneck, and about 200us for the T1->T2 bottleneck.
//
// The RED parameters (min_th and max_th) are set to the same values as
// reported in the paper, but we observed that throughput distributions
// and queue delays are very sensitive to these parameters, as was also
// observed in the paper; it is likely that the paper's throughput results
// could be achieved by further tuning of the RED parameters. However,
// the default results show that DCTCP is able to achieve high link
// utilization and low queueing delay and fairness across competing flows
// sharing the same path.
#include "ns3/applications-module.h"
#include "ns3/core-module.h"
#include "ns3/internet-module.h"
#include "ns3/mtp-module.h"
#include "ns3/network-module.h"
#include "ns3/point-to-point-module.h"
#include "ns3/traffic-control-module.h"
#include <iomanip>
#include <iostream>
using namespace ns3;
// NOTE(review): filePlotQueue1/filePlotQueue2 are never referenced in the
// visible code of this example — presumably leftovers; confirm before removal.
std::stringstream filePlotQueue1;
std::stringstream filePlotQueue2;
// Output streams for the per-flow throughput reports of the three flow groups.
std::ofstream rxS1R1Throughput;
std::ofstream rxS2R2Throughput;
std::ofstream rxS3R1Throughput;
// Output stream for Jain's fairness index and aggregate throughput summary.
std::ofstream fairnessIndex;
// Output streams for the T1->T2 and T2->R1 bottleneck queue-length samples.
std::ofstream t1QueueLength;
std::ofstream t2QueueLength;
// Per-flow received-byte counters, indexed by flow number; incremented by the
// Trace*Sink callbacks and reset by InitializeCounters() at the start of the
// measurement window.
std::vector<uint64_t> rxS1R1Bytes;
std::vector<uint64_t> rxS2R2Bytes;
std::vector<uint64_t> rxS3R1Bytes;
void
PrintProgress(Time interval)
{
    // Emit a liveness message carrying the current simulated time, then
    // re-arm this function so it keeps firing once per `interval`.
    const double now = Simulator::Now().GetSeconds();
    std::cout << "Progress to " << std::fixed << std::setprecision(1) << now
              << " seconds simulation time" << std::endl;
    Simulator::Schedule(interval, &PrintProgress, interval);
}
void
TraceS1R1Sink(std::size_t index, Ptr<const Packet> p, const Address& a)
{
    // Rx trace hook: accumulate bytes received by the index-th S1->R1 flow.
    const uint32_t received = p->GetSize();
    rxS1R1Bytes[index] += received;
}
void
TraceS2R2Sink(std::size_t index, Ptr<const Packet> p, const Address& a)
{
    // Rx trace hook: accumulate bytes received by the index-th S2->R2 flow.
    const uint32_t received = p->GetSize();
    rxS2R2Bytes[index] += received;
}
void
TraceS3R1Sink(std::size_t index, Ptr<const Packet> p, const Address& a)
{
    // Rx trace hook: accumulate bytes received by the index-th S3->R1 flow.
    const uint32_t received = p->GetSize();
    rxS3R1Bytes[index] += received;
}
void
InitializeCounters()
{
    // Reset the per-flow byte counters at the start of the measurement window.
    // assign() both sets the element count and zeroes every entry; the original
    // indexed stores were undefined behavior whenever the vectors had only been
    // reserve()d (reserve() does not change size()).
    rxS1R1Bytes.assign(10, 0);
    rxS2R2Bytes.assign(20, 0);
    rxS3R1Bytes.assign(10, 0);
}
void
PrintThroughput(Time measurementWindow)
{
    // Write per-flow throughput (Mb/s averaged over the measurement window)
    // for each of the three flow groups. The first column is the current
    // simulation time; the S1-R1 loop previously wrote
    // measurementWindow.GetSeconds() there, inconsistent with the other two.
    for (std::size_t i = 0; i < 10; i++)
    {
        rxS1R1Throughput << Simulator::Now().GetSeconds() << "s " << i << " "
                         << (rxS1R1Bytes[i] * 8) / (measurementWindow.GetSeconds()) / 1e6
                         << std::endl;
    }
    for (std::size_t i = 0; i < 20; i++)
    {
        rxS2R2Throughput << Simulator::Now().GetSeconds() << "s " << i << " "
                         << (rxS2R2Bytes[i] * 8) / (measurementWindow.GetSeconds()) / 1e6
                         << std::endl;
    }
    for (std::size_t i = 0; i < 10; i++)
    {
        rxS3R1Throughput << Simulator::Now().GetSeconds() << "s " << i << " "
                         << (rxS3R1Bytes[i] * 8) / (measurementWindow.GetSeconds()) / 1e6
                         << std::endl;
    }
}
// Jain's fairness index: https://en.wikipedia.org/wiki/Fairness_measure
void
PrintFairness(Time measurementWindow)
{
double average = 0;
uint64_t sumSquares = 0;
uint64_t sum = 0;
double fairness = 0;
for (std::size_t i = 0; i < 10; i++)
{
sum += rxS1R1Bytes[i];
sumSquares += (rxS1R1Bytes[i] * rxS1R1Bytes[i]);
}
average = ((sum / 10) * 8 / measurementWindow.GetSeconds()) / 1e6;
fairness = static_cast<double>(sum * sum) / (10 * sumSquares);
fairnessIndex << "Average throughput for S1-R1 flows: " << std::fixed << std::setprecision(2)
<< average << " Mbps; fairness: " << std::fixed << std::setprecision(3)
<< fairness << std::endl;
average = 0;
sumSquares = 0;
sum = 0;
fairness = 0;
for (std::size_t i = 0; i < 20; i++)
{
sum += rxS2R2Bytes[i];
sumSquares += (rxS2R2Bytes[i] * rxS2R2Bytes[i]);
}
average = ((sum / 20) * 8 / measurementWindow.GetSeconds()) / 1e6;
fairness = static_cast<double>(sum * sum) / (20 * sumSquares);
fairnessIndex << "Average throughput for S2-R2 flows: " << std::fixed << std::setprecision(2)
<< average << " Mbps; fairness: " << std::fixed << std::setprecision(3)
<< fairness << std::endl;
average = 0;
sumSquares = 0;
sum = 0;
fairness = 0;
for (std::size_t i = 0; i < 10; i++)
{
sum += rxS3R1Bytes[i];
sumSquares += (rxS3R1Bytes[i] * rxS3R1Bytes[i]);
}
average = ((sum / 10) * 8 / measurementWindow.GetSeconds()) / 1e6;
fairness = static_cast<double>(sum * sum) / (10 * sumSquares);
fairnessIndex << "Average throughput for S3-R1 flows: " << std::fixed << std::setprecision(2)
<< average << " Mbps; fairness: " << std::fixed << std::setprecision(3)
<< fairness << std::endl;
sum = 0;
for (std::size_t i = 0; i < 10; i++)
{
sum += rxS1R1Bytes[i];
}
for (std::size_t i = 0; i < 20; i++)
{
sum += rxS2R2Bytes[i];
}
fairnessIndex << "Aggregate user-level throughput for flows through T1: "
<< static_cast<double>(sum * 8) / 1e9 << " Gbps" << std::endl;
sum = 0;
for (std::size_t i = 0; i < 10; i++)
{
sum += rxS3R1Bytes[i];
}
for (std::size_t i = 0; i < 10; i++)
{
sum += rxS1R1Bytes[i];
}
fairnessIndex << "Aggregate user-level throughput for flows to R1: "
<< static_cast<double>(sum * 8) / 1e9 << " Gbps" << std::endl;
}
void
CheckT1QueueSize(Ptr<QueueDisc> queue)
{
    // Sample the T1->T2 bottleneck queue, log it, then re-arm the sampler.
    const uint32_t packetsQueued = queue->GetNPackets();
    // Convert backlog to time assuming 1500-byte packets on the 10 Gb/s link.
    const Time backlog = Seconds(static_cast<double>(packetsQueued * 1500 * 8) / 1e10);
    // Columns: time(s), queue length in packets, backlog in microseconds.
    t1QueueLength << std::fixed << std::setprecision(2) << Simulator::Now().GetSeconds() << " "
                  << packetsQueued << " " << backlog.GetMicroSeconds() << std::endl;
    // Sample again in 10 ms (1/100 of a second).
    Simulator::Schedule(MilliSeconds(10), &CheckT1QueueSize, queue);
}
void
CheckT2QueueSize(Ptr<QueueDisc> queue)
{
    // Sample the T2->R1 bottleneck queue, log it, then re-arm the sampler.
    const uint32_t packetsQueued = queue->GetNPackets();
    // Convert backlog to time assuming 1500-byte packets on the 1 Gb/s link.
    const Time backlog = Seconds(static_cast<double>(packetsQueued * 1500 * 8) / 1e9);
    // Columns: time(s), queue length in packets, backlog in microseconds.
    t2QueueLength << std::fixed << std::setprecision(2) << Simulator::Now().GetSeconds() << " "
                  << packetsQueued << " " << backlog.GetMicroSeconds() << std::endl;
    // Sample again in 10 ms (1/100 of a second).
    Simulator::Schedule(MilliSeconds(10), &CheckT2QueueSize, queue);
}
int
main(int argc, char* argv[])
{
    // Enable Unison's multithreaded simulation kernel; the argument is
    // presumably the number of worker threads — TODO confirm against the
    // MtpInterface documentation.
    MtpInterface::Enable(4);
    std::string outputFilePath = ".";
    std::string tcpTypeId = "TcpDctcp";
    Time flowStartupWindow = Seconds(1);
    Time convergenceTime = Seconds(3);
    Time measurementWindow = Seconds(1);
    bool enableSwitchEcn = true;
    Time progressInterval = MilliSeconds(100);
    CommandLine cmd(__FILE__);
    cmd.AddValue("tcpTypeId", "ns-3 TCP TypeId", tcpTypeId);
    cmd.AddValue("flowStartupWindow",
                 "startup time window (TCP staggered starts)",
                 flowStartupWindow);
    cmd.AddValue("convergenceTime", "convergence time", convergenceTime);
    cmd.AddValue("measurementWindow", "measurement window", measurementWindow);
    cmd.AddValue("enableSwitchEcn", "enable ECN at switches", enableSwitchEcn);
    cmd.Parse(argc, argv);
    Config::SetDefault("ns3::TcpL4Protocol::SocketType", StringValue("ns3::" + tcpTypeId));
    Time startTime = Seconds(0);
    Time stopTime = flowStartupWindow + convergenceTime + measurementWindow;
    // Size (not merely reserve) the per-flow counters: the Rx trace sinks
    // index into these vectors from the very first received packet, so the
    // elements must exist. reserve() alone leaves size() == 0, making
    // operator[] in the trace callbacks undefined behavior.
    rxS1R1Bytes.resize(10, 0);
    rxS2R2Bytes.resize(20, 0);
    rxS3R1Bytes.resize(10, 0);
    NodeContainer S1;
    NodeContainer S2;
    NodeContainer S3;
    NodeContainer R2;
    Ptr<Node> T1 = CreateObject<Node>();
    Ptr<Node> T2 = CreateObject<Node>();
    Ptr<Node> R1 = CreateObject<Node>();
    S1.Create(10);
    S2.Create(20);
    S3.Create(10);
    R2.Create(20);
    Config::SetDefault("ns3::TcpSocket::SegmentSize", UintegerValue(1448));
    Config::SetDefault("ns3::TcpSocket::DelAckCount", UintegerValue(2));
    GlobalValue::Bind("ChecksumEnabled", BooleanValue(false));
    // Set default parameters for RED queue disc
    Config::SetDefault("ns3::RedQueueDisc::UseEcn", BooleanValue(enableSwitchEcn));
    // ARED may be used but the queueing delays will increase; it is disabled
    // here because the SIGCOMM paper did not mention it
    // Config::SetDefault ("ns3::RedQueueDisc::ARED", BooleanValue (true));
    // Config::SetDefault ("ns3::RedQueueDisc::Gentle", BooleanValue (true));
    Config::SetDefault("ns3::RedQueueDisc::UseHardDrop", BooleanValue(false));
    Config::SetDefault("ns3::RedQueueDisc::MeanPktSize", UintegerValue(1500));
    // Triumph and Scorpion switches used in DCTCP Paper have 4 MB of buffer
    // If every packet is 1500 bytes, 2666 packets can be stored in 4 MB
    Config::SetDefault("ns3::RedQueueDisc::MaxSize", QueueSizeValue(QueueSize("2666p")));
    // DCTCP tracks instantaneous queue length only; so set QW = 1
    Config::SetDefault("ns3::RedQueueDisc::QW", DoubleValue(1));
    Config::SetDefault("ns3::RedQueueDisc::MinTh", DoubleValue(20));
    Config::SetDefault("ns3::RedQueueDisc::MaxTh", DoubleValue(60));
    PointToPointHelper pointToPointSR;
    pointToPointSR.SetDeviceAttribute("DataRate", StringValue("1Gbps"));
    pointToPointSR.SetChannelAttribute("Delay", StringValue("10us"));
    PointToPointHelper pointToPointT;
    pointToPointT.SetDeviceAttribute("DataRate", StringValue("10Gbps"));
    pointToPointT.SetChannelAttribute("Delay", StringValue("10us"));
    // Create a total of 62 links.
    std::vector<NetDeviceContainer> S1T1;
    S1T1.reserve(10);
    std::vector<NetDeviceContainer> S2T1;
    S2T1.reserve(20);
    std::vector<NetDeviceContainer> S3T2;
    S3T2.reserve(10);
    std::vector<NetDeviceContainer> R2T2;
    R2T2.reserve(20);
    NetDeviceContainer T1T2 = pointToPointT.Install(T1, T2);
    NetDeviceContainer R1T2 = pointToPointSR.Install(R1, T2);
    for (std::size_t i = 0; i < 10; i++)
    {
        Ptr<Node> n = S1.Get(i);
        S1T1.push_back(pointToPointSR.Install(n, T1));
    }
    for (std::size_t i = 0; i < 20; i++)
    {
        Ptr<Node> n = S2.Get(i);
        S2T1.push_back(pointToPointSR.Install(n, T1));
    }
    for (std::size_t i = 0; i < 10; i++)
    {
        Ptr<Node> n = S3.Get(i);
        S3T2.push_back(pointToPointSR.Install(n, T2));
    }
    for (std::size_t i = 0; i < 20; i++)
    {
        Ptr<Node> n = R2.Get(i);
        R2T2.push_back(pointToPointSR.Install(n, T2));
    }
    InternetStackHelper stack;
    stack.InstallAll();
    TrafficControlHelper tchRed10;
    // MinTh = 50, MaxTh = 150 recommended in ACM SIGCOMM 2010 DCTCP Paper
    // This yields a target (MinTh) queue depth of 60us at 10 Gb/s
    tchRed10.SetRootQueueDisc("ns3::RedQueueDisc",
                              "LinkBandwidth",
                              StringValue("10Gbps"),
                              "LinkDelay",
                              StringValue("10us"),
                              "MinTh",
                              DoubleValue(50),
                              "MaxTh",
                              DoubleValue(150));
    QueueDiscContainer queueDiscs1 = tchRed10.Install(T1T2);
    TrafficControlHelper tchRed1;
    // MinTh = 20, MaxTh = 60 recommended in ACM SIGCOMM 2010 DCTCP Paper
    // This yields a target queue depth of 250us at 1 Gb/s
    tchRed1.SetRootQueueDisc("ns3::RedQueueDisc",
                             "LinkBandwidth",
                             StringValue("1Gbps"),
                             "LinkDelay",
                             StringValue("10us"),
                             "MinTh",
                             DoubleValue(20),
                             "MaxTh",
                             DoubleValue(60));
    QueueDiscContainer queueDiscs2 = tchRed1.Install(R1T2.Get(1));
    for (std::size_t i = 0; i < 10; i++)
    {
        tchRed1.Install(S1T1[i].Get(1));
    }
    for (std::size_t i = 0; i < 20; i++)
    {
        tchRed1.Install(S2T1[i].Get(1));
    }
    for (std::size_t i = 0; i < 10; i++)
    {
        tchRed1.Install(S3T2[i].Get(1));
    }
    for (std::size_t i = 0; i < 20; i++)
    {
        tchRed1.Install(R2T2[i].Get(1));
    }
    Ipv4AddressHelper address;
    std::vector<Ipv4InterfaceContainer> ipS1T1;
    ipS1T1.reserve(10);
    std::vector<Ipv4InterfaceContainer> ipS2T1;
    ipS2T1.reserve(20);
    std::vector<Ipv4InterfaceContainer> ipS3T2;
    ipS3T2.reserve(10);
    std::vector<Ipv4InterfaceContainer> ipR2T2;
    ipR2T2.reserve(20);
    address.SetBase("172.16.1.0", "255.255.255.0");
    Ipv4InterfaceContainer ipT1T2 = address.Assign(T1T2);
    address.SetBase("192.168.0.0", "255.255.255.0");
    Ipv4InterfaceContainer ipR1T2 = address.Assign(R1T2);
    address.SetBase("10.1.1.0", "255.255.255.0");
    for (std::size_t i = 0; i < 10; i++)
    {
        ipS1T1.push_back(address.Assign(S1T1[i]));
        address.NewNetwork();
    }
    address.SetBase("10.2.1.0", "255.255.255.0");
    for (std::size_t i = 0; i < 20; i++)
    {
        ipS2T1.push_back(address.Assign(S2T1[i]));
        address.NewNetwork();
    }
    address.SetBase("10.3.1.0", "255.255.255.0");
    for (std::size_t i = 0; i < 10; i++)
    {
        ipS3T2.push_back(address.Assign(S3T2[i]));
        address.NewNetwork();
    }
    address.SetBase("10.4.1.0", "255.255.255.0");
    for (std::size_t i = 0; i < 20; i++)
    {
        ipR2T2.push_back(address.Assign(R2T2[i]));
        address.NewNetwork();
    }
    Ipv4GlobalRoutingHelper::PopulateRoutingTables();
    // Each sender in S2 sends to a receiver in R2
    std::vector<Ptr<PacketSink>> r2Sinks;
    r2Sinks.reserve(20);
    for (std::size_t i = 0; i < 20; i++)
    {
        uint16_t port = 50000 + i;
        Address sinkLocalAddress(InetSocketAddress(Ipv4Address::GetAny(), port));
        PacketSinkHelper sinkHelper("ns3::TcpSocketFactory", sinkLocalAddress);
        ApplicationContainer sinkApp = sinkHelper.Install(R2.Get(i));
        Ptr<PacketSink> packetSink = sinkApp.Get(0)->GetObject<PacketSink>();
        r2Sinks.push_back(packetSink);
        sinkApp.Start(startTime);
        sinkApp.Stop(stopTime);
        OnOffHelper clientHelper1("ns3::TcpSocketFactory", Address());
        clientHelper1.SetAttribute("OnTime",
                                   StringValue("ns3::ConstantRandomVariable[Constant=1]"));
        clientHelper1.SetAttribute("OffTime",
                                   StringValue("ns3::ConstantRandomVariable[Constant=0]"));
        clientHelper1.SetAttribute("DataRate", DataRateValue(DataRate("1Gbps")));
        clientHelper1.SetAttribute("PacketSize", UintegerValue(1000));
        ApplicationContainer clientApps1;
        AddressValue remoteAddress(InetSocketAddress(ipR2T2[i].GetAddress(0), port));
        clientHelper1.SetAttribute("Remote", remoteAddress);
        clientApps1.Add(clientHelper1.Install(S2.Get(i)));
        clientApps1.Start(i * flowStartupWindow / 20 + startTime + MilliSeconds(i * 5));
        clientApps1.Stop(stopTime);
    }
    // Each sender in S1 and S3 sends to R1
    std::vector<Ptr<PacketSink>> s1r1Sinks;
    std::vector<Ptr<PacketSink>> s3r1Sinks;
    s1r1Sinks.reserve(10);
    s3r1Sinks.reserve(10);
    for (std::size_t i = 0; i < 20; i++)
    {
        uint16_t port = 50000 + i;
        Address sinkLocalAddress(InetSocketAddress(Ipv4Address::GetAny(), port));
        PacketSinkHelper sinkHelper("ns3::TcpSocketFactory", sinkLocalAddress);
        ApplicationContainer sinkApp = sinkHelper.Install(R1);
        Ptr<PacketSink> packetSink = sinkApp.Get(0)->GetObject<PacketSink>();
        if (i < 10)
        {
            s1r1Sinks.push_back(packetSink);
        }
        else
        {
            s3r1Sinks.push_back(packetSink);
        }
        sinkApp.Start(startTime);
        sinkApp.Stop(stopTime);
        OnOffHelper clientHelper1("ns3::TcpSocketFactory", Address());
        clientHelper1.SetAttribute("OnTime",
                                   StringValue("ns3::ConstantRandomVariable[Constant=1]"));
        clientHelper1.SetAttribute("OffTime",
                                   StringValue("ns3::ConstantRandomVariable[Constant=0]"));
        clientHelper1.SetAttribute("DataRate", DataRateValue(DataRate("1Gbps")));
        clientHelper1.SetAttribute("PacketSize", UintegerValue(1000));
        ApplicationContainer clientApps1;
        AddressValue remoteAddress(InetSocketAddress(ipR1T2.GetAddress(0), port));
        clientHelper1.SetAttribute("Remote", remoteAddress);
        if (i < 10)
        {
            clientApps1.Add(clientHelper1.Install(S1.Get(i)));
            clientApps1.Start(i * flowStartupWindow / 10 + startTime + MilliSeconds(i * 5));
        }
        else
        {
            clientApps1.Add(clientHelper1.Install(S3.Get(i - 10)));
            clientApps1.Start((i - 10) * flowStartupWindow / 10 + startTime + MilliSeconds(i * 5));
        }
        clientApps1.Stop(stopTime);
    }
    // Open the six output files and write the column headers.
    rxS1R1Throughput.open("dctcp-example-s1-r1-throughput.dat", std::ios::out);
    rxS1R1Throughput << "#Time(s) flow thruput(Mb/s)" << std::endl;
    rxS2R2Throughput.open("dctcp-example-s2-r2-throughput.dat", std::ios::out);
    rxS2R2Throughput << "#Time(s) flow thruput(Mb/s)" << std::endl;
    rxS3R1Throughput.open("dctcp-example-s3-r1-throughput.dat", std::ios::out);
    rxS3R1Throughput << "#Time(s) flow thruput(Mb/s)" << std::endl;
    fairnessIndex.open("dctcp-example-fairness.dat", std::ios::out);
    t1QueueLength.open("dctcp-example-t1-length.dat", std::ios::out);
    t1QueueLength << "#Time(s) qlen(pkts) qlen(us)" << std::endl;
    t2QueueLength.open("dctcp-example-t2-length.dat", std::ios::out);
    t2QueueLength << "#Time(s) qlen(pkts) qlen(us)" << std::endl;
    // Connect the per-flow Rx byte counters to the packet sinks.
    for (std::size_t i = 0; i < 10; i++)
    {
        s1r1Sinks[i]->TraceConnectWithoutContext("Rx", MakeBoundCallback(&TraceS1R1Sink, i));
    }
    for (std::size_t i = 0; i < 20; i++)
    {
        r2Sinks[i]->TraceConnectWithoutContext("Rx", MakeBoundCallback(&TraceS2R2Sink, i));
    }
    for (std::size_t i = 0; i < 10; i++)
    {
        s3r1Sinks[i]->TraceConnectWithoutContext("Rx", MakeBoundCallback(&TraceS3R1Sink, i));
    }
    // Zero the counters at the start of the measurement window, report at its
    // end, and sample the two bottleneck queues during it.
    Simulator::Schedule(flowStartupWindow + convergenceTime, &InitializeCounters);
    Simulator::Schedule(flowStartupWindow + convergenceTime + measurementWindow,
                        &PrintThroughput,
                        measurementWindow);
    Simulator::Schedule(flowStartupWindow + convergenceTime + measurementWindow,
                        &PrintFairness,
                        measurementWindow);
    Simulator::Schedule(progressInterval, &PrintProgress, progressInterval);
    Simulator::Schedule(flowStartupWindow + convergenceTime, &CheckT1QueueSize, queueDiscs1.Get(0));
    Simulator::Schedule(flowStartupWindow + convergenceTime, &CheckT2QueueSize, queueDiscs2.Get(0));
    Simulator::Stop(stopTime + TimeStep(1));
    Simulator::Run();
    rxS1R1Throughput.close();
    rxS2R2Throughput.close();
    rxS3R1Throughput.close();
    fairnessIndex.close();
    t1QueueLength.close();
    t2QueueLength.close();
    Simulator::Destroy();
    return 0;
}

View File

@@ -0,0 +1,225 @@
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Contributed by: Luis Cortes (cortes@gatech.edu)
*/
// This script exercises global routing code in a mixed point-to-point
// and csma/cd environment. We bring up and down interfaces and observe
// the effect on global routing. We explicitly enable the attribute
// to respond to interface events, so that routes are recomputed
// automatically.
//
// Network topology
//
// n0
// \ p-p
// \ (shared csma/cd)
// n2 -------------------------n3
// / | |
// / p-p n4 n5 ---------- n6
// n1 p-p
// | |
// ----------------------------------------
// p-p
//
// - at time 1 CBR/UDP flow from n1 to n6's IP address on the n5/n6 link
// - at time 10, start similar flow from n1 to n6's address on the n1/n6 link
//
// Order of events
// At pre-simulation time, configure global routes. Shortest path from
// n1 to n6 is via the direct point-to-point link
// At time 1s, start CBR traffic flow from n1 to n6
// At time 2s, set the n1 point-to-point interface to down. Packets
// will be diverted to the n1-n2-n5-n6 path
// At time 4s, re-enable the n1/n6 interface to up. n1-n6 route restored.
// At time 6s, set the n6-n1 point-to-point Ipv4 interface to down (note, this
// keeps the point-to-point link "up" from n1's perspective). Traffic will
// flow through the path n1-n2-n5-n6
// At time 8s, bring the interface back up. Path n1-n6 is restored
// At time 10s, stop the first flow.
// At time 11s, start a new flow, but to n6's other IP address (the one
// on the n1/n6 p2p link)
// At time 12s, bring the n1 interface down between n1 and n6. Packets
// will be diverted to the alternate path
// At time 14s, re-enable the n1/n6 interface to up. This will change
// routing back to n1-n6 since the interface up notification will cause
// a new local interface route, at higher priority than global routing
// At time 16s, stop the second flow.
// - Tracing of queues and packet receptions to file "dynamic-global-routing.tr"
#include "ns3/applications-module.h"
#include "ns3/core-module.h"
#include "ns3/csma-module.h"
#include "ns3/internet-module.h"
#include "ns3/ipv4-global-routing-helper.h"
#include "ns3/mtp-module.h"
#include "ns3/network-module.h"
#include "ns3/point-to-point-module.h"
#include <cassert>
#include <fstream>
#include <iostream>
#include <string>
using namespace ns3;
NS_LOG_COMPONENT_DEFINE("DynamicGlobalRoutingExample");
int
main(int argc, char* argv[])
{
    // Run this example under the multi-threaded parallel (MTP) simulation engine.
    MtpInterface::Enable();

    // The below value configures the default behavior of global routing.
    // By default, it is disabled. To respond to interface events, set to true
    Config::SetDefault("ns3::Ipv4GlobalRouting::RespondToInterfaceEvents", BooleanValue(true));

    // Allow the user to override any of the defaults and the above
    // Bind ()s at run-time, via command-line arguments
    CommandLine cmd(__FILE__);
    cmd.Parse(argc, argv);

    NS_LOG_INFO("Create nodes.");
    NodeContainer c;
    c.Create(7);
    // Node groupings for each link to be installed below: four point-to-point
    // pairs plus one four-node CSMA segment (n2-n3-n4-n5).
    NodeContainer n0n2 = NodeContainer(c.Get(0), c.Get(2));
    NodeContainer n1n2 = NodeContainer(c.Get(1), c.Get(2));
    NodeContainer n5n6 = NodeContainer(c.Get(5), c.Get(6));
    NodeContainer n1n6 = NodeContainer(c.Get(1), c.Get(6));
    NodeContainer n2345 = NodeContainer(c.Get(2), c.Get(3), c.Get(4), c.Get(5));

    InternetStackHelper internet;
    internet.Install(c);

    // We create the channels first without any IP addressing information
    NS_LOG_INFO("Create channels.");
    PointToPointHelper p2p;
    p2p.SetDeviceAttribute("DataRate", StringValue("5Mbps"));
    p2p.SetChannelAttribute("Delay", StringValue("2ms"));
    NetDeviceContainer d0d2 = p2p.Install(n0n2);
    NetDeviceContainer d1d6 = p2p.Install(n1n6);
    NetDeviceContainer d1d2 = p2p.Install(n1n2);

    // The n5-n6 link is configured slower than the other point-to-point links.
    p2p.SetDeviceAttribute("DataRate", StringValue("1500kbps"));
    p2p.SetChannelAttribute("Delay", StringValue("10ms"));
    NetDeviceContainer d5d6 = p2p.Install(n5n6);

    // We create the channels first without any IP addressing information
    CsmaHelper csma;
    csma.SetChannelAttribute("DataRate", StringValue("5Mbps"));
    csma.SetChannelAttribute("Delay", StringValue("2ms"));
    NetDeviceContainer d2345 = csma.Install(n2345);

    // Later, we add IP addresses.
    NS_LOG_INFO("Assign IP Addresses.");
    Ipv4AddressHelper ipv4;
    ipv4.SetBase("10.1.1.0", "255.255.255.0");
    ipv4.Assign(d0d2);
    ipv4.SetBase("10.1.2.0", "255.255.255.0");
    ipv4.Assign(d1d2);
    ipv4.SetBase("10.1.3.0", "255.255.255.0");
    Ipv4InterfaceContainer i5i6 = ipv4.Assign(d5d6);
    ipv4.SetBase("10.250.1.0", "255.255.255.0");
    ipv4.Assign(d2345);
    ipv4.SetBase("172.16.1.0", "255.255.255.0");
    Ipv4InterfaceContainer i1i6 = ipv4.Assign(d1d6);

    // Create router nodes, initialize routing database and set up the routing
    // tables in the nodes.
    Ipv4GlobalRoutingHelper::PopulateRoutingTables();

    // Create the OnOff application to send UDP datagrams of size
    // 210 bytes at a rate of 448 Kb/s
    NS_LOG_INFO("Create Applications.");
    uint16_t port = 9; // Discard port (RFC 863)

    // First flow: n1 -> n6's address on the n5/n6 link, active from 1 s to 10 s.
    OnOffHelper onoff("ns3::UdpSocketFactory", InetSocketAddress(i5i6.GetAddress(1), port));
    onoff.SetConstantRate(DataRate("2kbps"));
    onoff.SetAttribute("PacketSize", UintegerValue(50));
    ApplicationContainer apps = onoff.Install(c.Get(1));
    apps.Start(Seconds(1.0));
    apps.Stop(Seconds(10.0));

    // Create a second OnOff application to send UDP datagrams of size
    // 210 bytes at a rate of 448 Kb/s
    // Second flow: n1 -> n6's other address (on the n1/n6 p2p link), 11 s to 16 s.
    OnOffHelper onoff2("ns3::UdpSocketFactory", InetSocketAddress(i1i6.GetAddress(1), port));
    onoff2.SetAttribute("OnTime", StringValue("ns3::ConstantRandomVariable[Constant=1]"));
    onoff2.SetAttribute("OffTime", StringValue("ns3::ConstantRandomVariable[Constant=0]"));
    onoff2.SetAttribute("DataRate", StringValue("2kbps"));
    onoff2.SetAttribute("PacketSize", UintegerValue(50));
    ApplicationContainer apps2 = onoff2.Install(c.Get(1));
    apps2.Start(Seconds(11.0));
    apps2.Stop(Seconds(16.0));

    // Create an optional packet sink to receive these packets
    PacketSinkHelper sink("ns3::UdpSocketFactory",
                          Address(InetSocketAddress(Ipv4Address::GetAny(), port)));
    apps = sink.Install(c.Get(6));
    apps.Start(Seconds(1.0));
    apps.Stop(Seconds(10.0));

    PacketSinkHelper sink2("ns3::UdpSocketFactory",
                           Address(InetSocketAddress(Ipv4Address::GetAny(), port)));
    apps2 = sink2.Install(c.Get(6));
    apps2.Start(Seconds(11.0));
    apps2.Stop(Seconds(16.0));

    // Enable ASCII and pcap tracing of queues and packet receptions.
    AsciiTraceHelper ascii;
    Ptr<OutputStreamWrapper> stream = ascii.CreateFileStream("dynamic-global-routing.tr");
    p2p.EnableAsciiAll(stream);
    csma.EnableAsciiAll(stream);
    internet.EnableAsciiIpv4All(stream);

    p2p.EnablePcapAll("dynamic-global-routing");
    csma.EnablePcapAll("dynamic-global-routing", false);

    // Schedule interface down/up events on n1 to exercise dynamic rerouting.
    Ptr<Node> n1 = c.Get(1);
    Ptr<Ipv4> ipv41 = n1->GetObject<Ipv4>();
    // The first ifIndex is 0 for loopback, then the first p2p is numbered 1,
    // then the next p2p is numbered 2
    uint32_t ipv4ifIndex1 = 2;
    Simulator::Schedule(Seconds(2), &Ipv4::SetDown, ipv41, ipv4ifIndex1);
    Simulator::Schedule(Seconds(4), &Ipv4::SetUp, ipv41, ipv4ifIndex1);

    // Same down/up cycle on n6's side.
    Ptr<Node> n6 = c.Get(6);
    Ptr<Ipv4> ipv46 = n6->GetObject<Ipv4>();
    // The first ifIndex is 0 for loopback, then the first p2p is numbered 1,
    // then the next p2p is numbered 2
    uint32_t ipv4ifIndex6 = 2;
    Simulator::Schedule(Seconds(6), &Ipv4::SetDown, ipv46, ipv4ifIndex6);
    Simulator::Schedule(Seconds(8), &Ipv4::SetUp, ipv46, ipv4ifIndex6);

    // A second down/up cycle on n1 while the second flow is active.
    Simulator::Schedule(Seconds(12), &Ipv4::SetDown, ipv41, ipv4ifIndex1);
    Simulator::Schedule(Seconds(14), &Ipv4::SetUp, ipv41, ipv4ifIndex1);

    // Trace routing tables
    Ptr<OutputStreamWrapper> routingStream =
        Create<OutputStreamWrapper>("dynamic-global-routing.routes", std::ios::out);
    Ipv4RoutingHelper::PrintRoutingTableAllAt(Seconds(12), routingStream);

    NS_LOG_INFO("Run Simulation.");
    Simulator::Run();
    Simulator::Destroy();
    NS_LOG_INFO("Done.");
    return 0;
}

View File

@@ -0,0 +1,374 @@
/*
* Copyright (c) 2015 Universita' degli Studi di Napoli Federico II
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Authors: Pasquale Imputato <p.imputato@gmail.com>
* Stefano Avallone <stefano.avallone@unina.it>
*/
// This example serves as a benchmark for all the queue discs (with BQL enabled or not)
//
// Network topology
//
// 192.168.1.0 192.168.2.0
// n1 ------------------------------------ n2 ----------------------------------- n3
// point-to-point (access link) point-to-point (bottleneck link)
// 100 Mbps, 0.1 ms bandwidth [10 Mbps], delay [5 ms]
// qdiscs: PfifoFast with a capacity of 1000 packets on the access link;
//         queueDiscType in {PfifoFast, ARED, CoDel, FqCoDel, PIE} [PfifoFast]
//         with a capacity of queueDiscSize packets [1000] on the bottleneck link
// netdevice queues: size of 100 packets on the access link;
//                   size of netdevicesQueueSize packets [100] on the bottleneck link
// BQL: disabled on the access link; enabled on the bottleneck link per the bql flag [false]
// *** fixed configuration ***
//
// Two TCP flows are generated: one from n1 to n3 and the other from n3 to n1.
// Additionally, n1 pings n3, so that the RTT can be measured.
//
// The output will consist of a number of ping Rtt such as:
//
// /NodeList/0/ApplicationList/2/$ns3::Ping/Rtt=111 ms
// /NodeList/0/ApplicationList/2/$ns3::Ping/Rtt=111 ms
// /NodeList/0/ApplicationList/2/$ns3::Ping/Rtt=110 ms
// /NodeList/0/ApplicationList/2/$ns3::Ping/Rtt=111 ms
// /NodeList/0/ApplicationList/2/$ns3::Ping/Rtt=111 ms
// /NodeList/0/ApplicationList/2/$ns3::Ping/Rtt=112 ms
// /NodeList/0/ApplicationList/2/$ns3::Ping/Rtt=111 ms
//
// The files output will consist of a trace file with bytes in queue and of a trace file for limits
// (when BQL is enabled) both for bottleneck NetDevice on n2, two files with upload and download
// goodput for flows configuration and a file with flow monitor stats.
//
// If you use an AQM as queue disc on the bottleneck netdevices, you can observe that the ping Rtt
// decrease. A further decrease can be observed when you enable BQL.
#include "ns3/applications-module.h"
#include "ns3/core-module.h"
#include "ns3/flow-monitor-module.h"
#include "ns3/internet-apps-module.h"
#include "ns3/internet-module.h"
#include "ns3/mtp-module.h"
#include "ns3/network-module.h"
#include "ns3/point-to-point-module.h"
#include "ns3/traffic-control-module.h"
using namespace ns3;
NS_LOG_COMPONENT_DEFINE("BenchmarkQueueDiscs");
/**
 * Print the queue limits.
 *
 * Writes one line "<time-in-seconds> <newVal>" per limit change.
 *
 * \param stream The output stream.
 * \param oldVal Old value (unused; required by the traced-value callback signature).
 * \param newVal New value.
 */
void
LimitsTrace(Ptr<OutputStreamWrapper> stream, uint32_t oldVal [[maybe_unused]], uint32_t newVal)
{
    *stream->GetStream() << Simulator::Now().GetSeconds() << " " << newVal << std::endl;
}
/**
 * Print the bytes in the queue.
 *
 * Writes one line "<time-in-seconds> <newVal>" per queue-occupancy change.
 *
 * \param stream The output stream.
 * \param oldVal Old value (unused; required by the traced-value callback signature).
 * \param newVal New value.
 */
void
BytesInQueueTrace(Ptr<OutputStreamWrapper> stream, uint32_t oldVal [[maybe_unused]], uint32_t newVal)
{
    *stream->GetStream() << Simulator::Now().GetSeconds() << " " << newVal << std::endl;
}
/**
 * Sample and print the sink goodput, then re-schedule itself.
 *
 * Writes one line "<time-in-seconds> <goodput-in-Kbit/s>" per sample.
 *
 * \param app The container holding the receiving PacketSink (element 0).
 * \param stream The output stream.
 * \param period The sampling period, in seconds (must be > 0 so that
 *               Simulator::Now() is non-zero when the sample is taken).
 */
static void
GoodputSampling(ApplicationContainer app, Ptr<OutputStreamWrapper> stream, float period)
{
    // Re-arm the periodic sampling before taking this sample.
    Simulator::Schedule(Seconds(period), &GoodputSampling, app, stream, period);
    // PacketSink::GetTotalRx() returns bytes, not packets.
    uint64_t totalBytesRx = DynamicCast<PacketSink>(app.Get(0))->GetTotalRx();
    // Average goodput since t=0: bytes * 8 bits / (elapsed seconds * 1024).
    double goodput = totalBytesRx * 8 / (Simulator::Now().GetSeconds() * 1024); // Kbit/s
    *stream->GetStream() << Simulator::Now().GetSeconds() << " " << goodput << std::endl;
}
/**
 * Print the ping RTT.
 *
 * Emits "<context>=<rtt> ms" on standard output for every RTT report.
 *
 * \param context The trace context.
 * \param rtt The measured round-trip time.
 */
static void
PingRtt(std::string context, uint16_t, Time rtt)
{
    const auto rttMs = rtt.GetMilliSeconds();
    std::cout << context << "=" << rttMs << " ms" << std::endl;
}
int
main(int argc, char* argv[])
{
    // Run this example under the multi-threaded parallel (MTP) simulation engine.
    MtpInterface::Enable();

    // Default parameters; each one can be overridden on the command line below.
    std::string bandwidth = "10Mbps";
    std::string delay = "5ms";
    std::string queueDiscType = "PfifoFast";
    uint32_t queueDiscSize = 1000;
    uint32_t netdevicesQueueSize = 50;
    bool bql = false;
    std::string flowsDatarate = "20Mbps";
    uint32_t flowsPacketsSize = 1000;
    float startTime = 0.1F; // in s
    float simDuration = 60;
    float samplingPeriod = 1;

    CommandLine cmd(__FILE__);
    cmd.AddValue("bandwidth", "Bottleneck bandwidth", bandwidth);
    cmd.AddValue("delay", "Bottleneck delay", delay);
    cmd.AddValue("queueDiscType",
                 "Bottleneck queue disc type in {PfifoFast, ARED, CoDel, FqCoDel, PIE, prio}",
                 queueDiscType);
    cmd.AddValue("queueDiscSize", "Bottleneck queue disc size in packets", queueDiscSize);
    cmd.AddValue("netdevicesQueueSize",
                 "Bottleneck netdevices queue size in packets",
                 netdevicesQueueSize);
    cmd.AddValue("bql", "Enable byte queue limits on bottleneck netdevices", bql);
    cmd.AddValue("flowsDatarate", "Upload and download flows datarate", flowsDatarate);
    cmd.AddValue("flowsPacketsSize", "Upload and download flows packets sizes", flowsPacketsSize);
    cmd.AddValue("startTime", "Simulation start time", startTime);
    cmd.AddValue("simDuration", "Simulation duration in seconds", simDuration);
    cmd.AddValue("samplingPeriod", "Goodput sampling period in seconds", samplingPeriod);
    cmd.Parse(argc, argv);

    float stopTime = startTime + simDuration;

    // Create nodes
    NodeContainer n1;
    NodeContainer n2;
    NodeContainer n3;
    n1.Create(1);
    n2.Create(1);
    n3.Create(1);

    // Create and configure access link and bottleneck link
    PointToPointHelper accessLink;
    accessLink.SetDeviceAttribute("DataRate", StringValue("100Mbps"));
    accessLink.SetChannelAttribute("Delay", StringValue("0.1ms"));
    accessLink.SetQueue("ns3::DropTailQueue", "MaxSize", StringValue("100p"));

    PointToPointHelper bottleneckLink;
    bottleneckLink.SetDeviceAttribute("DataRate", StringValue(bandwidth));
    bottleneckLink.SetChannelAttribute("Delay", StringValue(delay));
    bottleneckLink.SetQueue("ns3::DropTailQueue",
                            "MaxSize",
                            StringValue(std::to_string(netdevicesQueueSize) + "p"));

    InternetStackHelper stack;
    stack.InstallAll();

    // Access link traffic control configuration
    TrafficControlHelper tchPfifoFastAccess;
    tchPfifoFastAccess.SetRootQueueDisc("ns3::PfifoFastQueueDisc", "MaxSize", StringValue("1000p"));

    // Bottleneck link traffic control configuration: pick the root queue disc
    // requested by --queueDiscType and cap it at queueDiscSize packets.
    TrafficControlHelper tchBottleneck;
    if (queueDiscType == "PfifoFast")
    {
        tchBottleneck.SetRootQueueDisc(
            "ns3::PfifoFastQueueDisc",
            "MaxSize",
            QueueSizeValue(QueueSize(QueueSizeUnit::PACKETS, queueDiscSize)));
    }
    else if (queueDiscType == "ARED")
    {
        // ARED is RedQueueDisc with the adaptive mode switched on via defaults.
        tchBottleneck.SetRootQueueDisc("ns3::RedQueueDisc");
        Config::SetDefault("ns3::RedQueueDisc::ARED", BooleanValue(true));
        Config::SetDefault("ns3::RedQueueDisc::MaxSize",
                           QueueSizeValue(QueueSize(QueueSizeUnit::PACKETS, queueDiscSize)));
    }
    else if (queueDiscType == "CoDel")
    {
        tchBottleneck.SetRootQueueDisc("ns3::CoDelQueueDisc");
        Config::SetDefault("ns3::CoDelQueueDisc::MaxSize",
                           QueueSizeValue(QueueSize(QueueSizeUnit::PACKETS, queueDiscSize)));
    }
    else if (queueDiscType == "FqCoDel")
    {
        tchBottleneck.SetRootQueueDisc("ns3::FqCoDelQueueDisc");
        Config::SetDefault("ns3::FqCoDelQueueDisc::MaxSize",
                           QueueSizeValue(QueueSize(QueueSizeUnit::PACKETS, queueDiscSize)));
    }
    else if (queueDiscType == "PIE")
    {
        tchBottleneck.SetRootQueueDisc("ns3::PieQueueDisc");
        Config::SetDefault("ns3::PieQueueDisc::MaxSize",
                           QueueSizeValue(QueueSize(QueueSizeUnit::PACKETS, queueDiscSize)));
    }
    else if (queueDiscType == "prio")
    {
        // Two-band priority disc: band 0 -> Fifo child, band 1 -> RED child.
        uint16_t handle =
            tchBottleneck.SetRootQueueDisc("ns3::PrioQueueDisc",
                                           "Priomap",
                                           StringValue("0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1"));
        TrafficControlHelper::ClassIdList cid =
            tchBottleneck.AddQueueDiscClasses(handle, 2, "ns3::QueueDiscClass");
        tchBottleneck.AddChildQueueDisc(handle, cid[0], "ns3::FifoQueueDisc");
        tchBottleneck.AddChildQueueDisc(handle, cid[1], "ns3::RedQueueDisc");
    }
    else
    {
        NS_ABORT_MSG("--queueDiscType not valid");
    }

    if (bql)
    {
        tchBottleneck.SetQueueLimits("ns3::DynamicQueueLimits");
    }

    NetDeviceContainer devicesAccessLink = accessLink.Install(n1.Get(0), n2.Get(0));
    tchPfifoFastAccess.Install(devicesAccessLink);
    Ipv4AddressHelper address;
    address.SetBase("192.168.0.0", "255.255.255.0");
    address.NewNetwork();
    Ipv4InterfaceContainer interfacesAccess = address.Assign(devicesAccessLink);

    NetDeviceContainer devicesBottleneckLink = bottleneckLink.Install(n2.Get(0), n3.Get(0));
    QueueDiscContainer qdiscs;
    qdiscs = tchBottleneck.Install(devicesBottleneckLink);

    address.NewNetwork();
    Ipv4InterfaceContainer interfacesBottleneck = address.Assign(devicesBottleneckLink);

    // Hook into the bottleneck device's transmission queue on n2's side
    // to observe its (dynamic) limits.
    Ptr<NetDeviceQueueInterface> interface =
        devicesBottleneckLink.Get(0)->GetObject<NetDeviceQueueInterface>();
    Ptr<NetDeviceQueue> queueInterface = interface->GetTxQueue(0);
    Ptr<DynamicQueueLimits> queueLimits =
        StaticCast<DynamicQueueLimits>(queueInterface->GetQueueLimits());

    AsciiTraceHelper ascii;
    if (bql)
    {
        // Tag output files with "-bql" and trace the byte-queue-limit changes.
        queueDiscType = queueDiscType + "-bql";
        Ptr<OutputStreamWrapper> streamLimits =
            ascii.CreateFileStream(queueDiscType + "-limits.txt");
        queueLimits->TraceConnectWithoutContext("Limit",
                                                MakeBoundCallback(&LimitsTrace, streamLimits));
    }
    // Trace the byte occupancy of the bottleneck device's DropTail queue.
    Ptr<Queue<Packet>> queue =
        StaticCast<PointToPointNetDevice>(devicesBottleneckLink.Get(0))->GetQueue();
    Ptr<OutputStreamWrapper> streamBytesInQueue =
        ascii.CreateFileStream(queueDiscType + "-bytesInQueue.txt");
    queue->TraceConnectWithoutContext("BytesInQueue",
                                      MakeBoundCallback(&BytesInQueueTrace, streamBytesInQueue));

    Ipv4InterfaceContainer n1Interface;
    n1Interface.Add(interfacesAccess.Get(0));
    Ipv4InterfaceContainer n3Interface;
    n3Interface.Add(interfacesBottleneck.Get(1));

    Ipv4GlobalRoutingHelper::PopulateRoutingTables();

    Config::SetDefault("ns3::TcpSocket::SegmentSize", UintegerValue(flowsPacketsSize));

    // Flows configuration
    // Bidirectional TCP streams with ping like flent tcp_bidirectional test.
    uint16_t port = 7;
    ApplicationContainer uploadApp;
    ApplicationContainer downloadApp;
    ApplicationContainer sourceApps;
    // Configure and install upload flow
    Address addUp(InetSocketAddress(Ipv4Address::GetAny(), port));
    PacketSinkHelper sinkHelperUp("ns3::TcpSocketFactory", addUp);
    sinkHelperUp.SetAttribute("Protocol", TypeIdValue(TcpSocketFactory::GetTypeId()));
    uploadApp.Add(sinkHelperUp.Install(n3));

    InetSocketAddress socketAddressUp = InetSocketAddress(n3Interface.GetAddress(0), port);
    OnOffHelper onOffHelperUp("ns3::TcpSocketFactory", Address());
    onOffHelperUp.SetAttribute("Remote", AddressValue(socketAddressUp));
    onOffHelperUp.SetAttribute("OnTime", StringValue("ns3::ConstantRandomVariable[Constant=1]"));
    onOffHelperUp.SetAttribute("OffTime", StringValue("ns3::ConstantRandomVariable[Constant=0]"));
    onOffHelperUp.SetAttribute("PacketSize", UintegerValue(flowsPacketsSize));
    onOffHelperUp.SetAttribute("DataRate", StringValue(flowsDatarate));
    sourceApps.Add(onOffHelperUp.Install(n1));

    port = 8;
    // Configure and install download flow
    Address addDown(InetSocketAddress(Ipv4Address::GetAny(), port));
    PacketSinkHelper sinkHelperDown("ns3::TcpSocketFactory", addDown);
    sinkHelperDown.SetAttribute("Protocol", TypeIdValue(TcpSocketFactory::GetTypeId()));
    downloadApp.Add(sinkHelperDown.Install(n1));

    InetSocketAddress socketAddressDown = InetSocketAddress(n1Interface.GetAddress(0), port);
    OnOffHelper onOffHelperDown("ns3::TcpSocketFactory", Address());
    onOffHelperDown.SetAttribute("Remote", AddressValue(socketAddressDown));
    onOffHelperDown.SetAttribute("OnTime", StringValue("ns3::ConstantRandomVariable[Constant=1]"));
    onOffHelperDown.SetAttribute("OffTime", StringValue("ns3::ConstantRandomVariable[Constant=0]"));
    onOffHelperDown.SetAttribute("PacketSize", UintegerValue(flowsPacketsSize));
    onOffHelperDown.SetAttribute("DataRate", StringValue(flowsDatarate));
    sourceApps.Add(onOffHelperDown.Install(n3));

    // Configure and install ping
    PingHelper ping(n3Interface.GetAddress(0));
    ping.SetAttribute("VerboseMode", EnumValue(Ping::VerboseMode::QUIET));
    ping.Install(n1);

    // Report every RTT sample on stdout via the PingRtt callback above.
    Config::Connect("/NodeList/*/ApplicationList/*/$ns3::Ping/Rtt", MakeCallback(&PingRtt));

    // Sinks run the whole simulation; sources start/stop slightly inside
    // the sink lifetime so no data is sent to a closed sink.
    uploadApp.Start(Seconds(0));
    uploadApp.Stop(Seconds(stopTime));
    downloadApp.Start(Seconds(0));
    downloadApp.Stop(Seconds(stopTime));
    sourceApps.Start(Seconds(0 + 0.1));
    sourceApps.Stop(Seconds(stopTime - 0.1));

    // Periodic goodput sampling for both directions.
    Ptr<OutputStreamWrapper> uploadGoodputStream =
        ascii.CreateFileStream(queueDiscType + "-upGoodput.txt");
    Simulator::Schedule(Seconds(samplingPeriod),
                        &GoodputSampling,
                        uploadApp,
                        uploadGoodputStream,
                        samplingPeriod);
    Ptr<OutputStreamWrapper> downloadGoodputStream =
        ascii.CreateFileStream(queueDiscType + "-downGoodput.txt");
    Simulator::Schedule(Seconds(samplingPeriod),
                        &GoodputSampling,
                        downloadApp,
                        downloadGoodputStream,
                        samplingPeriod);

    // Flow monitor
    Ptr<FlowMonitor> flowMonitor;
    FlowMonitorHelper flowHelper;
    flowMonitor = flowHelper.InstallAll();

    accessLink.EnablePcapAll("queue");

    Simulator::Stop(Seconds(stopTime));
    Simulator::Run();

    flowMonitor->SerializeToXmlFile(queueDiscType + "-flowMonitor.xml", true, true);

    Simulator::Destroy();
    return 0;
}

View File

@@ -0,0 +1,270 @@
/*
* Copyright (c) 2014 Universita' di Firenze, Italy
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Tommaso Pecorella <tommaso.pecorella@unifi.it>
*/
// Network topology
//
// SRC
// |<=== source network
// A-----B
// \ / \ all networks have cost 1, except
// \ / | for the direct link from C to D, which
// C / has cost 10
// | /
// |/
// D
// |<=== target network
// DST
//
//
// A, B, C and D are RIPng routers.
// A and D are configured with static addresses.
// SRC and DST will exchange packets.
//
// After about 3 seconds, the topology is built, and Echo Reply will be received.
// After 40 seconds, the link between B and D will break, causing a route failure.
// After 44 seconds from the failure, the routers will recover from the failure.
// Split Horizon should affect the recovery time, but it does not. See the manual
// for an explanation of this effect.
//
// If "showPings" is enabled, the user will see:
// 1) if the ping has been acknowledged
// 2) if a Destination Unreachable has been received by the sender
// 3) nothing, when the Echo Request has been received by the destination but
// the Echo Reply is unable to reach the sender.
// Examining the .pcap files with Wireshark can confirm this effect.
#include "ns3/core-module.h"
#include "ns3/internet-apps-module.h"
#include "ns3/internet-module.h"
#include "ns3/ipv6-routing-table-entry.h"
#include "ns3/ipv6-static-routing-helper.h"
#include "ns3/mtp-module.h"
#include "ns3/point-to-point-module.h"
#include <fstream>
using namespace ns3;
NS_LOG_COMPONENT_DEFINE("RipNgSimpleRouting");
// Bring down one interface on each endpoint of a link, simulating a link failure.
// interfaceA/interfaceB are the Ipv6 interface indices on nodeA/nodeB respectively.
void
TearDownLink(Ptr<Node> nodeA, Ptr<Node> nodeB, uint32_t interfaceA, uint32_t interfaceB)
{
    Ptr<Ipv6> ipv6A = nodeA->GetObject<Ipv6>();
    Ptr<Ipv6> ipv6B = nodeB->GetObject<Ipv6>();
    ipv6A->SetDown(interfaceA);
    ipv6B->SetDown(interfaceB);
}
int
main(int argc, char** argv)
{
    // Run this example under the multi-threaded parallel (MTP) simulation engine.
    MtpInterface::Enable();

    bool verbose = false;
    bool printRoutingTables = false;
    bool showPings = false;
    std::string SplitHorizon("PoisonReverse");

    CommandLine cmd(__FILE__);
    cmd.AddValue("verbose", "turn on log components", verbose);
    cmd.AddValue("printRoutingTables",
                 "Print routing tables at 30, 60 and 90 seconds",
                 printRoutingTables);
    cmd.AddValue("showPings", "Show Ping6 reception", showPings);
    cmd.AddValue("splitHorizonStrategy",
                 "Split Horizon strategy to use (NoSplitHorizon, SplitHorizon, PoisonReverse)",
                 SplitHorizon);
    cmd.Parse(argc, argv);

    if (verbose)
    {
        LogComponentEnable("RipNgSimpleRouting", LOG_LEVEL_INFO);
        LogComponentEnable("RipNg", LOG_LEVEL_ALL);
        LogComponentEnable("Icmpv6L4Protocol", LOG_LEVEL_INFO);
        LogComponentEnable("Ipv6Interface", LOG_LEVEL_ALL);
        LogComponentEnable("Icmpv6L4Protocol", LOG_LEVEL_ALL);
        LogComponentEnable("NdiscCache", LOG_LEVEL_ALL);
        LogComponentEnable("Ping", LOG_LEVEL_ALL);
    }

    // Map the command-line strategy name onto the RipNg enum; anything other
    // than the two explicit names falls back to PoisonReverse (the default).
    if (SplitHorizon == "NoSplitHorizon")
    {
        Config::SetDefault("ns3::RipNg::SplitHorizon", EnumValue(RipNg::NO_SPLIT_HORIZON));
    }
    else if (SplitHorizon == "SplitHorizon")
    {
        Config::SetDefault("ns3::RipNg::SplitHorizon", EnumValue(RipNg::SPLIT_HORIZON));
    }
    else
    {
        Config::SetDefault("ns3::RipNg::SplitHorizon", EnumValue(RipNg::POISON_REVERSE));
    }

    NS_LOG_INFO("Create nodes.");
    Ptr<Node> src = CreateObject<Node>();
    Names::Add("SrcNode", src);
    Ptr<Node> dst = CreateObject<Node>();
    Names::Add("DstNode", dst);
    Ptr<Node> a = CreateObject<Node>();
    Names::Add("RouterA", a);
    Ptr<Node> b = CreateObject<Node>();
    Names::Add("RouterB", b);
    Ptr<Node> c = CreateObject<Node>();
    Names::Add("RouterC", c);
    Ptr<Node> d = CreateObject<Node>();
    Names::Add("RouterD", d);

    // One container per link of the topology pictured in the header comment.
    NodeContainer net1(src, a);
    NodeContainer net2(a, b);
    NodeContainer net3(a, c);
    NodeContainer net4(b, c);
    NodeContainer net5(c, d);
    NodeContainer net6(b, d);
    NodeContainer net7(d, dst);
    NodeContainer routers(a, b, c, d);
    NodeContainer nodes(src, dst);

    NS_LOG_INFO("Create channels.");
    PointToPointHelper p2p;
    p2p.SetDeviceAttribute("DataRate", DataRateValue(5000000));
    p2p.SetChannelAttribute("Delay", TimeValue(MilliSeconds(2)));
    NetDeviceContainer ndc1 = p2p.Install(net1);
    NetDeviceContainer ndc2 = p2p.Install(net2);
    NetDeviceContainer ndc3 = p2p.Install(net3);
    NetDeviceContainer ndc4 = p2p.Install(net4);
    NetDeviceContainer ndc5 = p2p.Install(net5);
    NetDeviceContainer ndc6 = p2p.Install(net6);
    NetDeviceContainer ndc7 = p2p.Install(net7);

    NS_LOG_INFO("Create IPv6 and routing");
    RipNgHelper ripNgRouting;
    // Rule of thumb:
    // Interfaces are added sequentially, starting from 0
    // However, interface 0 is always the loopback...
    // Exclude the edge interfaces (toward SRC and DST) from RIPng, and give
    // the C-D link a metric of 10 (the "cost 10" link of the header diagram).
    ripNgRouting.ExcludeInterface(a, 1);
    ripNgRouting.ExcludeInterface(d, 3);
    ripNgRouting.SetInterfaceMetric(c, 3, 10);
    ripNgRouting.SetInterfaceMetric(d, 1, 10);

    // Routers use RIPng (priority 0) plus static routing (priority 5).
    Ipv6ListRoutingHelper listRH;
    listRH.Add(ripNgRouting, 0);
    Ipv6StaticRoutingHelper staticRh;
    listRH.Add(staticRh, 5);

    InternetStackHelper internetv6;
    internetv6.SetIpv4StackInstall(false);
    internetv6.SetRoutingHelper(listRH);
    internetv6.Install(routers);

    // End hosts get a plain IPv6 stack (default routing).
    InternetStackHelper internetv6Nodes;
    internetv6Nodes.SetIpv4StackInstall(false);
    internetv6Nodes.Install(nodes);

    // Assign addresses.
    // The source and destination networks have global addresses
    // The "core" network just needs link-local addresses for routing.
    // We assign global addresses to the routers as well to receive
    // ICMPv6 errors.
    NS_LOG_INFO("Assign IPv6 Addresses.");
    Ipv6AddressHelper ipv6;
    ipv6.SetBase(Ipv6Address("2001:1::"), Ipv6Prefix(64));
    Ipv6InterfaceContainer iic1 = ipv6.Assign(ndc1);
    iic1.SetForwarding(1, true);
    iic1.SetDefaultRouteInAllNodes(1);
    ipv6.SetBase(Ipv6Address("2001:0:1::"), Ipv6Prefix(64));
    Ipv6InterfaceContainer iic2 = ipv6.Assign(ndc2);
    iic2.SetForwarding(0, true);
    iic2.SetForwarding(1, true);
    ipv6.SetBase(Ipv6Address("2001:0:2::"), Ipv6Prefix(64));
    Ipv6InterfaceContainer iic3 = ipv6.Assign(ndc3);
    iic3.SetForwarding(0, true);
    iic3.SetForwarding(1, true);
    ipv6.SetBase(Ipv6Address("2001:0:3::"), Ipv6Prefix(64));
    Ipv6InterfaceContainer iic4 = ipv6.Assign(ndc4);
    iic4.SetForwarding(0, true);
    iic4.SetForwarding(1, true);
    ipv6.SetBase(Ipv6Address("2001:0:4::"), Ipv6Prefix(64));
    Ipv6InterfaceContainer iic5 = ipv6.Assign(ndc5);
    iic5.SetForwarding(0, true);
    iic5.SetForwarding(1, true);
    ipv6.SetBase(Ipv6Address("2001:0:5::"), Ipv6Prefix(64));
    Ipv6InterfaceContainer iic6 = ipv6.Assign(ndc6);
    iic6.SetForwarding(0, true);
    iic6.SetForwarding(1, true);
    ipv6.SetBase(Ipv6Address("2001:2::"), Ipv6Prefix(64));
    Ipv6InterfaceContainer iic7 = ipv6.Assign(ndc7);
    iic7.SetForwarding(0, true);
    iic7.SetDefaultRouteInAllNodes(0);

    if (printRoutingTables)
    {
        // Dump all four routers' tables before, around, and after the
        // scheduled link failure at t = 40 s.
        Ptr<OutputStreamWrapper> routingStream = Create<OutputStreamWrapper>(&std::cout);
        Ipv6RoutingHelper::PrintRoutingTableAt(Seconds(30.0), a, routingStream);
        Ipv6RoutingHelper::PrintRoutingTableAt(Seconds(30.0), b, routingStream);
        Ipv6RoutingHelper::PrintRoutingTableAt(Seconds(30.0), c, routingStream);
        Ipv6RoutingHelper::PrintRoutingTableAt(Seconds(30.0), d, routingStream);
        Ipv6RoutingHelper::PrintRoutingTableAt(Seconds(60.0), a, routingStream);
        Ipv6RoutingHelper::PrintRoutingTableAt(Seconds(60.0), b, routingStream);
        Ipv6RoutingHelper::PrintRoutingTableAt(Seconds(60.0), c, routingStream);
        Ipv6RoutingHelper::PrintRoutingTableAt(Seconds(60.0), d, routingStream);
        Ipv6RoutingHelper::PrintRoutingTableAt(Seconds(90.0), a, routingStream);
        Ipv6RoutingHelper::PrintRoutingTableAt(Seconds(90.0), b, routingStream);
        Ipv6RoutingHelper::PrintRoutingTableAt(Seconds(90.0), c, routingStream);
        Ipv6RoutingHelper::PrintRoutingTableAt(Seconds(90.0), d, routingStream);
    }

    NS_LOG_INFO("Create Applications.");
    uint32_t packetSize = 1024;
    Time interPacketInterval = Seconds(1.0);
    // SRC pings DST's global address on the d-dst link.
    PingHelper ping(iic7.GetAddress(1, 1));

    ping.SetAttribute("Interval", TimeValue(interPacketInterval));
    ping.SetAttribute("Size", UintegerValue(packetSize));
    if (showPings)
    {
        ping.SetAttribute("VerboseMode", EnumValue(Ping::VerboseMode::VERBOSE));
    }
    ApplicationContainer apps = ping.Install(src);
    apps.Start(Seconds(1.0));
    apps.Stop(Seconds(110.0));

    AsciiTraceHelper ascii;
    p2p.EnableAsciiAll(ascii.CreateFileStream("ripng-simple-routing.tr"));
    p2p.EnablePcapAll("ripng-simple-routing", true);

    // Break the B-D link at t = 40 s to trigger RIPng route recovery.
    Simulator::Schedule(Seconds(40), &TearDownLink, b, d, 3, 2);

    /* Now, do the actual simulation. */
    NS_LOG_INFO("Run Simulation.");
    Simulator::Stop(Seconds(120));
    Simulator::Run();
    Simulator::Destroy();
    NS_LOG_INFO("Done.");
    return 0;
}

View File

@@ -0,0 +1,215 @@
/*
* Copyright (c) 2013 Universita' di Firenze
* Copyright (c) 2019 Caliola Engineering, LLC : RFC 6621 multicast packet de-duplication
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Tommaso Pecorella <tommaso.pecorella@unifi.it>
* Modified (2019): Jared Dulmage <jared.dulmage@caliola.com>
* Demonstrates dissemination of multicast packets across a mesh
* network to all nodes over multiple hops.
*/
#include "ns3/boolean.h"
#include "ns3/config.h"
#include "ns3/data-rate.h"
#include "ns3/double.h"
#include "ns3/inet-socket-address.h"
#include "ns3/internet-stack-helper.h"
#include "ns3/ipv4-address-helper.h"
#include "ns3/ipv4-l3-protocol.h"
#include "ns3/ipv4-list-routing-helper.h"
#include "ns3/ipv4-static-routing-helper.h"
#include "ns3/ipv4-static-routing.h"
#include "ns3/log.h"
#include "ns3/mtp-interface.h"
#include "ns3/names.h"
#include "ns3/node.h"
#include "ns3/on-off-helper.h"
#include "ns3/packet-sink-helper.h"
#include "ns3/packet-sink.h"
#include "ns3/random-variable-stream.h"
#include "ns3/simple-channel.h"
#include "ns3/simple-net-device-helper.h"
#include "ns3/simple-net-device.h"
#include "ns3/simulator.h"
#include "ns3/socket.h"
#include "ns3/string.h"
#include "ns3/test.h"
#include "ns3/trace-helper.h"
#include "ns3/traffic-control-layer.h"
#include "ns3/udp-socket-factory.h"
#include "ns3/udp-socket.h"
#include "ns3/uinteger.h"
#include <functional>
#include <limits>
#include <string>
using namespace ns3;
/**
* Network topology:
*
* /---- B ----\
* A ---- | ---- D ---- E
* \---- C ----/
*
* This example demonstrates configuration of
* static routing to realize broadcast-like
* flooding of packets from node A
* across the illustrated topology.
*/
int
main(int argc, char* argv[])
{
    // Run this example under the multi-threaded parallel (MTP) simulation engine.
    MtpInterface::Enable();

    // multicast target
    const std::string targetAddr = "239.192.100.1";
    // RFC 6621-style duplicate packet detection so flooded copies are not
    // re-forwarded forever.
    Config::SetDefault("ns3::Ipv4L3Protocol::EnableDuplicatePacketDetection", BooleanValue(true));
    Config::SetDefault("ns3::Ipv4L3Protocol::DuplicateExpire", TimeValue(Seconds(10)));

    // Create topology

    // Create nodes
    auto nodes = NodeContainer();
    nodes.Create(5);

    // Name nodes
    Names::Add("A", nodes.Get(0));
    Names::Add("B", nodes.Get(1));
    Names::Add("C", nodes.Get(2));
    Names::Add("D", nodes.Get(3));
    Names::Add("E", nodes.Get(4));

    SimpleNetDeviceHelper simplenet;
    auto devices = simplenet.Install(nodes);
    // name devices
    Names::Add("A/dev", devices.Get(0));
    Names::Add("B/dev", devices.Get(1));
    Names::Add("C/dev", devices.Get(2));
    Names::Add("D/dev", devices.Get(3));
    Names::Add("E/dev", devices.Get(4));

    // setup static routes to facilitate multicast flood
    Ipv4ListRoutingHelper listRouting;
    Ipv4StaticRoutingHelper staticRouting;
    listRouting.Add(staticRouting, 0);

    InternetStackHelper internet;
    internet.SetIpv6StackInstall(false);
    internet.SetIpv4ArpJitter(true);
    internet.SetRoutingHelper(listRouting);
    internet.Install(nodes);

    Ipv4AddressHelper ipv4address;
    ipv4address.SetBase("10.0.0.0", "255.255.255.0");
    ipv4address.Assign(devices);

    // add static routes for each node / device
    // Node A (the source) gets a host route; every other node gets a
    // multicast forwarding route so the packet floods onward.
    for (auto diter = devices.Begin(); diter != devices.End(); ++diter)
    {
        Ptr<Node> node = (*diter)->GetNode();
        if (Names::FindName(node) == "A")
        {
            // route for host
            // Use host routing entry according to note in Ipv4StaticRouting::RouteOutput:
            //// Note: Multicast routes for outbound packets are stored in the
            //// normal unicast table. An implication of this is that it is not
            //// possible to source multicast datagrams on multiple interfaces.
            //// This is a well-known property of sockets implementation on
            //// many Unix variants.
            //// So, we just log it and fall through to LookupStatic ()
            auto ipv4 = node->GetObject<Ipv4>();
            NS_ASSERT_MSG((bool)ipv4,
                          "Node " << Names::FindName(node) << " does not have Ipv4 aggregate");
            auto routing = staticRouting.GetStaticRouting(ipv4);
            routing->AddHostRouteTo(targetAddr.c_str(), ipv4->GetInterfaceForDevice(*diter), 0);
        }
        else
        {
            // route for forwarding
            staticRouting.AddMulticastRoute(node,
                                            Ipv4Address::GetAny(),
                                            targetAddr.c_str(),
                                            *diter,
                                            NetDeviceContainer(*diter));
        }
    }

    // set the topology, by default fully-connected
    // Blacklist pairs (in both directions) to carve the mesh of the header
    // diagram out of the fully-connected SimpleChannel: A reaches only B and C,
    // and E is reachable only through D.
    auto channel = devices.Get(0)->GetChannel();
    auto simplechannel = channel->GetObject<SimpleChannel>();
    simplechannel->BlackList(Names::Find<SimpleNetDevice>("A/dev"),
                             Names::Find<SimpleNetDevice>("D/dev"));
    simplechannel->BlackList(Names::Find<SimpleNetDevice>("D/dev"),
                             Names::Find<SimpleNetDevice>("A/dev"));
    simplechannel->BlackList(Names::Find<SimpleNetDevice>("A/dev"),
                             Names::Find<SimpleNetDevice>("E/dev"));
    simplechannel->BlackList(Names::Find<SimpleNetDevice>("E/dev"),
                             Names::Find<SimpleNetDevice>("A/dev"));
    simplechannel->BlackList(Names::Find<SimpleNetDevice>("B/dev"),
                             Names::Find<SimpleNetDevice>("E/dev"));
    simplechannel->BlackList(Names::Find<SimpleNetDevice>("E/dev"),
                             Names::Find<SimpleNetDevice>("B/dev"));
    simplechannel->BlackList(Names::Find<SimpleNetDevice>("C/dev"),
                             Names::Find<SimpleNetDevice>("E/dev"));
    simplechannel->BlackList(Names::Find<SimpleNetDevice>("E/dev"),
                             Names::Find<SimpleNetDevice>("C/dev"));
    // ensure some time progress between re-transmissions
    simplechannel->SetAttribute("Delay", TimeValue(MilliSeconds(1)));

    // sinks
    PacketSinkHelper sinkHelper("ns3::UdpSocketFactory",
                                InetSocketAddress(Ipv4Address::GetAny(), 9));
    auto sinks = sinkHelper.Install("B");
    sinks.Add(sinkHelper.Install("C"));
    sinks.Add(sinkHelper.Install("D"));
    sinks.Add(sinkHelper.Install("E"));
    sinks.Start(Seconds(1));

    // source
    OnOffHelper onoffHelper("ns3::UdpSocketFactory", InetSocketAddress(targetAddr.c_str(), 9));
    onoffHelper.SetAttribute("DataRate", DataRateValue(DataRate("8Mbps")));
    onoffHelper.SetAttribute("MaxBytes", UintegerValue(10 * 1024));
    auto source = onoffHelper.Install("A");
    source.Start(Seconds(1.1));

    // pcap traces
    for (auto end = nodes.End(), iter = nodes.Begin(); iter != end; ++iter)
    {
        internet.EnablePcapIpv4("smf-trace", (*iter)->GetId(), 1, false);
    }

    // run simulation
    Simulator::Run();

    // Report bytes sent by A versus bytes received at every sink.
    std::cout << "Node A sent " << 10 * 1024 << " bytes" << std::endl;
    for (auto end = sinks.End(), iter = sinks.Begin(); iter != end; ++iter)
    {
        auto node = (*iter)->GetNode();
        auto sink = (*iter)->GetObject<PacketSink>();
        std::cout << "Node " << Names::FindName(node) << " received " << sink->GetTotalRx()
                  << " bytes" << std::endl;
    }
    Simulator::Destroy();
    Names::Clear();
    return 0;
}

View File

@@ -0,0 +1,234 @@
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
/* Test program for multi-interface host, static routing
Destination host (10.20.1.2)
|
| 10.20.1.0/24
DSTRTR
10.10.1.0/24 / \ 10.10.2.0/24
/ \
Rtr1 Rtr2
10.1.1.0/24 | | 10.1.2.0/24
| /
\ /
Source
*/
#include "ns3/applications-module.h"
#include "ns3/core-module.h"
#include "ns3/internet-module.h"
#include "ns3/ipv4-list-routing-helper.h"
#include "ns3/ipv4-static-routing-helper.h"
#include "ns3/mtp-module.h"
#include "ns3/network-module.h"
#include "ns3/point-to-point-module.h"
#include <cassert>
#include <fstream>
#include <iostream>
#include <string>
using namespace ns3;

NS_LOG_COMPONENT_DEFINE("SocketBoundTcpRoutingExample");

// Total number of application bytes each flow sends.
static const uint32_t totalTxBytes = 20000;
// Bytes handed to the socket so far; reset for each flow in StartFlow().
static uint32_t currentTxBytes = 0;
// Size of each chunk passed to Socket::Send().
static const uint32_t writeSize = 1040;
// Payload buffer shared by all flows; only its length matters to the sink.
uint8_t data[writeSize];

// Forward declarations for the helpers defined after main().
// NOTE(review): SendStuff, srcSocketRecv and dstSocketRecv are declared but
// no definition is visible in this file chunk — confirm they are unused.
void StartFlow(Ptr<Socket>, Ipv4Address, uint16_t);
void WriteUntilBufferFull(Ptr<Socket>, uint32_t);
void SendStuff(Ptr<Socket> sock, Ipv4Address dstaddr, uint16_t port);
void BindSock(Ptr<Socket> sock, Ptr<NetDevice> netdev);
void srcSocketRecv(Ptr<Socket> socket);
void dstSocketRecv(Ptr<Socket> socket);
int
main(int argc, char* argv[])
{
    // Enable the multi-threaded simulation backend (MTP extension).
    MtpInterface::Enable();

    // Allow the user to override any of the defaults and the above
    // DefaultValue::Bind ()s at run-time, via command-line arguments
    CommandLine cmd(__FILE__);
    cmd.Parse(argc, argv);

    // Five nodes: source, destination, two parallel routers and the
    // destination-side router (see the ASCII topology at the top of the file).
    Ptr<Node> nSrc = CreateObject<Node>();
    Ptr<Node> nDst = CreateObject<Node>();
    Ptr<Node> nRtr1 = CreateObject<Node>();
    Ptr<Node> nRtr2 = CreateObject<Node>();
    Ptr<Node> nDstRtr = CreateObject<Node>();
    NodeContainer c = NodeContainer(nSrc, nDst, nRtr1, nRtr2, nDstRtr);

    InternetStackHelper internet;
    internet.Install(c);

    // Point-to-point links
    NodeContainer nSrcnRtr1 = NodeContainer(nSrc, nRtr1);
    NodeContainer nSrcnRtr2 = NodeContainer(nSrc, nRtr2);
    NodeContainer nRtr1nDstRtr = NodeContainer(nRtr1, nDstRtr);
    NodeContainer nRtr2nDstRtr = NodeContainer(nRtr2, nDstRtr);
    NodeContainer nDstRtrnDst = NodeContainer(nDstRtr, nDst);

    // We create the channels first without any IP addressing information
    PointToPointHelper p2p;
    p2p.SetDeviceAttribute("DataRate", StringValue("5Mbps"));
    p2p.SetChannelAttribute("Delay", StringValue("2ms"));
    NetDeviceContainer dSrcdRtr1 = p2p.Install(nSrcnRtr1);
    NetDeviceContainer dSrcdRtr2 = p2p.Install(nSrcnRtr2);
    NetDeviceContainer dRtr1dDstRtr = p2p.Install(nRtr1nDstRtr);
    NetDeviceContainer dRtr2dDstRtr = p2p.Install(nRtr2nDstRtr);
    NetDeviceContainer dDstRtrdDst = p2p.Install(nDstRtrnDst);

    // Source-side devices that sockets are later bound to explicitly.
    Ptr<NetDevice> SrcToRtr1 = dSrcdRtr1.Get(0);
    Ptr<NetDevice> SrcToRtr2 = dSrcdRtr2.Get(0);

    // Later, we add IP addresses.
    Ipv4AddressHelper ipv4;
    ipv4.SetBase("10.1.1.0", "255.255.255.0");
    Ipv4InterfaceContainer iSrciRtr1 = ipv4.Assign(dSrcdRtr1);
    ipv4.SetBase("10.1.2.0", "255.255.255.0");
    Ipv4InterfaceContainer iSrciRtr2 = ipv4.Assign(dSrcdRtr2);
    ipv4.SetBase("10.10.1.0", "255.255.255.0");
    Ipv4InterfaceContainer iRtr1iDstRtr = ipv4.Assign(dRtr1dDstRtr);
    ipv4.SetBase("10.10.2.0", "255.255.255.0");
    Ipv4InterfaceContainer iRtr2iDstRtr = ipv4.Assign(dRtr2dDstRtr);
    ipv4.SetBase("10.20.1.0", "255.255.255.0");
    Ipv4InterfaceContainer iDstRtrDst = ipv4.Assign(dDstRtrdDst);

    Ptr<Ipv4> ipv4Src = nSrc->GetObject<Ipv4>();
    Ptr<Ipv4> ipv4Rtr1 = nRtr1->GetObject<Ipv4>();
    Ptr<Ipv4> ipv4Rtr2 = nRtr2->GetObject<Ipv4>();
    Ptr<Ipv4> ipv4DstRtr = nDstRtr->GetObject<Ipv4>();
    Ptr<Ipv4> ipv4Dst = nDst->GetObject<Ipv4>();

    Ipv4StaticRoutingHelper ipv4RoutingHelper;
    Ptr<Ipv4StaticRouting> staticRoutingSrc = ipv4RoutingHelper.GetStaticRouting(ipv4Src);
    Ptr<Ipv4StaticRouting> staticRoutingRtr1 = ipv4RoutingHelper.GetStaticRouting(ipv4Rtr1);
    Ptr<Ipv4StaticRouting> staticRoutingRtr2 = ipv4RoutingHelper.GetStaticRouting(ipv4Rtr2);
    Ptr<Ipv4StaticRouting> staticRoutingDstRtr = ipv4RoutingHelper.GetStaticRouting(ipv4DstRtr);
    Ptr<Ipv4StaticRouting> staticRoutingDst = ipv4RoutingHelper.GetStaticRouting(ipv4Dst);

    // Create static routes from Src to Dst
    staticRoutingRtr1->AddHostRouteTo(Ipv4Address("10.20.1.2"), Ipv4Address("10.10.1.2"), 2);
    staticRoutingRtr2->AddHostRouteTo(Ipv4Address("10.20.1.2"), Ipv4Address("10.10.2.2"), 2);
    // Two routes to same destination - setting separate metrics.
    // You can switch these to see how traffic gets diverted via different routes
    staticRoutingSrc->AddHostRouteTo(Ipv4Address("10.20.1.2"), Ipv4Address("10.1.1.2"), 1, 5);
    staticRoutingSrc->AddHostRouteTo(Ipv4Address("10.20.1.2"), Ipv4Address("10.1.2.2"), 2, 10);
    // Creating static routes from DST to Source pointing to Rtr1 VIA Rtr2(!)
    staticRoutingDst->AddHostRouteTo(Ipv4Address("10.1.1.1"), Ipv4Address("10.20.1.1"), 1);
    staticRoutingDstRtr->AddHostRouteTo(Ipv4Address("10.1.1.1"), Ipv4Address("10.10.2.1"), 2);
    staticRoutingRtr2->AddHostRouteTo(Ipv4Address("10.1.1.1"), Ipv4Address("10.1.2.1"), 1);
    staticRoutingDst->AddHostRouteTo(Ipv4Address("10.1.2.1"), Ipv4Address("10.20.1.1"), 1);
    staticRoutingDstRtr->AddHostRouteTo(Ipv4Address("10.1.2.1"), Ipv4Address("10.10.2.1"), 2);
    // NOTE(review): next hop equals the destination here — presumably because
    // 10.1.2.1 is directly connected to Rtr2; confirm this is intended.
    staticRoutingRtr2->AddHostRouteTo(Ipv4Address("10.1.2.1"), Ipv4Address("10.1.2.1"), 1);

    // There are no apps that can utilize the Socket Option so doing the work directly..
    // Taken from tcp-large-transfer example
    Ptr<Socket> srcSocket1 =
        Socket::CreateSocket(nSrc, TypeId::LookupByName("ns3::TcpSocketFactory"));
    Ptr<Socket> srcSocket2 =
        Socket::CreateSocket(nSrc, TypeId::LookupByName("ns3::TcpSocketFactory"));
    Ptr<Socket> srcSocket3 =
        Socket::CreateSocket(nSrc, TypeId::LookupByName("ns3::TcpSocketFactory"));
    Ptr<Socket> srcSocket4 =
        Socket::CreateSocket(nSrc, TypeId::LookupByName("ns3::TcpSocketFactory"));

    uint16_t dstport = 12345;
    Ipv4Address dstaddr("10.20.1.2");

    // TCP sink on the destination node; receives all four flows.
    PacketSinkHelper sink("ns3::TcpSocketFactory",
                          InetSocketAddress(Ipv4Address::GetAny(), dstport));
    ApplicationContainer apps = sink.Install(nDst);
    apps.Start(Seconds(0.0));
    apps.Stop(Seconds(10.0));

    AsciiTraceHelper ascii;
    p2p.EnableAsciiAll(ascii.CreateFileStream("socket-bound-tcp-static-routing.tr"));
    p2p.EnablePcapAll("socket-bound-tcp-static-routing");

    LogComponentEnableAll(LOG_PREFIX_TIME);
    LogComponentEnable("SocketBoundTcpRoutingExample", LOG_LEVEL_INFO);

    // First packet as normal (goes via Rtr1)
    Simulator::Schedule(Seconds(0.1), &StartFlow, srcSocket1, dstaddr, dstport);
    // Second via Rtr1 explicitly
    Simulator::Schedule(Seconds(1.0), &BindSock, srcSocket2, SrcToRtr1);
    Simulator::Schedule(Seconds(1.1), &StartFlow, srcSocket2, dstaddr, dstport);
    // Third via Rtr2 explicitly
    Simulator::Schedule(Seconds(2.0), &BindSock, srcSocket3, SrcToRtr2);
    Simulator::Schedule(Seconds(2.1), &StartFlow, srcSocket3, dstaddr, dstport);
    // Fourth again as normal (goes via Rtr1)
    Simulator::Schedule(Seconds(3.0), &BindSock, srcSocket4, Ptr<NetDevice>(nullptr));
    Simulator::Schedule(Seconds(3.1), &StartFlow, srcSocket4, dstaddr, dstport);
    // If you uncomment what's below, it results in ASSERT failing since you can't
    // bind to a socket not existing on a node
    // Simulator::Schedule(Seconds(4.0),&BindSock, srcSocket, dDstRtrdDst.Get(0));

    Simulator::Run();
    Simulator::Destroy();
    return 0;
}
void
BindSock(Ptr<Socket> sock, Ptr<NetDevice> netdev)
{
    // Bind the socket to a specific egress device so its traffic is forced
    // onto that link; a nullptr device removes the binding (back to normal
    // route selection), as exercised by the fourth flow in main().
    sock->BindToNetDevice(netdev);
}
void
StartFlow(Ptr<Socket> localSocket, Ipv4Address servAddress, uint16_t servPort)
{
    // Open a TCP connection to the sink and begin pushing totalTxBytes of
    // data through it via WriteUntilBufferFull().
    NS_LOG_INFO("Starting flow at time " << Simulator::Now().GetSeconds());
    // Reset the shared progress counter — flows run sequentially, one at a
    // time, so a single global counter suffices.
    currentTxBytes = 0;
    localSocket->Bind();
    localSocket->Connect(InetSocketAddress(servAddress, servPort)); // connect

    // tell the tcp implementation to call WriteUntilBufferFull again
    // if we blocked and new tx buffer space becomes available
    localSocket->SetSendCallback(MakeCallback(&WriteUntilBufferFull));
    WriteUntilBufferFull(localSocket, localSocket->GetTxAvailable());
}
void
WriteUntilBufferFull(Ptr<Socket> localSocket, uint32_t txSpace)
{
    // Keep handing data to TCP until either the whole transfer has been
    // queued or the socket's tx buffer is full. In the latter case the send
    // callback registered in StartFlow() re-invokes this function when
    // buffer space frees up. (txSpace is unused; the current space is
    // re-queried on every iteration.)
    while (currentTxBytes < totalTxBytes && localSocket->GetTxAvailable() > 0)
    {
        uint32_t left = totalTxBytes - currentTxBytes;
        // Offset into the circular payload buffer for this chunk.
        uint32_t dataOffset = currentTxBytes % writeSize;
        uint32_t toWrite = writeSize - dataOffset;
        toWrite = std::min(toWrite, left);
        toWrite = std::min(toWrite, localSocket->GetTxAvailable());
        int amountSent = localSocket->Send(&data[dataOffset], toWrite, 0);
        if (amountSent < 0)
        {
            // we will be called again when new tx space becomes available.
            return;
        }
        currentTxBytes += amountSent;
    }
    // Bug fix: only close once the entire transfer has been handed to TCP.
    // The previous unconditional Close() would terminate the flow early if
    // the loop exited because the tx buffer filled up before all
    // totalTxBytes were queued.
    if (currentTxBytes >= totalTxBytes)
    {
        localSocket->Close();
    }
}

View File

@@ -0,0 +1,287 @@
/*
* Copyright (c) 2018-20 NITK Surathkal
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Authors: Aarti Nandagiri <aarti.nandagiri@gmail.com>
* Vivek Jain <jain.vivek.anand@gmail.com>
* Mohit P. Tahiliani <tahiliani@nitk.edu.in>
*/
// This program simulates the following topology:
//
// 1000 Mbps 10Mbps 1000 Mbps
// Sender -------------- R1 -------------- R2 -------------- Receiver
// 5ms 10ms 5ms
//
// The link between R1 and R2 is a bottleneck link with 10 Mbps. All other
// links are 1000 Mbps.
//
// This program runs by default for 100 seconds and creates a new directory
// called 'bbr-results' in the ns-3 root directory. The program creates one
// sub-directory called 'pcap' in 'bbr-results' directory (if pcap generation
// is enabled) and three .dat files.
//
// (1) 'pcap' sub-directory contains six PCAP files:
// * bbr-0-0.pcap for the interface on Sender
// * bbr-1-0.pcap for the interface on Receiver
// * bbr-2-0.pcap for the first interface on R1
// * bbr-2-1.pcap for the second interface on R1
// * bbr-3-0.pcap for the first interface on R2
// * bbr-3-1.pcap for the second interface on R2
// (2) cwnd.dat file contains congestion window trace for the sender node
// (3) throughput.dat file contains sender side throughput trace
// (4) queueSize.dat file contains queue length trace from the bottleneck link
//
// BBR algorithm enters PROBE_RTT phase in every 10 seconds. The congestion
// window is fixed to 4 segments in this phase with a goal to achieve a better
// estimate of minimum RTT (because queue at the bottleneck link tends to drain
// when the congestion window is reduced to 4 segments).
//
// The congestion window and queue occupancy traces output by this program show
// periodic drops every 10 seconds when BBR algorithm is in PROBE_RTT phase.
#include "ns3/applications-module.h"
#include "ns3/core-module.h"
#include "ns3/flow-monitor-module.h"
#include "ns3/internet-module.h"
#include "ns3/mtp-module.h"
#include "ns3/network-module.h"
#include "ns3/point-to-point-module.h"
#include "ns3/traffic-control-module.h"
#include <filesystem>
using namespace ns3;
using namespace ns3::SystemPath;

// Output directory for the trace files; set in main() from the local time.
std::string dir;
// Streams for the throughput and queue-size traces, opened in main().
std::ofstream throughput;
std::ofstream queueSize;
// Tx byte count and timestamp of the previous sample, used by
// TraceThroughput() to compute per-interval throughput.
uint32_t prev = 0;
Time prevTime = Seconds(0);
// Calculate throughput
static void
TraceThroughput(Ptr<FlowMonitor> monitor)
{
    // Log the first monitored flow's tx throughput (Mbit/s: bits divided by
    // microseconds) over the interval since the last sample, then re-arm
    // the 0.2 s sampling timer.
    FlowMonitor::FlowStatsContainer stats = monitor->GetFlowStats();
    if (!stats.empty())
    {
        const FlowMonitor::FlowStats& flow = stats.begin()->second;
        Time now = Now();
        double intervalUs = (now - prevTime).ToDouble(Time::US);
        throughput << now << " " << 8 * (flow.txBytes - prev) / intervalUs << std::endl;
        prevTime = now;
        prev = flow.txBytes;
    }
    Simulator::Schedule(Seconds(0.2), &TraceThroughput, monitor);
}
// Check the queue size
void
CheckQueueSize(Ptr<QueueDisc> qd)
{
    // Sample the queue disc's instantaneous backlog (in packets, per the
    // "100p" MaxSize configured in main), log it, and re-arm the sampler.
    uint32_t backlog = qd->GetCurrentSize().GetValue();
    queueSize << Simulator::Now().GetSeconds() << " " << backlog << std::endl;
    Simulator::Schedule(Seconds(0.2), &CheckQueueSize, qd);
}
// Trace congestion window
static void
CwndTracer(Ptr<OutputStreamWrapper> stream, uint32_t oldval, uint32_t newval)
{
    // Log the new congestion window converted to segments (the segment size
    // is fixed at 1448 bytes via ns3::TcpSocket::SegmentSize in main).
    double cwndInSegments = newval / 1448.0;
    *stream->GetStream() << Simulator::Now().GetSeconds() << " " << cwndInSegments << std::endl;
}
void
TraceCwnd(uint32_t nodeId, uint32_t socketId)
{
AsciiTraceHelper ascii;
Ptr<OutputStreamWrapper> stream = ascii.CreateFileStream(dir + "/cwnd.dat");
Config::ConnectWithoutContext("/NodeList/" + std::to_string(nodeId) +
"/$ns3::TcpL4Protocol/SocketList/" +
std::to_string(socketId) + "/CongestionWindow",
MakeBoundCallback(&CwndTracer, stream));
}
int
main(int argc, char* argv[])
{
    // Enable the multi-threaded simulation backend (MTP extension).
    MtpInterface::Enable();

    // Naming the output directory using local system time
    time_t rawtime;
    struct tm* timeinfo;
    char buffer[80];
    time(&rawtime);
    timeinfo = localtime(&rawtime);
    strftime(buffer, sizeof(buffer), "%d-%m-%Y-%I-%M-%S", timeinfo);
    std::string currentTime(buffer);

    // Defaults; several are exposed as command-line options below.
    std::string tcpTypeId = "TcpBbr";
    std::string queueDisc = "FifoQueueDisc";
    uint32_t delAckCount = 2;
    bool bql = true;
    bool enablePcap = false;
    Time stopTime = Seconds(100);

    CommandLine cmd(__FILE__);
    cmd.AddValue("tcpTypeId", "Transport protocol to use: TcpNewReno, TcpBbr", tcpTypeId);
    cmd.AddValue("delAckCount", "Delayed ACK count", delAckCount);
    cmd.AddValue("enablePcap", "Enable/Disable pcap file generation", enablePcap);
    cmd.AddValue("stopTime",
                 "Stop time for applications / simulation time will be stopTime + 1",
                 stopTime);
    cmd.Parse(argc, argv);

    queueDisc = std::string("ns3::") + queueDisc;

    // Attribute defaults must be set before the corresponding objects are
    // created further below.
    Config::SetDefault("ns3::TcpL4Protocol::SocketType", StringValue("ns3::" + tcpTypeId));

    // The maximum send buffer size is set to 4194304 bytes (4MB) and the
    // maximum receive buffer size is set to 6291456 bytes (6MB) in the Linux
    // kernel. The same buffer sizes are used as default in this example.
    Config::SetDefault("ns3::TcpSocket::SndBufSize", UintegerValue(4194304));
    Config::SetDefault("ns3::TcpSocket::RcvBufSize", UintegerValue(6291456));
    Config::SetDefault("ns3::TcpSocket::InitialCwnd", UintegerValue(10));
    Config::SetDefault("ns3::TcpSocket::DelAckCount", UintegerValue(delAckCount));
    Config::SetDefault("ns3::TcpSocket::SegmentSize", UintegerValue(1448));
    Config::SetDefault("ns3::DropTailQueue<Packet>::MaxSize", QueueSizeValue(QueueSize("1p")));
    Config::SetDefault(queueDisc + "::MaxSize", QueueSizeValue(QueueSize("100p")));

    // Topology: sender -- R1 -- R2 -- receiver (see header comment).
    NodeContainer sender;
    NodeContainer receiver;
    NodeContainer routers;
    sender.Create(1);
    receiver.Create(1);
    routers.Create(2);

    // Create the point-to-point link helpers
    PointToPointHelper bottleneckLink;
    bottleneckLink.SetDeviceAttribute("DataRate", StringValue("10Mbps"));
    bottleneckLink.SetChannelAttribute("Delay", StringValue("10ms"));

    PointToPointHelper edgeLink;
    edgeLink.SetDeviceAttribute("DataRate", StringValue("1000Mbps"));
    edgeLink.SetChannelAttribute("Delay", StringValue("5ms"));

    // Create NetDevice containers
    NetDeviceContainer senderEdge = edgeLink.Install(sender.Get(0), routers.Get(0));
    NetDeviceContainer r1r2 = bottleneckLink.Install(routers.Get(0), routers.Get(1));
    NetDeviceContainer receiverEdge = edgeLink.Install(routers.Get(1), receiver.Get(0));

    // Install Stack
    InternetStackHelper internet;
    internet.Install(sender);
    internet.Install(receiver);
    internet.Install(routers);

    // Configure the root queue discipline
    TrafficControlHelper tch;
    tch.SetRootQueueDisc(queueDisc);
    if (bql)
    {
        // Byte Queue Limits on the device queues.
        tch.SetQueueLimits("ns3::DynamicQueueLimits", "HoldTime", StringValue("1000ms"));
    }
    tch.Install(senderEdge);
    tch.Install(receiverEdge);

    // Assign IP addresses
    Ipv4AddressHelper ipv4;
    ipv4.SetBase("10.0.0.0", "255.255.255.0");
    Ipv4InterfaceContainer i1i2 = ipv4.Assign(r1r2);
    ipv4.NewNetwork();
    Ipv4InterfaceContainer is1 = ipv4.Assign(senderEdge);
    ipv4.NewNetwork();
    Ipv4InterfaceContainer ir1 = ipv4.Assign(receiverEdge);

    // Populate routing tables
    Ipv4GlobalRoutingHelper::PopulateRoutingTables();

    // Select sender side port
    uint16_t port = 50001;

    // Install application on the sender
    BulkSendHelper source("ns3::TcpSocketFactory", InetSocketAddress(ir1.GetAddress(1), port));
    source.SetAttribute("MaxBytes", UintegerValue(0));
    ApplicationContainer sourceApps = source.Install(sender.Get(0));
    sourceApps.Start(Seconds(0.1));
    // Hook trace source after application starts
    Simulator::Schedule(Seconds(0.1) + MilliSeconds(1), &TraceCwnd, 0, 0);
    sourceApps.Stop(stopTime);

    // Install application on the receiver
    PacketSinkHelper sink("ns3::TcpSocketFactory", InetSocketAddress(Ipv4Address::GetAny(), port));
    ApplicationContainer sinkApps = sink.Install(receiver.Get(0));
    sinkApps.Start(Seconds(0.0));
    sinkApps.Stop(stopTime);

    // Create a new directory to store the output of the program
    dir = "bbr-results/" + currentTime + "/";
    MakeDirectories(dir);

    // The plotting scripts are provided in the following repository, if needed:
    // https://github.com/mohittahiliani/BBR-Validation/
    //
    // Download 'PlotScripts' directory (which is inside ns-3 scripts directory)
    // from the link given above and place it in the ns-3 root directory.
    // Uncomment the following three lines to copy plot scripts for
    // Congestion Window, sender side throughput and queue occupancy on the
    // bottleneck link into the output directory.
    //
    // std::filesystem::copy("PlotScripts/gnuplotScriptCwnd", dir);
    // std::filesystem::copy("PlotScripts/gnuplotScriptThroughput", dir);
    // std::filesystem::copy("PlotScripts/gnuplotScriptQueueSize", dir);

    // Trace the queue occupancy on the second interface of R1.
    // The queue disc is reinstalled so the traced QueueDisc object is the
    // one actually in use.
    tch.Uninstall(routers.Get(0)->GetDevice(1));
    QueueDiscContainer qd;
    qd = tch.Install(routers.Get(0)->GetDevice(1));
    Simulator::ScheduleNow(&CheckQueueSize, qd.Get(0));

    // Generate PCAP traces if it is enabled
    if (enablePcap)
    {
        MakeDirectories(dir + "pcap/");
        bottleneckLink.EnablePcapAll(dir + "/pcap/bbr", true);
    }

    // Open files for writing throughput traces and queue size
    throughput.open(dir + "/throughput.dat", std::ios::out);
    queueSize.open(dir + "/queueSize.dat", std::ios::out);
    NS_ASSERT_MSG(throughput.is_open(), "Throughput file was not opened correctly");
    NS_ASSERT_MSG(queueSize.is_open(), "Queue size file was not opened correctly");

    // Check for dropped packets using Flow Monitor
    FlowMonitorHelper flowmon;
    Ptr<FlowMonitor> monitor = flowmon.InstallAll();
    Simulator::Schedule(Seconds(0 + 0.000001), &TraceThroughput, monitor);

    Simulator::Stop(stopTime + TimeStep(1));
    Simulator::Run();
    Simulator::Destroy();

    throughput.close();
    queueSize.close();
    return 0;
}

View File

@@ -0,0 +1,353 @@
/*
* Copyright (c) 2020 NITK Surathkal
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Authors: Vivek Jain <jain.vivek.anand@gmail.com>
* Deepak Kumaraswamy <deepakkavoor99@gmail.com>
*/
// The following network topology is used in this example, and is taken from
// Figure 2 of https://homes.cs.washington.edu/~tom/pubs/pacing.pdf
//
// n0 n4
// | |
// |(4x Mbps, 5ms) |(4x Mbps, 5ms)
// | |
// | |
// | (x Mbps, 40ms) |
// n2 ------------------------ n3
// | |
// | |
// |(4x Mbps, 5ms) |(4x Mbps, 5ms)
// | |
// n1 n5
//
//
// This example illustrates how TCP pacing can be enabled on a socket.
// Two long-running TCP flows are instantiated at nodes n0 and n1 to
// send data over a bottleneck link (n2->n3) to sink nodes n4 and n5.
// At the end of the simulation, the IP-level flow monitor tool will
// print out summary statistics of the flows. The flow monitor detects
// four flows, but that is because the flow records are unidirectional;
// the latter two flows reported are actually ack streams.
//
// At the end of this simulation, data files are also generated
// that track changes in Congestion Window, Slow Start threshold and
// TCP pacing rate for the first flow (n0). Additionally, a data file
// that contains information about packet transmission and reception times
// (collected through TxTrace and RxTrace respectively) is also produced.
// This transmission and reception (ack) trace is the most direct way to
// observe the effects of pacing. All the above information is traced
// just for the single node n0.
//
// A small amount of randomness is introduced to the program to control
// the start time of the flows.
//
// This example has pacing enabled by default, which means that TCP
// does not send packets back-to-back, but instead paces them out over
// an RTT. The size of initial congestion window is set to 10, and pacing
// of the initial window is enabled. The available command-line options and
// their default values can be observed in the usual way by running the
// program to print the help info; i.e.: ./ns3 run 'tcp-pacing --PrintHelp'
//
// When pacing is disabled, TCP sends eligible packets back-to-back. The
// differences in behaviour when pacing is disabled can be observed from the
// packet transmission data file. For instance, one can observe that
// packets in the initial window are sent one after the other simultaneously,
// without any inter-packet gaps. Another instance is when n0 receives a
// packet in the form of an acknowledgement, and sends out data packets without
// pacing them.
//
// Although this example serves as a useful demonstration of how pacing could
// be enabled/disabled in ns-3 TCP congestion controls, we could not observe
// significant improvements in throughput for the above topology when pacing
// was enabled. In future, one could try and incorporate models such as
// TCP Prague and ACK-filtering, which may show a stronger performance
// impact for TCP pacing.
#include "ns3/applications-module.h"
#include "ns3/core-module.h"
#include "ns3/flow-monitor-module.h"
#include "ns3/internet-module.h"
#include "ns3/ipv4-global-routing-helper.h"
#include "ns3/mtp-module.h"
#include "ns3/network-module.h"
#include "ns3/packet-sink.h"
#include "ns3/point-to-point-module.h"
#include "ns3/traffic-control-module.h"
#include <fstream>
#include <iomanip>
#include <iostream>
#include <string>
using namespace ns3;

NS_LOG_COMPONENT_DEFINE("TcpPacingExample");

// Output streams for the cwnd, pacing-rate, ssthresh and packet tx/rx
// traces; opened in main() and written to by the tracer callbacks below.
std::ofstream cwndStream;
std::ofstream pacingRateStream;
std::ofstream ssThreshStream;
std::ofstream packetTraceStream;
static void
CwndTracer(uint32_t oldval, uint32_t newval)
{
    // Record the new congestion window (bytes) against simulation time.
    double now = Simulator::Now().GetSeconds();
    cwndStream << std::fixed << std::setprecision(6) << now << std::setw(12) << newval
               << std::endl;
}
static void
PacingRateTracer(DataRate oldval, DataRate newval)
{
    // Record the new pacing rate (Mb/s) against simulation time.
    double now = Simulator::Now().GetSeconds();
    double rateMbps = newval.GetBitRate() / 1e6;
    pacingRateStream << std::fixed << std::setprecision(6) << now << std::setw(12) << rateMbps
                     << std::endl;
}
static void
SsThreshTracer(uint32_t oldval, uint32_t newval)
{
    // Record the new slow-start threshold (bytes) against simulation time.
    double now = Simulator::Now().GetSeconds();
    ssThreshStream << std::fixed << std::setprecision(6) << now << std::setw(12) << newval
                   << std::endl;
}
static void
TxTracer(Ptr<const Packet> p, Ptr<Ipv4> ipv4, uint32_t interface)
{
    // Ipv4L3Protocol Tx hook: log the transmission time and packet size.
    packetTraceStream << std::fixed << std::setprecision(6) << Simulator::Now().GetSeconds()
                      << " tx " << p->GetSize() << std::endl;
}
static void
RxTracer(Ptr<const Packet> p, Ptr<Ipv4> ipv4, uint32_t interface)
{
    // Ipv4L3Protocol Rx hook: log the reception time and packet size.
    packetTraceStream << std::fixed << std::setprecision(6) << Simulator::Now().GetSeconds()
                      << " rx " << p->GetSize() << std::endl;
}
void
ConnectSocketTraces()
{
    // Attach all tracers to node 0 (the first sender). The per-socket trace
    // sources only exist once the TCP socket has been created, which is why
    // main() schedules this call shortly after the sources start rather
    // than invoking it directly.
    Config::ConnectWithoutContext("/NodeList/0/$ns3::TcpL4Protocol/SocketList/0/CongestionWindow",
                                  MakeCallback(&CwndTracer));
    Config::ConnectWithoutContext("/NodeList/0/$ns3::TcpL4Protocol/SocketList/0/PacingRate",
                                  MakeCallback(&PacingRateTracer));
    Config::ConnectWithoutContext("/NodeList/0/$ns3::TcpL4Protocol/SocketList/0/SlowStartThreshold",
                                  MakeCallback(&SsThreshTracer));
    Config::ConnectWithoutContext("/NodeList/0/$ns3::Ipv4L3Protocol/Tx", MakeCallback(&TxTracer));
    Config::ConnectWithoutContext("/NodeList/0/$ns3::Ipv4L3Protocol/Rx", MakeCallback(&RxTracer));
}
int
main(int argc, char* argv[])
{
MtpInterface::Enable();
bool tracing = false;
uint32_t maxBytes = 0; // value of zero corresponds to unlimited send
std::string transportProtocol = "ns3::TcpCubic";
Time simulationEndTime = Seconds(5);
DataRate bottleneckBandwidth("10Mbps"); // value of x as shown in the above network topology
Time bottleneckDelay = MilliSeconds(40);
DataRate regLinkBandwidth(4 * bottleneckBandwidth.GetBitRate());
Time regLinkDelay = MilliSeconds(5);
DataRate maxPacingRate("4Gbps");
bool isPacingEnabled = true;
bool useEcn = true;
bool useQueueDisc = true;
bool shouldPaceInitialWindow = true;
// Configure defaults that are not based on explicit command-line arguments
// They may be overridden by general attribute configuration of command line
Config::SetDefault("ns3::TcpL4Protocol::SocketType",
TypeIdValue(TypeId::LookupByName(transportProtocol)));
Config::SetDefault("ns3::TcpSocket::InitialCwnd", UintegerValue(10));
CommandLine cmd(__FILE__);
cmd.AddValue("tracing", "Flag to enable/disable Ascii and Pcap tracing", tracing);
cmd.AddValue("maxBytes", "Total number of bytes for application to send", maxBytes);
cmd.AddValue("isPacingEnabled", "Flag to enable/disable pacing in TCP", isPacingEnabled);
cmd.AddValue("maxPacingRate", "Max Pacing Rate", maxPacingRate);
cmd.AddValue("useEcn", "Flag to enable/disable ECN", useEcn);
cmd.AddValue("useQueueDisc", "Flag to enable/disable queue disc on bottleneck", useQueueDisc);
cmd.AddValue("shouldPaceInitialWindow",
"Flag to enable/disable pacing of TCP initial window",
shouldPaceInitialWindow);
cmd.AddValue("simulationEndTime", "Simulation end time", simulationEndTime);
cmd.Parse(argc, argv);
// Configure defaults based on command-line arguments
Config::SetDefault("ns3::TcpSocketState::EnablePacing", BooleanValue(isPacingEnabled));
Config::SetDefault("ns3::TcpSocketState::PaceInitialWindow",
BooleanValue(shouldPaceInitialWindow));
Config::SetDefault("ns3::TcpSocketBase::UseEcn",
(useEcn ? EnumValue(TcpSocketState::On) : EnumValue(TcpSocketState::Off)));
Config::SetDefault("ns3::TcpSocketState::MaxPacingRate", DataRateValue(maxPacingRate));
NS_LOG_INFO("Create nodes.");
NodeContainer c;
c.Create(6);
NS_LOG_INFO("Create channels.");
NodeContainer n0n2 = NodeContainer(c.Get(0), c.Get(2));
NodeContainer n1n2 = NodeContainer(c.Get(1), c.Get(2));
NodeContainer n2n3 = NodeContainer(c.Get(2), c.Get(3));
NodeContainer n3n4 = NodeContainer(c.Get(3), c.Get(4));
NodeContainer n3n5 = NodeContainer(c.Get(3), c.Get(5));
// Define Node link properties
PointToPointHelper regLink;
regLink.SetDeviceAttribute("DataRate", DataRateValue(regLinkBandwidth));
regLink.SetChannelAttribute("Delay", TimeValue(regLinkDelay));
NetDeviceContainer d0d2 = regLink.Install(n0n2);
NetDeviceContainer d1d2 = regLink.Install(n1n2);
NetDeviceContainer d3d4 = regLink.Install(n3n4);
NetDeviceContainer d3d5 = regLink.Install(n3n5);
PointToPointHelper bottleNeckLink;
bottleNeckLink.SetDeviceAttribute("DataRate", DataRateValue(bottleneckBandwidth));
bottleNeckLink.SetChannelAttribute("Delay", TimeValue(bottleneckDelay));
NetDeviceContainer d2d3 = bottleNeckLink.Install(n2n3);
// Install Internet stack
InternetStackHelper stack;
stack.Install(c);
// Install traffic control
if (useQueueDisc)
{
TrafficControlHelper tchBottleneck;
tchBottleneck.SetRootQueueDisc("ns3::FqCoDelQueueDisc");
tchBottleneck.Install(d2d3);
}
NS_LOG_INFO("Assign IP Addresses.");
Ipv4AddressHelper ipv4;
ipv4.SetBase("10.1.1.0", "255.255.255.0");
Ipv4InterfaceContainer regLinkInterface0 = ipv4.Assign(d0d2);
ipv4.SetBase("10.1.2.0", "255.255.255.0");
Ipv4InterfaceContainer regLinkInterface1 = ipv4.Assign(d1d2);
ipv4.SetBase("10.1.3.0", "255.255.255.0");
Ipv4InterfaceContainer bottleneckInterface = ipv4.Assign(d2d3);
ipv4.SetBase("10.1.4.0", "255.255.255.0");
Ipv4InterfaceContainer regLinkInterface4 = ipv4.Assign(d3d4);
ipv4.SetBase("10.1.5.0", "255.255.255.0");
Ipv4InterfaceContainer regLinkInterface5 = ipv4.Assign(d3d5);
Ipv4GlobalRoutingHelper::PopulateRoutingTables();
NS_LOG_INFO("Create Applications.");
// Two Sink Applications at n4 and n5
uint16_t sinkPort = 8080;
Address sinkAddress4(
InetSocketAddress(regLinkInterface4.GetAddress(1), sinkPort)); // interface of n4
Address sinkAddress5(
InetSocketAddress(regLinkInterface5.GetAddress(1), sinkPort)); // interface of n5
PacketSinkHelper packetSinkHelper("ns3::TcpSocketFactory",
InetSocketAddress(Ipv4Address::GetAny(), sinkPort));
ApplicationContainer sinkApps4 = packetSinkHelper.Install(c.Get(4)); // n4 as sink
ApplicationContainer sinkApps5 = packetSinkHelper.Install(c.Get(5)); // n5 as sink
sinkApps4.Start(Seconds(0));
sinkApps4.Stop(simulationEndTime);
sinkApps5.Start(Seconds(0));
sinkApps5.Stop(simulationEndTime);
// Randomize the start time between 0 and 1ms
Ptr<UniformRandomVariable> uniformRv = CreateObject<UniformRandomVariable>();
uniformRv->SetStream(0);
// Two Source Applications at n0 and n1
BulkSendHelper source0("ns3::TcpSocketFactory", sinkAddress4);
BulkSendHelper source1("ns3::TcpSocketFactory", sinkAddress5);
// Set the amount of data to send in bytes. Zero is unlimited.
source0.SetAttribute("MaxBytes", UintegerValue(maxBytes));
source1.SetAttribute("MaxBytes", UintegerValue(maxBytes));
ApplicationContainer sourceApps0 = source0.Install(c.Get(0));
ApplicationContainer sourceApps1 = source1.Install(c.Get(1));
sourceApps0.Start(MicroSeconds(uniformRv->GetInteger(0, 1000)));
sourceApps0.Stop(simulationEndTime);
sourceApps1.Start(MicroSeconds(uniformRv->GetInteger(0, 1000)));
sourceApps1.Stop(simulationEndTime);
if (tracing)
{
AsciiTraceHelper ascii;
regLink.EnableAsciiAll(ascii.CreateFileStream("tcp-dynamic-pacing.tr"));
regLink.EnablePcapAll("tcp-dynamic-pacing", false);
}
cwndStream.open("tcp-dynamic-pacing-cwnd.dat", std::ios::out);
cwndStream << "#Time(s) Congestion Window (B)" << std::endl;
pacingRateStream.open("tcp-dynamic-pacing-pacing-rate.dat", std::ios::out);
pacingRateStream << "#Time(s) Pacing Rate (Mb/s)" << std::endl;
ssThreshStream.open("tcp-dynamic-pacing-ssthresh.dat", std::ios::out);
ssThreshStream << "#Time(s) Slow Start threshold (B)" << std::endl;
packetTraceStream.open("tcp-dynamic-pacing-packet-trace.dat", std::ios::out);
packetTraceStream << "#Time(s) tx/rx size (B)" << std::endl;
Simulator::Schedule(MicroSeconds(1001), &ConnectSocketTraces);
FlowMonitorHelper flowmon;
Ptr<FlowMonitor> monitor = flowmon.InstallAll();
NS_LOG_INFO("Run Simulation.");
Simulator::Stop(simulationEndTime);
Simulator::Run();
monitor->CheckForLostPackets();
Ptr<Ipv4FlowClassifier> classifier = DynamicCast<Ipv4FlowClassifier>(flowmon.GetClassifier());
FlowMonitor::FlowStatsContainer stats = monitor->GetFlowStats();
for (auto i = stats.begin(); i != stats.end(); ++i)
{
Ipv4FlowClassifier::FiveTuple t = classifier->FindFlow(i->first);
std::cout << "Flow " << i->first << " (" << t.sourceAddress << " -> "
<< t.destinationAddress << ")\n";
std::cout << " Tx Packets: " << i->second.txPackets << "\n";
std::cout << " Tx Bytes: " << i->second.txBytes << "\n";
std::cout << " TxOffered: "
<< i->second.txBytes * 8.0 / simulationEndTime.GetSeconds() / 1000 / 1000
<< " Mbps\n";
std::cout << " Rx Packets: " << i->second.rxPackets << "\n";
std::cout << " Rx Bytes: " << i->second.rxBytes << "\n";
std::cout << " Throughput: "
<< i->second.rxBytes * 8.0 / simulationEndTime.GetSeconds() / 1000 / 1000
<< " Mbps\n";
}
cwndStream.close();
pacingRateStream.close();
ssThreshStream.close();
Simulator::Destroy();
return 0;
}

View File

@@ -0,0 +1,163 @@
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
// Default Network topology, 9 nodes in a star
/*
          n2 n3 n4
           \ | /
            \|/
       n1---n0---n5
            /|\
           / | \
          n8 n7 n6
*/
// - CBR Traffic goes from the star "arms" to the "hub"
// - Tracing of queues and packet receptions to file
// "tcp-star-server.tr"
// - pcap traces also generated in the following files
// "tcp-star-server-$n-$i.pcap" where n and i represent node and interface
// numbers respectively
// Usage examples for things you might want to tweak:
// ./ns3 run "tcp-star-server"
// ./ns3 run "tcp-star-server --nNodes=25"
// ./ns3 run "tcp-star-server --ns3::OnOffApplication::DataRate=10000"
// ./ns3 run "tcp-star-server --ns3::OnOffApplication::PacketSize=500"
// See the ns-3 tutorial for more info on the command line:
// https://www.nsnam.org/docs/tutorial/html/index.html
#include "ns3/applications-module.h"
#include "ns3/core-module.h"
#include "ns3/internet-module.h"
#include "ns3/ipv4-global-routing-helper.h"
#include "ns3/mtp-module.h"
#include "ns3/network-module.h"
#include "ns3/point-to-point-module.h"
#include <cassert>
#include <fstream>
#include <iostream>
#include <string>
using namespace ns3;
NS_LOG_COMPONENT_DEFINE("TcpServer");
int
main(int argc, char* argv[])
{
MtpInterface::Enable();
// Users may find it convenient to turn on explicit debugging
// for selected modules; the below lines suggest how to do this
// LogComponentEnable ("TcpServer", LOG_LEVEL_INFO);
// LogComponentEnable ("TcpL4Protocol", LOG_LEVEL_ALL);
// LogComponentEnable ("TcpSocketImpl", LOG_LEVEL_ALL);
// LogComponentEnable ("PacketSink", LOG_LEVEL_ALL);
// Set up some default values for the simulation.
Config::SetDefault("ns3::OnOffApplication::PacketSize", UintegerValue(250));
Config::SetDefault("ns3::OnOffApplication::DataRate", StringValue("5kb/s"));
uint32_t N = 9; // number of nodes in the star
// Allow the user to override any of the defaults and the above
// Config::SetDefault()s at run-time, via command-line arguments
CommandLine cmd(__FILE__);
cmd.AddValue("nNodes", "Number of nodes to place in the star", N);
cmd.Parse(argc, argv);
// Here, we will create N nodes in a star.
NS_LOG_INFO("Create nodes.");
NodeContainer serverNode;
NodeContainer clientNodes;
serverNode.Create(1);
clientNodes.Create(N - 1);
NodeContainer allNodes = NodeContainer(serverNode, clientNodes);
// Install network stacks on the nodes
InternetStackHelper internet;
internet.Install(allNodes);
// Collect an adjacency list of nodes for the p2p topology
std::vector<NodeContainer> nodeAdjacencyList(N - 1);
for (uint32_t i = 0; i < nodeAdjacencyList.size(); ++i)
{
nodeAdjacencyList[i] = NodeContainer(serverNode, clientNodes.Get(i));
}
// We create the channels first without any IP addressing information
NS_LOG_INFO("Create channels.");
PointToPointHelper p2p;
p2p.SetDeviceAttribute("DataRate", StringValue("5Mbps"));
p2p.SetChannelAttribute("Delay", StringValue("2ms"));
std::vector<NetDeviceContainer> deviceAdjacencyList(N - 1);
for (uint32_t i = 0; i < deviceAdjacencyList.size(); ++i)
{
deviceAdjacencyList[i] = p2p.Install(nodeAdjacencyList[i]);
}
// Later, we add IP addresses.
NS_LOG_INFO("Assign IP Addresses.");
Ipv4AddressHelper ipv4;
std::vector<Ipv4InterfaceContainer> interfaceAdjacencyList(N - 1);
for (uint32_t i = 0; i < interfaceAdjacencyList.size(); ++i)
{
std::ostringstream subnet;
subnet << "10.1." << i + 1 << ".0";
ipv4.SetBase(subnet.str().c_str(), "255.255.255.0");
interfaceAdjacencyList[i] = ipv4.Assign(deviceAdjacencyList[i]);
}
// Turn on global static routing
Ipv4GlobalRoutingHelper::PopulateRoutingTables();
// Create a packet sink on the star "hub" to receive these packets
uint16_t port = 50000;
Address sinkLocalAddress(InetSocketAddress(Ipv4Address::GetAny(), port));
PacketSinkHelper sinkHelper("ns3::TcpSocketFactory", sinkLocalAddress);
ApplicationContainer sinkApp = sinkHelper.Install(serverNode);
sinkApp.Start(Seconds(1.0));
sinkApp.Stop(Seconds(10.0));
// Create the OnOff applications to send TCP to the server
OnOffHelper clientHelper("ns3::TcpSocketFactory", Address());
clientHelper.SetAttribute("OnTime", StringValue("ns3::ConstantRandomVariable[Constant=1]"));
clientHelper.SetAttribute("OffTime", StringValue("ns3::ConstantRandomVariable[Constant=0]"));
// normally wouldn't need a loop here but the server IP address is different
// on each p2p subnet
ApplicationContainer clientApps;
for (uint32_t i = 0; i < clientNodes.GetN(); ++i)
{
AddressValue remoteAddress(
InetSocketAddress(interfaceAdjacencyList[i].GetAddress(0), port));
clientHelper.SetAttribute("Remote", remoteAddress);
clientApps.Add(clientHelper.Install(clientNodes.Get(i)));
}
clientApps.Start(Seconds(1.0));
clientApps.Stop(Seconds(10.0));
// configure tracing
AsciiTraceHelper ascii;
p2p.EnableAsciiAll(ascii.CreateFileStream("tcp-star-server.tr"));
p2p.EnablePcapAll("tcp-star-server");
NS_LOG_INFO("Run Simulation.");
Simulator::Run();
Simulator::Destroy();
NS_LOG_INFO("Done.");
return 0;
}

File diff suppressed because it is too large Load Diff

2
ns3
View File

@@ -215,6 +215,7 @@ def parse_args(argv):
("logs", "the logs regardless of the compile mode"),
("monolib", "a single shared library with all ns-3 modules"),
("mpi", "the MPI support for distributed simulation"),
("mtp", "Multithreading support for high speed parallel simulation"),
(
"ninja-tracing",
"the conversion of the Ninja generator log file into about://tracing format",
@@ -937,6 +938,7 @@ def configure_cmake(
("LOG", "logs"),
("MONOLIB", "monolib"),
("MPI", "mpi"),
("MTP", "mtp"),
("NINJA_TRACING", "ninja_tracing"),
("PRECOMPILE_HEADERS", "precompiled_headers"),
("PYTHON_BINDINGS", "python_bindings"),

View File

@@ -210,6 +210,7 @@ set(header_files
model/ascii-file.h
model/ascii-test.h
model/assert.h
model/atomic-counter.h
model/attribute-accessor-helper.h
model/attribute-construction-list.h
model/attribute-container.h

View File

@@ -0,0 +1,102 @@
/*
* Copyright (c) 2023 State Key Laboratory for Novel Software Technology
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Songyuan Bai <i@f5soft.site>
*/
#ifndef ATOMIC_COUNTER_H
#define ATOMIC_COUNTER_H
#include <atomic>
namespace ns3
{
/**
* @brief
* The implementation of the atomic counter used for reference counting.
*
* It overrides operators of existing atomic variables with a more relaxed
* memory order to improve reference counting performance.
*/
class AtomicCounter
{
  public:
    /** Construct a counter initialized to zero. */
    inline AtomicCounter()
    {
        m_count.store(0, std::memory_order_release);
    }

    /**
     * @brief Construct a new atomic counter object.
     *
     * @param count The initialization count number
     */
    inline AtomicCounter(uint32_t count)
    {
        m_count.store(count, std::memory_order_release);
    }

    /**
     * @brief Read the counter value (acquire load).
     *
     * The acquire ordering makes writes published by a release store or
     * release decrement visible to the reading thread.
     *
     * @return The counter value.
     */
    inline operator uint32_t() const
    {
        return m_count.load(std::memory_order_acquire);
    }

    /**
     * @brief Set the counter value (release store).
     *
     * @param count The counter value to be set
     * @return The counter value to be set
     */
    inline uint32_t operator=(const uint32_t count)
    {
        m_count.store(count, std::memory_order_release);
        return count;
    }

    /**
     * @brief Post-increment with relaxed ordering.
     *
     * Taking an additional reference needs no inter-thread synchronization
     * beyond atomicity of the add itself, so relaxed is sufficient.
     *
     * @return The old counter value
     */
    inline uint32_t operator++(int)
    {
        return m_count.fetch_add(1, std::memory_order_relaxed);
    }

    /**
     * @brief Post-decrement with release ordering.
     *
     * Release ensures writes made while holding the reference are published
     * before the count can be observed to reach zero.
     * NOTE(review): the thread that sees the count drop to zero and destroys
     * the object is expected to pair this with an acquire fence or acquire
     * load before deletion -- confirm at the call sites.
     *
     * @return The old counter value
     */
    inline uint32_t operator--(int)
    {
        return m_count.fetch_sub(1, std::memory_order_release);
    }

  private:
    std::atomic<uint32_t> m_count; //!< The wrapped atomic counter value
};
} // namespace ns3
#endif /* ATOMIC_COUNTER_H */

View File

@@ -264,25 +264,41 @@ Hasher& GetStaticHash();
/**
 * Compute a 32-bit hash of a buffer.
 *
 * In the NS3_MTP (multithreaded) build a fresh Hasher is constructed per
 * call instead of reusing the shared static hasher -- presumably to avoid
 * concurrent access to its internal state; confirm against the MTP design.
 */
inline uint32_t
Hash32(const char* buffer, const std::size_t size)
{
#ifdef NS3_MTP
    // per-call hasher: no shared state across threads
    return Hasher().GetHash32(buffer, size);
#else
    // single-threaded build: reuse the cached static hasher
    return GetStaticHash().GetHash32(buffer, size);
#endif
}
/**
 * Compute a 64-bit hash of a buffer.
 *
 * Mirrors Hash32(buffer, size): the NS3_MTP build uses a per-call Hasher
 * rather than the shared static one.
 */
inline uint64_t
Hash64(const char* buffer, const std::size_t size)
{
#ifdef NS3_MTP
    // per-call hasher: no shared state across threads
    return Hasher().GetHash64(buffer, size);
#else
    // single-threaded build: reuse the cached static hasher
    return GetStaticHash().GetHash64(buffer, size);
#endif
}
/**
 * Compute a 32-bit hash of a string.
 *
 * Same NS3_MTP split as the buffer overload: per-call Hasher in the
 * multithreaded build, cached static hasher otherwise.
 */
inline uint32_t
Hash32(const std::string s)
{
#ifdef NS3_MTP
    // per-call hasher: no shared state across threads
    return Hasher().GetHash32(s);
#else
    // single-threaded build: reuse the cached static hasher
    return GetStaticHash().GetHash32(s);
#endif
}
/**
 * Compute a 64-bit hash of a string.
 *
 * Same NS3_MTP split as the buffer overload: per-call Hasher in the
 * multithreaded build, cached static hasher otherwise.
 */
inline uint64_t
Hash64(const std::string s)
{
#ifdef NS3_MTP
    // per-call hasher: no shared state across threads
    return Hasher().GetHash64(s);
#else
    // single-threaded build: reuse the cached static hasher
    return GetStaticHash().GetHash64(s);
#endif
}
} // namespace ns3

View File

@@ -168,6 +168,7 @@ Object::DoGetObject(TypeId tid) const
}
if (cur == tid)
{
#ifndef NS3_MTP
// This is an attempt to 'cache' the result of this lookup.
// the idea is that if we perform a lookup for a TypeId on this object,
// we are likely to perform the same lookup later so, we make sure
@@ -178,6 +179,7 @@ Object::DoGetObject(TypeId tid) const
current->m_getObjectCount++;
// then, update the sort
UpdateSortedArray(m_aggregates, i);
#endif
// finally, return the match
return const_cast<Object*>(current);
}

View File

@@ -11,6 +11,7 @@
#define SIMPLE_REF_COUNT_H
#include "assert.h"
#include "atomic-counter.h"
#include "default-deleter.h"
#include <limits>
@@ -114,9 +115,11 @@ class SimpleRefCount : public PARENT
*/
inline void Unref() const
{
m_count--;
if (m_count == 0)
if (m_count-- == 1)
{
#ifdef NS3_MTP
std::atomic_thread_fence(std::memory_order_acquire);
#endif
DELETER::Delete(static_cast<T*>(const_cast<SimpleRefCount*>(this)));
}
}
@@ -140,7 +143,11 @@ class SimpleRefCount : public PARENT
* Note we make this mutable so that the const methods can still
* change it.
*/
#ifdef NS3_MTP
mutable AtomicCounter m_count;
#else
mutable uint32_t m_count;
#endif
};
} // namespace ns3

View File

@@ -15,6 +15,7 @@
#include "singleton.h"
#include "system-path.h"
#include <algorithm>
#include <cmath>
#include <cstring>
#include <list>

View File

@@ -78,6 +78,9 @@ FlowMonitor::FlowMonitor()
: m_enabled(false)
{
NS_LOG_FUNCTION(this);
#ifdef NS3_MTP
m_lock.store(false, std::memory_order_relaxed);
#endif
}
void
@@ -142,6 +145,13 @@ FlowMonitor::ReportFirstTx(Ptr<FlowProbe> probe,
return;
}
Time now = Simulator::Now();
#ifdef NS3_MTP
while (m_lock.exchange(true, std::memory_order_acquire))
{
};
#endif
TrackedPacket& tracked = m_trackedPackets[std::make_pair(flowId, packetId)];
tracked.firstSeenTime = now;
tracked.lastSeenTime = tracked.firstSeenTime;
@@ -159,6 +169,10 @@ FlowMonitor::ReportFirstTx(Ptr<FlowProbe> probe,
stats.timeFirstTxPacket = now;
}
stats.timeLastTxPacket = now;
#ifdef NS3_MTP
m_lock.store(false, std::memory_order_release);
#endif
}
void
@@ -173,12 +187,22 @@ FlowMonitor::ReportForwarding(Ptr<FlowProbe> probe,
NS_LOG_DEBUG("FlowMonitor not enabled; returning");
return;
}
#ifdef NS3_MTP
while (m_lock.exchange(true, std::memory_order_acquire))
{
};
#endif
std::pair<FlowId, FlowPacketId> key(flowId, packetId);
auto tracked = m_trackedPackets.find(key);
if (tracked == m_trackedPackets.end())
{
NS_LOG_WARN("Received packet forward report (flowId="
<< flowId << ", packetId=" << packetId << ") but not known to be transmitted.");
#ifdef NS3_MTP
m_lock.store(false, std::memory_order_release);
#endif
return;
}
@@ -187,6 +211,10 @@ FlowMonitor::ReportForwarding(Ptr<FlowProbe> probe,
Time delay = (Simulator::Now() - tracked->second.firstSeenTime);
probe->AddPacketStats(flowId, packetSize, delay);
#ifdef NS3_MTP
m_lock.store(false, std::memory_order_release);
#endif
}
void
@@ -201,11 +229,21 @@ FlowMonitor::ReportLastRx(Ptr<FlowProbe> probe,
NS_LOG_DEBUG("FlowMonitor not enabled; returning");
return;
}
#ifdef NS3_MTP
while (m_lock.exchange(true, std::memory_order_acquire))
{
};
#endif
auto tracked = m_trackedPackets.find(std::make_pair(flowId, packetId));
if (tracked == m_trackedPackets.end())
{
NS_LOG_WARN("Received packet last-tx report (flowId="
<< flowId << ", packetId=" << packetId << ") but not known to be transmitted.");
#ifdef NS3_MTP
m_lock.store(false, std::memory_order_release);
#endif
return;
}
@@ -263,6 +301,10 @@ FlowMonitor::ReportLastRx(Ptr<FlowProbe> probe,
<< packetId << ").");
m_trackedPackets.erase(tracked); // we don't need to track this packet anymore
#ifdef NS3_MTP
m_lock.store(false, std::memory_order_release);
#endif
}
void
@@ -279,6 +321,12 @@ FlowMonitor::ReportDrop(Ptr<FlowProbe> probe,
return;
}
#ifdef NS3_MTP
while (m_lock.exchange(true, std::memory_order_acquire))
{
};
#endif
probe->AddPacketDropStats(flowId, packetSize, reasonCode);
FlowStats& stats = GetStatsForFlow(flowId);
@@ -302,6 +350,10 @@ FlowMonitor::ReportDrop(Ptr<FlowProbe> probe,
<< packetId << ").");
m_trackedPackets.erase(tracked);
}
#ifdef NS3_MTP
m_lock.store(false, std::memory_order_release);
#endif
}
const FlowMonitor::FlowStatsContainer&

View File

@@ -18,6 +18,7 @@
#include "ns3/object.h"
#include "ns3/ptr.h"
#include <atomic>
#include <map>
#include <vector>
@@ -302,6 +303,9 @@ class FlowMonitor : public Object
double m_packetSizeBinWidth; //!< packet size bin width (for histograms)
double m_flowInterruptionsBinWidth; //!< Flow interruptions bin width (for histograms)
Time m_flowInterruptionsMinTime; //!< Flow interruptions minimum time
#ifdef NS3_MTP
std::atomic<bool> m_lock;
#endif
/// Get the stats for a given flow
/// @param flowId the Flow identification

View File

@@ -82,6 +82,9 @@ operator==(const Ipv4FlowClassifier::FiveTuple& t1, const Ipv4FlowClassifier::Fi
Ipv4FlowClassifier::Ipv4FlowClassifier()
{
#ifdef NS3_MTP
m_lock.store(false, std::memory_order_relaxed);
#endif
}
bool
@@ -133,6 +136,12 @@ Ipv4FlowClassifier::Classify(const Ipv4Header& ipHeader,
tuple.sourcePort = srcPort;
tuple.destinationPort = dstPort;
#ifdef NS3_MTP
while (m_lock.exchange(true, std::memory_order_acquire))
{
};
#endif
// try to insert the tuple, but check if it already exists
auto insert = m_flowMap.insert(std::pair<FiveTuple, FlowId>(tuple, 0));
@@ -163,6 +172,10 @@ Ipv4FlowClassifier::Classify(const Ipv4Header& ipHeader,
*out_flowId = insert.first->second;
*out_packetId = m_flowPktIdMap[*out_flowId];
#ifdef NS3_MTP
m_lock.store(false, std::memory_order_release);
#endif
return true;
}

View File

@@ -13,6 +13,7 @@
#include "ns3/ipv4-header.h"
#include <atomic>
#include <map>
#include <stdint.h>
@@ -88,6 +89,10 @@ class Ipv4FlowClassifier : public FlowClassifier
std::map<FlowId, FlowPacketId> m_flowPktIdMap;
/// Map FlowIds to (DSCP value, packet count) pairs
std::map<FlowId, std::map<Ipv4Header::DscpType, uint32_t>> m_flowDscpMap;
#ifdef NS3_MTP
std::atomic<bool> m_lock;
#endif
};
/**

View File

@@ -113,6 +113,8 @@ class Ipv4FlowProbeTag : public Tag
Ipv4Address m_dst; //!< IP destination
};
NS_OBJECT_ENSURE_REGISTERED(Ipv4FlowProbeTag);
TypeId
Ipv4FlowProbeTag::GetTypeId()
{

View File

@@ -83,6 +83,9 @@ operator==(const Ipv6FlowClassifier::FiveTuple& t1, const Ipv6FlowClassifier::Fi
Ipv6FlowClassifier::Ipv6FlowClassifier()
{
#ifdef NS3_MTP
m_lock.store(false, std::memory_order_relaxed);
#endif
}
bool
@@ -134,6 +137,12 @@ Ipv6FlowClassifier::Classify(const Ipv6Header& ipHeader,
tuple.sourcePort = srcPort;
tuple.destinationPort = dstPort;
#ifdef NS3_MTP
while (m_lock.exchange(true, std::memory_order_acquire))
{
};
#endif
// try to insert the tuple, but check if it already exists
auto insert = m_flowMap.insert(std::pair<FiveTuple, FlowId>(tuple, 0));
@@ -164,6 +173,10 @@ Ipv6FlowClassifier::Classify(const Ipv6Header& ipHeader,
*out_flowId = insert.first->second;
*out_packetId = m_flowPktIdMap[*out_flowId];
#ifdef NS3_MTP
m_lock.store(false, std::memory_order_release);
#endif
return true;
}

View File

@@ -14,6 +14,7 @@
#include "ns3/ipv6-header.h"
#include <atomic>
#include <map>
#include <stdint.h>
@@ -89,6 +90,10 @@ class Ipv6FlowClassifier : public FlowClassifier
std::map<FlowId, FlowPacketId> m_flowPktIdMap;
/// Map FlowIds to (DSCP value, packet count) pairs
std::map<FlowId, std::map<Ipv6Header::DscpType, uint32_t>> m_flowDscpMap;
#ifdef NS3_MTP
std::atomic<bool> m_lock;
#endif
};
/**

View File

@@ -244,6 +244,8 @@ Ipv6FlowProbe::Ipv6FlowProbe(Ptr<FlowMonitor> monitor,
MakeCallback(&Ipv6FlowProbe::QueueDropLogger, Ptr<Ipv6FlowProbe>(this)));
}
NS_OBJECT_ENSURE_REGISTERED(Ipv6FlowProbeTag);
/* static */
TypeId
Ipv6FlowProbe::GetTypeId()

View File

@@ -699,12 +699,14 @@ GlobalRouteManagerImpl::InitializeRoutes()
//
Ptr<GlobalRouter> rtr = node->GetObject<GlobalRouter>();
#ifdef NS3_MPI
uint32_t systemId = Simulator::GetSystemId();
// Ignore nodes that are not assigned to our systemId (distributed sim)
if (node->GetSystemId() != systemId)
{
continue;
}
#endif
//
// if the node has a global router interface, then run the global routing

View File

@@ -7,6 +7,7 @@
#include "ipv4-global-routing.h"
#include "global-route-manager.h"
#include "ipv4-queue-disc-item.h"
#include "ipv4-route.h"
#include "ipv4-routing-table-entry.h"
@@ -42,6 +43,12 @@ Ipv4GlobalRouting::GetTypeId()
BooleanValue(false),
MakeBooleanAccessor(&Ipv4GlobalRouting::m_randomEcmpRouting),
MakeBooleanChecker())
.AddAttribute("FlowEcmpRouting",
"Set to true if flows are randomly routed among ECMP; set to false for "
"using only one route consistently",
BooleanValue(false),
MakeBooleanAccessor(&Ipv4GlobalRouting::m_flowEcmpRouting),
MakeBooleanChecker())
.AddAttribute("RespondToInterfaceEvents",
"Set to true if you want to dynamically recompute the global routes upon "
"Interface notification events (up/down, or add/remove address)",
@@ -162,7 +169,7 @@ Ipv4GlobalRouting::AddASExternalRouteTo(Ipv4Address network,
}
Ptr<Ipv4Route>
Ipv4GlobalRouting::LookupGlobal(Ipv4Address dest, Ptr<NetDevice> oif)
Ipv4GlobalRouting::LookupGlobal(Ipv4Address dest, uint32_t flowHash, Ptr<NetDevice> oif)
{
NS_LOG_FUNCTION(this << dest << oif);
NS_LOG_LOGIC("Looking for route for destination " << dest);
@@ -258,7 +265,11 @@ Ipv4GlobalRouting::LookupGlobal(Ipv4Address dest, Ptr<NetDevice> oif)
// ECMP routing is enabled, or always select the first route
// consistently if random ECMP routing is disabled
uint32_t selectIndex;
if (m_randomEcmpRouting)
if (m_flowEcmpRouting)
{
selectIndex = flowHash % allRoutes.size();
}
else if (m_randomEcmpRouting)
{
selectIndex = m_rand->GetInteger(0, allRoutes.size() - 1);
}
@@ -496,6 +507,12 @@ Ipv4GlobalRouting::RouteOutput(Ptr<Packet> p,
Socket::SocketErrno& sockerr)
{
NS_LOG_FUNCTION(this << p << &header << oif << &sockerr);
uint32_t flowHash = 0;
if (m_flowEcmpRouting)
{
flowHash = Ipv4QueueDiscItem(p, Address(), header.GetProtocol(), header).Hash(0);
}
//
// First, see if this is a multicast packet we have a route for. If we
// have a route, then send the packet down each of the specified interfaces.
@@ -509,7 +526,7 @@ Ipv4GlobalRouting::RouteOutput(Ptr<Packet> p,
// See if this is a unicast packet we have a route for.
//
NS_LOG_LOGIC("Unicast destination- looking up");
Ptr<Ipv4Route> rtentry = LookupGlobal(header.GetDestination(), oif);
Ptr<Ipv4Route> rtentry = LookupGlobal(header.GetDestination(), flowHash, oif);
if (rtentry)
{
sockerr = Socket::ERROR_NOTERROR;
@@ -532,6 +549,13 @@ Ipv4GlobalRouting::RouteInput(Ptr<const Packet> p,
{
NS_LOG_FUNCTION(this << p << header << header.GetSource() << header.GetDestination() << idev
<< &lcb << &ecb);
uint32_t flowHash = 0;
if (m_flowEcmpRouting)
{
flowHash = Ipv4QueueDiscItem(p->Copy(), Address(), header.GetProtocol(), header).Hash(0);
}
// Check if input device supports IP
NS_ASSERT(m_ipv4->GetInterfaceForDevice(idev) >= 0);
uint32_t iif = m_ipv4->GetInterfaceForDevice(idev);
@@ -564,7 +588,7 @@ Ipv4GlobalRouting::RouteInput(Ptr<const Packet> p,
}
// Next, try to find a route
NS_LOG_LOGIC("Unicast destination- looking up global route");
Ptr<Ipv4Route> rtentry = LookupGlobal(header.GetDestination());
Ptr<Ipv4Route> rtentry = LookupGlobal(header.GetDestination(), flowHash);
if (rtentry)
{
NS_LOG_LOGIC("Found unicast destination- calling unicast callback");

View File

@@ -227,6 +227,9 @@ class Ipv4GlobalRouting : public Ipv4RoutingProtocol
/// Set to true if packets are randomly routed among ECMP; set to false for using only one route
/// consistently
bool m_randomEcmpRouting;
/// Set to true if flows are randomly routed among ECMP; set to false for using only one route
/// consistently
bool m_flowEcmpRouting;
/// Set to true if this interface should respond to interface events by globally recomputing
/// routes
bool m_respondToInterfaceEvents;
@@ -257,10 +260,13 @@ class Ipv4GlobalRouting : public Ipv4RoutingProtocol
/**
* @brief Lookup in the forwarding table for destination.
* @param dest destination address
* @param flowHash flow hash for per-flow ECMP routing
* @param oif output interface if any (put 0 otherwise)
* @return Ipv4Route to route the packet to reach dest address
*/
Ptr<Ipv4Route> LookupGlobal(Ipv4Address dest, Ptr<NetDevice> oif = nullptr);
Ptr<Ipv4Route> LookupGlobal(Ipv4Address dest,
uint32_t flowHash = 0,
Ptr<NetDevice> oif = nullptr);
HostRoutes m_hostRoutes; //!< Routes to hosts
NetworkRoutes m_networkRoutes; //!< Routes to networks

View File

@@ -18,6 +18,8 @@ namespace ns3
NS_LOG_COMPONENT_DEFINE("Ipv4PacketInfoTag");
NS_OBJECT_ENSURE_REGISTERED(Ipv4PacketInfoTag);
Ipv4PacketInfoTag::Ipv4PacketInfoTag()
: m_addr(Ipv4Address()),
m_ifindex(0),

View File

@@ -15,6 +15,8 @@
namespace ns3
{
NS_OBJECT_ENSURE_REGISTERED(Ipv6PacketInfoTag);
Ipv6PacketInfoTag::Ipv6PacketInfoTag()
: m_addr(Ipv6Address()),
m_ifindex(0),

View File

@@ -50,7 +50,6 @@ TcpOption::CreateOption(uint8_t kind)
TypeId tid;
};
static ObjectFactory objectFactory;
static KindToTid toTid[] = {
{TcpOption::END, TcpOptionEnd::GetTypeId()},
{TcpOption::MSS, TcpOptionMSS::GetTypeId()},
@@ -66,6 +65,7 @@ TcpOption::CreateOption(uint8_t kind)
{
if (toTid[i].kind == kind)
{
ObjectFactory objectFactory;
objectFactory.SetTypeId(toTid[i].tid);
return objectFactory.Create<TcpOption>();
}

View File

@@ -5,23 +5,48 @@ if(${ENABLE_EXAMPLES})
)
endif()
build_lib(
LIBNAME mpi
SOURCE_FILES
model/distributed-simulator-impl.cc
model/granted-time-window-mpi-interface.cc
model/mpi-interface.cc
model/mpi-receiver.cc
model/null-message-mpi-interface.cc
model/null-message-simulator-impl.cc
model/parallel-communication-interface.h
model/remote-channel-bundle-manager.cc
model/remote-channel-bundle.cc
HEADER_FILES
model/mpi-interface.h
model/mpi-receiver.h
model/parallel-communication-interface.h
LIBRARIES_TO_LINK ${libnetwork}
MPI::MPI_CXX
TEST_SOURCES ${example_as_test_suite}
)
if(${ENABLE_MTP})
build_lib(
LIBNAME mpi
SOURCE_FILES
model/distributed-simulator-impl.cc
model/granted-time-window-mpi-interface.cc
model/hybrid-simulator-impl.cc
model/mpi-interface.cc
model/mpi-receiver.cc
model/null-message-mpi-interface.cc
model/null-message-simulator-impl.cc
model/parallel-communication-interface.h
model/remote-channel-bundle-manager.cc
model/remote-channel-bundle.cc
HEADER_FILES
model/mpi-interface.h
model/mpi-receiver.h
model/parallel-communication-interface.h
LIBRARIES_TO_LINK ${libnetwork}
${libmtp}
MPI::MPI_CXX
TEST_SOURCES ${example_as_test_suite}
)
else()
build_lib(
LIBNAME mpi
SOURCE_FILES
model/distributed-simulator-impl.cc
model/granted-time-window-mpi-interface.cc
model/mpi-interface.cc
model/mpi-receiver.cc
model/null-message-mpi-interface.cc
model/null-message-simulator-impl.cc
model/parallel-communication-interface.h
model/remote-channel-bundle-manager.cc
model/remote-channel-bundle.cc
HEADER_FILES
model/mpi-interface.h
model/mpi-receiver.h
model/parallel-communication-interface.h
LIBRARIES_TO_LINK ${libnetwork}
MPI::MPI_CXX
TEST_SOURCES ${example_as_test_suite}
)
endif()

View File

@@ -35,3 +35,43 @@ build_lib_example(
${libcsma}
${libapplications}
)
build_lib_example(
NAME fat-tree-mpi
SOURCE_FILES fat-tree-mpi.cc
LIBRARIES_TO_LINK
${libmpi}
${libpoint-to-point}
${libinternet}
${libnix-vector-routing}
${libapplications}
${libflow-monitor}
)
if(${ENABLE_MTP})
build_lib_example(
NAME simple-hybrid
SOURCE_FILES simple-hybrid.cc
mpi-test-fixtures.cc
LIBRARIES_TO_LINK
${libmpi}
${libpoint-to-point}
${libinternet}
${libnix-vector-routing}
${libapplications}
${libmtp}
)
build_lib_example(
NAME fat-tree-hybrid
SOURCE_FILES fat-tree-hybrid.cc
LIBRARIES_TO_LINK
${libmpi}
${libmtp}
${libpoint-to-point}
${libinternet}
${libnix-vector-routing}
${libapplications}
${libflow-monitor}
)
endif()

View File

@@ -0,0 +1,643 @@
/*
* Copyright (c) 2023 State Key Laboratory for Novel Software Technology
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Songyuan Bai <i@f5soft.site>
*/
#include "ns3/applications-module.h"
#include "ns3/core-module.h"
#include "ns3/flow-monitor-module.h"
#include "ns3/internet-module.h"
#include "ns3/mpi-module.h"
#include "ns3/mtp-module.h"
#include "ns3/network-module.h"
#include "ns3/nix-vector-routing-module.h"
#include "ns3/point-to-point-module.h"
#include "ns3/traffic-control-module.h"
#include <chrono>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <map>
#include <numeric>
#include <vector>
using namespace std;
using namespace chrono;
using namespace ns3;
#define LOCAL(r) ((r) == conf::rank)
#define LOG(content) \
{ \
if (conf::rank == 0) \
cout << content << endl; \
}
// Empirical random distribution loaded from a CDF file.
//
// The file holds one "x cdf" pair per line with non-decreasing cdf values
// ending at 1.0. Sample() draws values by inverse-transform sampling with
// linear interpolation between the tabulated points.
// NOTE(review): assumes the file exists and yields at least two points;
// an empty/unreadable file leaves m_cdf empty -- confirm inputs upstream.
class Distribution
{
  public:
    // Load a distribution from a CDF file.
    // The extraction itself drives the loop; the previous !fin.eof() form
    // appended a duplicate (or garbage) entry when the file ended with a
    // newline or a short/malformed line.
    Distribution(const string& filename)
    {
        ifstream fin(filename);
        double x;
        double cdf;
        while (fin >> x >> cdf)
        {
            m_cdf.emplace_back(x, cdf);
        }
        // ifstream closes itself on destruction (RAII)
        m_rand = CreateObject<UniformRandomVariable>();
    }

    // Expectation value of the distribution, estimated as
    // sum of midpoint(x) * delta(cdf) over consecutive tabulated points.
    double Expectation() const
    {
        double ex = 0;
        for (uint32_t i = 1; i < m_cdf.size(); i++)
        {
            ex +=
                (m_cdf[i].first + m_cdf[i - 1].first) / 2 * (m_cdf[i].second - m_cdf[i - 1].second);
        }
        return ex;
    }

    // Draw one random value via inverse-transform sampling: pick u~U(0,1),
    // find the first tabulated cdf >= u, and interpolate x linearly.
    double Sample()
    {
        double rand = m_rand->GetValue(0, 1);
        for (uint32_t i = 1; i < m_cdf.size(); i++)
        {
            if (rand <= m_cdf[i].second)
            {
                // NOTE: division assumes strictly increasing cdf values
                double slope =
                    (m_cdf[i].first - m_cdf[i - 1].first) / (m_cdf[i].second - m_cdf[i - 1].second);
                return m_cdf[i - 1].first + slope * (rand - m_cdf[i - 1].second);
            }
        }
        // Fallback for a malformed CDF whose last entry is below 1.0:
        // return the largest x value (.first). Returning .second here would
        // yield the CDF probability (~1.0), which is not a valid sample.
        return m_cdf.back().first;
    }

  private:
    // the actual CDF function, as (x, cdf) pairs
    vector<pair<double, double>> m_cdf;
    // uniform random variable stream driving the sampling
    Ptr<UniformRandomVariable> m_rand;
};
// Traffic generator: produces a sequence of (startTime, src, dst, size)
// flow tuples. Flow sizes follow the CDF file; inter-arrival times are
// exponential (Poisson arrivals), scaled so the long-run offered rate
// matches dataRate. A fraction incastRatio of flows targets the victims.
class TrafficGenerator
{
  public:
    // cdfFile: path of the flow-size CDF file
    // hostTotal: number of hosts that src/dst indices are drawn from
    // dataRate: target offered rate; the *8 below suggests flow sizes are
    //           bytes and dataRate is bits per unit time -- TODO confirm
    // incastRatio: probability that a flow is directed at a victim host
    // victims: host indices eligible as incast destinations
    TrafficGenerator(const string cdfFile,
                     const uint32_t hostTotal,
                     const double dataRate,
                     const double incastRatio,
                     const vector<uint32_t> victims)
        : m_currentTime(0),
          m_incastRatio(incastRatio),
          m_hostTotal(hostTotal),
          m_victims(victims),
          m_flowCount(0),
          m_flowSizeTotal(0),
          m_distribution(cdfFile)
    {
        // mean inter-arrival time chosen so E[size]*8 / interval == dataRate
        m_averageInterval = m_distribution.Expectation() * 8 / dataRate;
        m_uniformRand = CreateObject<UniformRandomVariable>();
        m_expRand = CreateObject<ExponentialRandomVariable>();
    }

    // get one flow with incremental time and random src, dst and size.
    // Returns (startTime, srcHost, dstHost, flowSize); start times increase
    // monotonically across calls. The order of RNG draws below defines the
    // reproducible random stream -- do not reorder.
    tuple<double, uint32_t, uint32_t, uint32_t> GetFlow()
    {
        uint32_t src;
        uint32_t dst;
        // with probability incastRatio pick a victim, else a uniform host
        if (m_uniformRand->GetValue(0, 1) < m_incastRatio)
        {
            dst = m_victims[m_uniformRand->GetInteger(0, m_victims.size() - 1)];
        }
        else
        {
            dst = m_uniformRand->GetInteger(0, m_hostTotal - 1);
        }
        // resample the source until it differs from the destination
        do
        {
            src = m_uniformRand->GetInteger(0, m_hostTotal - 1);
        } while (src == dst);
        // flow size from the CDF, rounded and clamped to at least 1
        uint32_t flowSize = max((uint32_t)round(m_distribution.Sample()), 1U);
        // exponential inter-arrival time => Poisson arrival process
        m_currentTime += m_expRand->GetValue(m_averageInterval, 0);
        m_flowSizeTotal += flowSize;
        m_flowCount++;
        return make_tuple(m_currentTime, src, dst, flowSize);
    }

    // realized offered rate of the generated sequence (total bits / time)
    double GetActualDataRate() const
    {
        return m_flowSizeTotal / m_currentTime * 8;
    }

    // mean flow size of the configured distribution
    double GetAvgFlowSize() const
    {
        return m_distribution.Expectation();
    }

    // mean flow size actually realized so far
    double GetActualAvgFlowSize() const
    {
        return m_flowSizeTotal / (double)m_flowCount;
    }

    // number of flows generated so far
    uint32_t GetFlowCount() const
    {
        return m_flowCount;
    }

  private:
    double m_currentTime;        // start time of the most recent flow
    double m_averageInterval;    // mean inter-arrival time
    double m_incastRatio;        // fraction of flows targeting a victim
    uint32_t m_hostTotal;        // total number of hosts
    vector<uint32_t> m_victims;  // victim host indices for incast traffic
    uint32_t m_flowCount;        // flows generated so far
    uint64_t m_flowSizeTotal;    // sum of generated flow sizes
    Distribution m_distribution; // flow-size distribution
    Ptr<UniformRandomVariable> m_uniformRand;    // src/dst/incast decisions
    Ptr<ExponentialRandomVariable> m_expRand;    // inter-arrival times
};
// Command-line configurable parameters with their defaults; set once by
// Initialize() and treated as read-only afterwards.
namespace conf
{
// fat-tree scale
uint32_t k = 4;       // fat-tree arity: k pods by default, k/2 switches per tier
uint32_t cluster = 0; // if nonzero, build this many pods instead of k
// link layer options
uint32_t mtu = 1500;          // p2p link MTU in bytes
uint32_t delay = 3000;        // link propagation delay in nanoseconds
string bandwidth = "10Gbps";  // link bandwidth (ns-3 DataRate string)
// traffic-control layer options
string buffer = "4MB"; // switch (RED queue) buffer size
bool ecn = true;       // mark with ECN instead of dropping at RED thresholds
// network layer options
bool nix = false; // use on-demand nix-vector routing
bool rip = false; // use RIP routing (only if nix is false)
bool ecmp = true; // random per-packet ECMP in global routing
bool flow = true; // per-flow (hash-based) ECMP in global routing
// transport layer options
uint32_t port = 443;                     // server listening port
string socket = "ns3::TcpSocketFactory"; // socket factory TypeId
string tcp = "ns3::TcpDctcp";            // TCP congestion control TypeId
// application layer options
uint32_t size = 1448;                         // application packet / TCP segment size
string cdf = "src/mtp/examples/web-search.txt"; // flow-size CDF file
double load = 0.3; // offered load relative to bisection bandwidth
double incast = 0; // fraction of flows that are incast traffic
string victim = "0"; // '-'-separated incast victim host list, e.g. "0-1-2"
// simulation options
string seed = "";     // RNG seed string (hashed to a numeric seed)
bool flowmon = false; // collect flow-monitor statistics
double time = 1;      // simulated duration in seconds
double interval = 0.1; // progress print interval (0 disables printing)
// mtp options
uint32_t thread = 4; // maximum number of worker threads
// mpi options
uint32_t system = 0;  // number of logical processes (set from MPI size)
uint32_t rank = 0;    // this process's MPI rank (set from MPI)
bool nullmsg = false; // use the null-message synchronization algorithm
}; // namespace conf
// Parse the command line, apply all ns-3 default attribute settings derived
// from the chosen options, and bring up the hybrid MTP + MPI environment.
// Must be called before any node is created. Fixes the "progreess" typo in
// the --interval help string.
void
Initialize(int argc, char* argv[])
{
    CommandLine cmd;
    // parse scale
    cmd.AddValue("k", "Number of pods in a fat-tree", conf::k);
    cmd.AddValue("cluster", "Number of clusters in a variant fat-tree", conf::cluster);
    // parse network options
    cmd.AddValue("mtu", "P2P link MTU", conf::mtu);
    cmd.AddValue("delay", "Link delay in nanoseconds", conf::delay);
    cmd.AddValue("bandwidth", "Link bandwidth", conf::bandwidth);
    cmd.AddValue("buffer", "Switch buffer size", conf::buffer);
    cmd.AddValue("ecn", "Use explicit congestion control", conf::ecn);
    cmd.AddValue("nix", "Enable nix-vector routing", conf::nix);
    cmd.AddValue("rip", "Enable RIP routing", conf::rip);
    cmd.AddValue("ecmp", "Use equal-cost multi-path routing", conf::ecmp);
    cmd.AddValue("flow", "Use per-flow ECMP routing", conf::flow);
    cmd.AddValue("port", "Port number of server applications", conf::port);
    cmd.AddValue("socket", "Socket protocol", conf::socket);
    cmd.AddValue("tcp", "TCP protocol", conf::tcp);
    cmd.AddValue("size", "Application packet size", conf::size);
    cmd.AddValue("cdf", "Traffic CDF file location", conf::cdf);
    cmd.AddValue("load", "Traffic load relative to bisection bandwidth", conf::load);
    cmd.AddValue("incast", "Incast traffic ratio", conf::incast);
    cmd.AddValue("victim", "Incast traffic victim list", conf::victim);
    // parse simulation options
    cmd.AddValue("seed", "The seed of the random number generator", conf::seed);
    cmd.AddValue("flowmon", "Use flow-monitor to record statistics", conf::flowmon);
    cmd.AddValue("time", "Simulation time in seconds", conf::time);
    cmd.AddValue("interval", "Simulation progress print interval in seconds", conf::interval);
    // parse mtp/mpi options
    cmd.AddValue("thread", "Maximum number of threads", conf::thread);
    cmd.AddValue("system", "Number of logical processes in MTP manual partition", conf::system);
    cmd.AddValue("nullmsg", "Enable null message algorithm", conf::nullmsg);
    cmd.Parse(argc, argv);
    // link layer settings
    Config::SetDefault("ns3::PointToPointChannel::Delay", TimeValue(NanoSeconds(conf::delay)));
    Config::SetDefault("ns3::PointToPointNetDevice::DataRate", StringValue(conf::bandwidth));
    Config::SetDefault("ns3::PointToPointNetDevice::Mtu", UintegerValue(conf::mtu));
    // traffic control layer settings (RED thresholds follow DCTCP practice)
    Config::SetDefault("ns3::RedQueueDisc::MeanPktSize", UintegerValue(conf::mtu));
    Config::SetDefault("ns3::RedQueueDisc::UseEcn", BooleanValue(conf::ecn));
    Config::SetDefault("ns3::RedQueueDisc::UseHardDrop", BooleanValue(false));
    Config::SetDefault("ns3::RedQueueDisc::LinkDelay", TimeValue(NanoSeconds(conf::delay)));
    Config::SetDefault("ns3::RedQueueDisc::LinkBandwidth", StringValue(conf::bandwidth));
    Config::SetDefault("ns3::RedQueueDisc::MaxSize", QueueSizeValue(QueueSize(conf::buffer)));
    Config::SetDefault("ns3::RedQueueDisc::MinTh", DoubleValue(50));
    Config::SetDefault("ns3::RedQueueDisc::MaxTh", DoubleValue(150));
    // network layer settings
    Config::SetDefault("ns3::Ipv4GlobalRouting::RandomEcmpRouting", BooleanValue(conf::ecmp));
    Config::SetDefault("ns3::Ipv4GlobalRouting::FlowEcmpRouting", BooleanValue(conf::flow));
    // transport layer settings: DCTCP uses datacenter-scale timers, classic
    // TCP keeps the ns-3 wide-area defaults
    Config::SetDefault("ns3::TcpL4Protocol::SocketType", StringValue(conf::tcp));
    Config::SetDefault("ns3::TcpSocket::SegmentSize", UintegerValue(conf::size));
    Config::SetDefault("ns3::TcpSocket::ConnTimeout",
                       TimeValue(conf::tcp == "ns3::TcpDctcp" ? MilliSeconds(10) : Seconds(3)));
    Config::SetDefault("ns3::TcpSocket::SndBufSize", UintegerValue(1073725440));
    Config::SetDefault("ns3::TcpSocket::RcvBufSize", UintegerValue(1073725440));
    Config::SetDefault(
        "ns3::TcpSocketBase::MinRto",
        TimeValue(conf::tcp == "ns3::TcpDctcp" ? MilliSeconds(5) : MilliSeconds(200)));
    Config::SetDefault(
        "ns3::TcpSocketBase::ClockGranularity",
        TimeValue(conf::tcp == "ns3::TcpDctcp" ? MicroSeconds(100) : MilliSeconds(1)));
    Config::SetDefault("ns3::RttEstimator::InitialEstimation",
                       TimeValue(conf::tcp == "ns3::TcpDctcp" ? MicroSeconds(200) : Seconds(1)));
    // application layer settings
    Config::SetDefault("ns3::BulkSendApplication::SendSize", UintegerValue(UINT32_MAX));
    Config::SetDefault("ns3::OnOffApplication::DataRate", StringValue(conf::bandwidth));
    Config::SetDefault("ns3::OnOffApplication::PacketSize", UintegerValue(conf::size));
    Config::SetDefault("ns3::OnOffApplication::OnTime",
                       StringValue("ns3::ConstantRandomVariable[Constant=1000]"));
    Config::SetDefault("ns3::OnOffApplication::OffTime",
                       StringValue("ns3::ConstantRandomVariable[Constant=0]"));
    // simulation settings
    Time::SetResolution(Time::PS);
    RngSeedManager::SetSeed(Hash32(conf::seed));
    // initialize hybrid MTP + MPI; rank/system are taken from the MPI layer,
    // overriding any --system value once MPI is up
    MtpInterface::Enable(conf::thread);
    MpiInterface::Enable(&argc, &argv);
    conf::rank = MpiInterface::GetSystemId();
    conf::system = MpiInterface::GetSize();
}
void
SetupRouting()
{
InternetStackHelper internet;
if (conf::nix)
{
internet.SetRoutingHelper(Ipv4NixVectorHelper());
}
else if (conf::rip)
{
internet.SetRoutingHelper(RipHelper());
}
else
{
internet.SetRoutingHelper(Ipv4GlobalRoutingHelper());
}
internet.SetIpv6StackInstall(false);
internet.InstallAll();
LOG("\n- Setup the topology...");
}
// Install a PacketSink on every locally-owned host and a client application
// for every generated flow starting before conf::time. `bisection` is the
// bisection link count of the topology; the offered load is conf::load times
// the resulting bisection bandwidth.
void
InstallTraffic(map<uint32_t, Ptr<Node>>& hosts,
               map<Ptr<Node>, Ipv4Address>& addrs,
               double bisection)
{
    // output address for debugging
    LOG("\n- Calculating routes...");
    LOG(" Host NodeId System Address");
    for (auto& p : hosts)
    {
        LOG(" " << left << setw(6) << p.first << setw(8) << p.second->GetId() << setw(8)
                << p.second->GetSystemId() << addrs[p.second]);
    }
    // Nix-vector routing computes routes on demand; otherwise fill in the
    // global routing tables now.
    if (!conf::nix)
    {
        Ipv4GlobalRoutingHelper::PopulateRoutingTables();
    }
    // server applications — only install on nodes owned by this rank
    PacketSinkHelper server(conf::socket, InetSocketAddress(Ipv4Address::GetAny(), conf::port));
    for (auto& p : hosts)
    {
        if (LOCAL(p.second->GetSystemId()))
        {
            server.Install(p.second).Start(Seconds(0));
        }
    }
    // calculate traffic
    LOG("\n- Generating traffic...");
    // Bisection bandwidth in bit/s (times 2: each p2p link is full duplex).
    double bandwidth = bisection * DataRate(conf::bandwidth).GetBitRate() * 2;
    // Parse the '-'-separated victim list, e.g. "0-1-2".
    // NOTE(review): stoi throws on non-numeric tokens — input assumed valid.
    string victim;
    stringstream sin(conf::victim);
    vector<uint32_t> victims;
    while (getline(sin, victim, '-'))
    {
        victims.push_back(stoi(victim));
    }
    TrafficGenerator traffic(conf::cdf,
                             hosts.size(),
                             bandwidth * conf::load,
                             conf::incast,
                             victims);
    // install traffic (client applications); GetFlow() is called on every
    // rank so the random flow sequence stays identical across ranks, but an
    // application is only installed where the client node is local
    auto flow = traffic.GetFlow();
    while (get<0>(flow) < conf::time)
    {
        Ptr<Node> clientNode = hosts[get<1>(flow)];
        Ptr<Node> serverNode = hosts[get<2>(flow)];
        if (LOCAL(clientNode->GetSystemId()))
        {
            // Non-TCP sockets use OnOff (paced), TCP uses BulkSend (greedy);
            // both stop after MaxBytes = the sampled flow size.
            if (conf::socket != "ns3::TcpSocketFactory")
            {
                OnOffHelper client(conf::socket, InetSocketAddress(addrs[serverNode], conf::port));
                client.SetAttribute("MaxBytes", UintegerValue(get<3>(flow)));
                client.Install(clientNode).Start(Seconds(get<0>(flow)));
            }
            else
            {
                BulkSendHelper client(conf::socket,
                                      InetSocketAddress(addrs[serverNode], conf::port));
                client.SetAttribute("MaxBytes", UintegerValue(get<3>(flow)));
                client.Install(clientNode).Start(Seconds(get<0>(flow)));
            }
        }
        flow = traffic.GetFlow();
    }
    // traffic installation check: compare requested vs generated statistics
    LOG(" Expected data rate = " << bandwidth * conf::load / 1e9 << "Gbps");
    LOG(" Generated data rate = " << traffic.GetActualDataRate() / 1e9 << "Gbps");
    LOG(" Expected avg flow size = " << traffic.GetAvgFlowSize() / 1e6 << "MB");
    LOG(" Generated avg flow size = " << traffic.GetActualAvgFlowSize() / 1e6 << "MB");
    LOG(" Total flow count = " << traffic.GetFlowCount());
}
void
PrintProgress()
{
LOG(" Progressed to " << Simulator::Now().GetSeconds() << "s");
Simulator::Schedule(Seconds(conf::interval), PrintProgress);
}
void
StartSimulation()
{
// install flow-monitor
Ptr<FlowMonitor> flowMonitor;
FlowMonitorHelper flowHelper;
if (conf::flowmon)
{
flowMonitor = flowHelper.InstallAll();
}
// print progress
if (conf::interval)
{
Simulator::Schedule(Seconds(conf::interval), PrintProgress);
}
// start the simulation
Simulator::Stop(Seconds(conf::time));
LOG("\n- Start simulation...");
auto start = system_clock::now();
Simulator::Run();
auto end = system_clock::now();
auto time = duration_cast<duration<double>>(end - start).count();
// output simulation statistics
uint64_t eventCount = Simulator::GetEventCount();
if (conf::flowmon)
{
uint64_t dropped = 0;
uint64_t totalTx = 0;
uint64_t totalRx = 0;
uint64_t totalTxBytes = 0;
uint64_t flowCount = 0;
uint64_t finishedFlowCount = 0;
double totalThroughput = 0;
Time totalFct(0);
Time totalFinishedFct(0);
Time totalDelay(0);
flowMonitor->CheckForLostPackets();
for (auto& p : flowMonitor->GetFlowStats())
{
dropped = p.second.packetsDropped.size();
if ((p.second.timeLastRxPacket - p.second.timeFirstTxPacket).GetTimeStep() > 0 &&
p.second.txPackets && p.second.rxPackets)
{
totalTx += p.second.txPackets;
totalRx += p.second.rxPackets;
totalTxBytes += p.second.txBytes;
totalFct += p.second.timeLastRxPacket - p.second.timeFirstTxPacket;
if (p.second.txPackets - p.second.rxPackets == p.second.packetsDropped.size())
{
totalFinishedFct += p.second.timeLastRxPacket - p.second.timeFirstTxPacket;
finishedFlowCount++;
}
totalDelay += p.second.delaySum;
totalThroughput +=
(double)p.second.txBytes /
(p.second.timeLastRxPacket - p.second.timeFirstTxPacket).GetSeconds();
flowCount++;
}
}
double avgFct = (double)totalFct.GetMicroSeconds() / flowCount;
double avgFinishedFct = (double)totalFinishedFct.GetMicroSeconds() / finishedFlowCount;
double avgDelay = (double)totalDelay.GetMicroSeconds() / totalRx;
double avgThroughput = totalThroughput / flowCount / 1e9 * 8;
LOG(" Detected #flow = " << flowCount);
LOG(" Finished #flow = " << finishedFlowCount);
LOG(" Average FCT (all) = " << avgFct << "us");
LOG(" Average FCT (finished) = " << avgFinishedFct << "us");
LOG(" Average end to end delay = " << avgDelay << "us");
LOG(" Average flow throughput = " << avgThroughput << "Gbps");
LOG(" Network throughput = " << totalTxBytes / 1e9 * 8 / conf::time << "Gbps");
LOG(" Total Tx packets = " << totalTx);
LOG(" Total Rx packets = " << totalRx);
LOG(" Dropped packets = " << dropped);
}
Simulator::Destroy();
uint64_t eventCounts[conf::system];
MPI_Gather(&eventCount,
1,
MPI_UNSIGNED_LONG_LONG,
eventCounts,
1,
MPI_UNSIGNED_LONG_LONG,
0,
MpiInterface::GetCommunicator());
LOG("\n- Done!");
for (uint32_t i = 0; i < conf::system; i++)
{
LOG(" Event count of LP " << i << " = " << eventCounts[i]);
}
LOG(" Event count = " << accumulate(eventCounts, eventCounts + conf::system, 0ULL));
LOG(" Simulation time = " << time << "s\n");
MpiInterface::Disable();
}
int
main(int argc, char* argv[])
{
Initialize(argc, argv);
uint32_t hostId = 0;
map<uint32_t, Ptr<Node>> hosts;
map<Ptr<Node>, Ipv4Address> addrs;
// calculate topo scales
uint32_t nPod = conf::cluster ? conf::cluster : conf::k; // number of pods
uint32_t nGroup = conf::k / 2; // number of group of core switches
uint32_t nCore = conf::k / 2; // number of core switch in a group
uint32_t nAgg = conf::k / 2; // number of aggregation switch in a pod
uint32_t nEdge = conf::k / 2; // number of edge switch in a pod
uint32_t nHost = conf::k / 2; // number of hosts under a switch
NodeContainer core = new NodeContainer[nGroup];
NodeContainer agg = new NodeContainer[nPod];
NodeContainer edge = new NodeContainer[nPod];
NodeContainer host = new NodeContainer[nPod + nEdge];
// create nodes
for (uint32_t i = 0; i < nGroup; i++)
{
core[i].Create(nCore / 2, (2 * i) % conf::system);
core[i].Create((nCore - 1) / 2 + 1, (2 * i + 1) % conf::system);
}
for (uint32_t i = 0; i < nPod; i++)
{
agg[i].Create(nAgg, i % conf::system);
}
for (uint32_t i = 0; i < nPod; i++)
{
edge[i].Create(nEdge, i % conf::system);
}
for (uint32_t i = 0; i < nPod; i++)
{
for (uint32_t j = 0; j < nEdge; j++)
{
host[i * nEdge + j].Create(nHost, i % conf::system);
for (uint32_t k = 0; k < nHost; k++)
{
hosts[hostId++] = host[i * nEdge + j].Get(k);
}
}
}
SetupRouting();
Ipv4AddressHelper addr;
TrafficControlHelper red;
PointToPointHelper p2p;
red.SetRootQueueDisc("ns3::RedQueueDisc");
// connect edge switches to hosts
for (uint32_t i = 0; i < nPod; i++)
{
for (uint32_t j = 0; j < nEdge; j++)
{
string subnet = "10." + to_string(i) + "." + to_string(j) + ".0";
addr.SetBase(subnet.c_str(), "255.255.255.0");
for (uint32_t k = 0; k < nHost; k++)
{
Ptr<Node> node = host[i * nEdge + j].Get(k);
NetDeviceContainer ndc = p2p.Install(NodeContainer(node, edge[i].Get(j)));
red.Install(ndc.Get(1));
addrs[node] = addr.Assign(ndc).GetAddress(0);
}
}
}
// connect aggregate switches to edge switches
for (uint32_t i = 0; i < nPod; i++)
{
for (uint32_t j = 0; j < nAgg; j++)
{
string subnet = "10." + to_string(i) + "." + to_string(j + nEdge) + ".0";
addr.SetBase(subnet.c_str(), "255.255.255.0");
for (uint32_t k = 0; k < nEdge; k++)
{
NetDeviceContainer ndc = p2p.Install(agg[i].Get(j), edge[i].Get(k));
red.Install(ndc);
addr.Assign(ndc);
}
}
}
// connect core switches to aggregate switches
for (uint32_t i = 0; i < nGroup; i++)
{
for (uint32_t j = 0; j < nPod; j++)
{
string subnet = "10." + to_string(i + nPod) + "." + to_string(j) + ".0";
addr.SetBase(subnet.c_str(), "255.255.255.0");
for (uint32_t k = 0; k < nCore; k++)
{
NetDeviceContainer ndc = p2p.Install(core[i].Get(k), agg[j].Get(i));
red.Install(ndc);
addr.Assign(ndc);
}
}
}
InstallTraffic(hosts, addrs, nGroup * nCore * nPod / 2.0);
StartSimulation();
return 0;
}

View File

@@ -0,0 +1,647 @@
/*
* Copyright (c) 2023 State Key Laboratory for Novel Software Technology
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Songyuan Bai <i@f5soft.site>
*/
#include "ns3/applications-module.h"
#include "ns3/core-module.h"
#include "ns3/flow-monitor-module.h"
#include "ns3/internet-module.h"
#include "ns3/mpi-module.h"
#include "ns3/network-module.h"
#include "ns3/nix-vector-routing-module.h"
#include "ns3/point-to-point-module.h"
#include "ns3/traffic-control-module.h"
#include <chrono>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <map>
#include <numeric>
#include <vector>
using namespace std;
using namespace chrono;
using namespace ns3;
#define LOCAL(r) ((r) == conf::rank)
#define LOG(content) \
{ \
if (conf::rank == 0) \
cout << content << endl; \
}
// random variable distribution
// Piecewise-linear distribution loaded from a CDF file containing
// whitespace-separated "x cdf" pairs with cdf non-decreasing. Supports the
// (midpoint-approximated) expectation and inverse-CDF sampling.
// Fixes: the read loop tested fin.eof() before extraction, which appended
// the final pair twice; Sample()'s fallback returned the CDF value (a
// probability) instead of the largest x value.
class Distribution
{
  public:
    // load a distribution from a CDF file
    Distribution(const string filename)
    {
        ifstream fin;
        fin.open(filename);
        double x;
        double cdf;
        // loop on successful extraction — eof() only becomes true after a
        // read fails, so the old `while (!fin.eof())` duplicated the last pair
        while (fin >> x >> cdf)
        {
            m_cdf.emplace_back(x, cdf);
        }
        fin.close();
        m_rand = CreateObject<UniformRandomVariable>();
    }

    // expectation value of the distribution: sum of segment midpoints
    // weighted by each segment's probability mass
    double Expectation() const
    {
        double ex = 0;
        for (uint32_t i = 1; i < m_cdf.size(); i++)
        {
            ex +=
                (m_cdf[i].first + m_cdf[i - 1].first) / 2 * (m_cdf[i].second - m_cdf[i - 1].second);
        }
        return ex;
    }

    // get a random value from the distribution via inverse-CDF sampling with
    // linear interpolation between adjacent table entries
    double Sample()
    {
        double rand = m_rand->GetValue(0, 1);
        for (uint32_t i = 1; i < m_cdf.size(); i++)
        {
            if (rand <= m_cdf[i].second)
            {
                double slope =
                    (m_cdf[i].first - m_cdf[i - 1].first) / (m_cdf[i].second - m_cdf[i - 1].second);
                return m_cdf[i - 1].first + slope * (rand - m_cdf[i - 1].second);
            }
        }
        // rand exceeded the final CDF entry: return the largest x value
        // (previously returned .second, a probability, not an x sample)
        return m_cdf.back().first;
    }

  private:
    // the actual CDF function, as (x, P[X <= x]) pairs
    vector<pair<double, double>> m_cdf;
    // random variable stream
    Ptr<UniformRandomVariable> m_rand;
};
// traffic generator
// Open-loop traffic generator: flow arrivals form a Poisson process whose
// mean rate realizes the requested data rate, flow sizes follow the given
// CDF, and an optional fraction of flows targets a fixed "victim" host set
// (incast traffic).
class TrafficGenerator
{
  public:
    // cdfFile: flow-size CDF file path; hostTotal: number of hosts to pick
    // src/dst from; dataRate: target offered load in bit/s; incastRatio:
    // probability that a flow's destination is drawn from `victims` instead
    // of from all hosts; victims: candidate incast destinations.
    TrafficGenerator(const string cdfFile,
                     const uint32_t hostTotal,
                     const double dataRate,
                     const double incastRatio,
                     const vector<uint32_t> victims)
        : m_currentTime(0),
          m_incastRatio(incastRatio),
          m_hostTotal(hostTotal),
          m_victims(victims),
          m_flowCount(0),
          m_flowSizeTotal(0),
          m_distribution(cdfFile)
    {
        // Mean inter-arrival time in seconds, chosen so that
        // E[flowSize] * 8 / interval == dataRate.
        m_averageInterval = m_distribution.Expectation() * 8 / dataRate;
        m_uniformRand = CreateObject<UniformRandomVariable>();
        m_expRand = CreateObject<ExponentialRandomVariable>();
    }

    // get one flow with incremental time and random src, dst and size
    // Returns (startTimeSeconds, srcHost, dstHost, flowSizeBytes); start
    // times increase monotonically (exponential gaps => Poisson arrivals).
    tuple<double, uint32_t, uint32_t, uint32_t> GetFlow()
    {
        uint32_t src;
        uint32_t dst;
        // With probability m_incastRatio pick a victim destination,
        // otherwise pick any host uniformly at random.
        if (m_uniformRand->GetValue(0, 1) < m_incastRatio)
        {
            dst = m_victims[m_uniformRand->GetInteger(0, m_victims.size() - 1)];
        }
        else
        {
            dst = m_uniformRand->GetInteger(0, m_hostTotal - 1);
        }
        // Re-draw the source until it differs from the destination.
        do
        {
            src = m_uniformRand->GetInteger(0, m_hostTotal - 1);
        } while (src == dst);
        // Flow size sampled from the CDF, rounded, clamped to at least 1 byte.
        uint32_t flowSize = max((uint32_t)round(m_distribution.Sample()), 1U);
        m_currentTime += m_expRand->GetValue(m_averageInterval, 0);
        m_flowSizeTotal += flowSize;
        m_flowCount++;
        return make_tuple(m_currentTime, src, dst, flowSize);
    }

    // Average generated rate in bit/s over the generated time span.
    double GetActualDataRate() const
    {
        return m_flowSizeTotal / m_currentTime * 8;
    }

    // Theoretical mean flow size (bytes) of the loaded distribution.
    double GetAvgFlowSize() const
    {
        return m_distribution.Expectation();
    }

    // Empirical mean flow size (bytes) of the flows generated so far.
    double GetActualAvgFlowSize() const
    {
        return m_flowSizeTotal / (double)m_flowCount;
    }

    // Number of flows generated so far.
    uint32_t GetFlowCount() const
    {
        return m_flowCount;
    }

  private:
    double m_currentTime;               // start time of the most recent flow (s)
    double m_averageInterval;           // mean flow inter-arrival time (s)
    double m_incastRatio;               // probability a flow is incast traffic
    uint32_t m_hostTotal;               // number of hosts in the topology
    vector<uint32_t> m_victims;         // incast destination candidates
    uint32_t m_flowCount;               // flows generated so far
    uint64_t m_flowSizeTotal;           // total bytes of generated flows
    Distribution m_distribution;        // flow-size distribution
    Ptr<UniformRandomVariable> m_uniformRand; // src/dst selection stream
    Ptr<ExponentialRandomVariable> m_expRand; // inter-arrival stream
};
// Command-line configurable parameters with their defaults; set once by
// Initialize() and treated as read-only afterwards.
namespace conf
{
// fat-tree scale
uint32_t k = 4;       // fat-tree arity: k pods by default, k/2 switches per tier
uint32_t cluster = 0; // if nonzero, build this many pods instead of k
// link layer options
uint32_t mtu = 1500;          // p2p link MTU in bytes
uint32_t delay = 3000;        // link propagation delay in nanoseconds
string bandwidth = "10Gbps";  // link bandwidth (ns-3 DataRate string)
// traffic-control layer options
string buffer = "4MB"; // switch (RED queue) buffer size
bool ecn = true;       // mark with ECN instead of dropping at RED thresholds
// network layer options
bool nix = false; // use on-demand nix-vector routing
bool rip = false; // use RIP routing (only if nix is false)
bool ecmp = true; // random per-packet ECMP in global routing
bool flow = true; // per-flow (hash-based) ECMP in global routing
// transport layer options
uint32_t port = 443;                     // server listening port
string socket = "ns3::TcpSocketFactory"; // socket factory TypeId
string tcp = "ns3::TcpDctcp";            // TCP congestion control TypeId
// application layer options
uint32_t size = 1448;                         // application packet / TCP segment size
string cdf = "src/mtp/examples/web-search.txt"; // flow-size CDF file
double load = 0.3; // offered load relative to bisection bandwidth
double incast = 0; // fraction of flows that are incast traffic
string victim = "0"; // '-'-separated incast victim host list, e.g. "0-1-2"
// simulation options
string seed = "";     // RNG seed string (hashed to a numeric seed)
bool flowmon = false; // collect flow-monitor statistics
double time = 1;      // simulated duration in seconds
double interval = 0.1; // progress print interval (0 disables printing)
// mpi options
uint32_t system = 0;  // number of logical processes (set from MPI size)
uint32_t rank = 0;    // this process's MPI rank (set from MPI)
bool nullmsg = false; // use the null-message synchronization algorithm
}; // namespace conf
void
Initialize(int argc, char* argv[])
{
CommandLine cmd;
// parse scale
cmd.AddValue("k", "Number of pods in a fat-tree", conf::k);
cmd.AddValue("cluster", "Number of clusters in a variant fat-tree", conf::cluster);
// parse network options
cmd.AddValue("mtu", "P2P link MTU", conf::mtu);
cmd.AddValue("delay", "Link delay in nanoseconds", conf::delay);
cmd.AddValue("bandwidth", "Link bandwidth", conf::bandwidth);
cmd.AddValue("buffer", "Switch buffer size", conf::buffer);
cmd.AddValue("ecn", "Use explicit congestion control", conf::ecn);
cmd.AddValue("nix", "Enable nix-vector routing", conf::nix);
cmd.AddValue("rip", "Enable RIP routing", conf::rip);
cmd.AddValue("ecmp", "Use equal-cost multi-path routing", conf::ecmp);
cmd.AddValue("flow", "Use per-flow ECMP routing", conf::flow);
cmd.AddValue("port", "Port number of server applications", conf::port);
cmd.AddValue("socket", "Socket protocol", conf::socket);
cmd.AddValue("tcp", "TCP protocol", conf::tcp);
cmd.AddValue("size", "Application packet size", conf::size);
cmd.AddValue("cdf", "Traffic CDF file location", conf::cdf);
cmd.AddValue("load", "Traffic load relative to bisection bandwidth", conf::load);
cmd.AddValue("incast", "Incast traffic ratio", conf::incast);
cmd.AddValue("victim", "Incast traffic victim list", conf::victim);
// parse simulation options
cmd.AddValue("seed", "The seed of the random number generator", conf::seed);
cmd.AddValue("flowmon", "Use flow-monitor to record statistics", conf::flowmon);
cmd.AddValue("time", "Simulation time in seconds", conf::time);
cmd.AddValue("interval", "Simulation progreess print interval in seconds", conf::interval);
// parse mtp/mpi options
cmd.AddValue("system", "Number of logical processes in MTP manual partition", conf::system);
cmd.AddValue("nullmsg", "Enable null message algorithm", conf::nullmsg);
cmd.Parse(argc, argv);
// link layer settings
Config::SetDefault("ns3::PointToPointChannel::Delay", TimeValue(NanoSeconds(conf::delay)));
Config::SetDefault("ns3::PointToPointNetDevice::DataRate", StringValue(conf::bandwidth));
Config::SetDefault("ns3::PointToPointNetDevice::Mtu", UintegerValue(conf::mtu));
// traffic control layer settings
Config::SetDefault("ns3::RedQueueDisc::MeanPktSize", UintegerValue(conf::mtu));
Config::SetDefault("ns3::RedQueueDisc::UseEcn", BooleanValue(conf::ecn));
Config::SetDefault("ns3::RedQueueDisc::UseHardDrop", BooleanValue(false));
Config::SetDefault("ns3::RedQueueDisc::LinkDelay", TimeValue(NanoSeconds(conf::delay)));
Config::SetDefault("ns3::RedQueueDisc::LinkBandwidth", StringValue(conf::bandwidth));
Config::SetDefault("ns3::RedQueueDisc::MaxSize", QueueSizeValue(QueueSize(conf::buffer)));
Config::SetDefault("ns3::RedQueueDisc::MinTh", DoubleValue(50));
Config::SetDefault("ns3::RedQueueDisc::MaxTh", DoubleValue(150));
// network layer settings
Config::SetDefault("ns3::Ipv4GlobalRouting::RandomEcmpRouting", BooleanValue(conf::ecmp));
Config::SetDefault("ns3::Ipv4GlobalRouting::FlowEcmpRouting", BooleanValue(conf::flow));
// transport layer settings
Config::SetDefault("ns3::TcpL4Protocol::SocketType", StringValue(conf::tcp));
Config::SetDefault("ns3::TcpSocket::SegmentSize", UintegerValue(conf::size));
Config::SetDefault("ns3::TcpSocket::ConnTimeout",
TimeValue(conf::tcp == "ns3::TcpDctcp" ? MilliSeconds(10) : Seconds(3)));
Config::SetDefault("ns3::TcpSocket::SndBufSize", UintegerValue(1073725440));
Config::SetDefault("ns3::TcpSocket::RcvBufSize", UintegerValue(1073725440));
Config::SetDefault(
"ns3::TcpSocketBase::MinRto",
TimeValue(conf::tcp == "ns3::TcpDctcp" ? MilliSeconds(5) : MilliSeconds(200)));
Config::SetDefault(
"ns3::TcpSocketBase::ClockGranularity",
TimeValue(conf::tcp == "ns3::TcpDctcp" ? MicroSeconds(100) : MilliSeconds(1)));
Config::SetDefault("ns3::RttEstimator::InitialEstimation",
TimeValue(conf::tcp == "ns3::TcpDctcp" ? MicroSeconds(200) : Seconds(1)));
// application layer settings
Config::SetDefault("ns3::BulkSendApplication::SendSize", UintegerValue(UINT32_MAX));
Config::SetDefault("ns3::OnOffApplication::DataRate", StringValue(conf::bandwidth));
Config::SetDefault("ns3::OnOffApplication::PacketSize", UintegerValue(conf::size));
Config::SetDefault("ns3::OnOffApplication::OnTime",
StringValue("ns3::ConstantRandomVariable[Constant=1000]"));
Config::SetDefault("ns3::OnOffApplication::OffTime",
StringValue("ns3::ConstantRandomVariable[Constant=0]"));
// simulation settings
Time::SetResolution(Time::PS);
RngSeedManager::SetSeed(Hash32(conf::seed));
// initialize mpi
if (conf::nullmsg)
{
GlobalValue::Bind("SimulatorImplementationType",
StringValue("ns3::NullMessageSimulatorImpl"));
}
else
{
GlobalValue::Bind("SimulatorImplementationType",
StringValue("ns3::DistributedSimulatorImpl"));
}
MpiInterface::Enable(&argc, &argv);
conf::rank = MpiInterface::GetSystemId();
conf::system = MpiInterface::GetSize();
}
void
SetupRouting()
{
InternetStackHelper internet;
if (conf::nix)
{
internet.SetRoutingHelper(Ipv4NixVectorHelper());
}
else if (conf::rip)
{
internet.SetRoutingHelper(RipHelper());
}
else
{
internet.SetRoutingHelper(Ipv4GlobalRoutingHelper());
}
internet.SetIpv6StackInstall(false);
internet.InstallAll();
LOG("\n- Setup the topology...");
}
// Install a PacketSink on every locally-owned host and a client application
// for every generated flow starting before conf::time. `bisection` is the
// bisection link count of the topology; the offered load is conf::load times
// the resulting bisection bandwidth.
void
InstallTraffic(map<uint32_t, Ptr<Node>>& hosts,
               map<Ptr<Node>, Ipv4Address>& addrs,
               double bisection)
{
    // output address for debugging
    LOG("\n- Calculating routes...");
    LOG(" Host NodeId System Address");
    for (auto& p : hosts)
    {
        LOG(" " << left << setw(6) << p.first << setw(8) << p.second->GetId() << setw(8)
                << p.second->GetSystemId() << addrs[p.second]);
    }
    // Nix-vector routing computes routes on demand; otherwise fill in the
    // global routing tables now.
    if (!conf::nix)
    {
        Ipv4GlobalRoutingHelper::PopulateRoutingTables();
    }
    // server applications — only install on nodes owned by this rank
    PacketSinkHelper server(conf::socket, InetSocketAddress(Ipv4Address::GetAny(), conf::port));
    for (auto& p : hosts)
    {
        if (LOCAL(p.second->GetSystemId()))
        {
            server.Install(p.second).Start(Seconds(0));
        }
    }
    // calculate traffic
    LOG("\n- Generating traffic...");
    // Bisection bandwidth in bit/s (times 2: each p2p link is full duplex).
    double bandwidth = bisection * DataRate(conf::bandwidth).GetBitRate() * 2;
    // Parse the '-'-separated victim list, e.g. "0-1-2".
    // NOTE(review): stoi throws on non-numeric tokens — input assumed valid.
    string victim;
    stringstream sin(conf::victim);
    vector<uint32_t> victims;
    while (getline(sin, victim, '-'))
    {
        victims.push_back(stoi(victim));
    }
    TrafficGenerator traffic(conf::cdf,
                             hosts.size(),
                             bandwidth * conf::load,
                             conf::incast,
                             victims);
    // install traffic (client applications); GetFlow() is called on every
    // rank so the random flow sequence stays identical across ranks, but an
    // application is only installed where the client node is local
    auto flow = traffic.GetFlow();
    while (get<0>(flow) < conf::time)
    {
        Ptr<Node> clientNode = hosts[get<1>(flow)];
        Ptr<Node> serverNode = hosts[get<2>(flow)];
        if (LOCAL(clientNode->GetSystemId()))
        {
            // Non-TCP sockets use OnOff (paced), TCP uses BulkSend (greedy);
            // both stop after MaxBytes = the sampled flow size.
            if (conf::socket != "ns3::TcpSocketFactory")
            {
                OnOffHelper client(conf::socket, InetSocketAddress(addrs[serverNode], conf::port));
                client.SetAttribute("MaxBytes", UintegerValue(get<3>(flow)));
                client.Install(clientNode).Start(Seconds(get<0>(flow)));
            }
            else
            {
                BulkSendHelper client(conf::socket,
                                      InetSocketAddress(addrs[serverNode], conf::port));
                client.SetAttribute("MaxBytes", UintegerValue(get<3>(flow)));
                client.Install(clientNode).Start(Seconds(get<0>(flow)));
            }
        }
        flow = traffic.GetFlow();
    }
    // traffic installation check: compare requested vs generated statistics
    LOG(" Expected data rate = " << bandwidth * conf::load / 1e9 << "Gbps");
    LOG(" Generated data rate = " << traffic.GetActualDataRate() / 1e9 << "Gbps");
    LOG(" Expected avg flow size = " << traffic.GetAvgFlowSize() / 1e6 << "MB");
    LOG(" Generated avg flow size = " << traffic.GetActualAvgFlowSize() / 1e6 << "MB");
    LOG(" Total flow count = " << traffic.GetFlowCount());
}
void
PrintProgress()
{
LOG(" Progressed to " << Simulator::Now().GetSeconds() << "s");
Simulator::Schedule(Seconds(conf::interval), PrintProgress);
}
void
StartSimulation()
{
// install flow-monitor
Ptr<FlowMonitor> flowMonitor;
FlowMonitorHelper flowHelper;
if (conf::flowmon)
{
flowMonitor = flowHelper.InstallAll();
}
// print progress
if (conf::interval)
{
Simulator::Schedule(Seconds(conf::interval), PrintProgress);
}
// start the simulation
Simulator::Stop(Seconds(conf::time));
LOG("\n- Start simulation...");
auto start = system_clock::now();
Simulator::Run();
auto end = system_clock::now();
auto time = duration_cast<duration<double>>(end - start).count();
// output simulation statistics
uint64_t eventCount = Simulator::GetEventCount();
if (conf::flowmon)
{
uint64_t dropped = 0;
uint64_t totalTx = 0;
uint64_t totalRx = 0;
uint64_t totalTxBytes = 0;
uint64_t flowCount = 0;
uint64_t finishedFlowCount = 0;
double totalThroughput = 0;
Time totalFct(0);
Time totalFinishedFct(0);
Time totalDelay(0);
flowMonitor->CheckForLostPackets();
for (auto& p : flowMonitor->GetFlowStats())
{
dropped = p.second.packetsDropped.size();
if ((p.second.timeLastRxPacket - p.second.timeFirstTxPacket).GetTimeStep() > 0 &&
p.second.txPackets && p.second.rxPackets)
{
totalTx += p.second.txPackets;
totalRx += p.second.rxPackets;
totalTxBytes += p.second.txBytes;
totalFct += p.second.timeLastRxPacket - p.second.timeFirstTxPacket;
if (p.second.txPackets - p.second.rxPackets == p.second.packetsDropped.size())
{
totalFinishedFct += p.second.timeLastRxPacket - p.second.timeFirstTxPacket;
finishedFlowCount++;
}
totalDelay += p.second.delaySum;
totalThroughput +=
(double)p.second.txBytes /
(p.second.timeLastRxPacket - p.second.timeFirstTxPacket).GetSeconds();
flowCount++;
}
}
double avgFct = (double)totalFct.GetMicroSeconds() / flowCount;
double avgFinishedFct = (double)totalFinishedFct.GetMicroSeconds() / finishedFlowCount;
double avgDelay = (double)totalDelay.GetMicroSeconds() / totalRx;
double avgThroughput = totalThroughput / flowCount / 1e9 * 8;
LOG(" Detected #flow = " << flowCount);
LOG(" Finished #flow = " << finishedFlowCount);
LOG(" Average FCT (all) = " << avgFct << "us");
LOG(" Average FCT (finished) = " << avgFinishedFct << "us");
LOG(" Average end to end delay = " << avgDelay << "us");
LOG(" Average flow throughput = " << avgThroughput << "Gbps");
LOG(" Network throughput = " << totalTxBytes / 1e9 * 8 / conf::time << "Gbps");
LOG(" Total Tx packets = " << totalTx);
LOG(" Total Rx packets = " << totalRx);
LOG(" Dropped packets = " << dropped);
}
Simulator::Destroy();
uint64_t eventCounts[conf::system];
MPI_Gather(&eventCount,
1,
MPI_UNSIGNED_LONG_LONG,
eventCounts,
1,
MPI_UNSIGNED_LONG_LONG,
0,
MpiInterface::GetCommunicator());
LOG("\n- Done!");
for (uint32_t i = 0; i < conf::system; i++)
{
LOG(" Event count of LP " << i << " = " << eventCounts[i]);
}
LOG(" Event count = " << accumulate(eventCounts, eventCounts + conf::system, 0ULL));
LOG(" Simulation time = " << time << "s\n");
MpiInterface::Disable();
}
int
main(int argc, char* argv[])
{
Initialize(argc, argv);
uint32_t hostId = 0;
map<uint32_t, Ptr<Node>> hosts;
map<Ptr<Node>, Ipv4Address> addrs;
// calculate topo scales
uint32_t nPod = conf::cluster ? conf::cluster : conf::k; // number of pods
uint32_t nGroup = conf::k / 2; // number of group of core switches
uint32_t nCore = conf::k / 2; // number of core switch in a group
uint32_t nAgg = conf::k / 2; // number of aggregation switch in a pod
uint32_t nEdge = conf::k / 2; // number of edge switch in a pod
uint32_t nHost = conf::k / 2; // number of hosts under a switch
NodeContainer core = new NodeContainer[nGroup];
NodeContainer agg = new NodeContainer[nPod];
NodeContainer edge = new NodeContainer[nPod];
NodeContainer host = new NodeContainer[nPod * nEdge];
// create nodes
for (uint32_t i = 0; i < nGroup; i++)
{
core[i].Create(nCore / 2, (2 * i) % conf::system);
core[i].Create((nCore - 1) / 2 + 1, (2 * i + 1) % conf::system);
}
for (uint32_t i = 0; i < nPod; i++)
{
agg[i].Create(nAgg, i % conf::system);
}
for (uint32_t i = 0; i < nPod; i++)
{
edge[i].Create(nEdge, i % conf::system);
}
for (uint32_t i = 0; i < nPod; i++)
{
for (uint32_t j = 0; j < nEdge; j++)
{
host[i * nEdge + j].Create(nHost, i % conf::system);
for (uint32_t k = 0; k < nHost; k++)
{
hosts[hostId++] = host[i * nEdge + j].Get(k);
}
}
}
SetupRouting();
Ipv4AddressHelper addr;
TrafficControlHelper red;
PointToPointHelper p2p;
red.SetRootQueueDisc("ns3::RedQueueDisc");
// connect edge switches to hosts
for (uint32_t i = 0; i < nPod; i++)
{
for (uint32_t j = 0; j < nEdge; j++)
{
string subnet = "10." + to_string(i) + "." + to_string(j) + ".0";
addr.SetBase(subnet.c_str(), "255.255.255.0");
for (uint32_t k = 0; k < nHost; k++)
{
Ptr<Node> node = host[i * nEdge + j].Get(k);
NetDeviceContainer ndc = p2p.Install(NodeContainer(node, edge[i].Get(j)));
red.Install(ndc.Get(1));
addrs[node] = addr.Assign(ndc).GetAddress(0);
}
}
}
// connect aggregate switches to edge switches
for (uint32_t i = 0; i < nPod; i++)
{
for (uint32_t j = 0; j < nAgg; j++)
{
string subnet = "10." + to_string(i) + "." + to_string(j + nEdge) + ".0";
addr.SetBase(subnet.c_str(), "255.255.255.0");
for (uint32_t k = 0; k < nEdge; k++)
{
NetDeviceContainer ndc = p2p.Install(agg[i].Get(j), edge[i].Get(k));
red.Install(ndc);
addr.Assign(ndc);
}
}
}
// connect core switches to aggregate switches
for (uint32_t i = 0; i < nGroup; i++)
{
for (uint32_t j = 0; j < nPod; j++)
{
string subnet = "10." + to_string(i + nPod) + "." + to_string(j) + ".0";
addr.SetBase(subnet.c_str(), "255.255.255.0");
for (uint32_t k = 0; k < nCore; k++)
{
NetDeviceContainer ndc = p2p.Install(core[i].Get(k), agg[j].Get(i));
red.Install(ndc);
addr.Assign(ndc);
}
}
}
InstallTraffic(hosts, addrs, nGroup * nCore * nPod / 2.0);
StartSimulation();
return 0;
}

View File

@@ -0,0 +1,284 @@
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/**
* \file
* \ingroup mpi
*
* TestDistributed creates a dumbbell topology and logically splits it in
* half. The left half is placed on logical processor 0 and the right half
* is placed on logical processor 1.
*
* ------- -------
* RANK 0 RANK 1
* ------- | -------
* |
* n0 ---------| | |---------- n6
* | | |
* n1 -------\ | | | /------- n7
* n4 ----------|---------- n5
* n2 -------/ | | | \------- n8
* | | |
* n3 ---------| | |---------- n9
*
*
* OnOff clients are placed on each left leaf node. Each right leaf node
* is a packet sink for a left leaf node. As a packet travels from one
* logical processor to another (the link between n4 and n5), MPI messages
* are passed containing the serialized packet. The message is then
* deserialized into a new packet and sent on as normal.
*
* One packet is sent from each left leaf node. The packet sinks on the
* right leaf nodes output logging information when they receive the packet.
*/
#include "mpi-test-fixtures.h"
#include "ns3/core-module.h"
#include "ns3/internet-stack-helper.h"
#include "ns3/ipv4-address-helper.h"
#include "ns3/ipv4-global-routing-helper.h"
#include "ns3/mpi-interface.h"
#include "ns3/mtp-interface.h"
#include "ns3/network-module.h"
#include "ns3/nix-vector-helper.h"
#include "ns3/on-off-helper.h"
#include "ns3/packet-sink-helper.h"
#include "ns3/point-to-point-helper.h"
#include <iomanip>
#include <mpi.h>
using namespace ns3;
NS_LOG_COMPONENT_DEFINE("SimpleHybrid");
int
main(int argc, char* argv[])
{
    // Command-line switches; see the AddValue calls below for their meaning.
    bool nix = true;
    bool tracing = false;
    bool testing = false;
    bool verbose = false;
    // Parse command line
    CommandLine cmd(__FILE__);
    cmd.AddValue("nix", "Enable the use of nix-vector or global routing", nix);
    cmd.AddValue("tracing", "Enable pcap tracing", tracing);
    cmd.AddValue("verbose", "verbose output", verbose);
    cmd.AddValue("test", "Enable regression test output", testing);
    cmd.Parse(argc, argv);
    // Enable parallel simulator with the command line arguments.
    // NOTE(review): MtpInterface is enabled before MpiInterface — presumably
    // required so the hybrid (multithreaded + distributed) simulator
    // implementation gets selected; confirm against MtpInterface docs.
    MtpInterface::Enable();
    MpiInterface::Enable(&argc, &argv);
    SinkTracer::Init();
    if (verbose)
    {
        LogComponentEnable("PacketSink",
                           (LogLevel)(LOG_LEVEL_INFO | LOG_PREFIX_NODE | LOG_PREFIX_TIME));
    }
    uint32_t systemId = MpiInterface::GetSystemId();
    uint32_t systemCount = MpiInterface::GetSize();
    // Check for valid distributed parameters.
    // Must have 2 and only 2 Logical Processors (LPs)
    if (systemCount != 2)
    {
        std::cout << "This simulation requires 2 and only 2 logical processors." << std::endl;
        return 1;
    }
    // Some default values: with PacketSize == MaxBytes == 512, each OnOff
    // client sends exactly one 512-byte packet.
    Config::SetDefault("ns3::OnOffApplication::PacketSize", UintegerValue(512));
    Config::SetDefault("ns3::OnOffApplication::DataRate", StringValue("1Mbps"));
    Config::SetDefault("ns3::OnOffApplication::MaxBytes", UintegerValue(512));
    // Create leaf nodes on left with system id 0
    NodeContainer leftLeafNodes;
    leftLeafNodes.Create(4, 0);
    // Create router nodes. Left router
    // with system id 0, right router with
    // system id 1
    NodeContainer routerNodes;
    Ptr<Node> routerNode1 = CreateObject<Node>(0);
    Ptr<Node> routerNode2 = CreateObject<Node>(1);
    routerNodes.Add(routerNode1);
    routerNodes.Add(routerNode2);
    // Create leaf nodes on right with system id 1
    NodeContainer rightLeafNodes;
    rightLeafNodes.Create(4, 1);
    // Inter-router link is faster (5Mbps/5ms) than the leaf links (1Mbps/2ms).
    PointToPointHelper routerLink;
    routerLink.SetDeviceAttribute("DataRate", StringValue("5Mbps"));
    routerLink.SetChannelAttribute("Delay", StringValue("5ms"));
    PointToPointHelper leafLink;
    leafLink.SetDeviceAttribute("DataRate", StringValue("1Mbps"));
    leafLink.SetChannelAttribute("Delay", StringValue("2ms"));
    // Add link connecting routers; this link is the rank-0/rank-1 boundary
    // over which MPI messages carry serialized packets.
    NetDeviceContainer routerDevices;
    routerDevices = routerLink.Install(routerNodes);
    // Add links for left side leaf nodes to left router
    NetDeviceContainer leftRouterDevices;
    NetDeviceContainer leftLeafDevices;
    for (uint32_t i = 0; i < 4; ++i)
    {
        NetDeviceContainer temp = leafLink.Install(leftLeafNodes.Get(i), routerNodes.Get(0));
        leftLeafDevices.Add(temp.Get(0));
        leftRouterDevices.Add(temp.Get(1));
    }
    // Add links for right side leaf nodes to right router
    NetDeviceContainer rightRouterDevices;
    NetDeviceContainer rightLeafDevices;
    for (uint32_t i = 0; i < 4; ++i)
    {
        NetDeviceContainer temp = leafLink.Install(rightLeafNodes.Get(i), routerNodes.Get(1));
        rightLeafDevices.Add(temp.Get(0));
        rightRouterDevices.Add(temp.Get(1));
    }
    InternetStackHelper stack;
    if (nix)
    {
        Ipv4NixVectorHelper nixRouting;
        stack.SetRoutingHelper(nixRouting); // has effect on the next Install ()
    }
    stack.InstallAll();
    // Address plan: 10.1.x.0/24 left leaves, 10.2.1.0/24 router-router,
    // 10.3.x.0/24 right leaves.
    Ipv4InterfaceContainer routerInterfaces;
    Ipv4InterfaceContainer leftLeafInterfaces;
    Ipv4InterfaceContainer leftRouterInterfaces;
    Ipv4InterfaceContainer rightLeafInterfaces;
    Ipv4InterfaceContainer rightRouterInterfaces;
    Ipv4AddressHelper leftAddress;
    leftAddress.SetBase("10.1.1.0", "255.255.255.0");
    Ipv4AddressHelper routerAddress;
    routerAddress.SetBase("10.2.1.0", "255.255.255.0");
    Ipv4AddressHelper rightAddress;
    rightAddress.SetBase("10.3.1.0", "255.255.255.0");
    // Router-to-Router interfaces
    routerInterfaces = routerAddress.Assign(routerDevices);
    // Left interfaces: one /24 per leaf link
    for (uint32_t i = 0; i < 4; ++i)
    {
        NetDeviceContainer ndc;
        ndc.Add(leftLeafDevices.Get(i));
        ndc.Add(leftRouterDevices.Get(i));
        Ipv4InterfaceContainer ifc = leftAddress.Assign(ndc);
        leftLeafInterfaces.Add(ifc.Get(0));
        leftRouterInterfaces.Add(ifc.Get(1));
        leftAddress.NewNetwork();
    }
    // Right interfaces: one /24 per leaf link
    for (uint32_t i = 0; i < 4; ++i)
    {
        NetDeviceContainer ndc;
        ndc.Add(rightLeafDevices.Get(i));
        ndc.Add(rightRouterDevices.Get(i));
        Ipv4InterfaceContainer ifc = rightAddress.Assign(ndc);
        rightLeafInterfaces.Add(ifc.Get(0));
        rightRouterInterfaces.Add(ifc.Get(1));
        rightAddress.NewNetwork();
    }
    if (!nix)
    {
        // Fall back to global routing when nix-vector routing is disabled.
        Ipv4GlobalRoutingHelper::PopulateRoutingTables();
    }
    if (tracing)
    {
        // Each rank only traces the devices it owns.
        if (systemId == 0)
        {
            routerLink.EnablePcap("router-left", routerDevices, true);
            leafLink.EnablePcap("leaf-left", leftLeafDevices, true);
        }
        if (systemId == 1)
        {
            routerLink.EnablePcap("router-right", routerDevices, true);
            leafLink.EnablePcap("leaf-right", rightLeafDevices, true);
        }
    }
    // Create a packet sink on the right leafs to receive packets from left leafs
    uint16_t port = 50000;
    if (systemId == 1)
    {
        Address sinkLocalAddress(InetSocketAddress(Ipv4Address::GetAny(), port));
        PacketSinkHelper sinkHelper("ns3::UdpSocketFactory", sinkLocalAddress);
        ApplicationContainer sinkApp;
        for (uint32_t i = 0; i < 4; ++i)
        {
            sinkApp.Add(sinkHelper.Install(rightLeafNodes.Get(i)));
            if (testing)
            {
                // Count received packets so SinkTracer::Verify can check them.
                sinkApp.Get(i)->TraceConnectWithoutContext("RxWithAddresses",
                                                           MakeCallback(&SinkTracer::SinkTrace));
            }
        }
        sinkApp.Start(Seconds(1.0));
        sinkApp.Stop(Seconds(5));
    }
    // Create the OnOff applications to send; only rank 0 hosts the senders.
    if (systemId == 0)
    {
        OnOffHelper clientHelper("ns3::UdpSocketFactory", Address());
        clientHelper.SetAttribute("OnTime", StringValue("ns3::ConstantRandomVariable[Constant=1]"));
        clientHelper.SetAttribute("OffTime",
                                  StringValue("ns3::ConstantRandomVariable[Constant=0]"));
        ApplicationContainer clientApps;
        for (uint32_t i = 0; i < 4; ++i)
        {
            // Left leaf i sends to right leaf i.
            AddressValue remoteAddress(InetSocketAddress(rightLeafInterfaces.GetAddress(i), port));
            clientHelper.SetAttribute("Remote", remoteAddress);
            clientApps.Add(clientHelper.Install(leftLeafNodes.Get(i)));
        }
        clientApps.Start(Seconds(1.0));
        clientApps.Stop(Seconds(5));
    }
    Simulator::Stop(Seconds(5));
    Simulator::Run();
    Simulator::Destroy();
    if (testing)
    {
        // One packet per left leaf is expected to arrive.
        SinkTracer::Verify(4);
    }
    // Exit the MPI execution environment
    MpiInterface::Disable();
    return 0;
}

View File

@@ -25,6 +25,9 @@
#include "ns3/nstime.h"
#include "ns3/simulator-impl.h"
#include "ns3/simulator.h"
#ifdef NS3_MTP
#include "ns3/mtp-interface.h"
#endif
#include <iomanip>
#include <iostream>
@@ -80,6 +83,10 @@ char** GrantedTimeWindowMpiInterface::g_pRxBuffers;
MPI_Comm GrantedTimeWindowMpiInterface::g_communicator = MPI_COMM_WORLD;
bool GrantedTimeWindowMpiInterface::g_freeCommunicator = false;
#ifdef NS3_MTP
std::atomic<bool> GrantedTimeWindowMpiInterface::g_sending(false);
#endif
TypeId
GrantedTimeWindowMpiInterface::GetTypeId()
{
@@ -205,6 +212,12 @@ GrantedTimeWindowMpiInterface::SendPacket(Ptr<Packet> p,
{
NS_LOG_FUNCTION(this << p << rxTime.GetTimeStep() << node << dev);
#ifdef NS3_MTP
while (g_sending.exchange(true, std::memory_order_acquire))
{
};
#endif
SentBuffer sendBuf;
g_pendingTx.push_back(sendBuf);
auto i = g_pendingTx.rbegin(); // Points to the last element
@@ -224,7 +237,11 @@ GrantedTimeWindowMpiInterface::SendPacket(Ptr<Packet> p,
// Find the system id for the destination node
Ptr<Node> destNode = NodeList::GetNode(node);
#ifdef NS3_MTP
uint32_t nodeSysId = destNode->GetSystemId() & 0xFFFF;
#else
uint32_t nodeSysId = destNode->GetSystemId();
#endif
MPI_Isend(reinterpret_cast<void*>(i->GetBuffer()),
serializedSize + 16,
@@ -234,6 +251,10 @@ GrantedTimeWindowMpiInterface::SendPacket(Ptr<Packet> p,
g_communicator,
(i->GetRequest()));
g_txCount++;
#ifdef NS3_MTP
g_sending.store(false, std::memory_order_release);
#endif
}
void
@@ -287,11 +308,16 @@ GrantedTimeWindowMpiInterface::ReceiveMessages()
NS_ASSERT(pNode && pMpiRec);
// Schedule the rx event
#ifdef NS3_MTP
MtpInterface::GetSystem(pNode->GetSystemId() >> 16)
->ScheduleAt(pNode->GetId(), rxTime, MakeEvent(&MpiReceiver::Receive, pMpiRec, p));
#else
Simulator::ScheduleWithContext(pNode->GetId(),
rxTime - Simulator::Now(),
&MpiReceiver::Receive,
pMpiRec,
p);
#endif
// Re-queue the next read
MPI_Irecv(g_pRxBuffers[index],

View File

@@ -21,6 +21,7 @@
#include "ns3/buffer.h"
#include "ns3/nstime.h"
#include <atomic>
#include <list>
#include <mpi.h>
#include <stdint.h>
@@ -68,6 +69,7 @@ class SentBuffer
class Packet;
class DistributedSimulatorImpl;
class HybridSimulatorImpl;
/**
* @ingroup mpi
@@ -106,6 +108,7 @@ class GrantedTimeWindowMpiInterface : public ParallelCommunicationInterface, Obj
* It is not intended for state to be shared.
*/
friend ns3::DistributedSimulatorImpl;
friend ns3::HybridSimulatorImpl;
/**
* Check for received messages complete
@@ -158,6 +161,10 @@ class GrantedTimeWindowMpiInterface : public ParallelCommunicationInterface, Obj
/** Did ns-3 create the communicator? Have to free it. */
static bool g_freeCommunicator;
#ifdef NS3_MTP
static std::atomic<bool> g_sending;
#endif
};
} // namespace ns3

View File

@@ -0,0 +1,525 @@
/*
* Copyright (c) 2023 State Key Laboratory for Novel Software Technology
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Songyuan Bai <i@f5soft.site>
*/
/**
* \file
* \ingroup mtp
* \ingroup mpi
* Implementation of classes ns3::HybridSimulatorImpl
*/
#include "hybrid-simulator-impl.h"
#include "granted-time-window-mpi-interface.h"
#include "mpi-interface.h"
#include "ns3/channel.h"
#include "ns3/mtp-interface.h"
#include "ns3/node-container.h"
#include "ns3/node-list.h"
#include "ns3/node.h"
#include "ns3/simulator.h"
#include "ns3/uinteger.h"
#include <algorithm>
#include <mpi.h>
#include <queue>
#include <thread>
namespace ns3
{
NS_LOG_COMPONENT_DEFINE("HybridSimulatorImpl");
NS_OBJECT_ENSURE_REGISTERED(HybridSimulatorImpl);
HybridSimulatorImpl::HybridSimulatorImpl()
{
    NS_LOG_FUNCTION(this);
    // Bootstrap the threading layer with a minimal configuration; the real
    // set of logical processes is created later by Partition() — TODO confirm
    // the exact meaning of Enable(1, 0) against MtpInterface.
    MtpInterface::Enable(1, 0);
    m_myId = MpiInterface::GetSystemId();
    m_systemCount = MpiInterface::GetSize();
    // Allocate the LBTS message buffer, one slot per MPI rank
    m_pLBTS = new LbtsMessage[m_systemCount];
    m_smallestTime = Seconds(0);
    m_globalFinished = false;
}
HybridSimulatorImpl::~HybridSimulatorImpl()
{
    NS_LOG_FUNCTION(this);
    // The LBTS buffer is released in DoDispose(), not here.
}
TypeId
HybridSimulatorImpl::GetTypeId()
{
    // Two user-tunable attributes:
    //  - MaxThreads: upper bound on worker threads (defaults to the hardware
    //    concurrency of the machine);
    //  - MinLookahead: minimum link delay for cutting a link during topology
    //    partition; TimeStep(0) means "auto" (Partition() computes a median).
    static TypeId tid = TypeId("ns3::HybridSimulatorImpl")
                            .SetParent<SimulatorImpl>()
                            .SetGroupName("Mtp")
                            .AddConstructor<HybridSimulatorImpl>()
                            .AddAttribute("MaxThreads",
                                          "The maximum threads used in simulation",
                                          UintegerValue(std::thread::hardware_concurrency()),
                                          MakeUintegerAccessor(&HybridSimulatorImpl::m_maxThreads),
                                          MakeUintegerChecker<uint32_t>(1))
                            .AddAttribute("MinLookahead",
                                          "The minimum lookahead in a partition",
                                          TimeValue(TimeStep(1)),
                                          MakeTimeAccessor(&HybridSimulatorImpl::m_minLookahead),
                                          MakeTimeChecker(TimeStep(0)));
    return tid;
}
void
HybridSimulatorImpl::Destroy()
{
    // Invoke all pending destroy-time events in insertion order, then tear
    // down the threading layer before the MPI layer.
    while (!m_destroyEvents.empty())
    {
        Ptr<EventImpl> ev = m_destroyEvents.front().PeekEventImpl();
        m_destroyEvents.pop_front();
        NS_LOG_LOGIC("handle destroy " << ev);
        if (!ev->IsCancelled())
        {
            ev->Invoke();
        }
    }
    MtpInterface::Disable();
    MpiInterface::Destroy();
}
bool
HybridSimulatorImpl::IsFinished() const
{
    // True only when every MPI rank reported finished and no transient
    // messages remained; computed by the LBTS exchange inside Run().
    return m_globalFinished;
}
bool
HybridSimulatorImpl::IsLocalFinished() const
{
    // True when all logical processes on this MPI rank have finished.
    return MtpInterface::isFinished();
}
void
HybridSimulatorImpl::Stop()
{
NS_LOG_FUNCTION(this);
for (uint32_t i = 0; i < MtpInterface::GetSize(); i++)
{
MtpInterface::GetSystem(i)->Stop();
}
}
EventId
HybridSimulatorImpl::Stop(const Time& delay)
{
    NS_LOG_FUNCTION(this << delay.GetTimeStep());
    // Schedule a plain Stop() to run after the given delay.
    return Simulator::Schedule(delay, &Simulator::Stop);
}
EventId
HybridSimulatorImpl::Schedule(const Time& delay, EventImpl* event)
{
    NS_LOG_FUNCTION(this << delay.GetTimeStep() << event);
    // Delegate to the logical process bound to the calling thread.
    return MtpInterface::GetSystem()->Schedule(delay, event);
}
void
HybridSimulatorImpl::ScheduleWithContext(uint32_t context, const Time& delay, EventImpl* event)
{
    NS_LOG_FUNCTION(this << context << delay.GetTimeStep() << event);
    if (MtpInterface::GetSize() == 1)
    {
        // initialization stage, do not schedule remote: before Partition()
        // runs there is only the single public LP
        LogicalProcess* local = MtpInterface::GetSystem();
        local->ScheduleWithContext(local, context, delay, event);
    }
    else
    {
        // After partitioning, the upper 16 bits of a node's system id hold
        // its local LP index (assigned in Partition()); route the event to
        // the LP owning the context node.
        LogicalProcess* remote =
            MtpInterface::GetSystem(NodeList::GetNode(context)->GetSystemId() >> 16);
        MtpInterface::GetSystem()->ScheduleWithContext(remote, context, delay, event);
    }
}
EventId
HybridSimulatorImpl::ScheduleNow(EventImpl* event)
{
    // Equivalent to scheduling with zero delay on the current LP.
    return Schedule(TimeStep(0), event);
}
EventId
HybridSimulatorImpl::ScheduleDestroy(EventImpl* event)
{
    // Destroy events run at Destroy() time; they are tagged with the maximum
    // timestamp and the reserved DESTROY uid so IsExpired()/Remove() can
    // recognize them.
    EventId id(Ptr<EventImpl>(event, false),
               GetMaximumSimulationTime().GetTimeStep(),
               0xffffffff,
               EventId::DESTROY);
    // The destroy-event list is shared by all worker threads: guard the append.
    MtpInterface::CriticalSection cs;
    m_destroyEvents.push_back(id);
    return id;
}
void
HybridSimulatorImpl::Remove(const EventId& id)
{
    // Regular events are removed by the logical process that owns them.
    if (id.GetUid() != EventId::DESTROY)
    {
        MtpInterface::GetSystem()->Remove(id);
        return;
    }
    // Destroy events: erase the first matching entry, if it is still listed.
    auto match = std::find(m_destroyEvents.begin(), m_destroyEvents.end(), id);
    if (match != m_destroyEvents.end())
    {
        m_destroyEvents.erase(match);
    }
}
void
HybridSimulatorImpl::Cancel(const EventId& id)
{
    // Cancelling flags the event implementation; the event stays queued and
    // is skipped when its timestamp is reached.
    if (!IsExpired(id))
    {
        id.PeekEventImpl()->Cancel();
    }
}
bool
HybridSimulatorImpl::IsExpired(const EventId& id) const
{
    if (id.GetUid() == EventId::DESTROY)
    {
        // Destroy events: expired when cancelled/null, or when no longer
        // present in the destroy list (i.e. already removed or invoked).
        if (id.PeekEventImpl() == nullptr || id.PeekEventImpl()->IsCancelled())
        {
            return true;
        }
        for (auto i = m_destroyEvents.begin(); i != m_destroyEvents.end(); i++)
        {
            if (*i == id)
            {
                return false;
            }
        }
        return true;
    }
    else
    {
        // Regular events: ask the owning logical process.
        return MtpInterface::GetSystem()->IsExpired(id);
    }
}
void
HybridSimulatorImpl::Run()
{
    NS_LOG_FUNCTION(this);
    // Split the topology into logical processes before starting the workers.
    Partition();
    MtpInterface::RunBefore();
    m_globalFinished = false;
    while (!m_globalFinished)
    {
        // Drain MPI: deliver received packets, reap completed sends.
        GrantedTimeWindowMpiInterface::ReceiveMessages();
        GrantedTimeWindowMpiInterface::TestSendComplete();
        MtpInterface::CalculateSmallestTime();
        // Exchange each rank's Lower Bound on Time Stamp (LBTS) together with
        // its rx/tx counters and finished flag.
        LbtsMessage lMsg(GrantedTimeWindowMpiInterface::GetRxCount(),
                         GrantedTimeWindowMpiInterface::GetTxCount(),
                         m_myId,
                         IsLocalFinished(),
                         MtpInterface::GetSmallestTime());
        m_pLBTS[m_myId] = lMsg;
        MPI_Allgather(&lMsg,
                      sizeof(LbtsMessage),
                      MPI_BYTE,
                      m_pLBTS,
                      sizeof(LbtsMessage),
                      MPI_BYTE,
                      MpiInterface::GetCommunicator());
        m_smallestTime = m_pLBTS[0].GetSmallestTime();
        // The totRx and totTx counts insure there are no transient
        // messages; If totRx != totTx, there are transients,
        // so we don't update the granted time.
        uint32_t totRx = m_pLBTS[0].GetRxCount();
        uint32_t totTx = m_pLBTS[0].GetTxCount();
        m_globalFinished = m_pLBTS[0].IsFinished();
        // calculate smallest time of all hosts
        for (uint32_t i = 1; i < m_systemCount; ++i)
        {
            if (m_pLBTS[i].GetSmallestTime() < m_smallestTime)
            {
                m_smallestTime = m_pLBTS[i].GetSmallestTime();
            }
            totRx += m_pLBTS[i].GetRxCount();
            totTx += m_pLBTS[i].GetTxCount();
            m_globalFinished &= m_pLBTS[i].IsFinished();
        }
        // Grant the new safe time window to the local logical processes.
        MtpInterface::SetSmallestTime(m_smallestTime);
        // Global halting condition is all nodes have empty queue's and
        // no messages are in-flight.
        m_globalFinished &= totRx == totTx;
        // Execute next event if it is within the current time window.
        // Local task may be completed.
        if (totRx == totTx && !IsLocalFinished())
        { // Safe to process
            MtpInterface::ProcessOneRound();
        }
    }
    MtpInterface::RunAfter();
}
Time
HybridSimulatorImpl::Now() const
{
    // Do not add function logging here, to avoid stack overflow.
    // Returns the clock of the LP bound to the calling thread.
    return MtpInterface::GetSystem()->Now();
}
Time
HybridSimulatorImpl::GetDelayLeft(const EventId& id) const
{
    // An expired event has no remaining delay; otherwise ask the owning LP.
    return IsExpired(id) ? TimeStep(0) : MtpInterface::GetSystem()->GetDelayLeft(id);
}
Time
HybridSimulatorImpl::GetMaximumSimulationTime() const
{
    // Half the representable range, leaving headroom for time arithmetic.
    return Time::Max() / 2;
}
void
HybridSimulatorImpl::SetScheduler(ObjectFactory schedulerFactory)
{
    NS_LOG_FUNCTION(this << schedulerFactory);
    // Remember the scheduler type so Partition() can configure LPs created
    // later, and propagate it to every existing logical process.
    m_schedulerTypeId = schedulerFactory.GetTypeId();
    const uint32_t lpCount = MtpInterface::GetSize();
    for (uint32_t lp = 0; lp < lpCount; ++lp)
    {
        MtpInterface::GetSystem(lp)->SetScheduler(schedulerFactory);
    }
}
uint32_t
HybridSimulatorImpl::GetSystemId() const
{
    // The MPI rank of this process.
    return m_myId;
}
uint32_t
HybridSimulatorImpl::GetContext() const
{
    // Context of the event currently executing on this thread's LP.
    return MtpInterface::GetSystem()->GetContext();
}
uint64_t
HybridSimulatorImpl::GetEventCount() const
{
    // Sum the executed-event counters over all local logical processes.
    uint64_t total = 0;
    const uint32_t lpCount = MtpInterface::GetSize();
    for (uint32_t lp = 0; lp < lpCount; ++lp)
    {
        total += MtpInterface::GetSystem(lp)->GetEventCount();
    }
    return total;
}
void
HybridSimulatorImpl::DoDispose()
{
    // Release the LBTS message buffer; null the pointer so a repeated
    // dispose (or a stray access) cannot double-delete it.
    delete[] m_pLBTS;
    m_pLBTS = nullptr;
    SimulatorImpl::DoDispose();
}
void
HybridSimulatorImpl::Partition()
{
    NS_LOG_FUNCTION(this);
    uint32_t localSystemId = 0;
    NodeContainer nodes = NodeContainer::GetGlobal();
    bool* visited = new bool[nodes.GetN()]{false};
    std::queue<Ptr<Node>> q;
    // if m_minLookahead is not set, use the median delay of this rank's
    // point-to-point links as the cut-off threshold
    if (m_minLookahead == TimeStep(0))
    {
        std::vector<Time> delays;
        for (auto it = nodes.Begin(); it != nodes.End(); it++)
        {
            Ptr<Node> node = *it;
            if (node->GetSystemId() == m_myId)
            {
                for (uint32_t i = 0; i < node->GetNDevices(); i++)
                {
                    Ptr<NetDevice> localNetDevice = node->GetDevice(i);
                    Ptr<Channel> channel = localNetDevice->GetChannel();
                    if (!channel)
                    {
                        continue;
                    }
                    // only p2p links are candidates for partition cut-off
                    if (localNetDevice->IsPointToPoint())
                    {
                        TimeValue delay;
                        channel->GetAttribute("Delay", delay);
                        delays.push_back(delay.Get());
                    }
                }
            }
        }
        std::sort(delays.begin(), delays.end());
        if (delays.empty())
        {
            m_minLookahead = TimeStep(0);
        }
        else if (delays.size() % 2 == 1)
        {
            m_minLookahead = delays[delays.size() / 2];
        }
        else
        {
            m_minLookahead = (delays[delays.size() / 2 - 1] + delays[delays.size() / 2]) / 2;
        }
        NS_LOG_INFO("Min lookahead is set to " << m_minLookahead);
    }
    // perform a BFS on the whole network topo to assign each node a localSystemId
    for (auto it = nodes.Begin(); it != nodes.End(); it++)
    {
        Ptr<Node> node = *it;
        if (!visited[node->GetId()] && node->GetSystemId() == m_myId)
        {
            q.push(node);
            localSystemId++; // LP ids start at 1; LP 0 is the public LP
            while (!q.empty())
            {
                // pop from BFS queue
                node = q.front();
                q.pop();
                if (visited[node->GetId()])
                {
                    // a node can be queued more than once; process it only once
                    continue;
                }
                visited[node->GetId()] = true;
                // assign this node the current localSystemId: the upper 16 bits
                // carry the local LP index, the lower 16 bits the MPI rank
                node->SetSystemId(localSystemId << 16 | m_myId);
                NS_LOG_INFO("node " << node->GetId() << " is set to local system "
                                    << localSystemId);
                for (uint32_t i = 0; i < node->GetNDevices(); i++)
                {
                    Ptr<NetDevice> localNetDevice = node->GetDevice(i);
                    Ptr<Channel> channel = localNetDevice->GetChannel();
                    if (!channel)
                    {
                        continue;
                    }
                    // cut-off p2p links for partition: links whose delay is at
                    // least the lookahead become partition boundaries; shorter
                    // links keep both endpoints in the same LP
                    if (localNetDevice->IsPointToPoint())
                    {
                        TimeValue delay;
                        channel->GetAttribute("Delay", delay);
                        if (delay.Get() >= m_minLookahead)
                        {
                            continue;
                        }
                    }
                    // grab the adjacent nodes
                    for (uint32_t j = 0; j < channel->GetNDevices(); j++)
                    {
                        Ptr<Node> remote = channel->GetDevice(j)->GetNode();
                        // Bug fix: test the neighbour's system id, not the
                        // current node's. The current node's id was rewritten
                        // above (localSystemId << 16 | m_myId != m_myId), so
                        // the old test was always false and the BFS could
                        // never grow past its seed node.
                        if (!visited[remote->GetId()] && remote->GetSystemId() == m_myId)
                        {
                            q.push(remote);
                        }
                    }
                }
            }
        }
    }
    delete[] visited;
    // after the partition, we finally know the system count (# of LPs)
    const uint32_t systemCount = localSystemId;
    const uint32_t threadCount = std::min(m_maxThreads, systemCount);
    NS_LOG_INFO("Partition done! " << systemCount << " systems share " << threadCount
                                   << " threads");
    // create new LPs
    MtpInterface::EnableNew(threadCount, systemCount);
    // set scheduler for every newly created LP (index 0 is the public LP)
    ObjectFactory schedulerFactory;
    schedulerFactory.SetTypeId(m_schedulerTypeId);
    for (uint32_t i = 1; i <= systemCount; i++)
    {
        MtpInterface::GetSystem(i)->SetScheduler(schedulerFactory);
    }
    // remove old events in public LP
    const Ptr<Scheduler> oldEvents = MtpInterface::GetSystem()->GetPendingEvents();
    const Ptr<Scheduler> eventsToBeTransferred = schedulerFactory.Create<Scheduler>();
    while (!oldEvents->IsEmpty())
    {
        Scheduler::Event next = oldEvents->RemoveNext();
        eventsToBeTransferred->Insert(next);
    }
    // transfer events to new LPs
    while (!eventsToBeTransferred->IsEmpty())
    {
        Scheduler::Event ev = eventsToBeTransferred->RemoveNext();
        // invoke initialization events (at time 0) by their insertion order
        // since changing the execution order of these events may cause error,
        // they have to be invoked now rather than parallelly executed
        if (ev.key.m_ts == 0)
        {
            MtpInterface::GetSystem(ev.key.m_context == Simulator::NO_CONTEXT
                                        ? 0
                                        : NodeList::GetNode(ev.key.m_context)->GetSystemId() >> 16)
                ->InvokeNow(ev);
        }
        else if (ev.key.m_context == Simulator::NO_CONTEXT)
        {
            Schedule(TimeStep(ev.key.m_ts), ev.impl);
        }
        else
        {
            ScheduleWithContext(ev.key.m_context, TimeStep(ev.key.m_ts), ev.impl);
        }
    }
}
} // namespace ns3

View File

@@ -0,0 +1,119 @@
/*
* Copyright (c) 2023 State Key Laboratory for Novel Software Technology
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Songyuan Bai <i@f5soft.site>
*/
/**
* @file
* @ingroup mtp
* @ingroup mpi
* Declaration of classes ns3::HybridSimulatorImpl
*/
#ifndef NS3_HYBRID_SIMULATOR_IMPL_H
#define NS3_HYBRID_SIMULATOR_IMPL_H
#include "distributed-simulator-impl.h"
#include "ns3/event-id.h"
#include "ns3/event-impl.h"
#include "ns3/nstime.h"
#include "ns3/object-factory.h"
#include "ns3/simulator-impl.h"
#include <list>
namespace ns3
{
/**
* @brief
* Implementation of the hybrid simulator
*/
/**
 * @brief
 * Implementation of the hybrid simulator
 *
 * Runs multiple logical processes per MPI rank (via MtpInterface) and
 * synchronizes ranks by exchanging LbtsMessage records to agree on a safe
 * time window.
 */
class HybridSimulatorImpl : public SimulatorImpl
{
  public:
    /**
     * Register this type.
     * @return The object TypeId.
     */
    static TypeId GetTypeId();
    /** Default constructor. */
    HybridSimulatorImpl();
    /** Destructor. */
    ~HybridSimulatorImpl() override;
    // virtual from SimulatorImpl
    void Destroy() override;
    bool IsFinished() const override;
    void Stop() override;
    EventId Stop(const Time& delay) override;
    EventId Schedule(const Time& delay, EventImpl* event) override;
    void ScheduleWithContext(uint32_t context, const Time& delay, EventImpl* event) override;
    EventId ScheduleNow(EventImpl* event) override;
    EventId ScheduleDestroy(EventImpl* event) override;
    void Remove(const EventId& id) override;
    void Cancel(const EventId& id) override;
    bool IsExpired(const EventId& id) const override;
    void Run() override;
    Time Now() const override;
    Time GetDelayLeft(const EventId& id) const override;
    Time GetMaximumSimulationTime() const override;
    void SetScheduler(ObjectFactory schedulerFactory) override;
    uint32_t GetSystemId() const override;
    uint32_t GetContext() const override;
    uint64_t GetEventCount() const override;

  private:
    // Inherited from Object
    void DoDispose() override;
    /**
     * @brief Whether LPs on the current local process is finished.
     *
     * @return true if all finished
     * @return false if not all finished
     */
    bool IsLocalFinished() const;
    /** Are all parallel instances completed. */
    bool m_globalFinished;
    /** LBTS message buffer, one slot per MPI rank. */
    LbtsMessage* m_pLBTS;
    uint32_t m_myId;        /**< MPI rank. */
    uint32_t m_systemCount; /**< MPI communicator size. */
    Time m_smallestTime;    /**< End of current window. */
    /**
     * @brief Automatically divides the to-be-simulated topology
     *
     * This method is called at the beginning of HybridSimulatorImpl::Run.
     * It will set each node a systemId. Then it creates logical processes according
     * to the number of partitions, and transfer old events to newly created logical
     * processes.
     *
     * If manual partition is enabled by calling MtpInterface::Enable with two parameters,
     * this method will not be called.
     */
    void Partition();
    uint32_t m_maxThreads; /**< Max worker threads (MaxThreads attribute). */
    Time m_minLookahead;   /**< Link-delay cut-off threshold (MinLookahead attribute). */
    TypeId m_schedulerTypeId; /**< Scheduler type applied to newly created LPs. */
    std::list<EventId> m_destroyEvents; /**< Events to run at Destroy() time. */
};
} // namespace ns3
#endif /* NS3_HYBRID_SIMULATOR_IMPL_H */

View File

@@ -91,7 +91,8 @@ MpiInterface::SetParallelSimulatorImpl()
g_parallelCommunicationInterface = new NullMessageMpiInterface();
useDefault = false;
}
else if (simulationType == "ns3::DistributedSimulatorImpl")
else if (simulationType == "ns3::DistributedSimulatorImpl" ||
simulationType == "ns3::HybridSimulatorImpl")
{
g_parallelCommunicationInterface = new GrantedTimeWindowMpiInterface();
useDefault = false;

26
src/mtp/CMakeLists.txt Normal file
View File

@@ -0,0 +1,26 @@
# Select the test suite for the mtp module: when examples are enabled, the
# module is exercised through the hybrid (MPI + threads) suite if MPI is
# available, otherwise through the purely multithreaded suite.
set(example_as_test_suite)
if(${ENABLE_EXAMPLES})
  if(${ENABLE_MPI})
    set(example_as_test_suite
        test/hybrid-test-suite.cc
    )
  else()
    set(example_as_test_suite
        test/mtp-test-suite.cc
    )
  endif()
endif()

# Core of the mtp module: logical processes, the user-facing MtpInterface,
# and the multithreaded simulator implementation.
build_lib(
  LIBNAME mtp
  SOURCE_FILES
    model/logical-process.cc
    model/mtp-interface.cc
    model/multithreaded-simulator-impl.cc
  HEADER_FILES
    model/logical-process.h
    model/mtp-interface.h
    model/multithreaded-simulator-impl.h
  LIBRARIES_TO_LINK ${libnetwork}
  TEST_SOURCES ${example_as_test_suite}
)

145
src/mtp/doc/mtp.rst Normal file
View File

@@ -0,0 +1,145 @@
.. include:: replace.txt
Multi-threaded Parallel Simulation (MTP)
----------------------------------------
This module provides a fast and user-transparent parallel simulator
implementation for ns-3. By splitting up the to-be-simulated topology
into multiple logical processes, LPs, with fine granularity, each LP
can be dynamically scheduled and processed by a thread for load balancing,
while reducing cache misses. With this approach, significant speedup
can be achieved for large topologies with heavy traffic.
.. _current-implementation-details:
Current Implementation Details
******************************
This module contains three parts: A parallel simulator implementation
``MultithreadedSimulatorImpl``, an interface to users ``MtpInterface``,
and ``LogicalProcess`` to represent LPs in terms of parallel simulation.
All LPs and threads are stored in the ``MtpInterface``. It controls the
simulation progress, schedules LPs to threads and manages the lifecycles
of LPs and threads. The interface also provides some methods and options
for users to tweak the simulation.
Each LP's logic is implemented in ``LogicalProcess``. It contains most of
the methods of the default sequential simulator plus some auxiliary methods
for parallel simulation.
The simulator implementation ``MultithreadedSimulatorImpl`` is a derived
class from the base simulator. It converts calls to the base simulator into
calls to logical processes based on the context of the current thread.
It also provides a partition method for automatic fine-grained topology partition.
For distributed simulation with MPI, we added ``HybridSimulatorImpl`` in the
``mpi`` module. This simulator uses both ``MtpInterface`` and ``MpiInterface``
to coordinate local LPs and global MPI communications. We also modified the
module to make it locally thread-safe.
Running Multithreaded Simulations
*********************************
Prerequisites
+++++++++++++
.. highlight:: bash
For multithreaded simulation on a single machine with many cores, ensure
that your system supports pthread library. For hybrid distributed simulation,
ensure that MPI is installed, as well as mpic++. In Ubuntu repositories,
these are openmpi-bin, openmpi-common, openmpi-doc, libopenmpi-dev. In
Fedora, these are openmpi and openmpi-devel.
Building and running examples
+++++++++++++++++++++++++++++
If you already built |ns3| without MTP enabled, you must re-build::
$ ./ns3 distclean
Configure |ns3| with the --enable-mtp option::
$ ./ns3 configure --enable-examples --enable-tests --enable-mtp
Ensure that MTP is enabled by checking the optional features shown from the
output of configure. If you want to use the hybrid simulator, you also have
to pass the --enable-mpi option.
Next, build |ns3|::
$ ./ns3
After building |ns3| with MTP enabled, the example programs are now
ready to run. Here are a few adapted examples (you can run these original
examples by omitting the ``-mtp`` suffix in the program name)::
$ ./ns3 run dctcp-example-mtp
$ ./ns3 run rping-simple-network-mtp
$ ./ns3 run simple-multicast-flooding-mtp
An example simulating the fat-tree topology with the multithreaded simulator
and the hybrid simulator::
$ ./ns3 run "fat-tree-mtp --thread=4"
   $ ./ns3 run fat-tree-mtp --command-template "mpirun -np 2 %s --thread=2"
The thread parameter is the number of threads to use (for each process in the
hybrid case).
Advanced Options
++++++++++++++++
.. highlight:: cpp
These options can be modified at the beginning of the ``main`` function using
the native config syntax of ns-3.
You can also change the default maximum number of threads by setting
Config::SetDefault("ns3::MultithreadedSimulatorImpl::MaxThreads", UintegerValue(8));
Config::SetDefault("ns3::HybridSimulatorImpl::MaxThreads", UintegerValue(8));
The automatic partition will cut off stateless links whose delay is above the
threshold. The threshold is automatically calculated based on the delay of every
link. If you are not satisfied with the partition results, you can set a custom
threshold by setting
Config::SetDefault("ns3::MultithreadedSimulatorImpl::MinLookahead", TimeValue(NanoSeconds(500)));
Config::SetDefault("ns3::HybridSimulatorImpl::MinLookahead", TimeValue(NanoSeconds(500)));
The scheduling method determines the priority (estimated completion time of the
next round) of each logical process. There are five available options:
- ``ByExecutionTime``: LPs with a higher execution time of the last round will have higher priority.
- ``ByPendingEventCount``: LPs with more pending events of this round will have higher priority.
- ``ByEventCount``: LPs that have executed more events in total will have higher priority.
- ``BySimulationTime``: LPs with larger current clock time will have higher priority.
- ``None``: Do not schedule. The partition's priority is based on their ID.
Many experiments show that the first one usually leads to better performance.
However, you can still choose one according to your taste by setting
GlobalValue::Bind("PartitionSchedulingMethod", StringValue("ByExecutionTime"));
By default, the scheduling period is 2 when the number of partitions is less than
16, 3 when it is less than 256, 4 when it is less than 4096, etc. Since more
partitions lead to more scheduling costs. You can also set how frequently scheduling
occurs by setting
GlobalValue::Bind("PartitionSchedulingPeriod", UintegerValue(4));
Tracing During Multithreaded Simulations
****************************************
Unison resolved a lot of thread-safety issues with ns-3's architecture. You don't
need to consider these issues on your own for most of the time, except if you have
custom global statistics other than the built-in flow-monitor. In the latter case,
if multiple nodes can access your global statistics, you can replace them with
atomic variables via ``std::atomic<>``. When collecting tracing data such as Pcap,
it is strongly recommended to create separate output files for each node instead
of a single trace file. For complex custom data structures, you can create critical
sections by adding
MtpInterface::CriticalSection cs;
at the beginning of your methods.

View File

@@ -0,0 +1,22 @@
# Minimal MTP example: a dumbbell topology split across two logical processes.
build_lib_example(
NAME simple-mtp
SOURCE_FILES simple-mtp.cc
LIBRARIES_TO_LINK
${libmtp}
${libpoint-to-point}
${libinternet}
${libnix-vector-routing}
${libapplications}
)
# Configurable fat-tree benchmark; links flow-monitor for optional statistics.
build_lib_example(
NAME fat-tree-mtp
SOURCE_FILES fat-tree-mtp.cc
LIBRARIES_TO_LINK
${libmtp}
${libpoint-to-point}
${libinternet}
${libnix-vector-routing}
${libapplications}
${libflow-monitor}
)

View File

@@ -0,0 +1,609 @@
/*
* Copyright (c) 2023 State Key Laboratory for Novel Software Technology
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Songyuan Bai <i@f5soft.site>
*/
#include "ns3/applications-module.h"
#include "ns3/core-module.h"
#include "ns3/flow-monitor-module.h"
#include "ns3/internet-module.h"
#include "ns3/mtp-module.h"
#include "ns3/network-module.h"
#include "ns3/nix-vector-routing-module.h"
#include "ns3/point-to-point-module.h"
#include "ns3/traffic-control-module.h"
#include <chrono>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <map>
#include <numeric>
#include <vector>
using namespace std;
using namespace chrono;
using namespace ns3;
// Print a message to stdout followed by a newline.
// Wrapped in do { } while (false) so the macro expands to a single statement
// and is safe to use in unbraced if/else branches (a bare { } block followed
// by the caller's ';' would break `if (x) LOG(y); else ...`).
#define LOG(content)                                                                               \
    do                                                                                             \
    {                                                                                              \
        cout << content << endl;                                                                   \
    } while (false)
// Random variable distribution backed by an empirical CDF loaded from a file.
class Distribution
{
  public:
    // Load a distribution from a CDF file: each line is "<value> <cdf>",
    // with cdf non-decreasing and reaching 1 on the last line.
    Distribution(const string filename)
    {
        ifstream fin;
        fin.open(filename);
        // Loop on extraction success rather than eof(): checking eof() alone
        // re-runs the body with stale values after the last line, appending a
        // duplicate final entry when the file ends with a newline.
        double x;
        double cdf;
        while (fin >> x >> cdf)
        {
            m_cdf.emplace_back(x, cdf);
        }
        fin.close();
        m_rand = CreateObject<UniformRandomVariable>();
    }

    // Expectation value of the distribution (midpoint sum over CDF segments).
    double Expectation() const
    {
        double ex = 0;
        for (uint32_t i = 1; i < m_cdf.size(); i++)
        {
            ex += (m_cdf[i].first + m_cdf[i - 1].first) / 2 *
                  (m_cdf[i].second - m_cdf[i - 1].second);
        }
        return ex;
    }

    // Draw a random value from the distribution via inverse transform
    // sampling with linear interpolation between CDF points.
    double Sample()
    {
        double rand = m_rand->GetValue(0, 1);
        for (uint32_t i = 1; i < m_cdf.size(); i++)
        {
            if (rand <= m_cdf[i].second)
            {
                double dp = m_cdf[i].second - m_cdf[i - 1].second;
                // a zero-width CDF step would divide by zero below
                if (dp <= 0)
                {
                    return m_cdf[i].first;
                }
                double slope = (m_cdf[i].first - m_cdf[i - 1].first) / dp;
                return m_cdf[i - 1].first + slope * (rand - m_cdf[i - 1].second);
            }
        }
        // Fallback: return the largest sample value. The previous code
        // returned ".second" (the CDF probability, i.e. ~1.0) by mistake.
        return m_cdf.back().first;
    }

  private:
    // the actual CDF function: (value, cumulative probability) pairs
    vector<pair<double, double>> m_cdf;
    // random variable stream
    Ptr<UniformRandomVariable> m_rand;
};
// Traffic generator: produces a stream of flows (arrival time, src, dst, size)
// whose sizes follow the given CDF and whose inter-arrival times are
// exponential, targeting a given aggregate data rate with an optional
// incast fraction directed at a fixed set of victim hosts.
class TrafficGenerator
{
  public:
    // cdfFile: flow-size CDF file; hostTotal: number of hosts; dataRate: target
    // aggregate rate in bit/s; incastRatio: fraction of flows aimed at victims;
    // victims: host indices that receive the incast traffic.
    TrafficGenerator(const string cdfFile,
                     const uint32_t hostTotal,
                     const double dataRate,
                     const double incastRatio,
                     const vector<uint32_t> victims)
        : m_currentTime(0),
          m_incastRatio(incastRatio),
          m_hostTotal(hostTotal),
          m_victims(victims),
          m_flowCount(0),
          m_flowSizeTotal(0),
          m_distribution(cdfFile)
    {
        // mean inter-arrival (s) so that E[size]*8 / interval == dataRate
        m_averageInterval = m_distribution.Expectation() * 8 / dataRate;
        m_uniformRand = CreateObject<UniformRandomVariable>();
        m_expRand = CreateObject<ExponentialRandomVariable>();
    }

    // Get one flow with incremental arrival time and random src, dst and size.
    // Returns (start time in s, src host index, dst host index, size in bytes).
    tuple<double, uint32_t, uint32_t, uint32_t> GetFlow()
    {
        uint32_t src;
        uint32_t dst;
        // with probability m_incastRatio the destination is a victim host
        if (m_uniformRand->GetValue(0, 1) < m_incastRatio)
        {
            dst = m_victims[m_uniformRand->GetInteger(0, m_victims.size() - 1)];
        }
        else
        {
            dst = m_uniformRand->GetInteger(0, m_hostTotal - 1);
        }
        // re-draw the source until it differs from the destination
        do
        {
            src = m_uniformRand->GetInteger(0, m_hostTotal - 1);
        } while (src == dst);
        // flow sizes are rounded to whole bytes, at least 1
        uint32_t flowSize = max((uint32_t)round(m_distribution.Sample()), 1U);
        // Poisson arrivals: exponential gap advances the running clock
        m_currentTime += m_expRand->GetValue(m_averageInterval, 0);
        m_flowSizeTotal += flowSize;
        m_flowCount++;
        return make_tuple(m_currentTime, src, dst, flowSize);
    }

    // Achieved aggregate rate (bit/s) over the generated time span.
    double GetActualDataRate() const
    {
        return m_flowSizeTotal / m_currentTime * 8;
    }

    // Theoretical mean flow size from the CDF (bytes).
    double GetAvgFlowSize() const
    {
        return m_distribution.Expectation();
    }

    // Empirical mean flow size of the flows generated so far (bytes).
    double GetActualAvgFlowSize() const
    {
        return m_flowSizeTotal / (double)m_flowCount;
    }

    // Number of flows generated so far.
    uint32_t GetFlowCount() const
    {
        return m_flowCount;
    }

  private:
    double m_currentTime;               // running arrival clock (s)
    double m_averageInterval;           // mean inter-arrival gap (s)
    double m_incastRatio;               // fraction of incast flows
    uint32_t m_hostTotal;               // number of hosts
    vector<uint32_t> m_victims;         // incast destinations
    uint32_t m_flowCount;               // flows generated
    uint64_t m_flowSizeTotal;           // total bytes generated
    Distribution m_distribution;        // flow-size distribution
    Ptr<UniformRandomVariable> m_uniformRand;
    Ptr<ExponentialRandomVariable> m_expRand;
};
// All command-line configurable parameters with their default values.
namespace conf
{
// fat-tree scale
uint32_t k = 4;       // number of pods (and ports per switch)
uint32_t cluster = 0; // 0 means a standard k-pod fat-tree
// link layer options
uint32_t mtu = 1500;
uint32_t delay = 3000; // per-link propagation delay in nanoseconds
string bandwidth = "10Gbps";
// traffic-control layer options
string buffer = "4MB"; // switch buffer size
bool ecn = true;
// network layer options
bool nix = false; // nix-vector routing
bool rip = false; // RIP routing (fallback is global routing)
bool ecmp = true;
bool flow = true; // per-flow (vs per-packet) ECMP
// transport layer options
uint32_t port = 443;
string socket = "ns3::TcpSocketFactory";
string tcp = "ns3::TcpDctcp";
// application layer options
uint32_t size = 1448; // application packet / TCP segment size
string cdf = "src/mtp/examples/web-search.txt";
double load = 0.3;  // traffic load relative to bisection bandwidth
double incast = 0;  // incast traffic ratio
string victim = "0"; // dash-separated incast victim host list
// simulation options
string seed = "";
bool flowmon = false;
double time = 1;       // simulated seconds
double interval = 0.1; // progress print interval (0 disables)
// mtp options
uint32_t thread = 4; // maximum number of worker threads
}; // namespace conf
// Parse the command line into the conf namespace and apply all ns-3 default
// attribute values (link, queue, routing, TCP and application settings),
// then enable the MTP execution environment.
void
Initialize(int argc, char* argv[])
{
    CommandLine cmd;
    // parse scale
    cmd.AddValue("k", "Number of pods in a fat-tree", conf::k);
    cmd.AddValue("cluster", "Number of clusters in a variant fat-tree", conf::cluster);
    // parse network options
    cmd.AddValue("mtu", "P2P link MTU", conf::mtu);
    cmd.AddValue("delay", "Link delay in nanoseconds", conf::delay);
    cmd.AddValue("bandwidth", "Link bandwidth", conf::bandwidth);
    cmd.AddValue("buffer", "Switch buffer size", conf::buffer);
    cmd.AddValue("ecn", "Use explicit congestion control", conf::ecn);
    cmd.AddValue("nix", "Enable nix-vector routing", conf::nix);
    cmd.AddValue("rip", "Enable RIP routing", conf::rip);
    cmd.AddValue("ecmp", "Use equal-cost multi-path routing", conf::ecmp);
    cmd.AddValue("flow", "Use per-flow ECMP routing", conf::flow);
    cmd.AddValue("port", "Port number of server applications", conf::port);
    cmd.AddValue("socket", "Socket protocol", conf::socket);
    cmd.AddValue("tcp", "TCP protocol", conf::tcp);
    cmd.AddValue("size", "Application packet size", conf::size);
    cmd.AddValue("cdf", "Traffic CDF file location", conf::cdf);
    cmd.AddValue("load", "Traffic load relative to bisection bandwidth", conf::load);
    cmd.AddValue("incast", "Incast traffic ratio", conf::incast);
    cmd.AddValue("victim", "Incast traffic victim list", conf::victim);
    // parse simulation options
    cmd.AddValue("seed", "The seed of the random number generator", conf::seed);
    cmd.AddValue("flowmon", "Use flow-monitor to record statistics", conf::flowmon);
    cmd.AddValue("time", "Simulation time in seconds", conf::time);
    // fixed typo in the help text: "progreess" -> "progress"
    cmd.AddValue("interval", "Simulation progress print interval in seconds", conf::interval);
    // parse mtp/mpi options
    cmd.AddValue("thread", "Maximum number of threads", conf::thread);
    cmd.Parse(argc, argv);
    // link layer settings
    Config::SetDefault("ns3::PointToPointChannel::Delay", TimeValue(NanoSeconds(conf::delay)));
    Config::SetDefault("ns3::PointToPointNetDevice::DataRate", StringValue(conf::bandwidth));
    Config::SetDefault("ns3::PointToPointNetDevice::Mtu", UintegerValue(conf::mtu));
    // traffic control layer settings (RED queue disc)
    Config::SetDefault("ns3::RedQueueDisc::MeanPktSize", UintegerValue(conf::mtu));
    Config::SetDefault("ns3::RedQueueDisc::UseEcn", BooleanValue(conf::ecn));
    Config::SetDefault("ns3::RedQueueDisc::UseHardDrop", BooleanValue(false));
    Config::SetDefault("ns3::RedQueueDisc::LinkDelay", TimeValue(NanoSeconds(conf::delay)));
    Config::SetDefault("ns3::RedQueueDisc::LinkBandwidth", StringValue(conf::bandwidth));
    Config::SetDefault("ns3::RedQueueDisc::MaxSize", QueueSizeValue(QueueSize(conf::buffer)));
    Config::SetDefault("ns3::RedQueueDisc::MinTh", DoubleValue(50));
    Config::SetDefault("ns3::RedQueueDisc::MaxTh", DoubleValue(150));
    // network layer settings
    Config::SetDefault("ns3::Ipv4GlobalRouting::RandomEcmpRouting", BooleanValue(conf::ecmp));
    Config::SetDefault("ns3::Ipv4GlobalRouting::FlowEcmpRouting", BooleanValue(conf::flow));
    // transport layer settings; DCTCP uses much tighter timers than classic TCP
    Config::SetDefault("ns3::TcpL4Protocol::SocketType", StringValue(conf::tcp));
    Config::SetDefault("ns3::TcpSocket::SegmentSize", UintegerValue(conf::size));
    Config::SetDefault("ns3::TcpSocket::ConnTimeout",
                       TimeValue(conf::tcp == "ns3::TcpDctcp" ? MilliSeconds(10) : Seconds(3)));
    Config::SetDefault("ns3::TcpSocket::SndBufSize", UintegerValue(1073725440));
    Config::SetDefault("ns3::TcpSocket::RcvBufSize", UintegerValue(1073725440));
    Config::SetDefault(
        "ns3::TcpSocketBase::MinRto",
        TimeValue(conf::tcp == "ns3::TcpDctcp" ? MilliSeconds(5) : MilliSeconds(200)));
    Config::SetDefault(
        "ns3::TcpSocketBase::ClockGranularity",
        TimeValue(conf::tcp == "ns3::TcpDctcp" ? MicroSeconds(100) : MilliSeconds(1)));
    Config::SetDefault("ns3::RttEstimator::InitialEstimation",
                       TimeValue(conf::tcp == "ns3::TcpDctcp" ? MicroSeconds(200) : Seconds(1)));
    // application layer settings
    Config::SetDefault("ns3::BulkSendApplication::SendSize", UintegerValue(UINT32_MAX));
    Config::SetDefault("ns3::OnOffApplication::DataRate", StringValue(conf::bandwidth));
    Config::SetDefault("ns3::OnOffApplication::PacketSize", UintegerValue(conf::size));
    Config::SetDefault("ns3::OnOffApplication::OnTime",
                       StringValue("ns3::ConstantRandomVariable[Constant=1000]"));
    Config::SetDefault("ns3::OnOffApplication::OffTime",
                       StringValue("ns3::ConstantRandomVariable[Constant=0]"));
    // simulation settings: picosecond resolution, seed derived from the string
    Time::SetResolution(Time::PS);
    RngSeedManager::SetSeed(Hash32(conf::seed));
    // initialize mtp with the requested number of threads
    MtpInterface::Enable(conf::thread);
}
// Install the internet stack (IPv4 only) on every node, with the routing
// protocol chosen on the command line: nix-vector first, then RIP, and
// global routing as the default.
void
SetupRouting()
{
    InternetStackHelper internet;
    internet.SetIpv6StackInstall(false);
    if (conf::nix)
    {
        internet.SetRoutingHelper(Ipv4NixVectorHelper());
    }
    else if (conf::rip)
    {
        internet.SetRoutingHelper(RipHelper());
    }
    else
    {
        internet.SetRoutingHelper(Ipv4GlobalRoutingHelper());
    }
    internet.InstallAll();
    LOG("\n- Setup the topology...");
}
// Install sink applications on every host and generate client flows from the
// traffic CDF until the simulation end time. `bisection` is the bisection
// link count used to derive the absolute target load.
void
InstallTraffic(map<uint32_t, Ptr<Node>>& hosts,
               map<Ptr<Node>, Ipv4Address>& addrs,
               double bisection)
{
    // output address for debugging
    LOG("\n- Calculating routes...");
    LOG(" Host NodeId System Address");
    for (auto& p : hosts)
    {
        LOG(" " << left << setw(6) << p.first << setw(8) << p.second->GetId() << setw(8)
                << p.second->GetSystemId() << addrs[p.second]);
    }
    // nix-vector computes routes on demand; other protocols need a full table
    if (!conf::nix)
    {
        Ipv4GlobalRoutingHelper::PopulateRoutingTables();
    }
    // server applications: one sink per host, listening on conf::port
    PacketSinkHelper server(conf::socket, InetSocketAddress(Ipv4Address::GetAny(), conf::port));
    for (auto& p : hosts)
    {
        server.Install(p.second).Start(Seconds(0));
    }
    // calculate traffic: bisection bandwidth in bit/s (x2 for full duplex)
    LOG("\n- Generating traffic...");
    double bandwidth = bisection * DataRate(conf::bandwidth).GetBitRate() * 2;
    // parse the dash-separated victim host list, e.g. "0-3-7"
    string victim;
    stringstream sin(conf::victim);
    vector<uint32_t> victims;
    while (getline(sin, victim, '-'))
    {
        victims.push_back(stoi(victim));
    }
    TrafficGenerator traffic(conf::cdf,
                             hosts.size(),
                             bandwidth * conf::load,
                             conf::incast,
                             victims);
    // install traffic (client applications) until the arrival clock passes
    // the simulation end time
    auto flow = traffic.GetFlow();
    while (get<0>(flow) < conf::time)
    {
        Ptr<Node> clientNode = hosts[get<1>(flow)];
        Ptr<Node> serverNode = hosts[get<2>(flow)];
        // non-TCP sockets use an OnOff source; TCP uses a bulk sender
        if (conf::socket != "ns3::TcpSocketFactory")
        {
            OnOffHelper client(conf::socket, InetSocketAddress(addrs[serverNode], conf::port));
            client.SetAttribute("MaxBytes", UintegerValue(get<3>(flow)));
            client.Install(clientNode).Start(Seconds(get<0>(flow)));
        }
        else
        {
            BulkSendHelper client(conf::socket, InetSocketAddress(addrs[serverNode], conf::port));
            client.SetAttribute("MaxBytes", UintegerValue(get<3>(flow)));
            client.Install(clientNode).Start(Seconds(get<0>(flow)));
        }
        flow = traffic.GetFlow();
    }
    // traffic installation check: expected vs actually generated statistics
    LOG(" Expected data rate = " << bandwidth * conf::load / 1e9 << "Gbps");
    LOG(" Generated data rate = " << traffic.GetActualDataRate() / 1e9 << "Gbps");
    LOG(" Expected avg flow size = " << traffic.GetAvgFlowSize() / 1e6 << "MB");
    LOG(" Generated avg flow size = " << traffic.GetActualAvgFlowSize() / 1e6 << "MB");
    LOG(" Total flow count = " << traffic.GetFlowCount());
}
// Report the current simulated time and re-schedule this function so that
// progress is printed once every conf::interval simulated seconds.
void
PrintProgress()
{
    LOG(" Progressed to " << Simulator::Now().GetSeconds() << "s");
    Simulator::Schedule(Seconds(conf::interval), &PrintProgress);
}
void
StartSimulation()
{
// install flow-monitor
Ptr<FlowMonitor> flowMonitor;
FlowMonitorHelper flowHelper;
if (conf::flowmon)
{
flowMonitor = flowHelper.InstallAll();
}
// print progress
if (conf::interval)
{
Simulator::Schedule(Seconds(conf::interval), PrintProgress);
}
// start the simulation
Simulator::Stop(Seconds(conf::time));
LOG("\n- Start simulation...");
auto start = system_clock::now();
Simulator::Run();
auto end = system_clock::now();
auto time = duration_cast<duration<double>>(end - start).count();
// output simulation statistics
uint64_t eventCount = Simulator::GetEventCount();
if (conf::flowmon)
{
uint64_t dropped = 0;
uint64_t totalTx = 0;
uint64_t totalRx = 0;
uint64_t totalTxBytes = 0;
uint64_t flowCount = 0;
uint64_t finishedFlowCount = 0;
double totalThroughput = 0;
Time totalFct(0);
Time totalFinishedFct(0);
Time totalDelay(0);
flowMonitor->CheckForLostPackets();
for (auto& p : flowMonitor->GetFlowStats())
{
dropped = p.second.packetsDropped.size();
if ((p.second.timeLastRxPacket - p.second.timeFirstTxPacket).GetTimeStep() > 0 &&
p.second.txPackets && p.second.rxPackets)
{
totalTx += p.second.txPackets;
totalRx += p.second.rxPackets;
totalTxBytes += p.second.txBytes;
totalFct += p.second.timeLastRxPacket - p.second.timeFirstTxPacket;
if (p.second.txPackets - p.second.rxPackets == p.second.packetsDropped.size())
{
totalFinishedFct += p.second.timeLastRxPacket - p.second.timeFirstTxPacket;
finishedFlowCount++;
}
totalDelay += p.second.delaySum;
totalThroughput +=
(double)p.second.txBytes /
(p.second.timeLastRxPacket - p.second.timeFirstTxPacket).GetSeconds();
flowCount++;
}
}
double avgFct = (double)totalFct.GetMicroSeconds() / flowCount;
double avgFinishedFct = (double)totalFinishedFct.GetMicroSeconds() / finishedFlowCount;
double avgDelay = (double)totalDelay.GetMicroSeconds() / totalRx;
double avgThroughput = totalThroughput / flowCount / 1e9 * 8;
LOG(" Detected #flow = " << flowCount);
LOG(" Finished #flow = " << finishedFlowCount);
LOG(" Average FCT (all) = " << avgFct << "us");
LOG(" Average FCT (finished) = " << avgFinishedFct << "us");
LOG(" Average end to end delay = " << avgDelay << "us");
LOG(" Average flow throughput = " << avgThroughput << "Gbps");
LOG(" Network throughput = " << totalTxBytes / 1e9 * 8 / conf::time << "Gbps");
LOG(" Total Tx packets = " << totalTx);
LOG(" Total Rx packets = " << totalRx);
LOG(" Dropped packets = " << dropped);
}
Simulator::Destroy();
LOG("\n- Done!");
LOG(" Event count = " << eventCount);
LOG(" Simulation time = " << time << "s\n");
}
int
main(int argc, char* argv[])
{
Initialize(argc, argv);
uint32_t hostId = 0;
map<uint32_t, Ptr<Node>> hosts;
map<Ptr<Node>, Ipv4Address> addrs;
// calculate topo scales
uint32_t nPod = conf::cluster ? conf::cluster : conf::k; // number of pods
uint32_t nGroup = conf::k / 2; // number of group of core switches
uint32_t nCore = conf::k / 2; // number of core switch in a group
uint32_t nAgg = conf::k / 2; // number of aggregation switch in a pod
uint32_t nEdge = conf::k / 2; // number of edge switch in a pod
uint32_t nHost = conf::k / 2; // number of hosts under a switch
NodeContainer *core = new NodeContainer[nGroup];
NodeContainer *agg = new NodeContainer[nPod];
NodeContainer *edge = new NodeContainer[nPod];
NodeContainer *host = new NodeContainer[nPod * nEdge];
// create nodes
for (uint32_t i = 0; i < nGroup; i++)
{
core[i].Create(nCore / 2);
core[i].Create((nCore - 1) / 2 + 1);
}
for (uint32_t i = 0; i < nPod; i++)
{
agg[i].Create(nAgg);
}
for (uint32_t i = 0; i < nPod; i++)
{
edge[i].Create(nEdge);
}
for (uint32_t i = 0; i < nPod; i++)
{
for (uint32_t j = 0; j < nEdge; j++)
{
host[i * nEdge + j].Create(nHost);
for (uint32_t k = 0; k < nHost; k++)
{
hosts[hostId++] = host[i * nEdge + j].Get(k);
}
}
}
SetupRouting();
Ipv4AddressHelper addr;
TrafficControlHelper red;
PointToPointHelper p2p;
red.SetRootQueueDisc("ns3::RedQueueDisc");
// connect edge switches to hosts
for (uint32_t i = 0; i < nPod; i++)
{
for (uint32_t j = 0; j < nEdge; j++)
{
string subnet = "10." + to_string(i) + "." + to_string(j) + ".0";
addr.SetBase(subnet.c_str(), "255.255.255.0");
for (uint32_t k = 0; k < nHost; k++)
{
Ptr<Node> node = host[i * nEdge + j].Get(k);
NetDeviceContainer ndc = p2p.Install(NodeContainer(node, edge[i].Get(j)));
red.Install(ndc.Get(1));
addrs[node] = addr.Assign(ndc).GetAddress(0);
}
}
}
// connect aggregate switches to edge switches
for (uint32_t i = 0; i < nPod; i++)
{
for (uint32_t j = 0; j < nAgg; j++)
{
string subnet = "10." + to_string(i) + "." + to_string(j + nEdge) + ".0";
addr.SetBase(subnet.c_str(), "255.255.255.0");
for (uint32_t k = 0; k < nEdge; k++)
{
NetDeviceContainer ndc = p2p.Install(agg[i].Get(j), edge[i].Get(k));
red.Install(ndc);
addr.Assign(ndc);
}
}
}
// connect core switches to aggregate switches
for (uint32_t i = 0; i < nGroup; i++)
{
for (uint32_t j = 0; j < nPod; j++)
{
string subnet = "10." + to_string(i + nPod) + "." + to_string(j) + ".0";
addr.SetBase(subnet.c_str(), "255.255.255.0");
for (uint32_t k = 0; k < nCore; k++)
{
NetDeviceContainer ndc = p2p.Install(core[i].Get(k), agg[j].Get(i));
red.Install(ndc);
addr.Assign(ndc);
}
}
}
InstallTraffic(hosts, addrs, nGroup * nCore * nPod / 2.0);
StartSimulation();
return 0;
}

View File

@@ -0,0 +1,241 @@
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/**
* \file
* \ingroup mtp
*
* TestDistributed creates a dumbbell topology and logically splits it in
* half. The left half is placed on logical processor 1 and the right half
* is placed on logical processor 2.
*
* ------- -------
* RANK 1 RANK 2
* ------- | -------
* |
* n0 ---------| | |---------- n6
* | | |
* n1 -------\ | | | /------- n7
* n4 ----------|---------- n5
* n2 -------/ | | | \------- n8
* | | |
* n3 ---------| | |---------- n9
*
*
* OnOff clients are placed on each left leaf node. Each right leaf node
* is a packet sink for a left leaf node. As a packet travels from one
* logical processor to another (the link between n4 and n5), the event is
* handed over between the logical processes. In this multithreaded (MTP)
* version the hand-off happens in shared memory rather than via MPI messages.
*
* One packet is sent from each left leaf node. The packet sinks on the
* right leaf nodes output logging information when they receive the packet.
*/
#include "ns3/core-module.h"
#include "ns3/internet-stack-helper.h"
#include "ns3/ipv4-address-helper.h"
#include "ns3/ipv4-global-routing-helper.h"
#include "ns3/mtp-interface.h"
#include "ns3/multithreaded-simulator-impl.h"
#include "ns3/network-module.h"
#include "ns3/nix-vector-helper.h"
#include "ns3/on-off-helper.h"
#include "ns3/packet-sink-helper.h"
#include "ns3/point-to-point-helper.h"
#include <iomanip>
using namespace ns3;
NS_LOG_COMPONENT_DEFINE("SimpleMtp");
// Build a dumbbell topology split across two logical processes (left half on
// system 1, right half on system 2), send one small UDP burst from each left
// leaf to its matching right leaf, and run the multithreaded simulation.
int
main(int argc, char* argv[])
{
    bool nix = true;
    bool tracing = false;
    bool verbose = false;
    // Parse command line
    CommandLine cmd(__FILE__);
    cmd.AddValue("nix", "Enable the use of nix-vector or global routing", nix);
    cmd.AddValue("tracing", "Enable pcap tracing", tracing);
    cmd.AddValue("verbose", "verbose output", verbose);
    cmd.Parse(argc, argv);
    // 2 logical processes driven by 2 threads
    MtpInterface::Enable(2, 2);
    GlobalValue::Bind("SimulatorImplementationType",
                      StringValue("ns3::MultithreadedSimulatorImpl"));
    if (verbose)
    {
        LogComponentEnable("PacketSink",
                           (LogLevel)(LOG_LEVEL_INFO | LOG_PREFIX_NODE | LOG_PREFIX_TIME));
    }
    // Some default values
    Config::SetDefault("ns3::OnOffApplication::PacketSize", UintegerValue(512));
    Config::SetDefault("ns3::OnOffApplication::DataRate", StringValue("1Mbps"));
    Config::SetDefault("ns3::OnOffApplication::MaxBytes", UintegerValue(512));
    // Create leaf nodes on left with system id 1
    NodeContainer leftLeafNodes;
    leftLeafNodes.Create(4, 1);
    // Create router nodes. Left router with system id 1,
    // right router with system id 2 (matching the halves they serve)
    NodeContainer routerNodes;
    Ptr<Node> routerNode1 = CreateObject<Node>(1);
    Ptr<Node> routerNode2 = CreateObject<Node>(2);
    routerNodes.Add(routerNode1);
    routerNodes.Add(routerNode2);
    // Create leaf nodes on right with system id 2
    NodeContainer rightLeafNodes;
    rightLeafNodes.Create(4, 2);
    // The router-router link crosses the partition boundary; its 5ms delay
    // provides the lookahead between the two logical processes
    PointToPointHelper routerLink;
    routerLink.SetDeviceAttribute("DataRate", StringValue("5Mbps"));
    routerLink.SetChannelAttribute("Delay", StringValue("5ms"));
    PointToPointHelper leafLink;
    leafLink.SetDeviceAttribute("DataRate", StringValue("1Mbps"));
    leafLink.SetChannelAttribute("Delay", StringValue("2ms"));
    // Add link connecting routers
    NetDeviceContainer routerDevices;
    routerDevices = routerLink.Install(routerNodes);
    // Add links for left side leaf nodes to left router
    NetDeviceContainer leftRouterDevices;
    NetDeviceContainer leftLeafDevices;
    for (uint32_t i = 0; i < 4; ++i)
    {
        NetDeviceContainer temp = leafLink.Install(leftLeafNodes.Get(i), routerNodes.Get(0));
        leftLeafDevices.Add(temp.Get(0));
        leftRouterDevices.Add(temp.Get(1));
    }
    // Add links for right side leaf nodes to right router
    NetDeviceContainer rightRouterDevices;
    NetDeviceContainer rightLeafDevices;
    for (uint32_t i = 0; i < 4; ++i)
    {
        NetDeviceContainer temp = leafLink.Install(rightLeafNodes.Get(i), routerNodes.Get(1));
        rightLeafDevices.Add(temp.Get(0));
        rightRouterDevices.Add(temp.Get(1));
    }
    InternetStackHelper stack;
    if (nix)
    {
        Ipv4NixVectorHelper nixRouting;
        stack.SetRoutingHelper(nixRouting); // has effect on the next Install ()
    }
    stack.InstallAll();
    Ipv4InterfaceContainer routerInterfaces;
    Ipv4InterfaceContainer leftLeafInterfaces;
    Ipv4InterfaceContainer leftRouterInterfaces;
    Ipv4InterfaceContainer rightLeafInterfaces;
    Ipv4InterfaceContainer rightRouterInterfaces;
    Ipv4AddressHelper leftAddress;
    leftAddress.SetBase("10.1.1.0", "255.255.255.0");
    Ipv4AddressHelper routerAddress;
    routerAddress.SetBase("10.2.1.0", "255.255.255.0");
    Ipv4AddressHelper rightAddress;
    rightAddress.SetBase("10.3.1.0", "255.255.255.0");
    // Router-to-Router interfaces
    routerInterfaces = routerAddress.Assign(routerDevices);
    // Left interfaces: one /24 per leaf-router pair
    for (uint32_t i = 0; i < 4; ++i)
    {
        NetDeviceContainer ndc;
        ndc.Add(leftLeafDevices.Get(i));
        ndc.Add(leftRouterDevices.Get(i));
        Ipv4InterfaceContainer ifc = leftAddress.Assign(ndc);
        leftLeafInterfaces.Add(ifc.Get(0));
        leftRouterInterfaces.Add(ifc.Get(1));
        leftAddress.NewNetwork();
    }
    // Right interfaces: one /24 per leaf-router pair
    for (uint32_t i = 0; i < 4; ++i)
    {
        NetDeviceContainer ndc;
        ndc.Add(rightLeafDevices.Get(i));
        ndc.Add(rightRouterDevices.Get(i));
        Ipv4InterfaceContainer ifc = rightAddress.Assign(ndc);
        rightLeafInterfaces.Add(ifc.Get(0));
        rightRouterInterfaces.Add(ifc.Get(1));
        rightAddress.NewNetwork();
    }
    if (!nix)
    {
        Ipv4GlobalRoutingHelper::PopulateRoutingTables();
    }
    if (tracing)
    {
        routerLink.EnablePcap("router-left", routerDevices, true);
        leafLink.EnablePcap("leaf-left", leftLeafDevices, true);
        routerLink.EnablePcap("router-right", routerDevices, true);
        leafLink.EnablePcap("leaf-right", rightLeafDevices, true);
    }
    // Create a packet sink on the right leafs to receive packets from left leafs
    uint16_t port = 50000;
    Address sinkLocalAddress(InetSocketAddress(Ipv4Address::GetAny(), port));
    PacketSinkHelper sinkHelper("ns3::UdpSocketFactory", sinkLocalAddress);
    ApplicationContainer sinkApp;
    for (uint32_t i = 0; i < 4; ++i)
    {
        sinkApp.Add(sinkHelper.Install(rightLeafNodes.Get(i)));
    }
    sinkApp.Start(Seconds(1.0));
    sinkApp.Stop(Seconds(5));
    // Create the OnOff applications to send
    OnOffHelper clientHelper("ns3::UdpSocketFactory", Address());
    clientHelper.SetAttribute("OnTime", StringValue("ns3::ConstantRandomVariable[Constant=1]"));
    clientHelper.SetAttribute("OffTime", StringValue("ns3::ConstantRandomVariable[Constant=0]"));
    ApplicationContainer clientApps;
    for (uint32_t i = 0; i < 4; ++i)
    {
        AddressValue remoteAddress(InetSocketAddress(rightLeafInterfaces.GetAddress(i), port));
        clientHelper.SetAttribute("Remote", remoteAddress);
        clientApps.Add(clientHelper.Install(leftLeafNodes.Get(i)));
    }
    clientApps.Start(Seconds(1.0));
    clientApps.Stop(Seconds(5));
    Simulator::Stop(Seconds(5));
    Simulator::Run();
    Simulator::Destroy();
    return 0;
}

View File

@@ -0,0 +1,12 @@
0 0
10000 0.15
20000 0.20
30000 0.30
50000 0.40
80000 0.53
200000 0.60
1000000 0.70
2000000 0.80
5000000 0.90
10000000 0.97
30000000 1

View File

@@ -0,0 +1,332 @@
/*
* Copyright (c) 2023 State Key Laboratory for Novel Software Technology
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Songyuan Bai <i@f5soft.site>
*/
/**
* \file
* \ingroup mtp
* Implementation of classes ns3::LogicalProcess
*/
#include "logical-process.h"
#include "mtp-interface.h"
#include "ns3/channel.h"
#include "ns3/node-container.h"
#include "ns3/simulator.h"
#include <algorithm>
namespace ns3
{
NS_LOG_COMPONENT_DEFINE("LogicalProcess");
// Construct an LP in its initial state: no events, no scheduler attached yet,
// zero lookahead, and uid/context counters at ns-3's defaults.
LogicalProcess::LogicalProcess()
    : m_systemId(0),
      m_systemCount(0),
      m_stop(false),
      m_uid(EventId::UID::VALID), // first uid handed out to scheduled events
      m_currentContext(Simulator::NO_CONTEXT),
      m_currentUid(0),
      m_currentTs(0),
      m_eventCount(0),
      m_pendingEventCount(0),
      m_events(nullptr), // the scheduler is injected later via SetScheduler()
      m_lookAhead(TimeStep(0))
{
}
// Destructor: drain and unref any still-pending events, but only when this LP
// is the sole owner of the event list.
LogicalProcess::~LogicalProcess()
{
    NS_LOG_INFO("system " << m_systemId << " finished with event count " << m_eventCount);
    // m_events is null until SetScheduler() is called — guard against
    // dereferencing it when the LP was never fully set up.
    // if others hold references to event list, do not unref events
    if (m_events && m_events->GetReferenceCount() == 1)
    {
        while (!m_events->IsEmpty())
        {
            Scheduler::Event next = m_events->RemoveNext();
            next.impl->Unref();
        }
    }
}
// Assign this LP its unique system id and record the total number of LPs.
void
LogicalProcess::Enable(const uint32_t systemId, const uint32_t systemCount)
{
    m_systemId = systemId;
    m_systemCount = systemCount;
}
// Compute this LP's lookahead: the minimum propagation delay of all
// point-to-point links that cross from this partition into another one.
// The lookahead bounds how far this LP may safely run ahead of the others.
void
LogicalProcess::CalculateLookAhead()
{
    NS_LOG_FUNCTION(this);
    if (m_systemId == 0)
    {
        m_lookAhead = TimeStep(0); // No lookahead for the public LP
    }
    else
    {
        // start from "infinity"; halved so adding it to a time cannot overflow
        m_lookAhead = Time::Max() / 2 - TimeStep(1);
        NodeContainer c = NodeContainer::GetGlobal();
        for (auto iter = c.Begin(); iter != c.End(); ++iter)
        {
#ifdef NS3_MPI
            // for hybrid simulation, the left 16-bit indicates local system ID,
            // and the right 16-bit indicates global system ID (MPI rank)
            if (((*iter)->GetSystemId() >> 16) != m_systemId)
            {
                continue;
            }
#else
            // skip nodes that belong to other partitions
            if ((*iter)->GetSystemId() != m_systemId)
            {
                continue;
            }
#endif
            for (uint32_t i = 0; i < (*iter)->GetNDevices(); ++i)
            {
                Ptr<NetDevice> localNetDevice = (*iter)->GetDevice(i);
                // only works for p2p links currently
                if (!localNetDevice->IsPointToPoint())
                {
                    continue;
                }
                Ptr<Channel> channel = localNetDevice->GetChannel();
                if (!channel)
                {
                    continue;
                }
                // grab the adjacent node
                Ptr<Node> remoteNode;
                if (channel->GetDevice(0) == localNetDevice)
                {
                    remoteNode = (channel->GetDevice(1))->GetNode();
                }
                else
                {
                    remoteNode = (channel->GetDevice(0))->GetNode();
                }
                // if it's not remote, don't consider it
                if (remoteNode->GetSystemId() == m_systemId)
                {
                    continue;
                }
                // compare delay on the channel with current value of m_lookAhead.
                // if delay on channel is smaller, make it the new lookAhead.
                TimeValue delay;
                channel->GetAttribute("Delay", delay);
                if (delay.Get() < m_lookAhead)
                {
                    m_lookAhead = delay.Get();
                }
                // add the neighbour to the mailbox: operator[] default-creates
                // an empty queue for this remote system if it is absent
                m_mailbox[remoteNode->GetSystemId()];
            }
        }
    }
    NS_LOG_INFO("lookahead of system " << m_systemId << " is set to " << m_lookAhead.GetTimeStep());
}
// Merge events that other LPs deposited into our mailbox during the previous
// round into the local event queue, assigning fresh local uids.
void
LogicalProcess::ReceiveMessages()
{
    NS_LOG_FUNCTION(this);
    m_pendingEventCount = 0;
    for (auto& item : m_mailbox)
    {
        auto& queue = item.second;
        // sort descending (tuple order: timestamp, sender, uid) so that
        // popping from the back yields events in increasing time order
        std::sort(queue.begin(), queue.end(), std::greater<>());
        while (!queue.empty())
        {
            auto& evWithTs = queue.back();
            Scheduler::Event& ev = std::get<3>(evWithTs);
            ev.key.m_uid = m_uid++; // replace the sender's uid with a local one
            m_events->Insert(ev);
            queue.pop_back();
            m_pendingEventCount++;
        }
    }
}
// Execute all events of this LP that fall within the time window granted for
// the current round, and record how long the round took on the wall clock.
void
LogicalProcess::ProcessOneRound()
{
    NS_LOG_FUNCTION(this);
    // set thread context
    MtpInterface::SetSystem(m_systemId);
    // calculate time window: safe to run up to the globally smallest event
    // time plus our lookahead, but never past the next public event
    Time grantedTime =
        Min(MtpInterface::GetSmallestTime() + m_lookAhead, MtpInterface::GetNextPublicTime());
    auto start = std::chrono::steady_clock::now();
    // process events
    while (Next() <= grantedTime)
    {
        Scheduler::Event next = m_events->RemoveNext();
        m_eventCount++;
        NS_LOG_LOGIC("handle " << next.key.m_ts);
        m_currentTs = next.key.m_ts;
        m_currentContext = next.key.m_context;
        m_currentUid = next.key.m_uid;
        next.impl->Invoke();
        next.impl->Unref();
    }
    auto end = std::chrono::steady_clock::now();
    // wall-clock duration of this round, consumed by the partition scheduler
    m_executionTime = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
}
// Schedule an event `delay` after this LP's current time, tagged with the
// current context and a fresh per-LP uid. Returns the id handed to callers.
EventId
LogicalProcess::Schedule(const Time& delay, EventImpl* event)
{
    const uint64_t ts = m_currentTs + delay.GetTimeStep();
    Scheduler::Event ev;
    ev.impl = event;
    ev.key.m_ts = ts;
    ev.key.m_context = GetContext();
    ev.key.m_uid = m_uid++;
    m_events->Insert(ev);
    return EventId(event, ts, ev.key.m_context, ev.key.m_uid);
}
// Insert an event at an absolute timestamp under an explicit context,
// assigning it a fresh per-LP uid.
void
LogicalProcess::ScheduleAt(const uint32_t context, const Time& time, EventImpl* event)
{
    Scheduler::Event ev;
    ev.impl = event;
    ev.key.m_uid = m_uid++;
    ev.key.m_context = context;
    ev.key.m_ts = time.GetTimeStep();
    m_events->Insert(ev);
}
// Schedule an event relative to this LP's current time, targeting a possibly
// different LP. Local events go straight into our queue; remote events are
// deposited into the destination's mailbox and merged on its next round.
void
LogicalProcess::ScheduleWithContext(LogicalProcess* remote,
                                    const uint32_t context,
                                    const Time& delay,
                                    EventImpl* event)
{
    Scheduler::Event ev;
    ev.impl = event;
    ev.key.m_ts = delay.GetTimeStep() + m_currentTs;
    ev.key.m_context = context;
    if (remote == this)
    {
        ev.key.m_uid = m_uid++;
        m_events->Insert(ev);
    }
    else
    {
        // the destination assigns a local uid in ReceiveMessages()
        ev.key.m_uid = EventId::UID::INVALID;
        // NOTE(review): m_uid is recorded in the tuple without being
        // incremented — presumably only used as a sort tie-breaker in
        // ReceiveMessages(); confirm ties between same-sender, same-timestamp
        // events are acceptable
        remote->m_mailbox[m_systemId].emplace_back(m_currentTs, m_systemId, m_uid, ev);
    }
}
// Immediately execute an event on behalf of this LP, temporarily switching
// the calling thread's system context to this LP and restoring it afterwards.
void
LogicalProcess::InvokeNow(const Scheduler::Event& ev)
{
    uint32_t oldSystemId = MtpInterface::GetSystem()->GetSystemId();
    MtpInterface::SetSystem(m_systemId);
    m_eventCount++;
    NS_LOG_LOGIC("handle " << ev.key.m_ts);
    m_currentTs = ev.key.m_ts;
    m_currentContext = ev.key.m_context;
    m_currentUid = ev.key.m_uid;
    ev.impl->Invoke();
    ev.impl->Unref();
    // restore previous thread context
    MtpInterface::SetSystem(oldSystemId);
}
void
LogicalProcess::Remove(const EventId& id)
{
if (IsExpired(id))
{
return;
}
Scheduler::Event event;
event.impl = id.PeekEventImpl();
event.key.m_ts = id.GetTs();
event.key.m_context = id.GetContext();
event.key.m_uid = id.GetUid();
m_events->Remove(event);
event.impl->Cancel();
// whenever we remove an event from the event list, we have to unref it.
event.impl->Unref();
}
bool
LogicalProcess::IsExpired(const EventId& id) const
{
return id.PeekEventImpl() == nullptr || id.GetTs() < m_currentTs ||
(id.GetTs() == m_currentTs && id.GetUid() <= m_currentUid) ||
id.PeekEventImpl()->IsCancelled();
}
// Replace this LP's scheduler, migrating any pending events from the old
// event list into the newly created one.
void
LogicalProcess::SetScheduler(ObjectFactory schedulerFactory)
{
    Ptr<Scheduler> newScheduler = schedulerFactory.Create<Scheduler>();
    if (m_events)
    {
        while (!m_events->IsEmpty())
        {
            newScheduler->Insert(m_events->RemoveNext());
        }
    }
    m_events = newScheduler;
}
// Timestamp of this LP's earliest pending event, or Time::Max() when the LP
// has been stopped or its queue is empty.
Time
LogicalProcess::Next() const
{
    if (!m_stop && !m_events->IsEmpty())
    {
        return TimeStep(m_events->PeekNext().key.m_ts);
    }
    return Time::Max();
}
} // namespace ns3

View File

@@ -0,0 +1,196 @@
/*
* Copyright (c) 2023 State Key Laboratory for Novel Software Technology
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Songyuan Bai <i@f5soft.site>
*/
/**
* @file
* @ingroup mtp
* Declaration of classes ns3::LogicalProcess
*/
#ifndef LOGICAL_PROCESS_H
#define LOGICAL_PROCESS_H
#include "ns3/event-id.h"
#include "ns3/event-impl.h"
#include "ns3/nstime.h"
#include "ns3/object-factory.h"
#include "ns3/ptr.h"
#include "ns3/scheduler.h"
#include <atomic>
#include <chrono>
#include <map>
#include <tuple>
#include <vector>
namespace ns3
{
/**
 * @brief
 * Implementation of the logical process (LP) used by the multithreaded simulator.
 */
class LogicalProcess
{
  public:
    /** Default constructor */
    LogicalProcess();

    /** Destructor */
    ~LogicalProcess();

    /**
     * Enable this logical process object by giving it a unique systemId,
     * and let it know the total number of systems.
     *
     * @param systemId Unique ID of this LP (0 denotes the public LP)
     * @param systemCount Total number of LPs, including the public LP
     */
    void Enable(const uint32_t systemId, const uint32_t systemCount);

    /**
     * @brief Calculate the lookahead value.
     */
    void CalculateLookAhead();

    /**
     * @brief Receive events sent by other logical processes in the previous round.
     */
    void ReceiveMessages();

    /**
     * @brief Process all events in the current round.
     */
    void ProcessOneRound();

    /**
     * @brief Get the execution time of the last round.
     *
     * This method is called by MtpInterface to determine the priority of each LP.
     *
     * @return The execution time of the last round
     */
    inline uint64_t GetExecutionTime() const
    {
        return m_executionTime;
    }

    /**
     * @brief Get the pending event count of the next round.
     *
     * This method is called by MtpInterface to determine the priority of each LP.
     *
     * @return Number of pending events of the next round
     */
    inline uint64_t GetPendingEventCount() const
    {
        return m_pendingEventCount;
    }

    /**
     * @brief Get the future event list (scheduler)
     *
     * @return The event list
     */
    inline Ptr<Scheduler> GetPendingEvents() const
    {
        return m_events;
    }

    /**
     * @brief Invoke an event immediately at the current time.
     *
     * This method is called when another thread wants to process an event of an LP
     * that does not belong to it. It is used at the very beginning of the simulation
     * when the main thread will invoke events of newly allocated LP, whose timestamps
     * are zero.
     *
     * @param ev The event to be invoked now
     */
    void InvokeNow(const Scheduler::Event& ev);

    // The following methods are mapped from MultithreadedSimulatorImpl

    /** Schedule an event after @p delay on this LP. @return Its EventId. */
    EventId Schedule(const Time& delay, EventImpl* event);

    /** Schedule an event at absolute @p time with the given @p context. */
    void ScheduleAt(const uint32_t context, const Time& time, EventImpl* event);

    /** Schedule an event on @p remote (possibly this LP) with the given context. */
    void ScheduleWithContext(LogicalProcess* remote,
                             const uint32_t context,
                             const Time& delay,
                             EventImpl* event);

    /** Remove a scheduled event from this LP's event list. */
    void Remove(const EventId& id);

    /** Cancel a scheduled event without removing it from the event list. */
    void Cancel(const EventId& id);

    /** @return True if @p id already ran, was cancelled, or is invalid. */
    bool IsExpired(const EventId& id) const;

    /** Replace this LP's scheduler, migrating all pending events into it. */
    void SetScheduler(ObjectFactory schedulerFactory);

    /** @return Timestamp of the earliest pending event (Time::Max() if none). */
    Time Next() const;

    /** @return True if this LP is stopped or has no pending events. */
    inline bool isLocalFinished() const
    {
        return m_stop || m_events->IsEmpty();
    }

    /** Stop this LP: no further events will be processed. */
    inline void Stop()
    {
        m_stop = true;
    }

    /** @return The current simulation time of this LP. */
    inline Time Now() const
    {
        return TimeStep(m_currentTs);
    }

    /** @return Remaining delay until @p id fires, relative to this LP's clock. */
    inline Time GetDelayLeft(const EventId& id) const
    {
        return TimeStep(id.GetTs() - m_currentTs);
    }

    /** @return This LP's unique system ID. */
    inline uint32_t GetSystemId(void) const
    {
        return m_systemId;
    }

    /** @return Context (node ID) of the event currently being executed. */
    inline uint32_t GetContext() const
    {
        return m_currentContext;
    }

    /** @return Total number of events executed by this LP so far. */
    inline uint64_t GetEventCount() const
    {
        return m_eventCount;
    }

  private:
    uint32_t m_systemId;          //!< Unique ID of this LP (0 is the public LP)
    uint32_t m_systemCount;       //!< Total number of LPs, including the public LP
    bool m_stop;                  //!< True once Stop() was called on this LP
    uint32_t m_uid;               //!< Next event UID to assign
    uint32_t m_currentContext;    //!< Context (node ID) of the event being executed
    uint32_t m_currentUid;        //!< UID of the event being executed
    uint64_t m_currentTs;         //!< Timestamp of the event being executed
    uint64_t m_eventCount;        //!< Number of events executed so far
    uint64_t m_pendingEventCount; //!< Number of events pending for the next round
    Ptr<Scheduler> m_events;      //!< Future event list of this LP

    Time m_lookAhead; //!< Lookahead: lower bound on cross-LP event delays

    //! Per-sender inbox of (send timestamp, sender ID, sender UID, event)
    //! tuples carrying events scheduled on this LP by other LPs
    //! (see ScheduleWithContext for the producer side).
    std::map<uint32_t, std::vector<std::tuple<uint64_t, uint32_t, uint32_t, Scheduler::Event>>>
        m_mailbox; // event message mail box

    //! Execution time of the last round, as a std::chrono tick count
    //! (presumably a steady_clock nanosecond measurement -- verify where set)
    std::chrono::nanoseconds::rep m_executionTime;
};
} // namespace ns3
#endif /* LOGICAL_PROCESS_H */

View File

@@ -0,0 +1,422 @@
/*
* Copyright (c) 2023 State Key Laboratory for Novel Software Technology
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Songyuan Bai <i@f5soft.site>
*/
/**
* \file
* \ingroup mtp
* Implementation of classes ns3::MtpInterface
*/
#include "mtp-interface.h"
#include "ns3/assert.h"
#include "ns3/config.h"
#include "ns3/log.h"
#include "ns3/string.h"
#include "ns3/uinteger.h"
#include <algorithm>
#include <cmath>
namespace ns3
{
NS_LOG_COMPONENT_DEFINE("MtpInterface");
void
MtpInterface::Enable()
{
    // Select the multithreaded simulator implementation (or the hybrid MPI+MT
    // implementation when MPI support is compiled in). Thread count and the
    // partitioning will both be determined automatically.
#ifdef NS3_MPI
    GlobalValue::Bind("SimulatorImplementationType", StringValue("ns3::HybridSimulatorImpl"));
#else
    GlobalValue::Bind("SimulatorImplementationType",
                      StringValue("ns3::MultithreadedSimulatorImpl"));
#endif
    g_enabled = true;
}
void
MtpInterface::Enable(const uint32_t threadCount)
{
    // Pin the MaxThreads attribute of the selected simulator implementation,
    // then enable with automatic partitioning.
#ifdef NS3_MPI
    Config::SetDefault("ns3::HybridSimulatorImpl::MaxThreads", UintegerValue(threadCount));
#else
    Config::SetDefault("ns3::MultithreadedSimulatorImpl::MaxThreads", UintegerValue(threadCount));
#endif
    MtpInterface::Enable();
}
void
MtpInterface::Enable(const uint32_t threadCount, const uint32_t systemCount)
{
    // Enable the interface with an explicit (manual) partitioning: the caller
    // supplies both the thread count and the number of LPs. Also invoked as
    // Enable(1, 0) by the simulator constructor before automatic partitioning.
    NS_ASSERT_MSG(threadCount > 0, "There must be at least one thread");
    // called by manual partition
    if (!g_enabled)
    {
        GlobalValue::Bind("SimulatorImplementationType",
                          StringValue("ns3::MultithreadedSimulatorImpl"));
    }
    // set size
    g_threadCount = threadCount;
    g_systemCount = systemCount;
    // allocate systems
    g_systems = new LogicalProcess[g_systemCount + 1]; // include the public LP
    for (uint32_t i = 0; i <= g_systemCount; i++)
    {
        g_systems[i].Enable(i, g_systemCount + 1);
    }
    // resolve the LP scheduling (priority) policy from the global value
    StringValue s;
    g_sortMethod.GetValue(s);
    if (s.Get() == "ByExecutionTime")
    {
        g_sortFunc = SortByExecutionTime;
    }
    else if (s.Get() == "ByPendingEventCount")
    {
        g_sortFunc = SortByPendingEventCount;
    }
    else if (s.Get() == "ByEventCount")
    {
        g_sortFunc = SortByEventCount;
    }
    else if (s.Get() == "BySimulationTime")
    {
        g_sortFunc = SortBySimulationTime;
    }
    UintegerValue ui;
    g_sortPeriod.GetValue(ui);
    if (ui.Get() == 0)
    {
        // Auto-tune the sorting period from the LP count. Guard against fewer
        // than two LPs: std::log2(0) is -infinity and converting it to an
        // unsigned integer is undefined behavior (this path is hit by the
        // bootstrap call Enable(1, 0)); log2(1) would also just yield 1.
        if (g_systemCount < 2)
        {
            g_period = 1;
        }
        else
        {
            g_period = std::ceil(std::log2(g_systemCount) / 4 + 1);
        }
        NS_LOG_INFO("Secheduling period is automatically set to " << g_period);
    }
    else
    {
        g_period = ui.Get();
    }
    // create a thread local storage key
    // so that we can access the currently assigned LP of each thread
    pthread_key_create(&g_key, nullptr);
    pthread_setspecific(g_key, &g_systems[0]);
}
void
MtpInterface::EnableNew(const uint32_t newSystemCount)
{
    // Grow the LP array by newSystemCount, preserving the existing LPs
    // (including the public LP at index 0).
    const LogicalProcess* oldSystems = g_systems;
    g_systems = new LogicalProcess[g_systemCount + newSystemCount + 1];
    for (uint32_t i = 0; i <= g_systemCount; i++)
    {
        // NOTE(review): relies on LogicalProcess copy assignment transferring
        // the pending event list and mailbox safely -- verify (rule of three).
        g_systems[i] = oldSystems[i];
    }
    delete[] oldSystems;
    g_systemCount += newSystemCount;
    for (uint32_t i = 0; i <= g_systemCount; i++)
    {
        g_systems[i].Enable(i, g_systemCount + 1);
    }
    UintegerValue ui;
    g_sortPeriod.GetValue(ui);
    if (ui.Get() == 0)
    {
        // auto-tuned scheduling period derived from the (new) LP count
        g_period = std::ceil(std::log2(g_systemCount) / 4 + 1);
        NS_LOG_INFO("Secheduling period is automatically set to " << g_period);
    }
    else
    {
        g_period = ui.Get();
    }
    // create a thread local storage key
    // so that we can access the currently assigned LP of each thread
    // NOTE(review): when Enable() already created g_key, this creates a second
    // key and orphans the first -- confirm whether reusing the existing key
    // (calling only pthread_setspecific here) is the intended behavior.
    pthread_key_create(&g_key, nullptr);
    pthread_setspecific(g_key, &g_systems[0]);
}
void
MtpInterface::EnableNew(const uint32_t threadCount, const uint32_t newSystemCount)
{
    // Adjust the worker thread count (only known after automatic partitioning)
    // and create the newly required LPs in a single step.
    g_threadCount = threadCount;
    EnableNew(newSystemCount);
}
void
MtpInterface::Disable()
{
    // Tear down all multithreading state so a subsequent Enable() starts from
    // a clean slate. Called by the simulator's Destroy().
    g_threadCount = 0;
    g_systemCount = 0;
    g_sortFunc = nullptr;
    g_globalFinished = false;
    delete[] g_systems;
    delete[] g_threads;
    delete[] g_sortedSystemIndices;
    // Null the pointers so a repeated Disable() (or a stray access after
    // teardown) cannot double-delete or dereference freed memory.
    g_systems = nullptr;
    g_threads = nullptr;
    g_sortedSystemIndices = nullptr;
}
void
MtpInterface::Run()
{
    // Main simulation driver: spawn worker threads, repeatedly execute rounds
    // until every LP is finished, then join the workers.
    RunBefore();
    while (!g_globalFinished)
    {
        ProcessOneRound();
        CalculateSmallestTime();
    }
    RunAfter();
}
void
MtpInterface::RunBefore()
{
    CalculateLookAhead();
    // LP index for sorting & holding worker threads
    g_sortedSystemIndices = new uint32_t[g_systemCount];
    for (uint32_t i = 0; i < g_systemCount; i++)
    {
        g_sortedSystemIndices[i] = i + 1; // skip index 0 (the public LP)
    }
    // Park the dispatch index past the end so freshly started workers spin in
    // ThreadFunc until ProcessOneRound opens the first stage.
    g_systemIndex.store(g_systemCount, std::memory_order_release);
    // start threads
    g_threads = new pthread_t[g_threadCount - 1]; // exclude the main thread
    for (uint32_t i = 0; i < g_threadCount - 1; i++)
    {
        pthread_create(&g_threads[i], nullptr, ThreadFunc, nullptr);
    }
}
void
MtpInterface::ProcessOneRound()
{
    // assign logical process to threads
    // determine the priority of logical processes
    // (re-sort the LP dispatch order once every g_period rounds)
    if (g_sortFunc != nullptr && g_round++ % g_period == 0)
    {
        std::sort(g_sortedSystemIndices, g_sortedSystemIndices + g_systemCount, g_sortFunc);
    }
    // stage 1: process events
    g_recvMsgStage = false;
    g_finishedSystemCount.store(0, std::memory_order_relaxed);
    // Resetting the dispatch index releases workers spinning in ThreadFunc;
    // they and the main thread now claim LPs one at a time via fetch_add.
    g_systemIndex.store(0, std::memory_order_release);
    // main thread also needs to process an LP to reduce an extra thread overhead
    while (true)
    {
        uint32_t index = g_systemIndex.fetch_add(1, std::memory_order_acquire);
        if (index >= g_systemCount)
        {
            break;
        }
        LogicalProcess* system = &g_systems[g_sortedSystemIndices[index]];
        system->ProcessOneRound();
        g_finishedSystemCount.fetch_add(1, std::memory_order_release);
    }
    // logical process barrier synchronization (spin until every LP finished)
    while (g_finishedSystemCount.load(std::memory_order_acquire) != g_systemCount)
    {
    };
    // stage 2: process the public LP (single-threaded, on the main thread)
    g_systems[0].ProcessOneRound();
    // stage 3: receive messages (deliver mailbox events sent during stage 1)
    g_recvMsgStage = true;
    g_finishedSystemCount.store(0, std::memory_order_relaxed);
    g_systemIndex.store(0, std::memory_order_release);
    while (true)
    {
        uint32_t index = g_systemIndex.fetch_add(1, std::memory_order_acquire);
        if (index >= g_systemCount)
        {
            break;
        }
        LogicalProcess* system = &g_systems[g_sortedSystemIndices[index]];
        system->ReceiveMessages();
        g_finishedSystemCount.fetch_add(1, std::memory_order_release);
    }
    // logical process barrier synchronization
    while (g_finishedSystemCount.load(std::memory_order_acquire) != g_systemCount)
    {
    };
}
void
MtpInterface::CalculateSmallestTime()
{
// update smallest time
g_smallestTime = Time::Max() / 2;
for (uint32_t i = 0; i <= g_systemCount; i++)
{
Time nextTime = g_systems[i].Next();
if (nextTime < g_smallestTime)
{
g_smallestTime = nextTime;
}
}
g_nextPublicTime = g_systems[0].Next();
// test if global finished
bool globalFinished = true;
for (uint32_t i = 0; i <= g_systemCount; i++)
{
globalFinished &= g_systems[i].isLocalFinished();
}
g_globalFinished = globalFinished;
}
void
MtpInterface::RunAfter()
{
    // global finished, terminate threads
    // Resetting the dispatch index releases workers stuck in ThreadFunc's
    // inner spin; they re-check g_globalFinished (now true) and return.
    g_systemIndex.store(0, std::memory_order_release);
    for (uint32_t i = 0; i < g_threadCount - 1; i++)
    {
        pthread_join(g_threads[i], nullptr);
    }
}
bool
MtpInterface::isEnabled()
{
    // True once any Enable() overload selected the multithreaded simulator.
    return g_enabled;
}
bool
MtpInterface::isPartitioned()
{
    // A non-zero thread count means Enable(threadCount, systemCount) already
    // ran, i.e. the topology was partitioned (manually) before the simulator
    // was constructed.
    return g_threadCount != 0;
}
void
MtpInterface::CalculateLookAhead()
{
    // Let every LP compute its lookahead; index 0 (the public LP) is skipped.
    for (uint32_t i = 1; i <= g_systemCount; i++)
    {
        g_systems[i].CalculateLookAhead();
    }
}
void*
MtpInterface::ThreadFunc(void* arg)
{
    // Worker thread loop: claim the next LP index, run the stage-appropriate
    // work on it, and spin-wait between stages.
    while (!g_globalFinished)
    {
        uint32_t index = g_systemIndex.fetch_add(1, std::memory_order_acquire);
        if (index >= g_systemCount)
        {
            // No LP left in the current stage: spin until the main thread
            // resets the dispatch index for the next stage (or, at the end of
            // the simulation, RunAfter resets it to let us observe
            // g_globalFinished and exit).
            while (g_systemIndex.load(std::memory_order_acquire) >= g_systemCount)
            {
            };
            continue;
        }
        LogicalProcess* system = &g_systems[g_sortedSystemIndices[index]];
        // g_recvMsgStage selects between the two parallel stages of a round
        if (g_recvMsgStage)
        {
            system->ReceiveMessages();
        }
        else
        {
            system->ProcessOneRound();
        }
        g_finishedSystemCount.fetch_add(1, std::memory_order_release);
    }
    return nullptr;
}
bool
MtpInterface::SortByExecutionTime(const uint32_t& i, const uint32_t& j)
{
    // Order descending: the LP that ran longest last round is dispatched first.
    return g_systems[j].GetExecutionTime() < g_systems[i].GetExecutionTime();
}
bool
MtpInterface::SortByEventCount(const uint32_t& i, const uint32_t& j)
{
    // Order descending: the LP with the most executed events goes first.
    return g_systems[j].GetEventCount() < g_systems[i].GetEventCount();
}
bool
MtpInterface::SortByPendingEventCount(const uint32_t& i, const uint32_t& j)
{
    // Order descending: the LP with the most pending events goes first.
    return g_systems[j].GetPendingEventCount() < g_systems[i].GetPendingEventCount();
}
bool
MtpInterface::SortBySimulationTime(const uint32_t& i, const uint32_t& j)
{
    // Order descending: the LP furthest ahead in simulation time goes first.
    return g_systems[j].Now() < g_systems[i].Now();
}
// Static member definitions of MtpInterface.

// Active LP priority comparator; selected in Enable() from g_sortMethod.
bool (*MtpInterface::g_sortFunc)(const uint32_t&, const uint32_t&) = nullptr;

// User-configurable scheduling policy and period.
GlobalValue MtpInterface::g_sortMethod =
    GlobalValue("PartitionSchedulingMethod",
                "The scheduling method to determine which partition runs first",
                StringValue("ByExecutionTime"),
                MakeStringChecker());
GlobalValue MtpInterface::g_sortPeriod = GlobalValue("PartitionSchedulingPeriod",
                                                     "The scheduling period of partitions",
                                                     UintegerValue(0),
                                                     MakeUintegerChecker<uint32_t>(0));

uint32_t MtpInterface::g_period = 0;               // rounds between two LP re-sorts
pthread_t* MtpInterface::g_threads = nullptr;      // worker threads (main thread excluded)
LogicalProcess* MtpInterface::g_systems = nullptr; // LP array; index 0 is the public LP
uint32_t MtpInterface::g_threadCount = 0;          // total threads, including the main one
uint32_t MtpInterface::g_systemCount = 0;          // number of LPs, excluding the public LP
uint32_t* MtpInterface::g_sortedSystemIndices = nullptr; // LP indices in dispatch order
std::atomic<uint32_t> MtpInterface::g_systemIndex;        // next LP slot to dispatch
std::atomic<uint32_t> MtpInterface::g_finishedSystemCount; // per-stage barrier counter
uint32_t MtpInterface::g_round = 0;                 // rounds executed so far
Time MtpInterface::g_smallestTime = TimeStep(0);    // earliest pending time over all LPs
Time MtpInterface::g_nextPublicTime = TimeStep(0);  // next event time of the public LP
bool MtpInterface::g_recvMsgStage = false; // true while workers run ReceiveMessages
bool MtpInterface::g_globalFinished = false; // true once every LP is finished
bool MtpInterface::g_enabled = false;        // true once Enable() was called
pthread_key_t MtpInterface::g_key; // TLS key -> LP currently bound to each thread
std::atomic<bool> MtpInterface::g_inCriticalSection(false); // global spin-lock flag
} // namespace ns3

View File

@@ -0,0 +1,401 @@
/*
* Copyright (c) 2023 State Key Laboratory for Novel Software Technology
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Songyuan Bai <i@f5soft.site>
*/
/**
* @file
* @ingroup mtp
* Declaration of classes ns3::MtpInterface
*/
#ifndef MTP_INTERFACE_H
#define MTP_INTERFACE_H
#include "logical-process.h"
#include "ns3/atomic-counter.h"
#include "ns3/global-value.h"
#include "ns3/nstime.h"
#include "ns3/simulator.h"
#include <pthread.h>
namespace ns3
{
/**
* @brief
* Implementation of the interface for multithreaded parallel simulation.
*/
class MtpInterface
{
  public:
    /**
     * @brief
     * Implementation of the critical section based on a spin lock via
     * atomic store & exchange (RAII: acquired on construction, released
     * on destruction).
     */
    class CriticalSection
    {
      public:
        /** Default constructor, using a globally shared atomic variable */
        inline CriticalSection()
            : m_spinLock(&g_inCriticalSection)
        {
            // busy-wait until the lock is observed free and taken by us
            while (m_spinLock->exchange(true, std::memory_order_acquire))
            {
            };
        }

        /**
         * @brief Construct a new critical section object using a custom
         * atomic variable.
         *
         * @param lock Custom boolean atomic variable acting as a spin lock
         */
        inline CriticalSection(std::atomic<bool>* lock)
            : m_spinLock(lock)
        {
            while (m_spinLock->exchange(true, std::memory_order_acquire))
            {
            };
        }

        /** Destructor: releases the spin lock. */
        inline ~CriticalSection()
        {
            m_spinLock->store(false, std::memory_order_release);
        }

      private:
        std::atomic<bool>* m_spinLock; //!< The spin lock held by this object
    };

    /**
     * @brief Enable the multithreaded simulation, the number of threads
     * will be automatically chosen and the partition is also automatic.
     */
    static void Enable();

    /**
     * @brief Enable the multithreaded simulation, the number of threads
     * will be manually set and the partition is automatic.
     *
     * @param threadCount The number of threads to be used.
     */
    static void Enable(const uint32_t threadCount);

    /**
     * @brief Enable the multithreaded simulation, the number of threads
     * will be manually set and the partition is also done manually (by
     * assigning each node a systemId).
     *
     * @param threadCount The number of threads to be used.
     * @param systemCount The number of partitions.
     */
    static void Enable(const uint32_t threadCount, const uint32_t systemCount);

    /**
     * @brief Create new LPs and enable them.
     *
     * This method can be used to dynamically create LPs for dynamically
     * created nodes. After this operation, newly added LP must set their
     * scheduler before running.
     *
     * @param newSystemCount The number of newly to-be-created LPs.
     */
    static void EnableNew(const uint32_t newSystemCount);

    /**
     * @brief Create new LPs and enable them, while adjusting number of
     * threads the simulator will use.
     *
     * This method is called after the automatic partition. Before the
     * automatic partition, there is only one LP, and we do not know the
     * number of threads to be used since it is related to the number of
     * LPs. Therefore, we have to adjust the number of threads and create
     * new LPs simultaneously.
     *
     * @param threadCount The number of threads to be used.
     * @param newSystemCount The number of newly to-be-created LPs.
     */
    static void EnableNew(const uint32_t threadCount, const uint32_t newSystemCount);

    /**
     * @brief Disable the multithreaded simulation and free the memory
     * space of LPs and threads.
     *
     * This method is called by the multithreaded simulator and you do
     * not have to call it manually.
     */
    static void Disable();

    /**
     * @brief Running the LPs and threads.
     *
     * This method is called by Simulator::Run.
     */
    static void Run();

    /**
     * @brief Preparation before running the LPs and threads.
     *
     * This method is called by MtpInterface::Run. It will actually create
     * threads and prepare them to process LPs.
     */
    static void RunBefore();

    /**
     * @brief Process all events of all LPs in the current round.
     *
     * This method is called by MtpInterface::Run.
     */
    static void ProcessOneRound();

    /**
     * @brief Calculate the global smallest time to determine the next
     * time window of each LP.
     *
     * This method is called by MtpInterface::Run.
     */
    static void CalculateSmallestTime();

    /**
     * @brief Post actions after all LPs are finished.
     *
     * This method is called by MtpInterface::Run. It will let threads know
     * that we have done everything, and terminates them.
     */
    static void RunAfter();

    /**
     * @brief Whether this interface is enabled.
     *
     * @return true if it is enabled
     * @return false if it is not enabled
     */
    static bool isEnabled();

    /**
     * @brief Whether the topology is already partitioned.
     *
     * This method is called by the constructor of the multithreaded simulator
     * to check whether user has already manually partitioned the topology.
     *
     * @return true if it is partitioned
     * @return false if it is not partitioned
     */
    static bool isPartitioned();

    /**
     * @brief Calculate the lookahead value of every LP.
     *
     * This method is called by MtpInterface::RunBefore.
     */
    static void CalculateLookAhead();

    /**
     * @brief Get the running logical process of the current thread.
     *
     * @return The currently running logical process of the
     * current thread
     */
    inline static LogicalProcess* GetSystem()
    {
        // thread-local lookup via the pthread TSD key set in Enable()
        return static_cast<LogicalProcess*>(pthread_getspecific(g_key));
    }

    /**
     * @brief Get a logical process based on its ID.
     *
     * @param systemId The given ID of the logical process to be got
     * @return The corresponding logical process
     */
    inline static LogicalProcess* GetSystem(const uint32_t systemId)
    {
        return &g_systems[systemId];
    }

    /**
     * @brief Set the running logical process of the current thread.
     *
     * @param systemId The given ID of the logical process to be set
     */
    inline static void SetSystem(const uint32_t systemId)
    {
        pthread_setspecific(g_key, &g_systems[systemId]);
    }

    /**
     * @brief Get the total number of logical processes.
     *
     * @return The total number of logical processes, including
     * the public LP (whose ID is zero)
     */
    inline static uint32_t GetSize()
    {
        return g_systemCount + 1;
    }

    /**
     * @brief Get how many rounds are passed since the simulation starts.
     *
     * @return The number of rounds
     */
    inline static uint32_t GetRound()
    {
        return g_round;
    }

    /**
     * @brief Get the smallest timestamp of every to-be-processed event
     * of every LP.
     *
     * The smallest timestamp is used to calculate LBTS.
     *
     * @return The smallest timestamp.
     */
    inline static Time GetSmallestTime()
    {
        return g_smallestTime;
    }

    /**
     * @brief Set the smallest timestamp of every LP.
     *
     * This method is called by the hybrid simulator, where global MPI
     * communication may result in a smaller timestamp than the local
     * smallest timestamp, so we have to update the current smallest timestamp.
     *
     * @param smallestTime The new smallest timestamp
     */
    inline static void SetSmallestTime(const Time smallestTime)
    {
        g_smallestTime = smallestTime;
    }

    /**
     * @brief Get the timestamp of the next global event.
     *
     * The next global event's timestamp is also used to calculate LBTS.
     *
     * @return The timestamp of the next global event
     */
    inline static Time GetNextPublicTime()
    {
        return g_nextPublicTime;
    }

    /**
     * @brief Whether all LPs are finished all rounds (or terminated by
     * Simulator::Stop).
     *
     * @return true if all finished
     * @return false if not all finished
     */
    inline static bool isFinished()
    {
        return g_globalFinished;
    }

    /**
     * @brief Schedule a global event right after the current round is finished.
     *
     * The event is placed on the public LP (index 0) under the global critical
     * section, at the earliest time that does not violate the time window.
     */
    template <
        typename FUNC,
        typename std::enable_if<!std::is_convertible<FUNC, Ptr<EventImpl>>::value, int>::type,
        typename std::enable_if<!std::is_function<typename std::remove_pointer<FUNC>::type>::value,
                                int>::type,
        typename... Ts>
    inline static void ScheduleGlobal(FUNC f, Ts&&... args)
    {
        CriticalSection cs;
        g_systems[0].ScheduleAt(Simulator::NO_CONTEXT,
                                Min(g_smallestTime, g_nextPublicTime),
                                MakeEvent(f, std::forward<Ts>(args)...));
    }

    /**
     * @brief Schedule a global event right after the current round is finished.
     *
     * Overload for plain function pointers.
     */
    template <typename... Us, typename... Ts>
    inline static void ScheduleGlobal(void (*f)(Us...), Ts&&... args)
    {
        CriticalSection cs;
        g_systems[0].ScheduleAt(Simulator::NO_CONTEXT,
                                Min(g_smallestTime, g_nextPublicTime),
                                MakeEvent(f, std::forward<Ts>(args)...));
    }

  private:
    /**
     * @brief The actual function each thread will run.
     *
     * In this function, each thread repeatedly get the next unprocessed LP,
     * execute it and wait until all LPs are processed.
     */
    static void* ThreadFunc(void* arg);

    /**
     * @brief Determine logical process priority by execution time.
     */
    static bool SortByExecutionTime(const uint32_t& i, const uint32_t& j);

    /**
     * @brief Determine logical process priority by event count.
     */
    static bool SortByEventCount(const uint32_t& i, const uint32_t& j);

    /**
     * @brief Determine logical process priority by pending event count.
     */
    static bool SortByPendingEventCount(const uint32_t& i, const uint32_t& j);

    /**
     * @brief Determine logical process priority by simulation time.
     */
    static bool SortBySimulationTime(const uint32_t& i, const uint32_t& j);

    static bool (*g_sortFunc)(const uint32_t&, const uint32_t&); //!< Active comparator
    static GlobalValue g_sortMethod;  //!< User-selected scheduling policy
    static GlobalValue g_sortPeriod;  //!< User-selected re-sort period (0 = auto)
    static uint32_t g_period;         //!< Rounds between two LP re-sorts
    static pthread_t* g_threads;      //!< Worker threads (main thread excluded)
    static LogicalProcess* g_systems; //!< LP array; index 0 is the public LP
    static uint32_t g_threadCount;    //!< Total threads, including the main one
    static uint32_t g_systemCount;    //!< Number of LPs, excluding the public LP
    static uint32_t* g_sortedSystemIndices;          //!< LP indices in dispatch order
    static std::atomic<uint32_t> g_systemIndex;      //!< Next LP slot to dispatch
    static std::atomic<uint32_t> g_finishedSystemCount; //!< Per-stage barrier counter
    static uint32_t g_round;       //!< Rounds executed so far
    static Time g_smallestTime;    //!< Earliest pending timestamp over all LPs
    static Time g_nextPublicTime;  //!< Next event time of the public LP
    static bool g_recvMsgStage;    //!< True while workers run ReceiveMessages
    static bool g_globalFinished;  //!< True once every LP is finished
    static bool g_enabled;         //!< True once Enable() was called
    static pthread_key_t g_key;    //!< TLS key -> LP bound to each thread
    static std::atomic<bool> g_inCriticalSection; //!< Global spin-lock flag
};
} // namespace ns3
#endif /* MTP_INTERFACE_H */

View File

@@ -0,0 +1,458 @@
/*
* Copyright (c) 2023 State Key Laboratory for Novel Software Technology
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Songyuan Bai <i@f5soft.site>
*/
/**
* \file
* \ingroup mtp
* Implementation of classes ns3::MultithreadedSimulatorImpl
*/
#include "multithreaded-simulator-impl.h"
#include "mtp-interface.h"
#include "ns3/channel.h"
#include "ns3/node-container.h"
#include "ns3/node-list.h"
#include "ns3/node.h"
#include "ns3/simulator.h"
#include "ns3/type-id.h"
#include "ns3/uinteger.h"
#include <algorithm>
#include <queue>
#include <thread>
namespace ns3
{
NS_LOG_COMPONENT_DEFINE("MultithreadedSimulatorImpl");
NS_OBJECT_ENSURE_REGISTERED(MultithreadedSimulatorImpl);
MultithreadedSimulatorImpl::MultithreadedSimulatorImpl()
{
    NS_LOG_FUNCTION(this);
    // If the user did not manually partition the topology (via
    // MtpInterface::Enable(threadCount, systemCount)), bootstrap the interface
    // with a single thread and zero LPs, and remember to auto-partition later
    // at the beginning of Run().
    if (!MtpInterface::isPartitioned())
    {
        MtpInterface::Enable(1, 0);
        m_partition = true;
    }
    else
    {
        m_partition = false;
    }
}
MultithreadedSimulatorImpl::~MultithreadedSimulatorImpl()
{
    // Nothing to free here: the shared state is owned by MtpInterface and is
    // released in Destroy() via MtpInterface::Disable().
    NS_LOG_FUNCTION(this);
}
TypeId
MultithreadedSimulatorImpl::GetTypeId()
{
    // TypeId registration with the two tunables of this simulator: the worker
    // thread cap and the minimum lookahead used by the automatic partitioner.
    static TypeId tid =
        TypeId("ns3::MultithreadedSimulatorImpl")
            .SetParent<SimulatorImpl>()
            .SetGroupName("Mtp")
            .AddConstructor<MultithreadedSimulatorImpl>()
            .AddAttribute("MaxThreads",
                          "The maximum threads used in simulation",
                          // default: one thread per hardware core
                          UintegerValue(std::thread::hardware_concurrency()),
                          MakeUintegerAccessor(&MultithreadedSimulatorImpl::m_maxThreads),
                          MakeUintegerChecker<uint32_t>(1))
            .AddAttribute("MinLookahead",
                          "The minimum lookahead in a partition",
                          // 0 means "auto": Partition() derives it from link delays
                          TimeValue(TimeStep(0)),
                          MakeTimeAccessor(&MultithreadedSimulatorImpl::m_minLookahead),
                          MakeTimeChecker(TimeStep(0)));
    return tid;
}
void
MultithreadedSimulatorImpl::Destroy()
{
    // Invoke all pending destroy events in insertion order, then tear down the
    // multithreading state.
    while (!m_destroyEvents.empty())
    {
        // Hold a reference before pop_front drops the list's Ptr.
        Ptr<EventImpl> ev = m_destroyEvents.front().PeekEventImpl();
        m_destroyEvents.pop_front();
        NS_LOG_LOGIC("handle destroy " << ev);
        if (!ev->IsCancelled())
        {
            ev->Invoke();
        }
    }
    MtpInterface::Disable();
}
bool
MultithreadedSimulatorImpl::IsFinished() const
{
    // Finished when every LP is stopped or has drained its event list.
    return MtpInterface::isFinished();
}
void
MultithreadedSimulatorImpl::Stop()
{
    NS_LOG_FUNCTION(this);
    // Stop every LP, including the public LP at index 0.
    for (uint32_t i = 0; i < MtpInterface::GetSize(); i++)
    {
        MtpInterface::GetSystem(i)->Stop();
    }
}
EventId
MultithreadedSimulatorImpl::Stop(const Time& delay)
{
    NS_LOG_FUNCTION(this << delay.GetTimeStep());
    // Schedule the stop as a regular event so that it is honored at the right
    // simulation time; returns the EventId of that scheduled stop.
    return Simulator::Schedule(delay, &Simulator::Stop);
}
EventId
MultithreadedSimulatorImpl::Schedule(const Time& delay, EventImpl* event)
{
    NS_LOG_FUNCTION(this << delay.GetTimeStep() << event);
    // Delegate to the LP bound to the calling thread (thread-local lookup).
    return MtpInterface::GetSystem()->Schedule(delay, event);
}
void
MultithreadedSimulatorImpl::ScheduleWithContext(uint32_t context,
                                                const Time& delay,
                                                EventImpl* event)
{
    NS_LOG_FUNCTION(this << context << delay.GetTimeStep() << event);
    // Map the context (a node ID) to the LP that owns that node. Prefer the
    // node list saved during Partition(); fall back to the global NodeList for
    // contexts beyond the saved list (presumably nodes created after
    // partitioning -- verify against Partition()).
    LogicalProcess* remote = nullptr;
    if (m_savedNodeList.GetN() > context)
    {
        remote = MtpInterface::GetSystem(m_savedNodeList.Get(context)->GetSystemId());
    }
    else
    {
        remote = MtpInterface::GetSystem(NodeList::GetNode(context)->GetSystemId());
    }
    MtpInterface::GetSystem()->ScheduleWithContext(remote, context, delay, event);
}
EventId
MultithreadedSimulatorImpl::ScheduleNow(EventImpl* event)
{
    // "Now" is simply a zero-delay schedule on the calling thread's LP.
    return Schedule(TimeStep(0), event);
}
EventId
MultithreadedSimulatorImpl::ScheduleDestroy(EventImpl* event)
{
    // Destroy events carry the special DESTROY uid and are kept in a plain
    // list, in insertion order; they run only from Destroy().
    EventId id(Ptr<EventImpl>(event, false),
               GetMaximumSimulationTime().GetTimeStep(),
               0xffffffff,
               EventId::DESTROY);
    // Multiple threads may schedule destroy events concurrently; serialize
    // access to the shared list with the global critical section.
    MtpInterface::CriticalSection cs;
    m_destroyEvents.push_back(id);
    return id;
}
void
MultithreadedSimulatorImpl::Remove(const EventId& id)
{
if (id.GetUid() == EventId::DESTROY)
{
// destroy events.
for (auto i = m_destroyEvents.begin(); i != m_destroyEvents.end(); i++)
{
if (*i == id)
{
m_destroyEvents.erase(i);
break;
}
}
}
else
{
MtpInterface::GetSystem()->Remove(id);
}
}
void
MultithreadedSimulatorImpl::Cancel(const EventId& id)
{
    // Cancelling only flags the event; it stays queued but will not run.
    if (IsExpired(id))
    {
        return;
    }
    id.PeekEventImpl()->Cancel();
}
bool
MultithreadedSimulatorImpl::IsExpired(const EventId& id) const
{
if (id.GetUid() == EventId::DESTROY)
{
// destroy events.
if (id.PeekEventImpl() == nullptr || id.PeekEventImpl()->IsCancelled())
{
return true;
}
for (auto i = m_destroyEvents.begin(); i != m_destroyEvents.end(); i++)
{
if (*i == id)
{
return false;
}
}
return true;
}
else
{
return MtpInterface::GetSystem()->IsExpired(id);
}
}
void
MultithreadedSimulatorImpl::Run()
{
    NS_LOG_FUNCTION(this);
    // auto partition: deferred to Run() so the whole topology is known
    if (m_partition)
    {
        Partition();
    }
    MtpInterface::Run();
}
Time
MultithreadedSimulatorImpl::Now() const
{
    // Do not add function logging here, to avoid stack overflow
    // (logging itself would query Simulator::Now() again).
    return MtpInterface::GetSystem()->Now();
}
Time
MultithreadedSimulatorImpl::GetDelayLeft(const EventId& id) const
{
    // Expired events have no remaining delay; otherwise ask the LP bound to
    // the calling thread.
    return IsExpired(id) ? TimeStep(0) : MtpInterface::GetSystem()->GetDelayLeft(id);
}
Time
MultithreadedSimulatorImpl::GetMaximumSimulationTime() const
{
    // Half of Time::Max() leaves headroom so that adding a delay to the
    // current time cannot overflow the underlying 64-bit timestep.
    return Time::Max() / 2;
}
void
MultithreadedSimulatorImpl::SetScheduler(ObjectFactory schedulerFactory)
{
    NS_LOG_FUNCTION(this << schedulerFactory);
    // Apply the scheduler type to every existing LP and remember it so that
    // LPs created later by Partition() get the same type.
    for (uint32_t i = 0; i < MtpInterface::GetSize(); i++)
    {
        MtpInterface::GetSystem(i)->SetScheduler(schedulerFactory);
    }
    m_schedulerTypeId = schedulerFactory.GetTypeId();
}
uint32_t
MultithreadedSimulatorImpl::GetSystemId() const
{
    // ID of the LP bound to the calling thread.
    return MtpInterface::GetSystem()->GetSystemId();
}
uint32_t
MultithreadedSimulatorImpl::GetContext() const
{
    // Context (node ID) of the event executing on this thread's LP.
    return MtpInterface::GetSystem()->GetContext();
}
uint64_t
MultithreadedSimulatorImpl::GetEventCount() const
{
    // Total executed events is the sum over all LPs (public LP included).
    uint64_t eventCount = 0;
    for (uint32_t i = 0; i < MtpInterface::GetSize(); i++)
    {
        eventCount += MtpInterface::GetSystem(i)->GetEventCount();
    }
    return eventCount;
}
void
MultithreadedSimulatorImpl::DoDispose()
{
    // No local resources to release; chain up to the base class.
    SimulatorImpl::DoDispose();
}
/**
 * Automatically partition the topology into logical processes (LPs).
 *
 * Point-to-point links whose delay is below m_minLookahead are treated as
 * partition boundaries; each connected component of the remaining graph
 * becomes one LP. Afterwards, pending events from the public LP are
 * transferred to the newly created LPs.
 */
void
MultithreadedSimulatorImpl::Partition()
{
    NS_LOG_FUNCTION(this);
    uint32_t systemId = 0;
    const NodeContainer nodes = NodeContainer::GetGlobal();
    // Keep a snapshot of the global node list for later use by this simulator.
    m_savedNodeList = nodes;
    // BFS bookkeeping. std::vector releases its storage automatically, so this
    // function cannot leak if an exception propagates (the previous raw
    // new[]/delete[] pair could). NOTE(review): indexing by GetId() assumes
    // node IDs are contiguous in [0, GetN()), which holds for the global list.
    std::vector<bool> visited(nodes.GetN(), false);
    std::queue<Ptr<Node>> q;
    // if m_minLookahead is not set, calculate the median of delay for every link
    if (m_minLookahead == TimeStep(0))
    {
        std::vector<Time> delays;
        for (auto it = nodes.Begin(); it != nodes.End(); it++)
        {
            Ptr<Node> node = *it;
            for (uint32_t i = 0; i < node->GetNDevices(); i++)
            {
                Ptr<NetDevice> localNetDevice = node->GetDevice(i);
                Ptr<Channel> channel = localNetDevice->GetChannel();
                if (!channel)
                {
                    continue;
                }
                // cut-off p2p links for partition
                if (localNetDevice->IsPointToPoint())
                {
                    TimeValue delay;
                    channel->GetAttribute("Delay", delay);
                    delays.push_back(delay.Get());
                }
            }
        }
        std::sort(delays.begin(), delays.end());
        if (delays.empty())
        {
            m_minLookahead = TimeStep(0);
        }
        else if (delays.size() % 2 == 1)
        {
            // odd count: the middle element is the median
            m_minLookahead = delays[delays.size() / 2];
        }
        else
        {
            // even count: average of the two middle elements
            m_minLookahead = (delays[delays.size() / 2 - 1] + delays[delays.size() / 2]) / 2;
        }
        NS_LOG_INFO("Min lookahead is set to " << m_minLookahead);
    }
    // perform a BFS on the whole network topo to assign each node a systemId
    for (auto it = nodes.Begin(); it != nodes.End(); it++)
    {
        Ptr<Node> node = *it;
        if (!visited[node->GetId()])
        {
            // a new, unvisited connected component starts a new system (LP)
            visited[node->GetId()] = true;
            q.push(node);
            systemId++;
            while (!q.empty())
            {
                // pop from BFS queue
                node = q.front();
                q.pop();
                // assign this node the current systemId
                node->SetSystemId(systemId);
                NS_LOG_INFO("node " << node->GetId() << " is set to system " << systemId);
                for (uint32_t i = 0; i < node->GetNDevices(); i++)
                {
                    Ptr<NetDevice> localNetDevice = node->GetDevice(i);
                    Ptr<Channel> channel = localNetDevice->GetChannel();
                    if (!channel)
                    {
                        continue;
                    }
                    // cut-off p2p links for partition
                    if (localNetDevice->IsPointToPoint())
                    {
                        TimeValue delay;
                        channel->GetAttribute("Delay", delay);
                        // if delay is below threshold, do not cut-off
                        if (delay.Get() >= m_minLookahead)
                        {
                            continue;
                        }
                    }
                    // grab the adjacent nodes; marking a node visited when it
                    // is enqueued (rather than when dequeued) prevents the
                    // same node from entering the queue more than once
                    for (uint32_t j = 0; j < channel->GetNDevices(); j++)
                    {
                        Ptr<Node> remote = channel->GetDevice(j)->GetNode();
                        // if it's not visited, add it to the current partition
                        if (!visited[remote->GetId()])
                        {
                            visited[remote->GetId()] = true;
                            q.push(remote);
                        }
                    }
                }
            }
        }
    }
    // after the partition, we finally know the system count (# of LPs)
    const uint32_t systemCount = systemId;
    const uint32_t threadCount = std::min(m_maxThreads, systemCount);
    NS_LOG_INFO("Partition done! " << systemCount << " systems share " << threadCount
                                   << " threads");
    // create new LPs
    MtpInterface::EnableNew(threadCount, systemCount);
    // set scheduler
    ObjectFactory schedulerFactory;
    schedulerFactory.SetTypeId(m_schedulerTypeId);
    for (uint32_t i = 1; i <= systemCount; i++)
    {
        MtpInterface::GetSystem(i)->SetScheduler(schedulerFactory);
    }
    // remove old events in public LP
    const Ptr<Scheduler> oldEvents = MtpInterface::GetSystem()->GetPendingEvents();
    const Ptr<Scheduler> eventsToBeTransferred = schedulerFactory.Create<Scheduler>();
    while (!oldEvents->IsEmpty())
    {
        Scheduler::Event next = oldEvents->RemoveNext();
        eventsToBeTransferred->Insert(next);
    }
    // transfer events to new LPs
    while (!eventsToBeTransferred->IsEmpty())
    {
        Scheduler::Event ev = eventsToBeTransferred->RemoveNext();
        // invoke initialization events (at time 0) by their insertion order
        // since changing the execution order of these events may cause error,
        // they have to be invoked now rather than parallelly executed
        if (ev.key.m_ts == 0)
        {
            MtpInterface::GetSystem(ev.key.m_context == Simulator::NO_CONTEXT
                                        ? 0
                                        : NodeList::GetNode(ev.key.m_context)->GetSystemId())
                ->InvokeNow(ev);
        }
        else if (ev.key.m_context == Simulator::NO_CONTEXT)
        {
            Schedule(TimeStep(ev.key.m_ts), ev.impl);
        }
        else
        {
            ScheduleWithContext(ev.key.m_context, TimeStep(ev.key.m_ts), ev.impl);
        }
    }
}
} // namespace ns3

View File

@@ -0,0 +1,103 @@
/*
* Copyright (c) 2023 State Key Laboratory for Novel Software Technology
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Songyuan Bai <i@f5soft.site>
*/
/**
* @file
* @ingroup mtp
* Declaration of classes ns3::MultithreadedSimulatorImpl
*/
#ifndef MULTITHREADED_SIMULATOR_IMPL_H
#define MULTITHREADED_SIMULATOR_IMPL_H
#include "ns3/event-id.h"
#include "ns3/event-impl.h"
#include "ns3/nstime.h"
#include "ns3/object-factory.h"
#include "ns3/simulator-impl.h"
#include "ns3/node-container.h"
#include <list>
namespace ns3
{
/**
 * @brief
 * Implementation of the multithreaded simulator
 */
class MultithreadedSimulatorImpl : public SimulatorImpl
{
  public:
    /**
     * @brief Register this type.
     * @return The object TypeId.
     */
    static TypeId GetTypeId();

    /** Default constructor. */
    MultithreadedSimulatorImpl();
    /** Destructor. */
    ~MultithreadedSimulatorImpl() override;

    // virtual from SimulatorImpl
    void Destroy() override;
    bool IsFinished() const override;
    void Stop() override;
    EventId Stop(const Time& delay) override;
    EventId Schedule(const Time& delay, EventImpl* event) override;
    void ScheduleWithContext(uint32_t context, const Time& delay, EventImpl* event) override;
    EventId ScheduleNow(EventImpl* event) override;
    EventId ScheduleDestroy(EventImpl* event) override;
    void Remove(const EventId& id) override;
    void Cancel(const EventId& id) override;
    bool IsExpired(const EventId& id) const override;
    void Run() override;
    Time Now() const override;
    Time GetDelayLeft(const EventId& id) const override;
    Time GetMaximumSimulationTime() const override;
    void SetScheduler(ObjectFactory schedulerFactory) override;
    uint32_t GetSystemId() const override;
    uint32_t GetContext() const override;
    uint64_t GetEventCount() const override;

  private:
    // Inherited from Object
    void DoDispose() override;

    /**
     * @brief Automatically divides the to-be-simulated topology
     *
     * This method is called at the beginning of MultithreadedSimulatorImpl::Run.
     * It will set each node a systemId. Then it creates logical processes according
     * to the number of partitions, and transfer old events to newly created logical
     * processes.
     *
     * If manual partition is enabled by calling MtpInterface::Enable with two parameters,
     * this method will not be called.
     */
    void Partition();

    // NOTE(review): presumably whether automatic partition is performed
    // (Partition() is skipped under manual partition) — confirm against
    // the attribute definitions in GetTypeId().
    bool m_partition;
    // Upper bound on the number of worker threads; Partition() uses
    // min(m_maxThreads, systemCount) threads.
    uint32_t m_maxThreads;
    // Minimum lookahead between logical processes; a value of 0 makes
    // Partition() derive it as the median point-to-point link delay.
    Time m_minLookahead;
    // TypeId of the scheduler installed into each logical process.
    TypeId m_schedulerTypeId;
    // Events registered via ScheduleDestroy — presumably run during
    // Destroy(); confirm in the .cc implementation.
    std::list<EventId> m_destroyEvents;
    // Snapshot of the global node list taken in Partition().
    NodeContainer m_savedNodeList;
};
} // namespace ns3
#endif /* MULTITHREADED_SIMULATOR_IMPL_H */

View File

@@ -0,0 +1,42 @@
- Setup the topology...
- Calculating routes...
Host NodeId System Address
0 20 0 10.0.0.1
1 21 0 10.0.0.3
2 22 0 10.0.1.1
3 23 0 10.0.1.3
4 24 1 10.1.0.1
5 25 1 10.1.0.3
6 26 1 10.1.1.1
7 27 1 10.1.1.3
8 28 0 10.2.0.1
9 29 0 10.2.0.3
10 30 0 10.2.1.1
11 31 0 10.2.1.3
12 32 1 10.3.0.1
13 33 1 10.3.0.3
14 34 1 10.3.1.1
15 35 1 10.3.1.3
- Generating traffic...
Expected data rate = 0.48Gbps
Generated data rate = 0.322004Gbps
Expected avg flow size = 1.71125MB
Generated avg flow size = 1.19742MB
Total flow count = 34
- Start simulation...
Progressed to 0.1s
Progressed to 0.2s
Progressed to 0.3s
Progressed to 0.4s
Progressed to 0.5s
Progressed to 0.6s
Progressed to 0.7s
Progressed to 0.8s
Progressed to 0.9s
- Done!

View File

@@ -0,0 +1,42 @@
- Setup the topology...
- Calculating routes...
Host NodeId System Address
0 20 0 10.0.0.1
1 21 0 10.0.0.3
2 22 0 10.0.1.1
3 23 0 10.0.1.3
4 24 1 10.1.0.1
5 25 1 10.1.0.3
6 26 1 10.1.1.1
7 27 1 10.1.1.3
8 28 0 10.2.0.1
9 29 0 10.2.0.3
10 30 0 10.2.1.1
11 31 0 10.2.1.3
12 32 1 10.3.0.1
13 33 1 10.3.0.3
14 34 1 10.3.1.1
15 35 1 10.3.1.3
- Generating traffic...
Expected data rate = 0.48Gbps
Generated data rate = 0.322004Gbps
Expected avg flow size = 1.71125MB
Generated avg flow size = 1.19742MB
Total flow count = 34
- Start simulation...
Progressed to 0.1s
Progressed to 0.2s
Progressed to 0.3s
Progressed to 0.4s
Progressed to 0.5s
Progressed to 0.6s
Progressed to 0.7s
Progressed to 0.8s
Progressed to 0.9s
- Done!

View File

View File

@@ -0,0 +1,138 @@
/*
* Copyright (c) 2023 State Key Laboratory for Novel Software Technology
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Songyuan Bai <i@f5soft.site>
*/
#include "ns3/example-as-test.h"
#include "ns3/mpi-module.h"
#include "ns3/mtp-module.h"
#include "ns3/test.h"
#include <sstream>
using namespace ns3;
/**
 * @brief Test case that runs an example program under `mpirun -np 2`
 * (see GetCommandTemplate) and optionally filters its output with a
 * post-processing shell command.
 */
class HybridTestCase : public ExampleAsTestCase
{
  public:
    /**
     * \copydoc ns3::ExampleAsTestCase::ExampleAsTestCase
     *
     * \param [in] postCmd The post processing command
     */
    HybridTestCase(const std::string name,
                   const std::string program,
                   const std::string dataDir,
                   const std::string args = "",
                   const std::string postCmd = "",
                   const bool shouldNotErr = true);

    /** Destructor */
    ~HybridTestCase() override
    {
    }

    /**
     * Produce the `--command-template` argument
     *
     * \returns The `--command-template` string.
     */
    std::string GetCommandTemplate() const override;

    /**
     * Remove time statistics
     *
     * \returns The post processing command
     */
    std::string GetPostProcessingCommand() const override;

  private:
    /** The post processing command. */
    std::string m_postCmd;
};
// All parameters except postCmd are forwarded to ExampleAsTestCase;
// postCmd is stored for use by GetPostProcessingCommand().
HybridTestCase::HybridTestCase(const std::string name,
                               const std::string program,
                               const std::string dataDir,
                               const std::string args /* = "" */,
                               const std::string postCmd /* = "" */,
                               const bool shouldNotErr /* = true */)
    : ExampleAsTestCase(name, program, dataDir, args, shouldNotErr),
      m_postCmd(postCmd)
{
}
std::string
HybridTestCase::GetCommandTemplate() const
{
    // Launch the example under MPI with two ranks; "%s" is later
    // substituted with the program path, followed by its arguments.
    return "mpirun -np 2 %s " + m_args;
}
std::string
HybridTestCase::GetPostProcessingCommand() const
{
    // Return the post-processing command supplied at construction, unchanged.
    return m_postCmd;
}
/**
 * @brief Test suite that wraps a single HybridTestCase.
 */
class HybridTestSuite : public TestSuite
{
  public:
    /**
     * Construct the suite and register its single test case; all
     * parameters except \p duration are forwarded to HybridTestCase.
     *
     * \param [in] duration Amount of time this test takes to execute
     * (defaults to QUICK).
     */
    HybridTestSuite(const std::string name,
                    const std::string program,
                    const std::string dataDir,
                    const std::string args = "",
                    const std::string postCmd = "",
                    const TestDuration duration = QUICK,
                    const bool shouldNotErr = true)
        : TestSuite(name, EXAMPLE)
    {
        AddTestCase(new HybridTestCase(name, program, dataDir, args, postCmd, shouldNotErr),
                    duration);
    }
}; // class HybridTestSuite
// Fat-tree example under hybrid (MPI + MTP) execution.
static HybridTestSuite g_hybridFatTree1("hybrid-fat-tree",
                                        "fat-tree-hybrid",
                                        NS_TEST_SOURCEDIR,
                                        "--bandwidth=100Mbps --thread=2",
                                        "| grep -v 'Simulation time' | grep -v 'Event count'",
                                        TestCase::TestDuration::QUICK);

// Same fat-tree example, with incast traffic enabled.
static HybridTestSuite g_hybridFatTree2("hybrid-fat-tree-incast",
                                        "fat-tree-hybrid",
                                        NS_TEST_SOURCEDIR,
                                        "--bandwidth=100Mbps --incast=1 --thread=2",
                                        "| grep -v 'Simulation time' | grep -v 'Event count'",
                                        TestCase::TestDuration::QUICK);

// Minimal hybrid example: no extra arguments, no post-processing.
static HybridTestSuite g_hybridSimple("hybrid-simple",
                                      "simple-hybrid",
                                      NS_TEST_SOURCEDIR,
                                      "",
                                      "",
                                      TestCase::TestDuration::QUICK);

View File

@@ -0,0 +1,53 @@
- Setup the topology...
- Calculating routes...
Host NodeId System Address
0 20 0 10.0.0.1
1 21 0 10.0.0.3
2 22 0 10.0.1.1
3 23 0 10.0.1.3
4 24 0 10.1.0.1
5 25 0 10.1.0.3
6 26 0 10.1.1.1
7 27 0 10.1.1.3
8 28 0 10.2.0.1
9 29 0 10.2.0.3
10 30 0 10.2.1.1
11 31 0 10.2.1.3
12 32 0 10.3.0.1
13 33 0 10.3.0.3
14 34 0 10.3.1.1
15 35 0 10.3.1.3
- Generating traffic...
Expected data rate = 0.48Gbps
Generated data rate = 0.322004Gbps
Expected avg flow size = 1.71125MB
Generated avg flow size = 1.19742MB
Total flow count = 34
- Start simulation...
Progressed to 0.1s
Progressed to 0.2s
Progressed to 0.3s
Progressed to 0.4s
Progressed to 0.5s
Progressed to 0.6s
Progressed to 0.7s
Progressed to 0.8s
Progressed to 0.9s
Detected #flow = 66
Finished #flow = 56
Average FCT (all) = 224669us
Average FCT (finished) = 177315us
Average end to end delay = 8970.54us
Average flow throughput = 0.00587946Gbps
Network throughput = 0.0975372Gbps
Total Tx packets = 12217
Total Rx packets = 12111
Dropped packets = 0
- Done!
Event count = 234220

View File

@@ -0,0 +1,53 @@
- Setup the topology...
- Calculating routes...
Host NodeId System Address
0 20 0 10.0.0.1
1 21 0 10.0.0.3
2 22 0 10.0.1.1
3 23 0 10.0.1.3
4 24 0 10.1.0.1
5 25 0 10.1.0.3
6 26 0 10.1.1.1
7 27 0 10.1.1.3
8 28 0 10.2.0.1
9 29 0 10.2.0.3
10 30 0 10.2.1.1
11 31 0 10.2.1.3
12 32 0 10.3.0.1
13 33 0 10.3.0.3
14 34 0 10.3.1.1
15 35 0 10.3.1.3
- Generating traffic...
Expected data rate = 0.48Gbps
Generated data rate = 0.322004Gbps
Expected avg flow size = 1.71125MB
Generated avg flow size = 1.19742MB
Total flow count = 34
- Start simulation...
Progressed to 0.1s
Progressed to 0.2s
Progressed to 0.3s
Progressed to 0.4s
Progressed to 0.5s
Progressed to 0.6s
Progressed to 0.7s
Progressed to 0.8s
Progressed to 0.9s
Detected #flow = 66
Finished #flow = 64
Average FCT (all) = 104726us
Average FCT (finished) = 98922us
Average end to end delay = 20948.7us
Average flow throughput = 0.0284384Gbps
Network throughput = 0.218204Gbps
Total Tx packets = 26702
Total Rx packets = 25885
Dropped packets = 0
- Done!
Event count = 461877

View File

@@ -0,0 +1,161 @@
/*
* Copyright (c) 2023 State Key Laboratory for Novel Software Technology
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Songyuan Bai <i@f5soft.site>
*/
#include "ns3/example-as-test.h"
#include "ns3/mtp-module.h"
#include "ns3/test.h"
#include <sstream>
using namespace ns3;
/**
 * @brief Test case that runs an example program directly (see
 * GetCommandTemplate) and optionally filters its output with a
 * post-processing shell command.
 */
class MtpTestCase : public ExampleAsTestCase
{
  public:
    /**
     * \copydoc ns3::ExampleAsTestCase::ExampleAsTestCase
     *
     * \param [in] postCmd The post processing command
     */
    MtpTestCase(const std::string name,
                const std::string program,
                const std::string dataDir,
                const std::string args = "",
                const std::string postCmd = "",
                const bool shouldNotErr = true);

    /** Destructor */
    ~MtpTestCase() override
    {
    }

    /**
     * Produce the `--command-template` argument
     *
     * \returns The `--command-template` string.
     */
    std::string GetCommandTemplate() const override;

    /**
     * Remove time statistics
     *
     * \returns The post processing command
     */
    std::string GetPostProcessingCommand() const override;

  private:
    /** The post processing command. */
    std::string m_postCmd;
};
// All parameters except postCmd are forwarded to ExampleAsTestCase;
// postCmd is stored for use by GetPostProcessingCommand().
MtpTestCase::MtpTestCase(const std::string name,
                         const std::string program,
                         const std::string dataDir,
                         const std::string args /* = "" */,
                         const std::string postCmd /* = "" */,
                         const bool shouldNotErr /* = true */)
    : ExampleAsTestCase(name, program, dataDir, args, shouldNotErr),
      m_postCmd(postCmd)
{
}
std::string
MtpTestCase::GetCommandTemplate() const
{
    // "%s" is later substituted with the program path, followed by its
    // arguments.
    return "%s " + m_args;
}
std::string
MtpTestCase::GetPostProcessingCommand() const
{
    // Return the post-processing command supplied at construction, unchanged.
    return m_postCmd;
}
/**
 * @brief Test suite that wraps a single MtpTestCase.
 */
class MtpTestSuite : public TestSuite
{
  public:
    /**
     * Construct the suite and register its single test case; all
     * parameters except \p duration are forwarded to MtpTestCase.
     *
     * \param [in] duration Amount of time this test takes to execute
     * (defaults to QUICK).
     */
    MtpTestSuite(const std::string name,
                 const std::string program,
                 const std::string dataDir,
                 const std::string args = "",
                 const std::string postCmd = "",
                 const Duration duration = Duration::QUICK,
                 const bool shouldNotErr = true)
        : TestSuite(name, EXAMPLE)
    {
        AddTestCase(new MtpTestCase(name, program, dataDir, args, postCmd, shouldNotErr), duration);
    }
}; // class MtpTestSuite
// Fat-tree example under multithreaded (MTP) execution, with flow monitor.
static MtpTestSuite g_mtpFatTree1("mtp-fat-tree",
                                  "fat-tree-mtp",
                                  NS_TEST_SOURCEDIR,
                                  "--bandwidth=100Mbps --thread=4 --flowmon=true",
                                  "| grep -v 'Simulation time'",
                                  TestCase::Duration::QUICK);
// Same fat-tree example, with incast traffic enabled.
static MtpTestSuite g_mtpFatTree2("mtp-fat-tree-incast",
                                  "fat-tree-mtp",
                                  NS_TEST_SOURCEDIR,
                                  "--bandwidth=100Mbps --incast=1 --thread=4 --flowmon=true",
                                  "| grep -v 'Simulation time'",
                                  TestCase::Duration::QUICK);
// TCP validation scenarios: DCTCP at two base RTTs, CUBIC with and
// without ECN, each compared against a stored reference trace.
static MtpTestSuite g_mtpTcpValidation1("mtp-tcp-validation-dctcp-10ms",
                                        "tcp-validation-mtp",
                                        NS_TEST_SOURCEDIR,
                                        "--firstTcpType=dctcp --linkRate=50Mbps --baseRtt=10ms "
                                        "--queueUseEcn=1 --stopTime=15s --validate=dctcp-10ms",
                                        "",
                                        TestCase::Duration::QUICK);
static MtpTestSuite g_mtpTcpValidation2("mtp-tcp-validation-dctcp-80ms",
                                        "tcp-validation-mtp",
                                        NS_TEST_SOURCEDIR,
                                        "--firstTcpType=dctcp --linkRate=50Mbps --baseRtt=80ms "
                                        "--queueUseEcn=1 --stopTime=40s --validate=dctcp-80ms",
                                        "",
                                        TestCase::Duration::QUICK);
static MtpTestSuite g_mtpTcpValidation3(
    "mtp-tcp-validation-cubic-50ms-no-ecn",
    "tcp-validation-mtp",
    NS_TEST_SOURCEDIR,
    "--firstTcpType=cubic --linkRate=50Mbps --baseRtt=50ms --queueUseEcn=0 --stopTime=20s "
    "--validate=cubic-50ms-no-ecn",
    "",
    TestCase::Duration::QUICK);
static MtpTestSuite g_mtpTcpValidation4("mtp-tcp-validation-cubic-50ms-ecn",
                                        "tcp-validation-mtp",
                                        NS_TEST_SOURCEDIR,
                                        "--firstTcpType=cubic --linkRate=50Mbps --baseRtt=50ms "
                                        "--queueUseEcn=1 --stopTime=20s --validate=cubic-50ms-ecn",
                                        "",
                                        TestCase::Duration::QUICK);

View File

@@ -138,6 +138,9 @@ Buffer::Recycle(Buffer::Data* data)
{
NS_LOG_FUNCTION(data);
NS_ASSERT(data->m_count == 0);
#ifdef NS3_MTP
std::atomic_thread_fence(std::memory_order_acquire);
#endif
Deallocate(data);
}
@@ -255,8 +258,7 @@ Buffer::operator=(const Buffer& o)
if (m_data != o.m_data)
{
// not assignment to self.
m_data->m_count--;
if (m_data->m_count == 0)
if (m_data->m_count-- == 1)
{
Recycle(m_data);
}
@@ -278,8 +280,7 @@ Buffer::~Buffer()
NS_LOG_FUNCTION(this);
NS_ASSERT(CheckInternalState());
g_recommendedStart = std::max(g_recommendedStart, m_maxZeroAreaStart);
m_data->m_count--;
if (m_data->m_count == 0)
if (m_data->m_count-- == 1)
{
Recycle(m_data);
}
@@ -322,8 +323,7 @@ Buffer::AddAtStart(uint32_t start)
uint32_t newSize = GetInternalSize() + start;
Buffer::Data* newData = Buffer::Create(newSize);
memcpy(newData->m_data + start, m_data->m_data + m_start, GetInternalSize());
m_data->m_count--;
if (m_data->m_count == 0)
if (m_data->m_count-- == 1)
{
Buffer::Recycle(m_data);
}
@@ -368,8 +368,7 @@ Buffer::AddAtEnd(uint32_t end)
uint32_t newSize = GetInternalSize() + end;
Buffer::Data* newData = Buffer::Create(newSize);
memcpy(newData->m_data, m_data->m_data + m_start, GetInternalSize());
m_data->m_count--;
if (m_data->m_count == 0)
if (m_data->m_count-- == 1)
{
Buffer::Recycle(m_data);
}

View File

@@ -9,12 +9,15 @@
#define BUFFER_H
#include "ns3/assert.h"
#include "ns3/atomic-counter.h"
#include <ostream>
#include <stdint.h>
#include <vector>
#ifndef NS3_MTP
#define BUFFER_FREE_LIST 1
#endif
namespace ns3
{
@@ -654,7 +657,11 @@ class Buffer
* The reference count of an instance of this data structure.
* Each buffer which references an instance holds a count.
*/
#ifdef NS3_MTP
AtomicCounter m_count;
#else
uint32_t m_count;
#endif
/**
* the size of the m_data field below.
*/

View File

@@ -7,13 +7,17 @@
*/
#include "byte-tag-list.h"
#include "ns3/atomic-counter.h"
#include "ns3/log.h"
#include <cstring>
#include <limits>
#include <vector>
#ifndef NS3_MTP
#define USE_FREE_LIST 1
#endif
#define FREE_LIST_SIZE 1000
#define OFFSET_MAX (std::numeric_limits<int32_t>::max())
@@ -32,7 +36,11 @@ NS_LOG_COMPONENT_DEFINE("ByteTagList");
struct ByteTagListData
{
uint32_t size; //!< size of the data
#ifdef NS3_MTP
AtomicCounter count;
#else
uint32_t count; //!< use counter (for smart deallocation)
#endif
uint32_t dirty; //!< number of bytes actually in use
uint8_t data[4]; //!< data
};
@@ -396,8 +404,8 @@ ByteTagListData*
ByteTagList::Allocate(uint32_t size)
{
NS_LOG_FUNCTION(this << size);
uint8_t* buffer = new uint8_t[size + sizeof(ByteTagListData) - 4];
ByteTagListData* data = (ByteTagListData*)buffer;
auto* buffer = new uint8_t[size + sizeof(ByteTagListData) - 4];
auto* data = (ByteTagListData*)buffer;
data->count = 1;
data->size = size;
data->dirty = 0;
@@ -408,14 +416,16 @@ void
ByteTagList::Deallocate(ByteTagListData* data)
{
NS_LOG_FUNCTION(this << data);
if (data == 0)
if (data == nullptr)
{
return;
}
data->count--;
if (data->count == 0)
if (data->count-- == 1)
{
uint8_t* buffer = (uint8_t*)data;
#ifdef NS3_MTP
std::atomic_thread_fence(std::memory_order_acquire);
#endif
auto* buffer = (uint8_t*)data;
delete[] buffer;
}
}

View File

@@ -120,6 +120,13 @@ Node::GetSystemId() const
return m_sid;
}
void
Node::SetSystemId(uint32_t systemId)
{
NS_LOG_FUNCTION(this << systemId);
m_sid = systemId;
}
uint32_t
Node::AddDevice(Ptr<NetDevice> device)
{

View File

@@ -83,6 +83,13 @@ class Node : public Object
*/
uint32_t GetSystemId() const;
/**
* @brief Set the system ID for auto-partition in the multithreaded simulator
*
* @param systemId the system ID this node will be
*/
void SetSystemId(uint32_t systemId);
/**
* @brief Associate a NetDevice to this node.
*

View File

@@ -29,6 +29,9 @@ bool PacketMetadata::m_metadataSkipped = false;
uint32_t PacketMetadata::m_maxSize = 0;
uint16_t PacketMetadata::m_chunkUid = 0;
PacketMetadata::DataFreeList PacketMetadata::m_freeList;
#ifdef NS3_MTP
std::atomic<bool> PacketMetadata::m_freeListUsing(false);
#endif
PacketMetadata::DataFreeList::~DataFreeList()
{
@@ -69,8 +72,7 @@ PacketMetadata::ReserveCopy(uint32_t size)
PacketMetadata::Data* newData = PacketMetadata::Create(m_used + size);
memcpy(newData->m_data, m_data->m_data, m_used);
newData->m_dirtyEnd = m_used;
m_data->m_count--;
if (m_data->m_count == 0)
if (m_data->m_count-- == 1)
{
PacketMetadata::Recycle(m_data);
}
@@ -559,6 +561,11 @@ PacketMetadata::Create(uint32_t size)
{
m_maxSize = size;
}
#ifdef NS3_MTP
while (m_freeListUsing.exchange(true, std::memory_order_acquire))
{
};
#endif
while (!m_freeList.empty())
{
PacketMetadata::Data* data = m_freeList.back();
@@ -567,11 +574,17 @@ PacketMetadata::Create(uint32_t size)
{
NS_LOG_LOGIC("create found size=" << data->m_size);
data->m_count = 1;
#ifdef NS3_MTP
m_freeListUsing.store(false, std::memory_order_release);
#endif
return data;
}
NS_LOG_LOGIC("create dealloc size=" << data->m_size);
PacketMetadata::Deallocate(data);
}
#ifdef NS3_MTP
m_freeListUsing.store(false, std::memory_order_release);
#endif
NS_LOG_LOGIC("create alloc size=" << m_maxSize);
return PacketMetadata::Allocate(m_maxSize);
}
@@ -582,9 +595,17 @@ PacketMetadata::Recycle(PacketMetadata::Data* data)
NS_LOG_FUNCTION(data);
if (!m_enable)
{
#ifdef NS3_MTP
std::atomic_thread_fence(std::memory_order_acquire);
#endif
PacketMetadata::Deallocate(data);
return;
}
#ifdef NS3_MTP
while (m_freeListUsing.exchange(true, std::memory_order_acquire))
{
};
#endif
NS_LOG_LOGIC("recycle size=" << data->m_size << ", list=" << m_freeList.size());
NS_ASSERT(data->m_count == 0);
if (m_freeList.size() > 1000 || data->m_size < m_maxSize)
@@ -595,6 +616,9 @@ PacketMetadata::Recycle(PacketMetadata::Data* data)
{
m_freeList.push_back(data);
}
#ifdef NS3_MTP
m_freeListUsing.store(false, std::memory_order_release);
#endif
}
PacketMetadata::Data*

View File

@@ -11,6 +11,7 @@
#include "buffer.h"
#include "ns3/assert.h"
#include "ns3/atomic-counter.h"
#include "ns3/callback.h"
#include "ns3/type-id.h"
@@ -415,7 +416,11 @@ class PacketMetadata
struct Data
{
/** number of references to this struct Data instance. */
#ifdef NS3_MTP
AtomicCounter m_count;
#else
uint32_t m_count;
#endif
/** size (in bytes) of m_data buffer below */
uint32_t m_size;
/** max of the m_used field over all objects which reference this struct Data instance */
@@ -664,6 +669,9 @@ class PacketMetadata
*/
static void Deallocate(PacketMetadata::Data* data);
#ifdef NS3_MTP
static std::atomic<bool> m_freeListUsing;
#endif
static DataFreeList m_freeList; //!< the metadata data storage
static bool m_enable; //!< Enable the packet metadata
static bool m_enableChecking; //!< Enable the packet metadata checking
@@ -728,8 +736,7 @@ PacketMetadata::operator=(const PacketMetadata& o)
{
// not self assignment
NS_ASSERT(m_data != nullptr);
m_data->m_count--;
if (m_data->m_count == 0)
if (m_data->m_count-- == 1)
{
PacketMetadata::Recycle(m_data);
}
@@ -747,8 +754,7 @@ PacketMetadata::operator=(const PacketMetadata& o)
PacketMetadata::~PacketMetadata()
{
NS_ASSERT(m_data != nullptr);
m_data->m_count--;
if (m_data->m_count == 0)
if (m_data->m_count-- == 1)
{
PacketMetadata::Recycle(m_data);
}

View File

@@ -13,6 +13,7 @@
\brief Defines a linked list of Packet tags, including copy-on-write semantics.
*/
#include "ns3/atomic-counter.h"
#include "ns3/type-id.h"
#include <ostream>
@@ -131,7 +132,11 @@ class PacketTagList
struct TagData
{
TagData* next; //!< Pointer to next in list
#ifdef NS3_MTP
AtomicCounter count;
#else
uint32_t count; //!< Number of incoming links
#endif
TypeId tid; //!< Type of the tag serialized into #data
uint32_t size; //!< Size of the \c data buffer
uint8_t data[1]; //!< Serialization buffer
@@ -347,11 +352,13 @@ PacketTagList::RemoveAll()
TagData* prev = nullptr;
for (TagData* cur = m_next; cur != nullptr; cur = cur->next)
{
cur->count--;
if (cur->count > 0)
if (cur->count-- > 1)
{
break;
}
#ifdef NS3_MTP
std::atomic_thread_fence(std::memory_order_acquire);
#endif
if (prev != nullptr)
{
prev->~TagData();

View File

@@ -795,6 +795,8 @@ SocketIpTosTag::GetTos() const
return m_ipTos;
}
NS_OBJECT_ENSURE_REGISTERED(SocketIpTosTag);
TypeId
SocketIpTosTag::GetTypeId()
{
@@ -851,6 +853,8 @@ SocketPriorityTag::GetPriority() const
return m_priority;
}
NS_OBJECT_ENSURE_REGISTERED(SocketPriorityTag);
TypeId
SocketPriorityTag::GetTypeId()
{
@@ -907,6 +911,8 @@ SocketIpv6TclassTag::GetTclass() const
return m_ipv6Tclass;
}
NS_OBJECT_ENSURE_REGISTERED(SocketIpv6TclassTag);
TypeId
SocketIpv6TclassTag::GetTypeId()
{

View File

@@ -30,9 +30,23 @@ NS_LOG_COMPONENT_DEFINE("NixVectorRouting");
NS_OBJECT_TEMPLATE_CLASS_DEFINE(NixVectorRouting, Ipv4RoutingProtocol);
NS_OBJECT_TEMPLATE_CLASS_DEFINE(NixVectorRouting, Ipv6RoutingProtocol);
#ifdef NS3_MTP
template <typename T>
std::atomic<bool> NixVectorRouting<T>::g_isCacheDirty(false);
template <typename T>
std::atomic<bool> NixVectorRouting<T>::g_cacheFlushing(false);
template <typename T>
std::atomic<bool> NixVectorRouting<T>::g_isMapBuilt(false);
template <typename T>
std::atomic<bool> NixVectorRouting<T>::g_mapBuilding(false);
#else
/// Flag to mark when caches are dirty and need to be flushed
template <typename T>
bool NixVectorRouting<T>::g_isCacheDirty = false;
#endif
/// Epoch starts from one to make it easier to spot an uninitialized NixVector during debug.
template <typename T>
@@ -160,6 +174,9 @@ NixVectorRouting<T>::FlushGlobalNixRoutingCache() const
// IP address to node mapping is potentially invalid so clear it.
// Will be repopulated in lazy evaluation when mapping is needed.
g_ipAddressToNodeMap.clear();
#ifdef NS3_MTP
g_isMapBuilt.store(false, std::memory_order_release);
#endif
}
template <typename T>
@@ -510,10 +527,28 @@ NixVectorRouting<T>::GetNodeByIp(IpAddress dest) const
NS_LOG_FUNCTION(this << dest);
// Populate lookup table if is empty.
#ifdef NS3_MTP
if (!g_isMapBuilt.load(std::memory_order_acquire))
{
if (g_mapBuilding.exchange(true, std::memory_order_relaxed))
{
while (!g_isMapBuilt.load(std::memory_order_acquire))
{
};
}
else
{
BuildIpAddressToNodeMap();
g_isMapBuilt.store(true, std::memory_order_release);
g_mapBuilding.store(false, std::memory_order_release);
}
}
#else
if (g_ipAddressToNodeMap.empty())
{
BuildIpAddressToNodeMap();
}
#endif
Ptr<Node> destNode;
@@ -537,10 +572,28 @@ Ptr<typename NixVectorRouting<T>::IpInterface>
NixVectorRouting<T>::GetInterfaceByNetDevice(Ptr<NetDevice> netDevice) const
{
// Populate lookup table if is empty.
#ifdef NS3_MTP
if (!g_isMapBuilt.load(std::memory_order_acquire))
{
if (g_mapBuilding.exchange(true, std::memory_order_relaxed))
{
while (!g_isMapBuilt.load(std::memory_order_acquire))
{
};
}
else
{
BuildIpAddressToNodeMap();
g_isMapBuilt.store(true, std::memory_order_release);
g_mapBuilding.store(false, std::memory_order_release);
}
}
#else
if (g_netdeviceToIpInterfaceMap.empty())
{
BuildIpAddressToNodeMap();
}
#endif
Ptr<IpInterface> ipInterface;
@@ -1065,28 +1118,44 @@ template <typename T>
void
NixVectorRouting<T>::NotifyInterfaceUp(uint32_t i)
{
#ifdef NS3_MTP
g_isCacheDirty.store(true, std::memory_order_release);
#else
g_isCacheDirty = true;
#endif
}
template <typename T>
void
NixVectorRouting<T>::NotifyInterfaceDown(uint32_t i)
{
#ifdef NS3_MTP
g_isCacheDirty.store(true, std::memory_order_release);
#else
g_isCacheDirty = true;
#endif
}
template <typename T>
void
NixVectorRouting<T>::NotifyAddAddress(uint32_t interface, IpInterfaceAddress address)
{
#ifdef NS3_MTP
g_isCacheDirty.store(true, std::memory_order_release);
#else
g_isCacheDirty = true;
#endif
}
template <typename T>
void
NixVectorRouting<T>::NotifyRemoveAddress(uint32_t interface, IpInterfaceAddress address)
{
#ifdef NS3_MTP
g_isCacheDirty.store(true, std::memory_order_release);
#else
g_isCacheDirty = true;
#endif
}
template <typename T>
@@ -1097,7 +1166,11 @@ NixVectorRouting<T>::NotifyAddRoute(IpAddress dst,
uint32_t interface,
IpAddress prefixToUse)
{
#ifdef NS3_MTP
g_isCacheDirty.store(true, std::memory_order_release);
#else
g_isCacheDirty = true;
#endif
}
template <typename T>
@@ -1108,7 +1181,11 @@ NixVectorRouting<T>::NotifyRemoveRoute(IpAddress dst,
uint32_t interface,
IpAddress prefixToUse)
{
#ifdef NS3_MTP
g_isCacheDirty.store(true, std::memory_order_release);
#else
g_isCacheDirty = true;
#endif
}
template <typename T>
@@ -1412,12 +1489,30 @@ template <typename T>
void
NixVectorRouting<T>::CheckCacheStateAndFlush() const
{
#ifdef NS3_MTP
if (g_isCacheDirty.load(std::memory_order_acquire))
{
if (g_cacheFlushing.exchange(true, std::memory_order_relaxed))
{
while (g_isCacheDirty.load(std::memory_order_acquire))
{
};
}
else
{
FlushGlobalNixRoutingCache();
g_isCacheDirty.store(false, std::memory_order_release);
g_cacheFlushing.store(false, std::memory_order_release);
}
}
#else
if (g_isCacheDirty)
{
FlushGlobalNixRoutingCache();
g_epoch++;
g_isCacheDirty = false;
}
#endif
}
/* Public template function declarations */

View File

@@ -30,6 +30,7 @@
#include "ns3/node-list.h"
#include "ns3/nstime.h"
#include <atomic>
#include <map>
#include <unordered_map>
@@ -473,7 +474,14 @@ class NixVectorRouting : public std::enable_if_t<std::is_same_v<Ipv4RoutingProto
* Flag to mark when caches are dirty and need to be flushed.
* Used for lazy cleanup of caches when there are many topology changes.
*/
#ifdef NS3_MTP
static std::atomic<bool> g_isCacheDirty;
static std::atomic<bool> g_cacheFlushing;
static std::atomic<bool> g_isMapBuilt;
static std::atomic<bool> g_mapBuilding;
#else
static bool g_isCacheDirty;
#endif
/**
* Nix Epoch, incremented each time a flush is performed.