diff --git a/doc/testing/Makefile b/doc/testing/Makefile
new file mode 100644
index 000000000..ce641f161
--- /dev/null
+++ b/doc/testing/Makefile
@@ -0,0 +1,44 @@
+TEXI2HTML = texi2html
+TEXI2PDF = texi2dvi --pdf
+EPSTOPDF = epstopdf
+DIA = dia
+CONVERT = convert
+CSS = --css-include=testing.css
+SPLIT = --split section
+
+FIGURES = figures
+VPATH = $(FIGURES)
+
+IMAGES_EPS = \
+
+IMAGES_PNG = ${IMAGES_EPS:.eps=.png}
+IMAGES_PDF = ${IMAGES_EPS:.eps=.pdf}
+
+IMAGES = $(IMAGES_EPS) $(IMAGES_PNG) $(IMAGES_PDF)
+
+CHAPTERS = \
+	testing.texi \
+	overview.texi background.texi \
+	testing-framework.texi how-to-write-tests.texi \
+	propagation-loss.texi
+%.eps : %.dia; $(DIA) -t eps $< -e $@
+%.png : %.dia; $(DIA) -t png $< -e $@
+%.pdf : %.eps; $(EPSTOPDF) $< -o=$@
+
+all: $(IMAGES) testing.pdf testing.html testing/testing.html
+
+testing.pdf: $(IMAGES) $(CHAPTERS)
+ $(TEXI2PDF) testing.texi
+
+testing.html: $(IMAGES) $(CHAPTERS)
+ $(TEXI2HTML) ${CSS} testing.texi
+
+testing/testing.html: $(IMAGES) $(CHAPTERS)
+ $(TEXI2HTML) ${CSS} ${SPLIT} testing.texi
+
+figures-clean:
+ rm -rf $(IMAGES)
+
+clean: figures-clean
+ rm -rf testing.aux testing.cp testing.cps testing.fn testing.ky testing.pg
+ rm -rf testing.tp testing.vr testing.toc testing.log testing.pdf testing.html testing/
diff --git a/doc/testing/background.texi b/doc/testing/background.texi
new file mode 100644
index 000000000..3c58ea2ce
--- /dev/null
+++ b/doc/testing/background.texi
@@ -0,0 +1,215 @@
+@c ========================================================================
+@c Background
+@c ========================================================================
+
+@node Background
+@chapter Background
+
+Writing defect-free software is a difficult proposition. There are many
+dimensions to the problem and there is much confusion regarding what is
+meant by different terms in different contexts. We have found it worthwhile
+to spend a little time reviewing the subject and defining some terms.
+
+Software testing may be loosely defined as the process of executing a program
+with the intent of finding errors. When one enters a discussion regarding
+software testing, it quickly becomes apparent that there are many distinct
+mind-sets with which one can approach the subject.
+
+For example, one could break the process into broad functional categories
+like ``correctness testing,'' ``performance testing,'' ``robustness testing''
+and ``security testing.'' Another way to look at the problem is by life-cycle:
+``requirements testing,'' ``design testing,'' ``acceptance testing,'' and
+``maintenance testing.'' Yet another view is by the scope of the tested system.
+In this case one may speak of ``unit testing,'' ``component testing,''
+``integration testing,'' and ``system testing.'' These terms are also not
+standardized in any way, and so ``maintenance testing'' and ``regression
+testing'' may be heard interchangeably. Additionally, these terms are often
+misused.
+
+There are also a number of different philosophical approaches to software
+testing. For example, some organizations advocate writing test programs
+before actually implementing the desired software, yielding ``test-driven
+development.'' Some organizations advocate testing from a customer perspective
+as soon as possible, following a parallel with the agile development process:
+``test early and test often.'' This is sometimes called ``agile testing.'' It
+seems that there is at least one approach to testing for every development
+methodology.
+
+The @command{ns-3} project is not in the business of advocating for any one of
+these processes, but the project as a whole has requirements that help inform
+the test process.
+
+Like all major software products, @command{ns-3} has a number of qualities that
+must be present for the product to succeed. From a testing perspective, some
+of these qualities that must be addressed are that @command{ns-3} must be
+``correct,'' ``robust,'' ``performant'' and ``maintainable.'' Ideally there
+should be metrics for each of these dimensions that are checked by the tests
+to identify when the product fails to meet its expectations / requirements.
+
+@node Correctness
+@section Correctness
+
+The essential purpose of testing is to determine that a piece of software
+behaves ``correctly.'' For @command{ns-3} this means that if we simulate
+something, the simulation should faithfully represent some physical entity or
+process to a specified accuracy and precision.
+
+It turns out that there are two perspectives from which one can view
+correctness. Verifying that a particular process is implemented according
+to its specification is generically called verification. The process of
+deciding that the specification is correct is generically called validation.
+
+@node ValidationAndVerification
+@section Validation and Verification
+
+A computer model is a mathematical or logical representation of something. It
+can represent a vehicle, a frog or a networking card. Models can also represent
+processes such as global warming, freeway traffic flow or a specification of a
+networking protocol. Models can be completely faithful representations of a
+logical process specification, but they necessarily can never completely
+simulate a physical object or process. In most cases, a number of
+simplifications are made to the model to make simulation computationally
+tractable.
+
+Every model has a @emph{target system} that it is attempting to simulate. The
+first step in creating a simulation model is to identify this target system and
+the level of detail and accuracy that the simulation is desired to reproduce.
+In the case of a logical process, the target system may be identified as ``TCP
+as defined by RFC 793.'' In this case, it will probably be desirable to create
+a model that completely and faithfully reproduces RFC 793. In the case of a
+physical process this will not be possible. If, for example, you would like to
+simulate a wireless networking card, you may determine that you need, ``an
+accurate MAC-level implementation of the 802.11 specification and [...] a
+not-so-slow PHY-level model of the 802.11a specification.''
+
+Once this is done, one can develop an abstract model of the target system. This
+is typically an exercise in managing the tradeoffs between complexity, resource
+requirements and accuracy. The process of developing an abstract model has been
+called @emph{model qualification} in the literature. In the case of a TCP
+protocol, this process results in a design for a collection of objects,
+interactions and behaviors that will fully implement RFC 793 in @command{ns-3}.
+In the case of the wireless card, this process results in a number of tradeoffs
+to allow the physical layer to be simulated and the design of a network device
+and channel for ns-3, along with the desired objects, interactions and behaviors.
+
+This abstract model is then developed into an @command{ns-3} model that
+implements the abstract model as a computer program. The process of getting the
+implementation to agree with the abstract model is called @emph{model
+verification} in the literature.
+
+The process so far is open loop. What remains is to make a determination that a
+given ns-3 model has some connection to some reality -- that a model is an
+accurate representation of a real system, whether a logical process or a physical
+entity.
+
+If one is going to use a simulation model to try and predict how some real
+system is going to behave, there must be some reason to believe your results --
+i.e., can one trust that an inference made from the model translates into a
+correct prediction for the real system. The process of getting the ns-3 model
+behavior to agree with the desired target system behavior as defined by the model
+qualification process is called @emph{model validation} in the literature. In the
+case of a TCP implementation, you may want to compare the behavior of your ns-3
+TCP model to some reference implementation in order to validate your model. In
+the case of a wireless physical layer simulation, you may want to compare the
+behavior of your model to that of real hardware in a controlled setting.
+
+The @command{ns-3} testing environment provides tools to allow for both model
+validation and testing, and encourages the publication of validation results.
+
+@node Robustness
+@section Robustness
+
+Robustness is the quality of being able to withstand stresses, or changes in
+environments, inputs or calculations, etc. A system or design is ``robust''
+if it can deal with such changes with minimal loss of functionality.
+
+This kind of testing is usually done with a particular focus. For example, the
+system as a whole can be run on many different system configurations to
+demonstrate that it can perform correctly in a large number of environments.
+
+The system can also be stressed by operating close to or beyond capacity by
+generating or simulating resource exhaustion of various kinds. This genre of
+testing is called ``stress testing.''
+
+The system and its components may be exposed to so-called ``clean tests'' that
+demonstrate a positive result -- that is that the system operates correctly in
+response to a large variation of expected configurations.
+
+The system and its components may also be exposed to ``dirty tests'' which
+provide inputs outside the expected range. For example, if a module expects a
+zero-terminated string representation of an integer, a dirty test might provide
+an unterminated string of random characters to verify that the system does not
+crash as a result of this unexpected input. Unfortunately, detecting such
+``dirty'' input and taking preventive measures to ensure the system does not
+fail catastrophically can require a huge amount of development overhead. In
+order to reduce development time, a decision was taken early on in the project
+to minimize the amount of parameter validation and error handling in the
+@command{ns-3} codebase. For this reason, we do not spend much time on dirty
+testing -- it would just uncover the results of the design decision we know
+we took.
+
+We do want to demonstrate that @command{ns-3} software does work across some set
+of conditions. We borrow a couple of definitions to narrow this down a bit.
+The @emph{domain of applicability} is a set of prescribed conditions for which
+the model has been tested, compared against reality to the extent possible, and
+judged suitable for use. The @emph{range of accuracy} is an agreement between
+the computerized model and reality within a domain of applicability.
+
+The @command{ns-3} testing environment provides tools to allow for setting up
+and running test environments over multiple systems (buildbot) and provides
+classes to encourage clean tests to verify the operation of the system over the
+expected ``domain of applicability'' and ``range of accuracy.''
+
+@node Performant
+@section Performant
+
+Okay, ``performant'' isn't a real English word. It is, however, a very concise
+neologism that is quite often used to describe what we want @command{ns-3} to
+be: powerful and fast enough to get the job done.
+
+This is really about the broad subject of software performance testing. One of
+the key things that is done is to compare two systems to find which performs
+better (cf benchmarks). This is used to demonstrate that, for example,
+@code{ns-3} can perform a basic kind of simulation at least as fast as a
+competing product, or can be used to identify parts of the system that perform
+badly.
+
+In the @code{ns-3} test framework, we provide support for timing various kinds
+of tests.
+
+@node Maintainability
+@section Maintainability
+
+A software product must be maintainable. This is, again, a very broad
+statement, but a testing framework can help with the task. Once a model has
+been developed, validated and verified, we can repeatedly execute the suite
+of tests for the entire system to ensure that it remains valid and verified
+over its lifetime.
+
+When a feature stops functioning as intended after some kind of change to the
+system is integrated, it is called generically a regression. Originally the
+term regression referred to a change that caused a previously fixed bug to
+reappear, but the term has evolved to describe any kind of change that breaks
+existing functionality. There are many kinds of regressions that may occur
+in practice.
+
+A @emph{local regression} is one in which a change affects the changed component
+directly. For example, if a component is modified to allocate and free memory
+but stale pointers are used, the component itself fails.
+
+A @emph{remote regression} is one in which a change to one component breaks
+functionality in another component. This reflects violation of an implied but
+possibly unrecognized contract between components.
+
+An @emph{unmasked regression} is one that creates a situation where a previously
+existing bug that had no effect is suddenly exposed in the system. This may
+be as simple as exercising a code path for the first time.
+
+A @emph{performance regression} is one that causes the performance requirements
+of the system to be violated. For example, doing some work in a low level
+function that may be repeated large numbers of times may suddenly render the
+system unusable from certain perspectives.
+
+The @command{ns-3} testing framework provides tools for automating the process
+used to validate and verify the code in nightly test suites to help quickly
+identify possible regressions.
diff --git a/doc/testing/how-to-write-tests.texi b/doc/testing/how-to-write-tests.texi
new file mode 100644
index 000000000..16b8cac34
--- /dev/null
+++ b/doc/testing/how-to-write-tests.texi
@@ -0,0 +1,8 @@
+@c ========================================================================
+@c How to write tests
+@c ========================================================================
+
+@node How to write tests
+@chapter How to write tests
+
+To be completed.
diff --git a/doc/testing/overview.texi b/doc/testing/overview.texi
new file mode 100644
index 000000000..0217c08fc
--- /dev/null
+++ b/doc/testing/overview.texi
@@ -0,0 +1,16 @@
+@c ========================================================================
+@c Overview
+@c ========================================================================
+
+@node Overview
+@chapter Overview
+
+This document is concerned with the testing and validation of @command{ns-3}
+software.
+
+This document provides
+@itemize @bullet
+@item a description of the ns-3 testing framework;
+@item a guide to model developers or new model contributors for how to write tests;
+@item validation and verification results reported to date.
+@end itemize
diff --git a/doc/testing/propagation-loss.texi b/doc/testing/propagation-loss.texi
new file mode 100644
index 000000000..5fdf229a3
--- /dev/null
+++ b/doc/testing/propagation-loss.texi
@@ -0,0 +1,121 @@
+@node Propagation Loss Models
+@chapter Propagation Loss Models
+@anchor{chap:propagation-loss-models}
+
+This chapter describes validation of ns-3 propagation loss models.
+
+@section FriisPropagationLossModel
+
+@subsection Model reference
+
+From source: @uref{http://www.scribd.com/doc/6650712/Wireless-CommunicationsPrinciples-and-Practice-Theodore-S,, Wireless Communications-Principles and Practice ,Theodore S Rappaport pg. 71 }
+
+Given equation:
+@verbatim
+Pr = Pt*Gt*Gr*lmb^2/((4*pi)^2*d^2*L)
+
+Pt = 10^(17.0206/10)/10^3 = .05035702
+Pr = .05035702*.125^2/((4*pi)^2*d*1) = 4.98265e-6/d^2
+
+bandwidth = 2.2*10^7
+m_noiseFigure = 5.01187
+noiseFloor = ((Thermal noise (K)* BOLTZMANN * bandwidth)* m_noiseFigure)
+noiseFloor = ((290*1.3803*10^-23*2.2*10^7)*5.01187) = 4.41361e-13W
+no interference, so SNR = Pr/4.41361e-13W
+
+Distance :: Pr :: SNR
+100 4.98265e-10W 1128.93
+500 1.99306e-11W 45.1571
+1000 4.98265e-12W 11.2893
+2000 1.24566e-12W 2.82232
+3000 5.53628e-13W 1.25436
+4000 3.11416e-13W 0.70558
+5000 1.99306e-13W 0.451571
+6000 1.38407e-13W 0.313591
+@end verbatim
+
+@subsection Validation test
+
+Test program available online at: @uref{http://xxx.xxx.com,,}
+
+Taken at default settings (packetSize = 1000, numPackets = 1, lambda = 0.125, 802.11b at 2.4GHz):
+@verbatim
+Distance :: Pr :: SNR
+100 4.98265e-10W 1128.93
+500 1.99306e-11W 45.1571
+1000 4.98265e-12W 11.2893
+2000 1.24566e-12W 2.82232
+3000 5.53628e-13W 1.25436
+4000 3.11416e-13W 0.70558
+5000 1.99306e-13W 0.451571
+6000 1.38407e-13W 0.313591
+7000 1.01687e-13W 0.230393
+8000 7.78539e-14W 0.176395
+@end verbatim
+
+@subsection Discussion
+
+As can be seen, the SNR output by the simulator and the SNR computed from the source's equation are identical.
+
+@section LogDistancePropagationLossModel
+
+@subsection Model reference
+
+From source: @uref{http://www.plextek.co.uk/papers/aps2005mcw.pdf,, Urban Propagation Measurements and Statistical Path Loss Model at 3.5 GHz, Marcus C. Walden, Frank J. Rowsell}
+
+Given equation:
+@verbatim
+PL{dBm} = PL(d0) + 10*n*log(d/d0) + Xs
+
+PL(1) from friis at 2.4GHz: 40.045997dBm
+PL{dBm} = 10*log(.050357/Pr) = 40.045997 + 10*n*log(d) + Xg
+Pr = .050357/(10^((40.045997 + 10*n*log(d) + Xg)/10))
+
+bandwidth = 2.2*10^7
+m_noiseFigure = 5.01187
+no interference, so SNR = Pr/4.41361e-13W
+@end verbatim
+
+taking Xg to be constant at 0 to match ns-3 output:
+@verbatim
+Distance :: Pr :: SNR
+10 4.98265e-9 11289.3
+20 6.22831e-10 1411.16
+40 7.78539e-11 176.407
+60 2.30678e-11 52.2652
+80 9.73173e-12 22.0494
+100 4.98265e-12 11.2893
+200 6.22831e-13 1.41116
+500 3.98612e-14 .090314
+1000 4.98265e-15 .011289
+@end verbatim
+
+@subsection Validation test
+
+Test program available online at: @uref{http://xxx.xxx.com,,}
+
+Taken at default settings (packetSize = 1000, numPackets = 1, exponent = 3, reference loss = 46.6777, 802.11b at 2.4GHz)
+@verbatim
+Distance :: Pr :: snr
+10 4.98471e-9 11293.9
+20 6.23089e-10 1411.74
+40 7.78861e-11 176.468
+60 2.30774e-11 52.2868
+80 9.72576e-12 22.0585
+100 4.98471e-12 11.2939
+200 6.23089e-13 1.41174
+500 3.98777e-14 0.0903516
+1000 4.98471e-15 0.0112939
+@end verbatim
+
+
+@subsection Discussion
+There is a ~0.04% error between these results. I do not believe this is
+due to rounding, as the results taken from the equation from the source
+match exactly with the Friis results taken at one less power of ten.
+(Friis and LogDistance can be modeled by Pt*Gt*Gr*lmb^2/((4*pi)^2*d^n*L),
+where n is the exponent. n is 2 for Friis, and 3 for logDistance, which
+accounts for the power of ten. I.e., Friis at 100m is equivalent to LogDistance
+at 10m.) Perhaps ns-3 takes the random number into account despite
+not being listed in the source.
+
diff --git a/doc/testing/testing-framework.texi b/doc/testing/testing-framework.texi
new file mode 100644
index 000000000..1540c2d6b
--- /dev/null
+++ b/doc/testing/testing-framework.texi
@@ -0,0 +1,567 @@
+@c ========================================================================
+@c Testing framework
+@c ========================================================================
+
+@node TestingFramework
+@chapter Testing Framework
+
+@node BuildBots
+@section Buildbots
+
+The @command{ns-3} testing framework is composed of several major pieces. At
+the highest level are the buildbots (build robots). If you are unfamiliar with
+this system look at @uref{http://djmitche.github.com/buildbot/docs/0.7.11/}.
+This is an open-source automated system that allows @command{ns-3} to be rebuilt
+and tested each time something has changed. By running the buildbots on a number
+of different systems we can ensure that @command{ns-3} builds and executes
+properly on all of its supported systems.
+
+Users (and developers) typically will not interact with the buildbot system other
+than to read its messages regarding test results. If a failure is detected in
+one of the automated build and test jobs, the buildbot will send an email to the
+@emph{ns-developers} mailing list. This email will look something like:
+
+@verbatim
+ The Buildbot has detected a new failure of osx-ppc-g++-4.2 on NsNam.
+ Full details are available at:
+ http://ns-regression.ee.washington.edu:8010/builders/osx-ppc-g%2B%2B-4.2/builds/0
+
+ Buildbot URL: http://ns-regression.ee.washington.edu:8010/
+
+ Buildslave for this Build: darwin-ppc
+
+ Build Reason: The web-page 'force build' button was pressed by 'ww': ww
+
+ Build Source Stamp: HEAD
+ Blamelist:
+
+ BUILD FAILED: failed shell_5 shell_6 shell_7 shell_8 shell_9 shell_10 shell_11 shell_12 shell_13 shell_14 shell_15
+
+ sincerely,
+ -The Buildbot
+@end verbatim
+
+In the full details URL shown in the email, one can search for the keyword
+@code{failed} and select the @code{stdio} link for the corresponding step to see
+the reason for the failure.
+
+The buildbot will do its job quietly if there are no errors, and the system will
+undergo build and test cycles every day to verify that all is well.
+
+@node Testpy
+@section Test.py
+The buildbots use a Python program, @command{test.py}, that is responsible for
+running all of the tests and collecting the resulting reports into a human-
+readable form. This program is also available for use by users and
+developers.
+
+@command{test.py} is very flexible in allowing the user to specify the number
+and kind of tests to run; and also the amount and kind of output to generate.
+
+By default, @command{test.py} will run all available tests and report status
+back in a very concise form. Running the command,
+
+@verbatim
+ ./test.py
+@end verbatim
+
+will result in a number of @code{PASS}, @code{FAIL}, @code{CRASH} or @code{SKIP}
+indications followed by the kind of test that was run and its display name.
+
+@verbatim
+ Waf: Entering directory `/home/craigdo/repos/ns-3-allinone-test/ns-3-dev/build'
+ Waf: Leaving directory `/home/craigdo/repos/ns-3-allinone-test/ns-3-dev/build'
+ 'build' finished successfully (0.939s)
+ FAIL: TestSuite ns3-wifi-propagation-loss-models
+ PASS: TestSuite object-name-service
+ PASS: TestSuite pcap-file-object
+ PASS: TestSuite ns3-tcp-cwnd
+ ...
+
+ PASS: TestSuite ns3-tcp-interoperability
+ PASS: Example csma-broadcast
+ PASS: Example csma-multicast
+@end verbatim
+
+This mode is intended to be used by users who are interested in determining if
+their distribution is working correctly, and by developers who are interested
+in determining if changes they have made have caused any regressions.
+
+If one specifies an optional output style, one can generate detailed descriptions
+of the tests and status. Available styles are @command{text} and @command{HTML}.
+The buildbots will select the HTML option to generate HTML test reports for the
+nightly builds using,
+
+@verbatim
+ ./test.py --html=nightly.html
+@end verbatim
+
+In this case, an HTML file named ``nightly.html'' would be created with a pretty
+summary of the testing done. A ``human readable'' format is available for users
+interested in the details.
+
+@verbatim
+ ./test.py --text=results.txt
+@end verbatim
+
+In the example above, the test suite checking the @command{ns-3} wireless
+device propagation loss models failed. By default no further information is
+provided.
+
+To further explore the failure, @command{test.py} allows a single test suite
+to be specified. Running the command,
+
+@verbatim
+ ./test.py --suite=ns3-wifi-propagation-loss-models
+@end verbatim
+
+results in that single test suite being run.
+
+@verbatim
+ FAIL: TestSuite ns3-wifi-propagation-loss-models
+@end verbatim
+
+To find detailed information regarding the failure, one must specify the kind
+of output desired. For example, most people will probably be interested in
+a text file:
+
+@verbatim
+ ./test.py --suite=ns3-wifi-propagation-loss-models --text=results.txt
+@end verbatim
+
+This will result in that single test suite being run with the test status written to
+the file ``results.txt''.
+
+You should find something similar to the following in that file:
+
+@verbatim
+FAIL: Test Suite ``ns3-wifi-propagation-loss-models'' (real 0.02 user 0.01 system 0.00)
+ PASS: Test Case "Check ... Friis ... model ..." (real 0.01 user 0.00 system 0.00)
+ FAIL: Test Case "Check ... Log Distance ... model" (real 0.01 user 0.01 system 0.00)
+ Details:
+ Message: Got unexpected SNR value
+ Condition: [long description of what actually failed]
+ Actual: 176.395
+ Limit: 176.407 +- 0.0005
+ File: ../src/test/ns3wifi/propagation-loss-models-test-suite.cc
+ Line: 360
+@end verbatim
+
+Notice that the Test Suite is composed of two Test Cases. The first test case
+checked the Friis propagation loss model and passed. The second test case
+failed checking the Log Distance propagation model. In this case, an SNR of
+176.395 was found, and the test expected a value of 176.407 correct to three
+decimal places. The file which implemented the failing test is listed as well
+as the line of code which triggered the failure.
+
+If you desire, you could just as easily have written an HTML file using the
+@code{--html} option as described above.
+
+Typically a user will run all tests at least once after downloading
+@command{ns-3} to ensure that his or her environment has been built correctly
+and is generating correct results according to the test suites. Developers
+will typically run the test suites before and after making a change to ensure
+that they have not introduced a regression with their changes. In this case,
+developers may not want to run all tests, but only a subset. For example,
+the developer might only want to run the unit tests periodically while making
+changes to a repository. In this case, @code{test.py} can be told to constrain
+the types of tests being run to a particular class of tests. The following
+command will result in only the unit tests being run:
+
+@verbatim
+ ./test.py --constrain=unit
+@end verbatim
+
+Similarly, the following command will result in only the example smoke tests
+being run:
+
+@verbatim
+  ./test.py --constrain=example
+@end verbatim
+
+To see a quick list of the legal kinds of constraints, you can ask for them
+to be listed. The following command
+
+@verbatim
+ ./test.py --kinds
+@end verbatim
+
+will result in the following list being displayed:
+
+@verbatim
+ Waf: Entering directory `/home/craigdo/repos/ns-3-allinone-test/ns-3-dev/build'
+ Waf: Leaving directory `/home/craigdo/repos/ns-3-allinone-test/ns-3-dev/build'
+  'build' finished successfully (0.939s)
+ bvt: Build Verification Tests (to see if build completed successfully)
+ unit: Unit Tests (within modules to check basic functionality)
+ system: System Tests (spans modules to check integration of modules)
+ example: Examples (to see if example programs run successfully)
+ performance: Performance Tests (check to see if the system is as fast as expected)
+@end verbatim
+
+This list is displayed in increasing order of complexity of the tests. Any of these
+kinds of test can be provided as a constraint using the @code{--constrain} option.
+
+To see a quick list of all of the test suites available, you can ask for them
+to be listed. The following command,
+
+@verbatim
+ ./test.py --list
+@end verbatim
+
+will result in a list of the test suites being displayed, similar to:
+
+@verbatim
+ Waf: Entering directory `/home/craigdo/repos/ns-3-allinone-test/ns-3-dev/build'
+ Waf: Leaving directory `/home/craigdo/repos/ns-3-allinone-test/ns-3-dev/build'
+ 'build' finished successfully (0.939s)
+ ns3-wifi-propagation-loss-models
+ ns3-tcp-cwnd
+ ns3-tcp-interoperability
+ pcap-file-object
+ object-name-service
+ random-number-generators
+@end verbatim
+
+Any of these listed suites can be selected to be run by itself using the
+@code{--suite} option as shown above.
+
+Similarly to test suites, one can run a single example program using the @code{--example}
+option.
+
+@verbatim
+ ./test.py --example=udp-echo
+@end verbatim
+
+results in that single example being run.
+
+@verbatim
+ PASS: Example udp-echo
+@end verbatim
+
+Normally when example programs are executed, they write a large amount of trace
+file data. This is normally saved to the base directory of the distribution
+(e.g., /home/user/ns-3-dev). When @command{test.py} runs an example, it really
+is completely unconcerned with the trace files. It just wants to determine
+if the example can be built and run without error. Since this is the case, the
+trace files are written into a @code{/tmp/unchecked-traces} directory. If you
+run the above example, you should be able to find the associated
+@code{udp-echo.tr} and @code{udp-echo-n-1.pcap} files there.
+
+The list of available examples is defined by the contents of the ``examples''
+directory in the distribution. If you select an example for execution using
+the @code{--example} option, @code{test.py} will not make any attempt to decide
+if the example has been configured or not, it will just try to run it and
+report the result of the attempt.
+
+When @command{test.py} runs, by default it will first ensure that the system has
+been completely built. This can be defeated by selecting the @code{--nowaf}
+option.
+
+@verbatim
+ ./test.py --list --nowaf
+@end verbatim
+
+will result in a list of the currently built test suites being displayed, similar to:
+
+@verbatim
+ ns3-wifi-propagation-loss-models
+ ns3-tcp-cwnd
+ ns3-tcp-interoperability
+ pcap-file-object
+ object-name-service
+ random-number-generators
+@end verbatim
+
+Note the absence of the @command{Waf} build messages.
+
+Finally, @code{test.py} provides a @command{--verbose} option which will print
+large amounts of information about its progress. It is not expected that this
+will be terribly useful for most users.
+
+@node TestTaxonomy
+@section Test Taxonomy
+
+As mentioned above, tests are grouped into a number of broadly defined
+classifications to allow users to selectively run tests to address the different
+kinds of testing that need to be done.
+
+@itemize @bullet
+@item Build Verification Tests
+@item Unit Tests
+@item System Tests
+@item Examples
+@item Performance Tests
+@end itemize
+
+@node BuildVerificationTests
+@subsection Build Verification Tests
+
+These are relatively simple tests that are built along with the distribution
+and are used to make sure that the build is pretty much working. Our
+current unit tests live in the source files of the code they test and are
+built into the ns-3 modules; and so fit the description of BVTs. BVTs live
+in the same source code that is built into the ns-3 code. Our current tests
+are examples of this kind of test.
+
+@node UnitTests
+@subsection Unit Tests
+
+Unit tests are more involved tests that go into detail to make sure that a
+piece of code works as advertized in isolation. There is really no reason
+for this kind of test to be built into an ns-3 module. It turns out, for
+example, that the unit tests for the object name service are about the same
+size as the object name service code itself. Unit tests are tests that
+check a single bit of functionality that are not built into the ns-3 code,
+but live in the same directory as the code it tests. It is possible that
+these tests check integration of multiple implementation files in a module
+as well. The file src/core/names-test-suite.cc is an example of this kind
+of test. The file src/common/pcap-file-test-suite.cc is another example
+that uses a known good pcap file as a test vector file. This file is stored
+locally in the src/common directory.
+
+@node SystemTests
+@subsection System Tests
+
+System tests are those that involve more than one module in the system. We
+have lots of this kind of test running in our current regression framework,
+but they are overloaded examples. We provide a new place for this kind of
+test in the directory ``src/tests''. The file
+src/test/ns3tcp/ns3-interop-test-suite.cc is an example of this kind of
+test. It uses NSC TCP to test the ns-3 TCP implementation. Often there
+will be test vectors required for this kind of test, and they are stored in
+the directory where the test lives. For example,
+ns3tcp-interop-response-vectors.pcap is a file consisting of a number of TCP
+headers that are used as the expected responses of the ns-3 TCP under test
+to a stimulus generated by the NSC TCP which is used as a ``known good''
+implementation.
+
+@node Examples
+@subsection Examples
+
+The examples are tested by the framework to make sure they build and will
+run. Nothing is checked, and currently the pcap files are just written off
+into /tmp to be discarded. If the examples run (don't crash) they pass this
+smoke test.
+
+@node PerformanceTests
+@subsection Performance Tests
+
+Performance tests are those which exercise a particular part of the system
+and determine if the tests have executed to completion in a reasonable time.
+
+@node RunningTests
+@section Running Tests
+
+Tests are typically run using the high level @code{test.py} program. They
+can also be run ``manually'' using a low level test-runner executable directly
+from @code{waf}.
+
+@node RunningTestsUnderTestRunnerExecutable
+@section Running Tests Under the Test Runner Executable
+
+The test-runner is the bridge from generic Python code to @command{ns-3} code.
+It is written in C++ and uses the automatic test discovery process in the
+@command{ns-3} code to find and allow execution of all of the various tests.
+
+Although it may not be used directly very often, it is good to understand how
+@code{test.py} actually runs the various tests.
+
+In order to execute the test-runner, you run it like any other ns-3 executable
+-- using @code{waf}. To get a list of available options, you can type:
+
+@verbatim
+ ./waf --run "test-runner --help"
+@end verbatim
+
+You should see something like the following:
+
+@verbatim
+ Waf: Entering directory `/home/craigdo/repos/ns-3-allinone-test/ns-3-dev/build'
+ Waf: Leaving directory `/home/craigdo/repos/ns-3-allinone-test/ns-3-dev/build'
+ 'build' finished successfully (0.353s)
+ --basedir=dir: Set the base directory (where to find src) to ``dir''
+ --constrain=test-type: Constrain checks to test suites of type ``test-type''
+ --help: Print this message
+ --kinds: List all of the available kinds of tests
+ --list: List all of the test suites (optionally constrained by test-type)
+ --out=file-name: Set the test status output file to ``file-name''
+ --suite=suite-name: Run the test suite named ``suite-name''
+ --verbose: Turn on messages in the run test suites
+@end verbatim
+
+There are a number of things available to you which will be familiar to you if
+you have looked at @command{test.py}. This should be expected since the test-
+runner is just an interface between @code{test.py} and @command{ns-3}. You
+may notice that example-related commands are missing here. That is because
+the examples are really not @command{ns-3} tests. @command{test.py} runs them
+as if they were to present a unified testing environment, but they are really
+completely different and not to be found here.
+
+One new option that appears here is the @code{--basedir} option. It turns out
+that the tests may need to reference the source directory of the @code{ns-3}
+distribution to find local data, so a base directory is always required to run
+a test. To run one of the tests directly from the test-runner, you will need
+to specify the test suite to run along with the base directory. So you could do,
+
+@verbatim
+ ./waf --run "test-runner --basedir=`pwd` --suite=pcap-file-object"
+@end verbatim
+
+Note the ``backward'' quotation marks on the @code{pwd} command. This will run
+the @code{pcap-file-object} test quietly. The only indication that
+you will get that the test passed is the @emph{absence} of a message from
+@code{waf} saying that the program returned something other than a zero
+exit code. To get some output from the test, you need to specify an output
+file to which the tests will write their XML status using the @code{--out}
+option. You need to be careful interpreting the results because the test
+suites will @emph{append} results onto this file. Try,
+
+@verbatim
+  ./waf --run "test-runner --basedir=`pwd` --suite=pcap-file-object --out=myfile.xml"
+@end verbatim
+
+If you look at the file @code{myfile.xml} you should see something like,
+
+@verbatim
+
+ pcap-file-object
+
+ Check to see that PcapFile::Open with mode ``w'' works
+ PASS
+ real 0.00 user 0.00 system 0.00
+
+
+ Check to see that PcapFile::Open with mode ``r'' works
+ PASS
+ real 0.00 user 0.00 system 0.00
+
+
+ Check to see that PcapFile::Open with mode ``a'' works
+ PASS
+ real 0.00 user 0.00 system 0.00
+
+
+ Check to see that PcapFileHeader is managed correctly
+ PASS
+ real 0.00 user 0.00 system 0.00
+
+
+ Check to see that PcapRecordHeader is managed correctly
+ PASS
+ real 0.00 user 0.00 system 0.00
+
+
+ Check to see that PcapFile can read out a known good pcap file
+ PASS
+ real 0.00 user 0.00 system 0.00
+
+ PASS
+ real 0.00 user 0.00 system 0.00
+
+@end verbatim
+
+If you are familiar with XML this should be fairly self-explanatory. It is
+also not a complete XML file since test suites are designed to have their
+output appended to a master XML status file as described in the @command{test.py}
+section.
+
+@node ClassTestRunner
+@section Class TestRunner
+
+The executables that run dedicated test programs use a TestRunner class.  This
+class provides for automatic test registration and listing, as well as a way to
+execute the individual tests.  Individual test suites use C++ global constructors
+to add themselves to a collection of test suites managed by the test runner.
+The test runner is used to list all of the available tests and to select a test
+to be run.  This is a quite simple class that provides three static methods
+for adding and getting test suites to a collection of tests.  See the
+doxygen for class @code{ns3::TestRunner} for details.
+
+@node TestSuite
+@section Test Suite
+
+All @command{ns-3} tests are classified into Test Suites and Test Cases. A
+test suite is a collection of test cases that completely exercise a given kind
+of functionality. As described above, test suites can be classified as,
+
+@itemize @bullet
+@item Build Verification Tests
+@item Unit Tests
+@item System Tests
+@item Examples
+@item Performance Tests
+@end itemize
+
+This classification is exported from the TestSuite class. This class is quite
+simple, existing only as a place to export this type and to accumulate test
+cases. From a user perspective, in order to create a new TestSuite in the
+system one only has to define a new class that inherits from class @code{TestSuite}
+and perform these two duties.
+
+The following code will define a new class that can be run by @code{test.py}
+as a ``unit'' test with the display name, ``my-test-suite-name''.
+
+@verbatim
+  class MyTestSuite : public TestSuite
+ {
+ public:
+ MyTestSuite ();
+ };
+
+ MyTestSuite::MyTestSuite ()
+ : TestSuite ("my-test-suite-name", UNIT)
+ {
+ AddTestCase (new MyTestCase);
+ }
+
+ MyTestSuite myTestSuite;
+@end verbatim
+
+The base class takes care of all of the registration and reporting required to
+be a good citizen in the test framework.
+
+@node TestCase
+@section Test Case
+
+Individual tests are created using a TestCase class. Common models for the use
+of a test case include "one test case per feature", and "one test case per method."
+Mixtures of these models may be used.
+
+In order to create a new test case in the system, all one has to do is to inherit
+from the @code{TestCase} base class, override the constructor to give the test
+case a name and override the @code{DoRun} method to run the test.
+
+@verbatim
+class MyTestCase : public TestCase
+{
+ MyTestCase ();
+ virtual bool DoRun (void);
+};
+
+MyTestCase::MyTestCase ()
+ : TestCase ("Check some bit of functionality")
+{
+}
+
+bool
+MyTestCase::DoRun (void)
+{
+ NS_TEST_ASSERT_MSG_EQ (true, true, "Some failure message");
+ return GetErrorStatus ();
+}
+@end verbatim
+
+@node Utilities
+@section Utilities
+
+There are a number of utilities of various kinds that are also part of the
+testing framework. Examples include a generalized pcap file useful for
+storing test vectors; a generic container useful for transient storage of
+test vectors during test execution; and tools for generating presentations
+based on validation and verification testing results.
+
+
+
+
+
diff --git a/doc/testing/testing.css b/doc/testing/testing.css
new file mode 100644
index 000000000..a7586ac83
--- /dev/null
+++ b/doc/testing/testing.css
@@ -0,0 +1,156 @@
+body {
+ font-family: "Trebuchet MS", "Bitstream Vera Sans", verdana, lucida, arial, helvetica, sans-serif;
+ background: white;
+ color: black;
+ font-size: 11pt;
+}
+
+h1, h2, h3, h4, h5, h6 {
+# color: #990000;
+ color: #009999;
+}
+
+pre {
+ font-size: 10pt;
+ background: #e0e0e0;
+ color: black;
+}
+
+a:link, a:visited {
+ font-weight: normal;
+ text-decoration: none;
+ color: #0047b9;
+}
+
+a:hover {
+ font-weight: normal;
+ text-decoration: underline;
+ color: #0047b9;
+}
+
+img {
+ border: 0px;
+}
+
+#main th {
+ font-size: 12pt;
+ background: #b0b0b0;
+}
+
+.odd {
+ font-size: 12pt;
+ background: white;
+}
+
+.even {
+ font-size: 12pt;
+ background: #e0e0e0;
+}
+
+.answer {
+ font-size: large;
+ font-weight: bold;
+}
+
+.answer p {
+ font-size: 12pt;
+ font-weight: normal;
+}
+
+.answer ul {
+ font-size: 12pt;
+ font-weight: normal;
+}
+
+#container {
+ position: absolute;
+ width: 100%;
+ height: 100%;
+ top: 0px;
+}
+
+#feedback {
+ color: #b0b0b0;
+ font-size: 9pt;
+ font-style: italic;
+}
+
+#header {
+ position: absolute;
+ margin: 0px;
+ top: 10px;
+ height:96px;
+ left: 175px;
+ right: 10em;
+ bottom: auto;
+ background: white;
+ clear: both;
+}
+
+#middle {
+ position: absolute;
+ left: 0;
+ height: auto;
+ width: 100%;
+}
+
+#main {
+ position: absolute;
+ top: 50px;
+ left: 175px;
+ right: 100px;
+ background: white;
+ padding: 0em 0em 0em 0em;
+}
+
+#navbar {
+ position: absolute;
+ top: 75px;
+ left: 0em;
+ width: 146px;
+ padding: 0px;
+ margin: 0px;
+ font-size: 10pt;
+}
+
+#navbar a:link, #navbar a:visited {
+ font-weight: normal;
+ text-decoration: none;
+ color: #0047b9;
+}
+
+#navbar a:hover {
+ font-weight: normal;
+ text-decoration: underline;
+ color: #0047b9;
+}
+
+#navbar dl {
+ width: 146px;
+ padding: 0;
+ margin: 0 0 10px 0px;
+ background: #99ffff url(images/box_bottom2.gif) no-repeat bottom left;
+}
+
+#navbar dt {
+ padding: 6px 10px;
+ font-size: 100%;
+ font-weight: bold;
+ background: #009999;
+ margin: 0px;
+ border-bottom: 1px solid #fff;
+ color: white;
+ background: #009999 url(images/box_top2.gif) no-repeat top left;
+}
+
+#navbar dd {
+ font-size: 100%;
+ margin: 0 0 0 0px;
+ padding: 6px 10px;
+ color: #0047b9;
+}
+
+dd#selected {
+ background: #99ffff url(images/arrow.gif) no-repeat;
+ background-position: 4px 10px;
+}
diff --git a/doc/testing/testing.texi b/doc/testing/testing.texi
new file mode 100644
index 000000000..47496ad7b
--- /dev/null
+++ b/doc/testing/testing.texi
@@ -0,0 +1,98 @@
+\input texinfo @c -*-texinfo-*-
+@c %**start of header
+@setfilename ns-3.info
+@settitle ns-3 manual
+@c @setchapternewpage odd
+@c %**end of header
+
+@ifinfo
+Documentation for the @command{ns-3} project is available in
+several documents and the wiki:
+@itemize @bullet
+@item @uref{http://www.nsnam.org/doxygen/index.html,,ns-3 Doxygen/Manual}: Documentation of the public APIs of the simulator
+@item @uref{http://www.nsnam.org/tutorial/index.html,,ns-3 Tutorial}
+@item @uref{http://www.nsnam.org/docs/manual/index.html,,ns-3 Manual}
+@item Reference Manual
+@item @uref{http://www.nsnam.org/wiki/index.php,, ns-3 wiki}
+@end itemize
+
+This document is written in GNU Texinfo and is to be maintained in
+revision control on the @command{ns-3} code server. Both PDF and HTML versions
+should be available on the server. Changes to
+the document should be discussed on the ns-developers@@isi.edu mailing list.
+@end ifinfo
+
+@copying
+
+This is an @command{ns-3} reference manual.
+Primary documentation for the @command{ns-3} project is available in
+five forms:
+@itemize @bullet
+@item @uref{http://www.nsnam.org/doxygen/index.html,,ns-3 Doxygen}: Documentation of the public APIs of the simulator
+@item @uref{http://www.nsnam.org/docs/tutorial/index.html,,ns-3 Tutorial}
+@item @uref{http://www.nsnam.org/docs/manual/index.html,,ns-3 Manual}
+@item Testing and Validation (this document)
+@item @uref{http://www.nsnam.org/wiki/index.php,, ns-3 wiki}
+@end itemize
+
+This document is written in GNU Texinfo and is to be maintained in
+revision control on the @command{ns-3} code server. Both PDF and HTML
+versions should be available on the server. Changes to
+the document should be discussed on the ns-developers@@isi.edu mailing list.
+
+This software is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This software is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see @uref{http://www.gnu.org/licenses/}.
+@end copying
+
+@titlepage
+@title ns-3 Testing and Validation
+@author ns-3 project
+@author feedback: ns-developers@@isi.edu
+@today{}
+
+@c @page
+@vskip 0pt plus 1filll
+@insertcopying
+@end titlepage
+
+@c So the toc is printed at the start.
+@anchor{Full Table of Contents}
+@contents
+
+@ifnottex
+@node Top, Overview, Full Table of Contents
+@top ns-3 Manual (html version)
+
+For a pdf version of this document,
+see @uref{http://www.nsnam.org/docs/testing.pdf}.
+
+@insertcopying
+@end ifnottex
+
+@menu
+* Overview::
+* Background::
+* Testing framework::
+* How to write tests::
+* Propagation Loss Models::
+@end menu
+
+@include overview.texi
+@include background.texi
+@include testing-framework.texi
+@include how-to-write-tests.texi
+@include propagation-loss.texi
+
+@printindex cp
+
+@bye
diff --git a/src/common/known.pcap b/src/common/known.pcap
new file mode 100644
index 000000000..f5f6ae194
Binary files /dev/null and b/src/common/known.pcap differ
diff --git a/src/common/pcap-file-test-suite.cc b/src/common/pcap-file-test-suite.cc
new file mode 100644
index 000000000..c96f7135a
--- /dev/null
+++ b/src/common/pcap-file-test-suite.cc
@@ -0,0 +1,965 @@
+/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation;
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include
+#include
+#include
+#include
+
+#include "ns3/test.h"
+#include "ns3/pcap-file.h"
+
+using namespace ns3;
+
+// ===========================================================================
+// Some utility functions for the tests.
+// ===========================================================================
+
+uint16_t
+Swap (uint16_t val)
+{
+ return ((val >> 8) & 0x00ff) | ((val << 8) & 0xff00);
+}
+
+uint32_t
+Swap (uint32_t val)
+{
+ return ((val >> 24) & 0x000000ff) | ((val >> 8) & 0x0000ff00) | ((val << 8) & 0x00ff0000) | ((val << 24) & 0xff000000);
+}
+
+bool
+CheckFileExists (std::string filename)
+{
+ FILE * p = fopen (filename.c_str (), "rb");
+ if (p == 0)
+ {
+ return false;
+ }
+
+ fclose (p);
+ return true;
+}
+
+
+bool
+CheckFileLength (std::string filename, uint64_t sizeExpected)
+{
+ FILE * p = fopen (filename.c_str (), "rb");
+ if (p == 0)
+ {
+ return false;
+ }
+
+ fseek (p, 0, SEEK_END);
+
+ uint64_t sizeActual = ftell (p);
+ fclose (p);
+
+ return sizeActual == sizeExpected;
+}
+
+// ===========================================================================
+// Test case to make sure that the Pcap File Object can do its most basic job
+// and create an empty pcap file.
+// ===========================================================================
+class WriteModeCreateTestCase : public TestCase
+{
+public:
+ WriteModeCreateTestCase ();
+ virtual ~WriteModeCreateTestCase ();
+
+private:
+ virtual void DoSetup (void);
+ virtual bool DoRun (void);
+ virtual void DoTeardown (void);
+
+ std::string m_testFilename;
+};
+
+WriteModeCreateTestCase::WriteModeCreateTestCase ()
+ : TestCase ("Check to see that PcapFile::Open with mode \"w\" works")
+{
+}
+
+WriteModeCreateTestCase::~WriteModeCreateTestCase ()
+{
+}
+
+void
+WriteModeCreateTestCase::DoSetup (void)
+{
+ std::stringstream filename;
+ uint32_t n = rand ();
+ filename << n;
+ m_testFilename = "/tmp/" + filename.str () + ".pcap";
+}
+
+void
+WriteModeCreateTestCase::DoTeardown (void)
+{
+ remove (m_testFilename.c_str ());
+}
+
+bool
+WriteModeCreateTestCase::DoRun (void)
+{
+ PcapFile f;
+
+ //
+ // Opening a new file in write mode should result in an empty file of the
+ // given name.
+ //
+ bool err = f.Open (m_testFilename, "w");
+
+ NS_TEST_ASSERT_MSG_EQ (err, false, "Open (" << m_testFilename << ", \"w\") returns error");
+ f.Close ();
+
+ NS_TEST_ASSERT_MSG_EQ (CheckFileExists (m_testFilename), true,
+ "Open (" << m_testFilename << ", \"w\") does not create file");
+ NS_TEST_ASSERT_MSG_EQ (CheckFileLength (m_testFilename, 0), true,
+ "Open (" << m_testFilename << ", \"w\") does not result in an empty file");
+
+ //
+ // Calling Init() on a file created with "w" should result in a file just
+ // long enough to contain the pcap file header.
+ //
+ err = f.Open (m_testFilename, "w");
+ NS_TEST_ASSERT_MSG_EQ (err, false, "Open (" << m_testFilename << ", \"w\") returns error");
+
+ err = f.Init (1234, 5678, 7);
+ NS_TEST_ASSERT_MSG_EQ (err, false, "Init (1234, 5678, 7) returns error");
+
+ f.Close ();
+
+ NS_TEST_ASSERT_MSG_EQ (CheckFileLength (m_testFilename, 24), true,
+ "Init () does not result in a file with a pcap file header");
+
+ //
+ // Opening an existing file in write mode should result in that file being
+ // emptied.
+ //
+ err = f.Open (m_testFilename, "w");
+ NS_TEST_ASSERT_MSG_EQ (err, false, "Open (" << m_testFilename << ", \"w\") returns error");
+
+ f.Close ();
+
+ NS_TEST_ASSERT_MSG_EQ (CheckFileLength (m_testFilename, 0), true,
+ "Open (" << m_testFilename << ", \"w\") does not result in an empty file");
+
+ //
+ // Initialize the file again.
+ //
+ err = f.Open (m_testFilename, "w");
+ NS_TEST_ASSERT_MSG_EQ (err, false,
+ "Open (" << m_testFilename << ", \"w\") returns error");
+
+ err = f.Init (1234, 5678, 7);
+ NS_TEST_ASSERT_MSG_EQ (err, false, "Init (1234, 5678, 7) returns error");
+
+ //
+ // Now we should be able to write to it since it was opened in "w" mode.
+ // This is just a permissions check so we don't actually look at the
+ // data.
+ //
+ uint8_t buffer[128];
+ err = f.Write (0, 0, buffer, 128);
+ NS_TEST_ASSERT_MSG_EQ (err, false, "Write (write-only-file " << m_testFilename << ") returns error");
+
+ return false;
+}
+
+// ===========================================================================
+// Test case to make sure that the Pcap File Object can open an existing pcap
+// file.
+// ===========================================================================
+class ReadModeCreateTestCase : public TestCase
+{
+public:
+ ReadModeCreateTestCase ();
+ virtual ~ReadModeCreateTestCase ();
+
+private:
+ virtual void DoSetup (void);
+ virtual bool DoRun (void);
+ virtual void DoTeardown (void);
+
+ std::string m_testFilename;
+};
+
+ReadModeCreateTestCase::ReadModeCreateTestCase ()
+ : TestCase ("Check to see that PcapFile::Open with mode \"r\" works")
+{
+}
+
+ReadModeCreateTestCase::~ReadModeCreateTestCase ()
+{
+}
+
+void
+ReadModeCreateTestCase::DoSetup (void)
+{
+ std::stringstream filename;
+ uint32_t n = rand ();
+ filename << n;
+ m_testFilename = "/tmp/" + filename.str () + ".pcap";
+}
+
+void
+ReadModeCreateTestCase::DoTeardown (void)
+{
+ remove (m_testFilename.c_str ());
+}
+
+bool
+ReadModeCreateTestCase::DoRun (void)
+{
+ PcapFile f;
+
+ //
+ // Opening a non-existing file in read mode should result in an error.
+ //
+ bool err = f.Open (m_testFilename, "r");
+ NS_TEST_ASSERT_MSG_EQ (err, true, "Open (non-existing-filename " << m_testFilename << ", \"r\") does not return error");
+
+ NS_TEST_ASSERT_MSG_EQ (CheckFileExists (m_testFilename), false,
+ "Open (" << m_testFilename << ", \"r\") unexpectedly created a file");
+
+ //
+ // Okay, now create an uninitialized file using previously tested operations
+ //
+ err = f.Open (m_testFilename, "w");
+ NS_TEST_ASSERT_MSG_EQ (err, false, "Open (filename, \"w\") returns error");
+ f.Close ();
+
+ //
+ // Opening this file should result in an error since it has no pcap file header.
+ //
+ err = f.Open (m_testFilename, "r");
+ NS_TEST_ASSERT_MSG_EQ (err, true, "Open (non-initialized-filename " << m_testFilename << ", \"r\") does not return error");
+
+ //
+ // Okay, now open that non-initialized file in write mode and initialize it
+ // Note that we open it in write mode to initialize it.
+ //
+ err = f.Open (m_testFilename, "w");
+ NS_TEST_ASSERT_MSG_EQ (err, false, "Open (" << m_testFilename << ", \"w\") returns error");
+
+ err = f.Init (1234, 5678, 7);
+ NS_TEST_ASSERT_MSG_EQ (err, false, "Init (1234, 5678, 7) returns error");
+ f.Close ();
+
+ //
+ // Opening this file should now work since it has a pcap file header.
+ //
+ err = f.Open (m_testFilename, "r");
+ NS_TEST_ASSERT_MSG_EQ (err, false, "Open (initialized-filename " << m_testFilename << ", \"r\") returns error");
+
+ //
+ // Now we should not be able to write to it since it was opened in "r" mode
+ // even if it has been initialized..
+ //
+ uint8_t buffer[128];
+ err = f.Write (0, 0, buffer, 128);
+ NS_TEST_ASSERT_MSG_EQ (err, true, "Write (read-only-file " << m_testFilename << ") does not return error");
+
+ f.Close ();
+
+ return false;
+}
+
+// ===========================================================================
+// Test case to make sure that the Pcap File Object can open an existing pcap
+// file for appending.
+// ===========================================================================
+class AppendModeCreateTestCase : public TestCase
+{
+public:
+ AppendModeCreateTestCase ();
+ virtual ~AppendModeCreateTestCase ();
+
+private:
+ virtual void DoSetup (void);
+ virtual bool DoRun (void);
+ virtual void DoTeardown (void);
+
+ std::string m_testFilename;
+};
+
+AppendModeCreateTestCase::AppendModeCreateTestCase ()
+ : TestCase ("Check to see that PcapFile::Open with mode \"a\" works")
+{
+}
+
+AppendModeCreateTestCase::~AppendModeCreateTestCase ()
+{
+}
+
+void
+AppendModeCreateTestCase::DoSetup (void)
+{
+ std::stringstream filename;
+ uint32_t n = rand ();
+ filename << n;
+ m_testFilename = "/tmp/" + filename.str () + ".pcap";
+}
+
+void
+AppendModeCreateTestCase::DoTeardown (void)
+{
+ remove (m_testFilename.c_str ());
+}
+
+bool
+AppendModeCreateTestCase::DoRun (void)
+{
+ PcapFile f;
+
+ //
+ // Opening a non-existing file in append mode should result in an error.
+ //
+ bool err = f.Open (m_testFilename, "a");
+ NS_TEST_ASSERT_MSG_EQ (err, true, "Open (non-existing-filename " << m_testFilename << ", \"a\") does not return error");
+ f.Close ();
+
+ NS_TEST_ASSERT_MSG_EQ (CheckFileExists (m_testFilename), false,
+ "Open (" << m_testFilename << ", \"a\") unexpectedly created a file");
+
+ //
+ // Okay, now create an uninitialized file using previously tested operations
+ //
+ err = f.Open (m_testFilename, "w");
+ NS_TEST_ASSERT_MSG_EQ (err, false, "Open (" << m_testFilename << ", \"w\") returns error");
+ f.Close ();
+
+ //
+ // Opening this file should result in an error since it has no pcap file header.
+ //
+ err = f.Open (m_testFilename, "a");
+ NS_TEST_ASSERT_MSG_EQ (err, true, "Open (non-initialized-filename " << m_testFilename << ", \"a\") does not return error");
+
+ //
+ // Okay, now open that non-initialized file in write mode and initialize it.
+ //
+ err = f.Open (m_testFilename, "w");
+ NS_TEST_ASSERT_MSG_EQ (err, false, "Open (non-initialized-filename " << m_testFilename << ", \"w\") returns error");
+
+ err = f.Init (1234, 5678, 7);
+ NS_TEST_ASSERT_MSG_EQ (err, false, "Init (1234, 5678, 7) returns error");
+ f.Close ();
+
+ //
+ // Opening this file should now work since it has a pcap file header.
+ //
+ err = f.Open (m_testFilename, "a");
+ NS_TEST_ASSERT_MSG_EQ (err, false, "Open (initialized-filename " << m_testFilename << ", \"r\") returns error");
+
+ //
+ // We should be able to write to it since it was opened in "a" mode.
+ //
+ uint8_t buffer[128];
+ err = f.Write (0, 0, buffer, 128);
+ NS_TEST_ASSERT_MSG_EQ (err, false, "Write (append-mode-file " << m_testFilename << ") returns error");
+
+ f.Close ();
+
+ return false;
+}
+
+// ===========================================================================
+// Test case to make sure that the Pcap File Object can write out correct pcap
+// file headers in both endian cases, and then read them in correctly.
+// ===========================================================================
+class FileHeaderTestCase : public TestCase
+{
+public:
+ FileHeaderTestCase ();
+ virtual ~FileHeaderTestCase ();
+
+private:
+ virtual void DoSetup (void);
+ virtual bool DoRun (void);
+ virtual void DoTeardown (void);
+
+ std::string m_testFilename;
+};
+
+FileHeaderTestCase::FileHeaderTestCase ()
+ : TestCase ("Check to see that PcapFileHeader is managed correctly")
+{
+}
+
+FileHeaderTestCase::~FileHeaderTestCase ()
+{
+}
+
+void
+FileHeaderTestCase::DoSetup (void)
+{
+ std::stringstream filename;
+ uint32_t n = rand ();
+ filename << n;
+ m_testFilename = "/tmp/" + filename.str () + ".pcap";
+}
+
+void
+FileHeaderTestCase::DoTeardown (void)
+{
+ remove (m_testFilename.c_str ());
+}
+
+bool
+FileHeaderTestCase::DoRun (void)
+{
+ PcapFile f;
+
+ //
+ // Create an uninitialized file using previously tested operations
+ //
+ bool err = f.Open (m_testFilename, "w");
+ NS_TEST_ASSERT_MSG_EQ (err, false, "Open (" << m_testFilename << ", \"w\") returns error");
+
+ //
+ // Initialize the pcap file header.
+ //
+ err = f.Init (1234, 5678, 7);
+ NS_TEST_ASSERT_MSG_EQ (err, false,
+ "Init (1234, 5678, 7) returns error");
+ f.Close ();
+
+ //
+ // Take a look and see what was done to the file
+ //
+ FILE *p = fopen (m_testFilename.c_str (), "r+b");
+ NS_TEST_ASSERT_MSG_NE (p, 0, "fopen(" << m_testFilename << ") should have been able to open a correctly created pcap file");
+
+ uint32_t val32;
+ uint16_t val16;
+
+ size_t result = fread (&val32, sizeof(val32), 1, p);
+ NS_TEST_ASSERT_MSG_EQ (result, 1, "Unable to fread() magic number");
+ NS_TEST_ASSERT_MSG_EQ (val32, 0xa1b2c3d4, "Magic number written incorrectly");
+
+ result = fread (&val16, sizeof(val16), 1, p);
+ NS_TEST_ASSERT_MSG_EQ (result, 1, "Unable to fread() version major");
+ NS_TEST_ASSERT_MSG_EQ (val16, 2, "Version major written incorrectly");
+
+ result = fread (&val16, sizeof(val16), 1, p);
+ NS_TEST_ASSERT_MSG_EQ (result, 1, "Unable to fread() version minor");
+ NS_TEST_ASSERT_MSG_EQ (val16, 4, "Version minor written incorrectly");
+
+ result = fread (&val32, sizeof(val32), 1, p);
+ NS_TEST_ASSERT_MSG_EQ (result, 1, "Unable to fread() time zone correction");
+ NS_TEST_ASSERT_MSG_EQ (val32, 7, "Version minor written incorrectly");
+
+ result = fread (&val32, sizeof(val32), 1, p);
+ NS_TEST_ASSERT_MSG_EQ (result, 1, "Unable to fread() sig figs");
+ NS_TEST_ASSERT_MSG_EQ (val32, 0, "Sig figs written incorrectly");
+
+ result = fread (&val32, sizeof(val32), 1, p);
+ NS_TEST_ASSERT_MSG_EQ (result, 1, "Unable to fread() snap length");
+ NS_TEST_ASSERT_MSG_EQ (val32, 5678, "Snap length written incorrectly");
+
+ result = fread (&val32, sizeof(val32), 1, p);
+ NS_TEST_ASSERT_MSG_EQ (result, 1, "Unable to fread() data link type");
+ NS_TEST_ASSERT_MSG_EQ (val32, 1234, "Data length type written incorrectly");
+
+ fclose (p);
+ p = 0;
+
+ //
+ // We wrote a native-endian file out correctly, now let's see if we can read
+ // it back in correctly.
+ //
+ err = f.Open (m_testFilename, "r");
+ NS_TEST_ASSERT_MSG_EQ (err, false, "Open (existing-initialized-file " << m_testFilename << ", \"r\") returns error");
+
+ NS_TEST_ASSERT_MSG_EQ (f.GetMagic (), 0xa1b2c3d4, "Read back magic number incorrectly");
+ NS_TEST_ASSERT_MSG_EQ (f.GetVersionMajor (), 2, "Read back version major incorrectly");
+ NS_TEST_ASSERT_MSG_EQ (f.GetVersionMinor (), 4, "Read back version minor incorrectly");
+ NS_TEST_ASSERT_MSG_EQ (f.GetTimeZoneOffset (), 7, "Read back time zone offset incorrectly");
+ NS_TEST_ASSERT_MSG_EQ (f.GetSigFigs (), 0, "Read back sig figs incorrectly");
+ NS_TEST_ASSERT_MSG_EQ (f.GetSnapLen (), 5678, "Read back snap len incorrectly");
+ NS_TEST_ASSERT_MSG_EQ (f.GetDataLinkType (), 1234, "Read back data link type incorrectly");
+
+ //
+ // Re-open the file to erase its contents.
+ //
+ err = f.Open (m_testFilename, "w");
+ NS_TEST_ASSERT_MSG_EQ (err, false, "Open (" << m_testFilename << ", \"w\") returns error");
+
+ //
+ // Initialize the pcap file header, turning on swap mode manually to force
+ // the pcap file header to be written out in foreign-endian form, whichever
+ // endian-ness that might be.
+ //
+ err = f.Init (1234, 5678, 7, true);
+ NS_TEST_ASSERT_MSG_EQ (err, false, "Init (1234, 5678, 7) returns error");
+ f.Close ();
+
+ //
+ // Take a look and see what was done to the file. Everything should now
+ // appear byte-swapped.
+ //
+ p = fopen (m_testFilename.c_str (), "r+b");
+ NS_TEST_ASSERT_MSG_NE (p, 0, "fopen(" << m_testFilename << ") should have been able to open a correctly created pcap file");
+
+ result = fread (&val32, sizeof(val32), 1, p);
+ NS_TEST_ASSERT_MSG_EQ (result, 1, "Unable to fread() magic number");
+ NS_TEST_ASSERT_MSG_EQ (val32, Swap(uint32_t (0xa1b2c3d4)), "Magic number written incorrectly");
+
+ result = fread (&val16, sizeof(val16), 1, p);
+ NS_TEST_ASSERT_MSG_EQ (result, 1, "Unable to fread() version major");
+ NS_TEST_ASSERT_MSG_EQ (val16, Swap(uint16_t (2)), "Version major written incorrectly");
+
+ result = fread (&val16, sizeof(val16), 1, p);
+ NS_TEST_ASSERT_MSG_EQ (result, 1, "Unable to fread() version minor");
+ NS_TEST_ASSERT_MSG_EQ (val16, Swap(uint16_t (4)), "Version minor written incorrectly");
+
+ result = fread (&val32, sizeof(val32), 1, p);
+ NS_TEST_ASSERT_MSG_EQ (result, 1, "Unable to fread() time zone correction");
+ NS_TEST_ASSERT_MSG_EQ (val32, Swap(uint32_t (7)), "Version minor written incorrectly");
+
+ result = fread (&val32, sizeof(val32), 1, p);
+ NS_TEST_ASSERT_MSG_EQ (result, 1, "Unable to fread() sig figs");
+ NS_TEST_ASSERT_MSG_EQ (val32, 0, "Sig figs written incorrectly");
+
+ result = fread (&val32, sizeof(val32), 1, p);
+ NS_TEST_ASSERT_MSG_EQ (result, 1, "Unable to fread() snap length");
+ NS_TEST_ASSERT_MSG_EQ (val32, Swap(uint32_t (5678)), "Snap length written incorrectly");
+
+ result = fread (&val32, sizeof(val32), 1, p);
+ NS_TEST_ASSERT_MSG_EQ (result, 1, "Unable to fread() data link type");
+ NS_TEST_ASSERT_MSG_EQ (val32, Swap(uint32_t (1234)), "Data length type written incorrectly");
+
+ fclose (p);
+ p = 0;
+
+ //
+ // We wrote an opposite-endian file out correctly, now let's see if we can read
+ // it back in correctly.
+ //
+ err = f.Open (m_testFilename, "r");
+ NS_TEST_ASSERT_MSG_EQ (err, false, "Open (existing-initialized-file " << m_testFilename << ", \"r\") returns error");
+
+ NS_TEST_ASSERT_MSG_EQ (f.GetSwapMode (), true, "Byte-swapped file not correctly indicated");
+
+ NS_TEST_ASSERT_MSG_EQ (f.GetMagic (), 0xa1b2c3d4, "Read back magic number incorrectly");
+ NS_TEST_ASSERT_MSG_EQ (f.GetVersionMajor (), 2, "Read back version major incorrectly");
+ NS_TEST_ASSERT_MSG_EQ (f.GetVersionMinor (), 4, "Read back version minor incorrectly");
+ NS_TEST_ASSERT_MSG_EQ (f.GetTimeZoneOffset (), 7, "Read back time zone offset incorrectly");
+ NS_TEST_ASSERT_MSG_EQ (f.GetSigFigs (), 0, "Read back sig figs incorrectly");
+ NS_TEST_ASSERT_MSG_EQ (f.GetSnapLen (), 5678, "Read back snap len incorrectly");
+ NS_TEST_ASSERT_MSG_EQ (f.GetDataLinkType (), 1234, "Read back data link type incorrectly");
+
+ f.Close ();
+
+ return false;
+}
+
+// ===========================================================================
+// Test case to make sure that the Pcap File Object can write pcap packet
+// records in both endian cases, and then read them in correctly.
+// ===========================================================================
+class RecordHeaderTestCase : public TestCase
+{
+public:
+ RecordHeaderTestCase ();
+ virtual ~RecordHeaderTestCase ();
+
+private:
+ virtual void DoSetup (void);
+ virtual bool DoRun (void);
+ virtual void DoTeardown (void);
+
+ std::string m_testFilename;
+};
+
+RecordHeaderTestCase::RecordHeaderTestCase ()
+ : TestCase ("Check to see that PcapRecordHeader is managed correctly")
+{
+}
+
+RecordHeaderTestCase::~RecordHeaderTestCase ()
+{
+}
+
+void
+RecordHeaderTestCase::DoSetup (void)
+{
+ std::stringstream filename;
+ uint32_t n = rand ();
+ filename << n;
+ m_testFilename = "/tmp/" + filename.str () + ".pcap";
+}
+
+void
+RecordHeaderTestCase::DoTeardown (void)
+{
+ remove (m_testFilename.c_str ());
+}
+
+bool
+RecordHeaderTestCase::DoRun (void)
+{
+ PcapFile f;
+
+ //
+ // Create an uninitialized file using previously tested operations
+ //
+ bool err = f.Open (m_testFilename, "w");
+ NS_TEST_ASSERT_MSG_EQ (err, false, "Open (" << m_testFilename << ", \"w\") returns error");
+
+ //
+ // Initialize the pcap file header.
+ //
+ err = f.Init (37, 43, -7);
+ NS_TEST_ASSERT_MSG_EQ (err, false, "Init (37, 43, -7) returns error");
+
+ //
+ // Initialize a buffer with a counting pattern to check the data later.
+ //
+ uint8_t bufferOut[128];
+ for (uint32_t i = 0; i < 128; ++i)
+ {
+ bufferOut[i] = i;
+ }
+
+ //
+ // Now we should be able to write a packet to it since it was opened in "w"
+ // mode. The packet data written should be limited to 43 bytes in length
+ // by the Init() call above.
+ //
+ err = f.Write (1234, 5678, bufferOut, 128);
+ NS_TEST_ASSERT_MSG_EQ (err, false, "Write (write-only-file " << m_testFilename << ") returns error");
+ f.Close ();
+
+ //
+ // Let's peek into the file and see what actually went out for that
+ // packet.
+ //
+ FILE *p = fopen (m_testFilename.c_str (), "r+b");
+ NS_TEST_ASSERT_MSG_NE (p, 0, "fopen() should have been able to open a correctly created pcap file");
+
+ //
+ // A pcap file header takes up 24 bytes, a pcap record header takes up 16 bytes
+ // and we wrote in 43 bytes, so the file must be 83 bytes long. Let's just
+ // double check that this is exactly what happened.
+ //
+ fseek (p, 0, SEEK_END);
+ uint64_t size = ftell (p);
+ NS_TEST_ASSERT_MSG_EQ (size, 83, "Pcap file with one 43 byte packet is incorrect size");
+
+ //
+ // A pcap file header takes up 24 bytes, so we should see a pcap record header
+ // starting there in the file. We've tested this all before so we just assume
+ // it's all right and just seek to just past that point..
+ //
+ fseek (p, 24, SEEK_SET);
+
+ uint32_t val32;
+
+ size_t result = fread (&val32, sizeof(val32), 1, p);
+ NS_TEST_ASSERT_MSG_EQ (result, 1, "Unable to fread() seconds timestamp");
+ NS_TEST_ASSERT_MSG_EQ (val32, 1234, "Seconds timestamp written incorrectly");
+
+ result = fread (&val32, sizeof(val32), 1, p);
+ NS_TEST_ASSERT_MSG_EQ (result, 1, "Unable to fread() microseconds timestamp");
+ NS_TEST_ASSERT_MSG_EQ (val32, 5678, "Microseconds timestamp written incorrectly");
+
+ result = fread (&val32, sizeof(val32), 1, p);
+ NS_TEST_ASSERT_MSG_EQ (result, 1, "Unable to fread() included length");
+ NS_TEST_ASSERT_MSG_EQ (val32, 43, "Included length written incorrectly");
+
+ result = fread (&val32, sizeof(val32), 1, p);
+ NS_TEST_ASSERT_MSG_EQ (result, 1, "Unable to fread() actual length");
+ NS_TEST_ASSERT_MSG_EQ (val32, 128, "Actual length written incorrectly");
+
+ //
+ // Take a look and see what went out into the file. The packet data
+ // should be unchanged (unswapped).
+ //
+ uint8_t bufferIn[128];
+
+ result = fread (bufferIn, 1, 43, p);
+ NS_TEST_ASSERT_MSG_EQ (result, 43, "Unable to fread() packet data of expected length");
+
+ for (uint32_t i = 0; i < 43; ++i)
+ {
+ NS_TEST_ASSERT_MSG_EQ (bufferIn[i], bufferOut[i], "Incorrect packet data written");
+ }
+
+ fclose (p);
+ p = 0;
+
+ //
+ // Let's see if the PcapFile object can figure out how to do the same thing
+ // correctly read in a packet.
+ //
+ err = f.Open (m_testFilename, "r");
+ NS_TEST_ASSERT_MSG_EQ (err, false, "Open (" << m_testFilename << ", \"r\") of existing good file returns error");
+
+ uint32_t tsSec, tsUsec, inclLen, origLen, readLen;
+
+ err = f.Read (bufferIn, sizeof(bufferIn), tsSec, tsUsec, inclLen, origLen, readLen);
+ NS_TEST_ASSERT_MSG_EQ (err, false, "Read() of known good packet returns error");
+ NS_TEST_ASSERT_MSG_EQ (tsSec, 1234, "Incorrectly read seconds timestamp from known good packet");
+ NS_TEST_ASSERT_MSG_EQ (tsUsec, 5678, "Incorrectly read microseconds timestamp from known good packet");
+ NS_TEST_ASSERT_MSG_EQ (inclLen, 43, "Incorrectly read included length from known good packet");
+ NS_TEST_ASSERT_MSG_EQ (origLen, 128, "Incorrectly read original length from known good packet");
+ NS_TEST_ASSERT_MSG_EQ (readLen, 43, "Incorrectly constructed actual read length from known good packet given buffer size");
+
+ //
+ // Did the data come back correctly?
+ //
+ for (uint32_t i = 0; i < 43; ++i)
+ {
+ NS_TEST_ASSERT_MSG_EQ (bufferIn[i], bufferOut[i], "Incorrect packet data read from known good packet");
+ }
+
+ //
+ // We have to check to make sure that the pcap record header is swapped
+ // correctly. Open the file in write mode to clear the data.
+ //
+ err = f.Open (m_testFilename, "w");
+ NS_TEST_ASSERT_MSG_EQ (err, false, "Open (" << m_testFilename << ", \"w\") returns error");
+
+ //
+ // Initialize the pcap file header, forcing the object into swap mode.
+ //
+ err = f.Init (37, 43, -7, true);
+ NS_TEST_ASSERT_MSG_EQ (err, false, "Init (37, 43, -7, true) returns error");
+
+ //
+ // Now we should be able to write a packet to it since it was opened in "w"
+ // mode. The packet data written should be limited to 43 bytes in length
+ // by the Init() call above.
+ //
+ err = f.Write (1234, 5678, bufferOut, 128);
+ NS_TEST_ASSERT_MSG_EQ (err, false, "Write (write-only-file " << m_testFilename << ") returns error");
+ f.Close ();
+
+ //
+ // Let's peek into the file and see what actually went out for that
+ // packet.
+ //
+ p = fopen (m_testFilename.c_str (), "r+b");
+ NS_TEST_ASSERT_MSG_NE (p, 0, "fopen() should have been able to open a correctly created pcap file");
+
+ //
+ // A pcap file header takes up 24 bytes, a pcap record header takes up 16 bytes
+ // and we wrote in 43 bytes, so the file must be 83 bytes long. Let's just
+ // double check that this is exactly what happened.
+ //
+ fseek (p, 0, SEEK_END);
+ size = ftell (p);
+ NS_TEST_ASSERT_MSG_EQ (size, 83, "Pcap file with one 43 byte packet is incorrect size");
+
+ //
+ // A pcap file header takes up 24 bytes, so we should see a pcap record header
+ // starting there in the file. We've tested this all before so we just assume
+ // it's all right and just seek past it.
+ //
+ fseek (p, 24, SEEK_SET);
+
+ result = fread (&val32, sizeof(val32), 1, p);
+ NS_TEST_ASSERT_MSG_EQ (result, 1, "Unable to fread() seconds timestamp");
+ NS_TEST_ASSERT_MSG_EQ (val32, Swap (uint32_t (1234)), "Swapped seconds timestamp written incorrectly");
+
+ result = fread (&val32, sizeof(val32), 1, p);
+ NS_TEST_ASSERT_MSG_EQ (result, 1, "Unable to fread() microseconds timestamp");
+ NS_TEST_ASSERT_MSG_EQ (val32, Swap (uint32_t (5678)), "Swapped microseconds timestamp written incorrectly");
+
+ result = fread (&val32, sizeof(val32), 1, p);
+ NS_TEST_ASSERT_MSG_EQ (result, 1, "Unable to fread() included length");
+ NS_TEST_ASSERT_MSG_EQ (val32, Swap (uint32_t (43)), "Swapped included length written incorrectly");
+
+ result = fread (&val32, sizeof(val32), 1, p);
+ NS_TEST_ASSERT_MSG_EQ (result, 1, "Unable to fread() actual length");
+ NS_TEST_ASSERT_MSG_EQ (val32, Swap (uint32_t (128)), "Swapped Actual length written incorrectly");
+
+ //
+ // Take a look and see what went out into the file. The packet data
+ // should be unchanged (unswapped).
+ //
+ result = fread (bufferIn, 1, 43, p);
+ NS_TEST_ASSERT_MSG_EQ (result, 43, "Unable to fread() packet data of expected length");
+
+ for (uint32_t i = 0; i < 43; ++i)
+ {
+ NS_TEST_ASSERT_MSG_EQ (bufferIn[i], bufferOut[i], "Incorrect packet data written");
+ }
+
+ fclose (p);
+ p = 0;
+
+ //
+ // Let's see if the PcapFile object can figure out how to do the same thing and
+ // correctly read in a packet. The record header info should come back to us
+ // swapped back into correct form.
+ //
+ err = f.Open (m_testFilename, "r");
+ NS_TEST_ASSERT_MSG_EQ (err, false, "Open (" << m_testFilename << ", \"r\") of existing good file returns error");
+
+ err = f.Read (bufferIn, sizeof(bufferIn), tsSec, tsUsec, inclLen, origLen, readLen);
+ NS_TEST_ASSERT_MSG_EQ (err, false, "Read() of known good packet returns error");
+ NS_TEST_ASSERT_MSG_EQ (tsSec, 1234, "Incorrectly read seconds timestamp from known good packet");
+ NS_TEST_ASSERT_MSG_EQ (tsUsec, 5678, "Incorrectly read microseconds timestamp from known good packet");
+ NS_TEST_ASSERT_MSG_EQ (inclLen, 43, "Incorrectly read included length from known good packet");
+ NS_TEST_ASSERT_MSG_EQ (origLen, 128, "Incorrectly read original length from known good packet");
+ NS_TEST_ASSERT_MSG_EQ (readLen, 43, "Incorrectly constructed actual read length from known good packet given buffer size");
+
+ //
+ // Did the data come back correctly (unchanged / unswapped)?
+ //
+ for (uint32_t i = 0; i < 43; ++i)
+ {
+ NS_TEST_ASSERT_MSG_EQ (bufferIn[i], bufferOut[i], "Incorrect packet data read from known good packet");
+ }
+
+ f.Close ();
+
+ return false;
+}
+
+// ===========================================================================
+// Test case to make sure that the Pcap File Object can read out the contents
+// of a known good pcap file.
+// ===========================================================================
+class ReadFileTestCase : public TestCase
+{
+public:
+ ReadFileTestCase ();
+ virtual ~ReadFileTestCase ();
+
+private:
+ virtual void DoSetup (void);
+ virtual bool DoRun (void);
+ virtual void DoTeardown (void);
+
+ std::string m_testFilename;
+};
+
+ReadFileTestCase::ReadFileTestCase ()
+ : TestCase ("Check to see that PcapFile can read out a known good pcap file")
+{
+}
+
+ReadFileTestCase::~ReadFileTestCase ()
+{
+}
+
+void
+ReadFileTestCase::DoSetup (void)
+{
+}
+
+void
+ReadFileTestCase::DoTeardown (void)
+{
+}
+
+const uint32_t N_KNOWN_PACKETS = 6;
+const uint32_t N_PACKET_BYTES = 16;
+
+typedef struct PACKET_ENTRY {
+ uint32_t tsSec;
+ uint32_t tsUsec;
+ uint32_t inclLen;
+ uint32_t origLen;
+ uint16_t data[N_PACKET_BYTES];
+} PacketEntry;
+
+PacketEntry knownPackets[] = {
+ {2, 3696, 46, 46, {0x0001, 0x0800, 0x0604, 0x0001, 0x0000, 0x0000, 0x0003, 0x0a01,
+ 0x0201, 0xffff, 0xffff, 0xffff, 0x0a01, 0x0204, 0x0000, 0x0000}},
+ {2, 3707, 46, 46, {0x0001, 0x0800, 0x0604, 0x0002, 0x0000, 0x0000, 0x0006, 0x0a01,
+ 0x0204, 0x0000, 0x0000, 0x0003, 0x0a01, 0x0201, 0x0000, 0x0000}},
+ {2, 3801, 1070, 1070, {0x4500, 0x041c, 0x0000, 0x0000, 0x3f11, 0x0000, 0x0a01, 0x0101,
+ 0x0a01, 0x0204, 0xc001, 0x0009, 0x0408, 0x0000, 0x0000, 0x0000}},
+ {2, 3811, 46, 46, {0x0001, 0x0800, 0x0604, 0x0001, 0x0000, 0x0000, 0x0006, 0x0a01,
+ 0x0204, 0xffff, 0xffff, 0xffff, 0x0a01, 0x0201, 0x0000, 0x0000}},
+ {2, 3822, 46, 46, {0x0001, 0x0800, 0x0604, 0x0002, 0x0000, 0x0000, 0x0003, 0x0a01,
+ 0x0201, 0x0000, 0x0000, 0x0006, 0x0a01, 0x0204, 0x0000, 0x0000}},
+ {2, 3915, 1070, 1070, {0x4500, 0x041c, 0x0000, 0x0000, 0x4011, 0x0000, 0x0a01, 0x0204,
+ 0x0a01, 0x0101, 0x0009, 0xc001, 0x0408, 0x0000, 0x0000, 0x0000}}
+};
+
+
+bool
+ReadFileTestCase::DoRun (void)
+{
+ PcapFile f;
+
+ //
+ //
+ std::string filename = NS_TEST_SOURCEDIR + "known.pcap";
+ bool err = f.Open (filename, "r");
+ NS_TEST_ASSERT_MSG_EQ (err, false, "Open (" << filename << ", \"r\") returns error");
+
+ //
+ // We are going to read out the file header and all of the packets to make
+ // sure that we read what we know, a priori, to be there.
+ //
+ // The packet data was gotten using "tcpdump -nn -tt -r known.pcap -x"
+ // and the timestamp and first 32 bytes of the resulting dump were
+ // duplicated in the structure above.
+ //
+ uint8_t data[N_PACKET_BYTES];
+ uint32_t tsSec, tsUsec, inclLen, origLen, readLen;
+
+ PacketEntry *p = knownPackets;
+
+ for (uint32_t i = 0; i < N_KNOWN_PACKETS; ++i, ++p)
+ {
+ err = f.Read (data, sizeof(data), tsSec, tsUsec, inclLen, origLen, readLen);
+ NS_TEST_ASSERT_MSG_EQ (err, false, "Read() of known good pcap file returns error");
+ NS_TEST_ASSERT_MSG_EQ (tsSec, p->tsSec, "Incorrectly read seconds timestamp from known good pcap file");
+ NS_TEST_ASSERT_MSG_EQ (tsUsec, p->tsUsec, "Incorrectly read microseconds timestamp from known good pcap file");
+ NS_TEST_ASSERT_MSG_EQ (inclLen, p->inclLen, "Incorrectly read included length from known good packet");
+ NS_TEST_ASSERT_MSG_EQ (origLen, p->origLen, "Incorrectly read original length from known good packet");
+ NS_TEST_ASSERT_MSG_EQ (readLen, N_PACKET_BYTES, "Incorrect actual read length from known good packet given buffer size");
+ }
+
+ //
+ // The file should now be at EOF since we've read all of the packets.
+ // Another packet read should return an error.
+ //
+ err = f.Read (data, 1, tsSec, tsUsec, inclLen, origLen, readLen);
+ NS_TEST_ASSERT_MSG_EQ (err, true, "Read() of known good pcap file at EOF does not return error");
+
+ f.Close ();
+
+ return false;
+}
+
+class PcapFileTestSuite : public TestSuite
+{
+public:
+ PcapFileTestSuite ();
+};
+
+PcapFileTestSuite::PcapFileTestSuite ()
+ : TestSuite ("pcap-file-object", UNIT)
+{
+ AddTestCase (new WriteModeCreateTestCase);
+ AddTestCase (new ReadModeCreateTestCase);
+ AddTestCase (new AppendModeCreateTestCase);
+ AddTestCase (new FileHeaderTestCase);
+ AddTestCase (new RecordHeaderTestCase);
+ AddTestCase (new ReadFileTestCase);
+}
+
+PcapFileTestSuite pcapFileTestSuite;
diff --git a/src/common/pcap-file.cc b/src/common/pcap-file.cc
new file mode 100644
index 000000000..c652d9808
--- /dev/null
+++ b/src/common/pcap-file.cc
@@ -0,0 +1,519 @@
+/* -*- Mode: C++; c-file-style: "gnu"; indent-tabs-mode:nil; -*- */
+/*
+ * Copyright (c) 2009 University of Washington
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation;
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <string>
+#include <stdint.h>
+#include <cstdio>
+
+#include "pcap-file.h"
+
+//
+// This file is used as part of the ns-3 test framework, so please refrain from
+// adding any ns-3 specific constructs such as Packet to this file.
+//
+namespace ns3 {
+
+const uint32_t MAGIC = 0xa1b2c3d4; /**< Magic number identifying standard pcap file format */
+const uint32_t SWAPPED_MAGIC = 0xd4c3b2a1; /**< Looks this way if byte swapping is required */
+
+const uint32_t NS_MAGIC = 0xa1b23cd4; /**< Magic number identifying nanosec resolution pcap file format */
+const uint32_t NS_SWAPPED_MAGIC = 0xd43cb2a1; /**< Looks this way if byte swapping is required */
+
+const uint16_t VERSION_MAJOR = 2; /**< Major version of supported pcap file format */
+const uint16_t VERSION_MINOR = 4; /**< Minor version of supported pcap file format */
+const int32_t SIGFIGS_DEFAULT = 0; /**< Significant figures for timestamps (libpcap doesn't even bother) */
+
+PcapFile::PcapFile ()
+ : m_filename (""),
+ m_filePtr (0),
+ m_haveFileHeader (false),
+ m_swapMode (false)
+{
+}
+
+PcapFile::~PcapFile ()
+{
+ Close ();
+}
+
+void
+PcapFile::Close (void)
+{
+ if (m_filePtr)
+ {
+ fclose (m_filePtr);
+ }
+ m_filePtr = 0;
+ m_filename = "";
+ m_haveFileHeader = false;
+}
+
+uint32_t
+PcapFile::GetMagic (void)
+{
+ return m_fileHeader.m_magicNumber;
+}
+
+uint16_t
+PcapFile::GetVersionMajor (void)
+{
+ return m_fileHeader.m_versionMajor;
+}
+
+uint16_t
+PcapFile::GetVersionMinor (void)
+{
+ return m_fileHeader.m_versionMinor;
+}
+
+int32_t
+PcapFile::GetTimeZoneOffset (void)
+{
+ return m_fileHeader.m_zone;
+}
+
+uint32_t
+PcapFile::GetSigFigs (void)
+{
+ return m_fileHeader.m_sigFigs;
+}
+
+uint32_t
+PcapFile::GetSnapLen (void)
+{
+ return m_fileHeader.m_snapLen;
+}
+
+uint32_t
+PcapFile::GetDataLinkType (void)
+{
+ return m_fileHeader.m_type;
+}
+
+bool
+PcapFile::GetSwapMode (void)
+{
+ return m_swapMode;
+}
+
+uint8_t
+PcapFile::Swap (uint8_t val)
+{
+ return val;
+}
+
+uint16_t
+PcapFile::Swap (uint16_t val)
+{
+ return ((val >> 8) & 0x00ff) | ((val << 8) & 0xff00);
+}
+
+uint32_t
+PcapFile::Swap (uint32_t val)
+{
+ return ((val >> 24) & 0x000000ff) | ((val >> 8) & 0x0000ff00) | ((val << 8) & 0x00ff0000) | ((val << 24) & 0xff000000);
+}
+
+void
+PcapFile::Swap (PcapFileHeader *from, PcapFileHeader *to)
+{
+ to->m_magicNumber = Swap (from->m_magicNumber);
+ to->m_versionMajor = Swap (from->m_versionMajor);
+ to->m_versionMinor = Swap (from->m_versionMinor);
+ to->m_zone = Swap (uint32_t(from->m_zone));
+ to->m_sigFigs = Swap (from->m_sigFigs);
+ to->m_snapLen = Swap (from->m_snapLen);
+ to->m_type = Swap (from->m_type);
+}
+
+void
+PcapFile::Swap (PcapRecordHeader *from, PcapRecordHeader *to)
+{
+ to->m_tsSec = Swap (from->m_tsSec);
+ to->m_tsUsec = Swap (from->m_tsUsec);
+ to->m_inclLen = Swap (from->m_inclLen);
+ to->m_origLen = Swap (from->m_origLen);
+}
+
+bool
+PcapFile::WriteFileHeader (void)
+{
+ //
+ // If we're initializing the file, we need to write the pcap file header
+ // at the start of the file.
+ //
+ int result = fseek (m_filePtr, 0, SEEK_SET);
+ if (result)
+ {
+ return true;
+ }
+
+ //
+ // We have the ability to write out the pcap file header in a foreign endian
+ // format, so we need a temp place to swap on the way out.
+ //
+ PcapFileHeader header;
+
+ //
+ // the pointer headerOut selects either the swapped or non-swapped version of
+ // the pcap file header.
+ //
+ PcapFileHeader *headerOut = 0;
+
+ if (m_swapMode == false)
+ {
+ headerOut = &m_fileHeader;
+ }
+ else
+ {
+ Swap (&m_fileHeader, &header);
+ headerOut = &header;
+ }
+
+ //
+ // Watch out for memory alignment differences between machines, so write
+ // them all individually.
+ //
+ result = 0;
+
+ result |= (fwrite (&headerOut->m_magicNumber, sizeof(headerOut->m_magicNumber), 1, m_filePtr) != 1);
+ result |= (fwrite (&headerOut->m_versionMajor, sizeof(headerOut->m_versionMajor), 1, m_filePtr) != 1);
+ result |= (fwrite (&headerOut->m_versionMinor, sizeof(headerOut->m_versionMinor), 1, m_filePtr) != 1);
+ result |= (fwrite (&headerOut->m_zone, sizeof(headerOut->m_zone), 1, m_filePtr) != 1);
+ result |= (fwrite (&headerOut->m_sigFigs, sizeof(headerOut->m_sigFigs), 1, m_filePtr) != 1);
+ result |= (fwrite (&headerOut->m_snapLen, sizeof(headerOut->m_snapLen), 1, m_filePtr) != 1);
+ result |= (fwrite (&headerOut->m_type, sizeof(headerOut->m_type), 1, m_filePtr) != 1);
+
+ //
+ // If any of the fwrites above did not succeed in writing the correct
+ // number of objects, result will be nonzero and will indicate an error.
+ //
+ return result != 0;
+}
+
+bool
+PcapFile::ReadAndVerifyFileHeader (void)
+{
+ //
+ // Pcap file header is always at the start of the file
+ //
+ int result = fseek (m_filePtr, 0, SEEK_SET);
+ if (result)
+ {
+ return true;
+ }
+
+ //
+ // Watch out for memory alignment differences between machines, so read
+ // them all individually.
+ //
+ result = 0;
+
+ result |= (fread (&m_fileHeader.m_magicNumber, sizeof(m_fileHeader.m_magicNumber), 1, m_filePtr) != 1);
+ result |= (fread (&m_fileHeader.m_versionMajor, sizeof(m_fileHeader.m_versionMajor), 1, m_filePtr) != 1);
+ result |= (fread (&m_fileHeader.m_versionMinor, sizeof(m_fileHeader.m_versionMinor), 1, m_filePtr) != 1);
+ result |= (fread (&m_fileHeader.m_zone, sizeof(m_fileHeader.m_zone), 1, m_filePtr) != 1);
+ result |= (fread (&m_fileHeader.m_sigFigs, sizeof(m_fileHeader.m_sigFigs), 1, m_filePtr) != 1);
+ result |= (fread (&m_fileHeader.m_snapLen, sizeof(m_fileHeader.m_snapLen), 1, m_filePtr) != 1);
+ result |= (fread (&m_fileHeader.m_type, sizeof(m_fileHeader.m_type), 1, m_filePtr) != 1);
+
+ //
+ // If any of the freads above did not succeed in reading the correct number of
+ // objects, result will be nonzero.
+ //
+ if (result)
+ {
+ return true;
+ }
+
+ //
+ // There are four possible magic numbers that can be there. Normal and byte
+ // swapped versions of the standard magic number, and normal and byte swapped
+ // versions of the magic number indicating nanosecond resolution timestamps.
+ //
+ if (m_fileHeader.m_magicNumber != MAGIC && m_fileHeader.m_magicNumber != SWAPPED_MAGIC &&
+ m_fileHeader.m_magicNumber != NS_MAGIC && m_fileHeader.m_magicNumber != NS_SWAPPED_MAGIC)
+ {
+ return true;
+ }
+
+ //
+ // If the magic number is swapped, then we can assume that everything else we read
+ // is swapped.
+ //
+ m_swapMode = (m_fileHeader.m_magicNumber == SWAPPED_MAGIC || m_fileHeader.m_magicNumber == NS_SWAPPED_MAGIC) ? true : false;
+
+ if (m_swapMode)
+ {
+ Swap (&m_fileHeader, &m_fileHeader);
+ }
+
+ //
+ // We only deal with one version of the pcap file format.
+ //
+ if (m_fileHeader.m_versionMajor != VERSION_MAJOR || m_fileHeader.m_versionMinor != VERSION_MINOR)
+ {
+ return true;
+ }
+
+ //
+ // A quick test of reasonableness for the time zone offset corresponding to
+ // a real place on the planet.
+ //
+ if (m_fileHeader.m_zone < -12 || m_fileHeader.m_zone > 12)
+ {
+ return true;
+ }
+
+ m_haveFileHeader = true;
+ return false;
+}
+
+bool
+PcapFile::Open (std::string const &filename, std::string const &mode)
+{
+ //
+ // If opening a new file, implicit close of any existing file required.
+ //
+ Close ();
+
+ //
+ // All pcap files are binary files, so we just do this automatically.
+ //
+ std::string realMode = mode + "b";
+
+ //
+ // Our modes may be subtly different from the standard fopen semantics since
+ // we need to have a pcap file header to succeed in some cases; so we need
+ // to process different modes according to our own definitions of the modes.
+ //
+ // In the case of read modes, we must read, check and save the pcap file
+ // header as well as just opening the file.
+ //
+ // In the case of write modes, we just pass the call on through to the
+ // library.
+ //
+ // In the case of append modes, we change the semantics to require the
+ // given file to exist. We can't just create a file since we can't make up
+ // a pcap file header on our own.
+ //
+ if (realMode == "rb" || realMode == "r+b")
+ {
+ m_filePtr = fopen (filename.c_str (), realMode.c_str ());
+ if (m_filePtr == 0)
+ {
+ return true;
+ }
+ m_filename = filename;
+ return ReadAndVerifyFileHeader ();
+ }
+ else if (realMode == "wb" || realMode == "w+b")
+ {
+ m_filePtr = fopen (filename.c_str (), realMode.c_str ());
+ if (m_filePtr)
+ {
+ m_filename = filename;
+ return false;
+ }
+ else
+ {
+ return true;
+ }
+ }
+ else if (realMode == "ab" || realMode == "a+b")
+ {
+ //
+ // Remember that semantics for append are different here. We never create
+ // a file since we can't make up a pcap file header. We first have to
+ // open the file in read-only mode and check to see that it exists and
+ // read the file header. If this all works out, then we can go ahead and
+ // open the file in append mode and seek to the end (implicitly).
+ //
+ m_filePtr = fopen (filename.c_str (), "rb");
+ if (m_filePtr == 0)
+ {
+ return true;
+ }
+
+ bool result = ReadAndVerifyFileHeader ();
+ if (result == true)
+ {
+ Close ();
+ return true;
+ }
+
+ //
+ // We have a properly initialized file and have the pcap file header
+ // loaded and checked. This means that the file meets all of the
+ // critera for opening in append mode, but the file is in read-only mode
+ // now -- we must close it and open it in the correct mode.
+ //
+ fclose (m_filePtr);
+ m_filePtr = 0;
+
+ m_filePtr = fopen (filename.c_str (), realMode.c_str ());
+ if (m_filePtr == 0)
+ {
+ return true;
+ }
+
+ m_filename = filename;
+ return false;
+ }
+ else
+ {
+ return true;
+ }
+}
+
+bool
+PcapFile::Init (uint32_t dataLinkType, uint32_t snapLen, int32_t timeZoneCorrection, bool swapMode)
+{
+ //
+ // Initialize the in-memory file header.
+ //
+ m_fileHeader.m_magicNumber = MAGIC;
+ m_fileHeader.m_versionMajor = VERSION_MAJOR;
+ m_fileHeader.m_versionMinor = VERSION_MINOR;
+ m_fileHeader.m_zone = timeZoneCorrection;
+ m_fileHeader.m_sigFigs = 0;
+ m_fileHeader.m_snapLen = snapLen;
+ m_fileHeader.m_type = dataLinkType;
+
+ m_haveFileHeader = true;
+ m_swapMode = swapMode;
+
+ return WriteFileHeader ();
+}
+
+bool
+PcapFile::Write (uint32_t tsSec, uint32_t tsUsec, uint8_t const * const data, uint32_t totalLen)
+{
+ if (m_haveFileHeader == false)
+ {
+ return true;
+ }
+
+ uint32_t inclLen = totalLen > m_fileHeader.m_snapLen ? m_fileHeader.m_snapLen : totalLen;
+
+ PcapRecordHeader header;
+ header.m_tsSec = tsSec;
+ header.m_tsUsec = tsUsec;
+ header.m_inclLen = inclLen;
+ header.m_origLen = totalLen;
+
+ if (m_swapMode)
+ {
+ Swap (&header, &header);
+ }
+
+ //
+ // Watch out for memory alignment differences between machines, so write
+ // them all individually.
+ //
+ uint32_t result = 0;
+
+ result |= (fwrite (&header.m_tsSec, sizeof(header.m_tsSec), 1, m_filePtr) != 1);
+ result |= (fwrite (&header.m_tsUsec, sizeof(header.m_tsUsec), 1, m_filePtr) != 1);
+ result |= (fwrite (&header.m_inclLen, sizeof(header.m_inclLen), 1, m_filePtr) != 1);
+ result |= (fwrite (&header.m_origLen, sizeof(header.m_origLen), 1, m_filePtr) != 1);
+
+ result |= fwrite (data, 1, inclLen, m_filePtr) != inclLen;
+
+ return result != 0;
+}
+
+bool
+PcapFile::Read (
+ uint8_t * const data,
+ uint32_t maxBytes,
+ uint32_t &tsSec,
+ uint32_t &tsUsec,
+ uint32_t &inclLen,
+ uint32_t &origLen,
+ uint32_t &readLen)
+{
+ if (m_haveFileHeader == false)
+ {
+ return true;
+ }
+
+ PcapRecordHeader header;
+
+ //
+ // Watch out for memory alignment differences between machines, so read
+ // them all individually.
+ //
+ uint32_t result = 0;
+
+ result |= (fread (&header.m_tsSec, sizeof(header.m_tsSec), 1, m_filePtr) != 1);
+ result |= (fread (&header.m_tsUsec, sizeof(header.m_tsUsec), 1, m_filePtr) != 1);
+ result |= (fread (&header.m_inclLen, sizeof(header.m_inclLen), 1, m_filePtr) != 1);
+ result |= (fread (&header.m_origLen, sizeof(header.m_origLen), 1, m_filePtr) != 1);
+
+ //
+ // If any of the freads above did not succeed in reading the correct number of
+ // objects, result will be nonzero.
+ //
+ if (result)
+ {
+ return true;
+ }
+
+ if (m_swapMode)
+ {
+ Swap (&header, &header);
+ }
+
+ tsSec = header.m_tsSec;
+ tsUsec = header.m_tsUsec;
+ inclLen = header.m_inclLen;
+ origLen = header.m_origLen;
+
+ //
+ // We don't always want to force the client to keep a maximum length buffer
+ // around so we allow her to specify a minimum number of bytes to read.
+ // Usually 64 bytes is enough information to print all of the headers, so
+ // it isn't typically necessary to read all thousand bytes of an echo packet,
+ // for example, to figure out what is going on.
+ //
+ readLen = maxBytes < header.m_inclLen ? maxBytes : header.m_inclLen;
+ result = fread (data, 1, readLen, m_filePtr) != readLen;
+ if (result)
+ {
+ return result;
+ }
+
+ //
+ // To keep the file pointer pointed in the right place, however, we always
+ // need to account for the entire packet as stored originally.
+ //
+ if (readLen < header.m_inclLen)
+ {
+ uint64_t pos = ftell (m_filePtr);
+ int result = fseek (m_filePtr, pos + header.m_inclLen - readLen, SEEK_SET);
+ if (result)
+ {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+} //namespace ns3
diff --git a/src/common/pcap-file.h b/src/common/pcap-file.h
new file mode 100644
index 000000000..2e9fe03e7
--- /dev/null
+++ b/src/common/pcap-file.h
@@ -0,0 +1,194 @@
+/* -*- Mode: C++; c-file-style: "gnu"; indent-tabs-mode:nil; -*- */
+/*
+ * Copyright (c) 2009 University of Washington
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation;
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef PCAP_FILE_H
+#define PCAP_FILE_H
+
+#include <string>
+#include <stdint.h>
+#include <stdio.h>
+
+namespace ns3 {
+
+/*
+ * A class representing a pcap file. This allows easy creation, writing and
+ * reading of files composed of stored packets; which may be viewed using
+ * standard tools.
+ */
+
+class PcapFile
+{
+public:
+ static const int32_t ZONE_DEFAULT = 0; /**< Time zone offset for current location */
+ static const uint32_t SNAPLEN_DEFAULT = 65535; /**< Default value for maximum octets to save per packet */
+
+public:
+ PcapFile ();
+ ~PcapFile ();
+
+ /**
+ * Create a new pcap file or open an existing pcap file. Semantics are
+ * similar to the C standard library function \c fopen, but differ in that
+ * positions in the file are based on packets not characters. For example
+ * if the file is opened for reading, the file position indicator (seek
+ * position) points to the beginning of the first packet in the file, not
+ * zero (which would point to the start of the pcap header).
+ *
+ * Possible modes are:
+ *
+ * \verbatim
+ * "r": Open a file for reading. The file must exist. The pcap header
+ * is assumed to exist in the file and will be read and checked.
+ * The file seek position indicator is set to point to the first
+ * packet on exit.
+ *
+ * "w": Create an empty file for writing. If a file with the same name
+ * already exists its content is erased and the file is treated as a
+ * new empty pcap file. The file is assumed not to have a pcap
+ * header and the caller is responsible for calling Init before saving
+ * any packet data. The file seek position indicator is set to point
+ * to the beginning of the file on exit since there will be no pcap
+ * header.
+ *
+ * "a": Append to an existing file. This mode allows for adding packet data
+ * to the end of an existing pcap file. The file must exist and have a
+ * valid pcap header written (N.B. this is different from standard fopen
+ * semantics). The file seek position indicator is set to point
+ * to the end of the file on exit.
+ *
+ * "r+": Open a file for update -- both reading and writing. The file must
+ * exist. The pcap header is assumed to have been written to the
+ * file and will be read and checked. The file seek position indicator
+ * is set to point to the first packet on exit.
+ *
+ * "w+": Create an empty file for both reading and writing. If a file with
+ * the same name already exists, its content is erased and the file is
+ * treated as a new empty pcap file. Since this new file will not have
+ * a pcap header, the caller is responsible for calling Init before
+ * saving any packet data. On exit, the file seek position indicator is
+ * set to point to the beginning of the file.
+ *
+ * "a+" Open a file for reading and appending. The file must exist and have a
+ * valid pcap header written (N.B. this is different from standard fopen
+ * semantics). The file seek position indicator is set to point
+ * to the end of the file on exit. Existing content is preserved.
+ * \endverbatim
+ *
+ * Since a pcap file is always a binary file, the file type is automatically
+ * selected as a binary file. For example, providing a mode string "a+"
+ * results in the underlying OS file being opened in "a+b" mode.
+ *
+ * \param filename String containing the name of the file.
+ *
+ * \param mode String containing the access mode for the file.
+ *
+ * \returns Error indication that should be interpreted as, "did an error
+ * happen"? That is, the method returns false if the open succeeds, true
+ * otherwise. The errno variable will be set by the OS to to provide a
+ * more descriptive failure indication.
+ */
+ bool Open (std::string const &filename, std::string const &mode);
+
+ /**
+ * Close the pcap file associated with this object, if one is open.
+ * NOTE(review): flushing and error behavior are defined in pcap-file.cc
+ * and are not visible from this header -- confirm there.
+ */
+ void Close (void);
+
+ /**
+ * Initialize the pcap file associated with this object. This file must have
+ * been previously opened with write permissions.
+ *
+ * \param dataLinkType A data link type as defined in the pcap library. If
+ * you want to make resulting pcap files visible in existing tools, the
+ * data link type must match existing definitions, such as PCAP_ETHERNET,
+ * PCAP_PPP, PCAP_80211, etc. If you are storing different kinds of packet
+ * data, such as naked TCP headers, you are at liberty to locally define your
+ * own data link types. According to the pcap-linktype man page, "well-known"
+ * pcap linktypes range from 0 to 177. If you use a large random number for
+ * your type, chances are small for a collision.
+ *
+ * \param snapLen An optional maximum size for packets written to the file.
+ * Defaults to 65535. If packets exceed this length they are truncated.
+ *
+ * \param timeZoneCorrection An integer describing the offset of your local
+ * time zone from UTC/GMT. For example, Pacific Standard Time in the US is
+ * GMT-8, so one would enter -8 for that correction. Defaults to 0 (UTC).
+ *
+ * \param swapMode Selects whether data is byte-swapped relative to the host
+ * byte order. NOTE(review): this parameter was undocumented in the
+ * original; its exact semantics live in pcap-file.cc -- confirm there.
+ *
+ * \returns false if the open succeeds, true otherwise.
+ *
+ * \warning Calling this method on an existing file will result in the loss
+ * any existing data.
+ */
+ bool Init (uint32_t dataLinkType,
+ uint32_t snapLen = SNAPLEN_DEFAULT,
+ int32_t timeZoneCorrection = ZONE_DEFAULT,
+ bool swapMode = false);
+
+ /**
+ * Write a single packet record (record header plus packet bytes) to the
+ * pcap file.
+ *
+ * \param tsSec Seconds part of the packet timestamp.
+ * \param tsUsec Microseconds part of the packet timestamp.
+ * \param data Pointer to the packet bytes to store.
+ * \param totalLen Length of the original packet in bytes.
+ *
+ * \returns false on success, true on error (the error convention used
+ * throughout this class -- see Open and Init).
+ */
+ bool Write (uint32_t tsSec, uint32_t tsUsec, uint8_t const * const data, uint32_t totalLen);
+
+ /**
+ * Read the next packet record from the pcap file.
+ *
+ * At most \p maxBytes bytes of packet data are copied into \p data; if the
+ * stored record is longer, the remainder is skipped so the file stays
+ * positioned at the start of the following record. \p readLen reports how
+ * many bytes were actually copied, which may be less than \p inclLen.
+ *
+ * \param data Buffer to receive the packet bytes (at least \p maxBytes).
+ * \param maxBytes Maximum number of packet bytes to copy into \p data.
+ * \param tsSec Set to the seconds part of the record timestamp.
+ * \param tsUsec Set to the microseconds part of the record timestamp.
+ * \param inclLen Set to the number of packet bytes stored in the file.
+ * \param origLen Set to the original (untruncated) packet length.
+ * \param readLen Set to the number of bytes copied into \p data.
+ *
+ * \returns false on success, true on error.
+ */
+ bool Read (uint8_t * const data,
+ uint32_t maxBytes,
+ uint32_t &tsSec,
+ uint32_t &tsUsec,
+ uint32_t &inclLen,
+ uint32_t &origLen,
+ uint32_t &readLen);
+
+ /** \returns true if the file's byte order is swapped relative to the host. */
+ bool GetSwapMode (void);
+
+ /** \returns the magic number from the pcap file header. */
+ uint32_t GetMagic (void);
+ /** \returns the major version number from the pcap file header. */
+ uint16_t GetVersionMajor (void);
+ /** \returns the minor version number from the pcap file header. */
+ uint16_t GetVersionMinor (void);
+ /** \returns the time zone correction from the pcap file header. */
+ int32_t GetTimeZoneOffset (void);
+ /** \returns the (typically unused) significant-figures field of the header. */
+ uint32_t GetSigFigs (void);
+ /** \returns the maximum stored packet length from the pcap file header. */
+ uint32_t GetSnapLen (void);
+ /** \returns the data link type from the pcap file header. */
+ uint32_t GetDataLinkType (void);
+
+private:
+ /** On-disk global pcap file header, read/written field by field. */
+ typedef struct {
+ uint32_t m_magicNumber; /**< Magic number identifying this as a pcap file */
+ uint16_t m_versionMajor; /**< Major version identifying the version of pcap used in this file */
+ uint16_t m_versionMinor; /**< Minor version identifying the version of pcap used in this file */
+ int32_t m_zone; /**< Time zone correction to be applied to timestamps of packets */
+ uint32_t m_sigFigs; /**< Unused by pretty much everybody */
+ uint32_t m_snapLen; /**< Maximum length of packet data stored in records */
+ uint32_t m_type; /**< Data link type of packet data */
+ } PcapFileHeader;
+
+ /** On-disk per-packet record header that precedes each packet's bytes. */
+ typedef struct {
+ uint32_t m_tsSec; /**< seconds part of timestamp */
+ uint32_t m_tsUsec; /**< microseconds part of timestamp (nsecs for PCAP_NSEC_MAGIC) */
+ uint32_t m_inclLen; /**< number of octets of packet saved in file */
+ uint32_t m_origLen; /**< actual length of original packet */
+ } PcapRecordHeader;
+
+ // Byte-order conversion helpers, applied when m_swapMode is set (the
+ // struct overloads are called in-place from Read; see pcap-file.cc).
+ uint8_t Swap (uint8_t val);
+ uint16_t Swap (uint16_t val);
+ uint32_t Swap (uint32_t val);
+ void Swap (PcapFileHeader *from, PcapFileHeader *to);
+ void Swap (PcapRecordHeader *from, PcapRecordHeader *to);
+
+ // Serialize / deserialize-and-validate the global file header.
+ // NOTE(review): return-value convention presumed to match the rest of the
+ // class (false = success) -- confirm in pcap-file.cc.
+ bool WriteFileHeader (void);
+ bool ReadAndVerifyFileHeader (void);
+
+ std::string m_filename; /**< name the file was opened with */
+ FILE *m_filePtr; /**< underlying C stdio stream, 0 when closed */
+ PcapFileHeader m_fileHeader; /**< in-memory copy of the global file header */
+ bool m_haveFileHeader; /**< true once a valid file header has been read/written */
+ bool m_swapMode; /**< true if file byte order differs from host byte order */
+};
+
+}//namespace ns3
+
+#endif /* PCAP_FILE_H */
diff --git a/src/common/wscript b/src/common/wscript
index 2ca057376..077d139d4 100644
--- a/src/common/wscript
+++ b/src/common/wscript
@@ -18,6 +18,8 @@ def build(bld):
'tag-buffer.cc',
'packet-tag-list.cc',
'ascii-writer.cc',
+ 'pcap-file.cc',
+ 'pcap-file-test-suite.cc',
]
headers = bld.new_task_gen('ns3header')
@@ -38,4 +40,5 @@ def build(bld):
'packet-tag-list.h',
'ascii-writer.h',
'sgi-hashmap.h',
+ 'pcap-file.h',
]
diff --git a/src/core/names-test-suite.cc b/src/core/names-test-suite.cc
new file mode 100644
index 000000000..626e9c605
--- /dev/null
+++ b/src/core/names-test-suite.cc
@@ -0,0 +1,975 @@
+/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation;
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "test.h"
+#include "names.h"
+
+using namespace ns3;
+
+// ===========================================================================
+// Cook up a couple of simple object class that we can use in the object
+// naming tests. They do nothing but be of the right type.
+// ===========================================================================
+class TestObject : public Object
+{
+public:
+  /**
+   * Register this type with the ns-3 TypeId system so the test harness can
+   * create instances through the object factory.
+   *
+   * \returns the TypeId associated with TestObject.
+   */
+  static TypeId GetTypeId (void)
+  {
+    static TypeId tid = TypeId ("TestObject")
+      .SetParent (Object::GetTypeId ())
+      .HideFromDocumentation ()
+      // AddConstructor requires the concrete type as a template argument;
+      // the angle-bracketed argument was lost (stripped as markup) in the
+      // original text and is restored here.
+      .AddConstructor<TestObject> ();
+    return tid;
+  }
+  TestObject () {}                 // stateless stub; nothing to initialize
+  virtual void Dispose (void) {}   // no resources to release
+};
+
+class AlternateTestObject : public Object
+{
+public:
+  /**
+   * Register this type with the ns-3 TypeId system so the test harness can
+   * create instances through the object factory.
+   *
+   * \returns the TypeId associated with AlternateTestObject.
+   */
+  static TypeId GetTypeId (void)
+  {
+    static TypeId tid = TypeId ("AlternateTestObject")
+      .SetParent (Object::GetTypeId ())
+      .HideFromDocumentation ()
+      // AddConstructor requires the concrete type as a template argument;
+      // the angle-bracketed argument was lost (stripped as markup) in the
+      // original text and is restored here.
+      .AddConstructor<AlternateTestObject> ();
+    return tid;
+  }
+  AlternateTestObject () {}        // stateless stub; nothing to initialize
+  virtual void Dispose (void) {}   // no resources to release
+};
+
+// ===========================================================================
+// Test case to make sure that the Object Name Service can do its most basic
+// job and add associations between Objects using the lowest level add
+// function, which is:
+//
+// Add (Ptr