add webpages for the front page and for nfs-load file generation and use
authorRonnie Sahlberg <ronniesahlberg@gmail.com>
Thu, 7 Aug 2008 00:44:50 +0000 (10:44 +1000)
committerRonnie Sahlberg <ronniesahlberg@gmail.com>
Thu, 7 Aug 2008 00:44:50 +0000 (10:44 +1000)
Signed-off-by: Ronnie Sahlberg <ronniesahlberg@gmail.com>
web/bar1.jpg [new file with mode: 0644]
web/dbenchlogo.png [new file with mode: 0644]
web/footer.html [new file with mode: 0644]
web/header.html [new file with mode: 0644]
web/index.html [new file with mode: 0644]
web/nfs-loadfiles.html [new file with mode: 0644]
web/nfs.cap [new file with mode: 0644]
web/nfsloadfile.sh [new file with mode: 0755]

diff --git a/web/bar1.jpg b/web/bar1.jpg
new file mode 100644 (file)
index 0000000..7c6acf3
Binary files /dev/null and b/web/bar1.jpg differ
diff --git a/web/dbenchlogo.png b/web/dbenchlogo.png
new file mode 100644 (file)
index 0000000..46d95c4
Binary files /dev/null and b/web/dbenchlogo.png differ
diff --git a/web/footer.html b/web/footer.html
new file mode 100644 (file)
index 0000000..a66b5c9
--- /dev/null
@@ -0,0 +1,39 @@
+</td>
+</tr>
+
+  <TR ALIGN="center">
+    <TD><BR><a name="search"></a><img src="bar1.jpg" WIDTH="493" HEIGHT="26" BORDER="0" alt="=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=">
+
+<!-- SiteSearch Google -->
+<form method="get" action="http://www.google.com/custom">
+<table border="0">
+<tr><td nowrap="nowrap" valign="top" align="left" height="32">
+<a href="http://www.google.com/"><img src="http://www.google.com/logos/Logo_25wht.gif" border="0" alt="Google" /></a>
+</td><td nowrap="nowrap">
+<input type="hidden" name="domains" value="samba.org" />
+<input type="text" name="q" size="31" maxlength="255" value="DBENCH " />
+<input type="submit" name="sa" value="Search" />
+</td></tr><tr><td>&nbsp;</td>
+<td nowrap="nowrap">
+<table><tr><td>
+<input type="radio" name="sitesearch" value="" />
+<font size="-1" color="#000000">Search WWW</font>
+</td><td>
+<input type="radio" name="sitesearch" value="samba.org" checked="checked" />
+<font size="-1" color="#000000">Search samba.org</font>
+</td></tr></table>
+<input type="hidden" name="client" value="pub-1444957896811922" />
+<input type="hidden" name="forid" value="1" />
+<input type="hidden" name="ie" value="ISO-8859-1" />
+<input type="hidden" name="oe" value="ISO-8859-1" />
+<input type="hidden" name="cof"
+       value="GALT:#008000;GL:1;DIV:#336699;VLC:663399;AH:center;BGC:FFFFFF;LBGC:FFFFFF;ALC:0000FF;LC:0000FF;T:000000;GFNT:0000FF;GIMP:0000FF;LH:60;LW:470;L:http://samba.org/samba/images/samba_banner.gif;S:http://samba.org/;FORID:1;"
+       />
+<input type="hidden" name="hl" value="en" />
+</td></tr></table>
+</form>
+<!-- SiteSearch Google -->
+
+  </TD>
+  </TR>
+</TABLE>
diff --git a/web/header.html b/web/header.html
new file mode 100644 (file)
index 0000000..942c80f
--- /dev/null
@@ -0,0 +1,38 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2//EN">
+<HTML>
+<HEAD>
+<TITLE><!--#echo var="TITLE" --></TITLE>
+<meta http-equiv="Content-Type" content="text/html;charset=utf-8" >
+</HEAD>
+
+<BODY BGCOLOR="#ffffff" TEXT="#000000" VLINK="#292555" LINK="#292555"
+      ALINK="#cc0033">
+<TABLE BORDER=0 WIDTH="75%" ALIGN="CENTER">
+  <tr VALIGN="middle">
+    <td ALIGN="left">
+       <ul>
+      <li><small><a href=".">home</a></small>
+       </ul>
+    </td>
+    <td align="center">
+      <a href="."><img src="dbenchlogo.png" border="0" alt="DBENCH"></a>
+    </td>
+    <td align="left">
+      <ul>
+      <li><small><a href="nfs-loadfiles.html">download</a></small>
+      </ul>
+    </td>
+  </tr>
+
+  <TR ALIGN="center">
+    <TD COLSPAN="3">
+    <img src="bar1.jpg" WIDTH="493" HEIGHT="26"
+    BORDER="0"
+    alt="=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=">
+    </TD>
+  </TR>
+</TABLE>
+
+<TABLE BORDER=0 WIDTH="60%" ALIGN="CENTER">
+  <tr VALIGN="middle">
+    <td ALIGN="left">
diff --git a/web/index.html b/web/index.html
new file mode 100644 (file)
index 0000000..4d82204
--- /dev/null
@@ -0,0 +1,61 @@
+<!--#set var="TITLE" value="DBENCH" -->
+<!--#include virtual="header.html" -->
+
+<H2 align="center">Welcome to the DBENCH web pages</H2>
+
+DBENCH is a tool to generate I/O workloads to either a filesystem or to a
+networked NFS server. DBENCH can be used to stress a filesystem or a server
+to see at which workload it becomes saturated and can also be used for prediction
+analysis to determine "How many concurrent clients/applications performing
+this workload can my server handle before response starts to lag?"
+
+<p>DBENCH provides benchmarking and client emulation similar to that
+implemented in SMBTORTURE using the BENCH-NBENCH test for CIFS, but DBENCH
+can play these loadfiles onto a local filesystem instead of to a CIFS server.
+Using a different type of loadfiles DBENCH can also generate and measure
+latency for NFS. </p>
+
+
+<p>Features include:
+<ul>
+<li>Reading SMBTORTURE BENCH-NBENCH loadfiles and emulating this workload
+as posix calls to a local filesystem
+<li>NFS style loadfiles which allows DBENCH to mimic the i/o pattern of a real application doing real i/o to a real server.
+</ul>
+
+<h2>Loadfiles</h2>
+
+<p>
+At the heart of DBENCH is the concept of a "loadfile". A loadfile is
+essentially a sequence of operations to be performed one statement at a time.
+This could be operations such as "Open file XYZ", "Read 5 bytes from offset ABC", "Close the file", etc etc.
+</p>
+
+<p>
+By carefully crafting a loadfile it is possible to describe an I/O pattern
+that almost exactly matches what a particular application performs. While 
+cumbersome to produce such a loadfile it does allow you to describe exactly
+how/what an application performs and "replay" this sequence of operations
+any time you want.
+</p>
+
+<p>
+DBENCH loadfiles also contain a timestamp for each operation so that while
+DBENCH is emulating the workload it also tries to keep the same rate of 
+operations as the original application that we created the loadfile from.
+This is very useful since this does allow to perform accurate scalability
+predictions based on the exact application we are interested in and not an
+artificial benchmark which may or may not be relevant to our particular
+applications workload pattern.
+</p>
+
+
+<hr>
+<h2>Developers</h2>
+<ul>
+<li><a href="http://samba.org/~tridge/">Andrew Tridgell</a><br>
+<li>Ronnie Sahlberg<br>
+<li>and a lot of others ...
+</ul>
+
+<!--#include virtual="footer.html" -->
diff --git a/web/nfs-loadfiles.html b/web/nfs-loadfiles.html
new file mode 100644 (file)
index 0000000..9893f97
--- /dev/null
@@ -0,0 +1,264 @@
+<!--#set var="TITLE" value="NFS loadfiles" -->
+<!--#include virtual="header.html" -->
+
+<h1>NFS Loadfiles</h1>
+
+<p>
+This will illustrate how to create and run an NFS loadfile using DBENCH.
+This example assumes that you start the session with a completely empty
+share and add all the files and data as part of the actual process of running
+the application/job. It is possible to create loadfiles that use
+pregenerated/preexisting data as well but that is a much more complex 
+scenario.
+</p>
+
+<h2>Network trace</h2>
+
+<p>
+First we need to take a network trace to capture the sequence of operations
+that the NFS client generates.
+</p>
+
+<p>
+We do this by using the tshark tool from the <a href="http://www.wireshark.org">wireshark</a> package.
+We will, as of this writing, later need the very latest version of wireshark
+so unless you already use the development version of wireshark, download the
+source and compile up a brand new version...
+</p>
+
+<p>
+Once you are ready, start a network trace on the client and capture all
+the traffic to/from the server. Note, you MUST start the trace before
+the client even mounts the share since we must have the actual
+MOUNT operations present in the network trace. It is also important that we 
+filter the trace to only contain the traffic to/from the server in order
+to keep the trace as small as possible.
+</p>
+
+<pre>
+tshark -i eth0 -s 0 -w <a href="nfs.cap">nfs.cap</a> host 10.0.0.21
+</pre>
+
+<p>
+This example network trace was taken when I mapped a share and did 
+some file operations.
+
+<h2>Processing step 1</h2>
+
+<p>
+Once we have a network trace we need to do some initial processing on the trace
+to create the start of what will become our loadfile.
+Download, edit and run this script that converts a trace into a loadfile:
+</p>
+
+<pre>
+<a href="nfsloadfile.sh">nfsloadfile.sh</a> <a href="nfs.cap">nfs.cap</a> > nfs.loadfile
+</pre>
+
+<p>
+This should generate an initial loadfile that would look something like this :
+</p>
+
+<pre>
+XXX unknown packet 14   0.001604   10.1.1.101 -> 10.0.0.21    NFS V3 NULL Call  frame.time_relative == 0.001604000  nfs.procedure_v3 == 0  rpc.msgtyp == 0  rpc.xid == 0x2318b639
+XXX unknown packet 16   0.001998    10.0.0.21 -> 10.1.1.101   NFS V3 NULL Reply (Call In 14)  frame.time_relative == 0.001998000  nfs.procedure_v3 == 0  rpc.msgtyp == 1  rpc.xid == 0x2318b639
+XXX unknown packet 54   0.012970   10.1.1.101 -> 10.0.0.21    NFS V3 NULL Call  frame.time_relative == 0.012970000  nfs.procedure_v3 == 0  rpc.msgtyp == 0  rpc.xid == 0x3645ff3b
+XXX unknown packet 56   0.013270    10.0.0.21 -> 10.1.1.101   NFS V3 NULL Reply (Call In 54)  frame.time_relative == 0.013270000  nfs.procedure_v3 == 0  rpc.msgtyp == 1  rpc.xid == 0x3645ff3b
+0.013381000 FSINFO3 0x00000000
+0.013903000 GETATTR3 "10.0.0.21:/gpfs/nfsshare" 0x00000000
+0.014291000 FSINFO3 0x00000000
+0.014720000 GETATTR3 "10.0.0.21:/gpfs/nfsshare" 0x00000000
+1.027163000 ACCESS3 "10.0.0.21:/gpfs/nfsshare" 0 0 0x00000000
+5.726435000 ACCESS3 "10.0.0.21:/gpfs/nfsshare" 0 0 0x00000000
+6.799179000 GETATTR3 "10.0.0.21:/gpfs/nfsshare" 0x00000000
+6.799669000 READDIRPLUS3 "10.0.0.21:/gpfs/nfsshare" 0x00000000
+8.347222000 GETATTR3 "10.0.0.21:/gpfs/nfsshare" 0x00000000
+35.087310000 LOOKUP3 "10.0.0.21:/gpfs/nfsshare/small-file" 0x00000002
+35.087938000 CREATE3 "10.0.0.21:/gpfs/nfsshare/small-file" 0 0x00000000
+35.102625000 WRITE3 "10.0.0.21:/gpfs/nfsshare/small-file" 0 10240 2 0x00000000
+35.104245000 GETATTR3 "10.0.0.21:/gpfs/nfsshare/small-file" 0x00000000
+41.827776000 GETATTR3 "10.0.0.21:/gpfs/nfsshare" 0x00000000
+41.828343000 READDIRPLUS3 "10.0.0.21:/gpfs/nfsshare" 0x00000000
+41.828880000 LOOKUP3 "10.0.0.21:/gpfs/nfsshare/small-file" 0x00000000
+45.548004000 GETATTR3 "10.0.0.21:/gpfs/nfsshare" 0x00000000
+49.805150000 GETATTR3 "10.0.0.21:/gpfs/nfsshare" 0x00000000
+49.806402000 GETATTR3 "10.0.0.21:/gpfs/nfsshare/small-file" 0x00000000
+52.777193000 GETATTR3 "10.0.0.21:/gpfs/nfsshare/small-file" 0x00000000
+52.778562000 ACCESS3 "10.0.0.21:/gpfs/nfsshare/small-file" 0 0 0x00000000
+52.779051000 READ3 "10.0.0.21:/gpfs/nfsshare/small-file" 0 10240 0x00000000
+</pre>
+
+<h2>Processing step 2</h2>
+
+<p>
+Some lines are marked with XXX and this means that the generator does not yet
+know how to implement this call into a loadfile. When this happens there are
+a few options, we can either refine the generator and teach it about this
+new command (patches welcome) or we can try to manually adjust it in the
+loadfile by substituting it with a similar operation.
+In this example these are just NULL procedures which we don't care about so we just delete them.
+</p>
+
+<p> Now we need to process the file and re-target the directory where we want
+to replay this loadfile. In our example we used the server 10.0.0.21 and the nfs export /gpfs/nfsshare. To do this conversion we now replace the server/share
+string "10.0.0.21:/gpfs/nfsshare" with the string "/clients/client1".
+</p>
+
+<p>
+"/clients/client1" is in DBENCH a "magic" path we use. This special path is
+inside DBENCH substituted into "/clients/client$CLIENTID" which allows us to
+run multiple clients in parallell, with each client process using its own
+dedicated directory to do I/O to. That is a pretty neat feature in DBENCH.
+</p>
+
+<p>
+We should then end up with something that would look something like this:
+</p>
+
+<pre>
+0.013381000 FSINFO3 0x00000000
+0.013903000 GETATTR3 "/clients/client1" 0x00000000
+0.014291000 FSINFO3 0x00000000
+0.014720000 GETATTR3 "/clients/client1" 0x00000000
+1.027163000 ACCESS3 "/clients/client1" 0 0 0x00000000
+5.726435000 ACCESS3 "/clients/client1" 0 0 0x00000000
+6.799179000 GETATTR3 "/clients/client1" 0x00000000
+6.799669000 READDIRPLUS3 "/clients/client1" 0x00000000
+8.347222000 GETATTR3 "/clients/client1" 0x00000000
+35.087310000 LOOKUP3 "/clients/client1/small-file" 0x00000002
+35.087938000 CREATE3 "/clients/client1/small-file" 0 0x00000000
+35.102625000 WRITE3 "/clients/client1/small-file" 0 10240 2 0x00000000
+35.104245000 GETATTR3 "/clients/client1/small-file" 0x00000000
+41.827776000 GETATTR3 "/clients/client1" 0x00000000
+41.828343000 READDIRPLUS3 "/clients/client1" 0x00000000
+41.828880000 LOOKUP3 "/clients/client1/small-file" 0x00000000
+45.548004000 GETATTR3 "/clients/client1" 0x00000000
+49.805150000 GETATTR3 "/clients/client1" 0x00000000
+49.806402000 GETATTR3 "/clients/client1/small-file" 0x00000000
+52.777193000 GETATTR3 "/clients/client1/small-file" 0x00000000
+52.778562000 ACCESS3 "/clients/client1/small-file" 0 0 0x00000000
+52.779051000 READ3 "/clients/client1/small-file" 0 10240 0x00000000
+</pre>
+
+
+
+<h2>Processing step 3</h2>
+
+<p>
+Finally we need a little header that will cleanup and delete the client
+directory everytime the loadfile wraps.
+This is done by adding a header such as this to the file :
+</p>
+
+<pre>
+# DELTREE if we want to delete the client directory everytime we restart 
+# the script. Remove these two lines if the script is only "read-only"
+# reading from pre-existing files in the /clients/clientX/ tree
+0.000 Deltree "/clients/client1" 0x00000000
+#
+# Make sure these directories exist. We specify * as the status code
+# since these directories might already exist and in which case
+# we dont care that the command returned an error.
+0.000 MKDIR3 "/clients" *
+0.000 MKDIR3 "/clients/client1" *
+</pre>
+
+<p>
+I also change all timestamps in this example to 0.000 just to make it run a bit faster.
+And we should end up with a loadfile looking like this
+</p>
+
+<pre>
+# DELTREE if we want to delete the client directory everytime we restart 
+# the script. Remove these two lines if the script is only "read-only"
+# reading from pre-existing files in the /clients/clientX/ tree
+0.000 Deltree "/clients/client1" 0x00000000
+#
+# Make sure these directories exist. We specify * as the status code
+# since these directories might already exist and in which case
+# we dont care that the command returned an error.
+0.000 MKDIR3 "/clients" *
+0.000 MKDIR3 "/clients/client1" *
+0.000 FSINFO3 0x00000000
+0.000 GETATTR3 "/clients/client1" 0x00000000
+0.000 FSINFO3 0x00000000
+0.000 GETATTR3 "/clients/client1" 0x00000000
+0.000 ACCESS3 "/clients/client1" 0 0 0x00000000
+0.000 ACCESS3 "/clients/client1" 0 0 0x00000000
+0.000 GETATTR3 "/clients/client1" 0x00000000
+0.000 READDIRPLUS3 "/clients/client1" 0x00000000
+0.000 GETATTR3 "/clients/client1" 0x00000000
+0.000 LOOKUP3 "/clients/client1/small-file" 0x00000002
+0.000 CREATE3 "/clients/client1/small-file" 0 0x00000000
+0.000 WRITE3 "/clients/client1/small-file" 0 10240 2 0x00000000
+0.000 GETATTR3 "/clients/client1/small-file" 0x00000000
+0.000 GETATTR3 "/clients/client1" 0x00000000
+0.000 READDIRPLUS3 "/clients/client1" 0x00000000
+0.000 LOOKUP3 "/clients/client1/small-file" 0x00000000
+0.000 GETATTR3 "/clients/client1" 0x00000000
+0.000 GETATTR3 "/clients/client1" 0x00000000
+0.000 GETATTR3 "/clients/client1/small-file" 0x00000000
+0.000 GETATTR3 "/clients/client1/small-file" 0x00000000
+0.000 ACCESS3 "/clients/client1/small-file" 0 0 0x00000000
+0.000 READ3 "/clients/client1/small-file" 0 10240 0x00000000
+</pre>
+
+<h2>Running the loadfile</h2>
+
+<p>
+Now to run this loadfile for 10 seconds using 3 concurrent clients we can use :
+</p>
+
+<pre>
+./dbench -B nfs --server=10.0.0.21 --export=/gpfs/nfsshare --protocol=tcp -c tmp.loadfile  -t 10 3 2>/dev/null
+</pre>
+
+
+<p>
+Which should result in output similar to :
+</p>
+<pre>
+dbench version 4.00 - Copyright Andrew Tridgell 1999-2004
+
+Running for 10 seconds with load 'tmp.loadfile' and minimum warmup 2 secs
+0 of 3 processes prepared for launch   0 sec
+3 of 3 processes prepared for launch   0 sec
+releasing clients
+   3      2041     3.88 MB/sec  warmup   1 sec  latency 4.695 ms
+   3      6120     3.93 MB/sec  execute   1 sec  latency 8.853 ms
+   3      8192     3.94 MB/sec  execute   2 sec  latency 14.410 ms
+   3     10085     3.84 MB/sec  execute   3 sec  latency 12.082 ms
+   3     12002     3.79 MB/sec  execute   4 sec  latency 4.949 ms
+   3     13097     3.46 MB/sec  execute   5 sec  latency 437.859 ms
+   3     15156     3.53 MB/sec  execute   6 sec  latency 6.671 ms
+   3     17219     3.59 MB/sec  execute   7 sec  latency 5.066 ms
+   3     19223     3.62 MB/sec  execute   8 sec  latency 6.746 ms
+   3     21103     3.62 MB/sec  execute   9 sec  latency 11.987 ms
+   3  cleanup  10 sec
+   0  cleanup  10 sec
+
+ Operation                Count    AvgLat    MaxLat
+ --------------------------------------------------
+ Deltree                   1667     4.652   701.966
+ GETATTR3                 16646     0.460     7.096
+ LOOKUP3                   3329     0.482    12.073
+ CREATE3                   1665     0.671     4.390
+ WRITE3                    1665     1.274    14.404
+ READ3                     1665     0.865     4.789
+ ACCESS3                   4993     0.458     9.187
+ MKDIR3                    3328     0.743   435.178
+ FSINFO3                   3328     0.446     7.927
+ READDIRPLUS3              3329     0.534     3.474
+
+Throughput 3.62071 MB/sec  3 clients  3 procs  max_latency=437.859 ms
+</pre>
+
+<p>
+Dont worry about the low throughput. This loadfile does virtually no
+reads/writes at all. We can now also scale this up to 10 clients  or 20
+clients ... and compare how the server response times are affected.
+</p>
+
+
+<!--#include virtual="footer.html" -->
+
diff --git a/web/nfs.cap b/web/nfs.cap
new file mode 100644 (file)
index 0000000..1ac705e
Binary files /dev/null and b/web/nfs.cap differ
diff --git a/web/nfsloadfile.sh b/web/nfsloadfile.sh
new file mode 100755 (executable)
index 0000000..d3229e2
--- /dev/null
@@ -0,0 +1,207 @@
+#!/bin/sh
+
+TSHARK=./tshark
+ECHO=/bin/echo
+DBDIR=/tmp/nldbd
+
+extract_field() {
+       $ECHO "$1" | sed -e "s/^.*$2 == //" -e "s/ .*$//"
+}
+
+extract_quoted_field() {
+       $ECHO "$1" | sed -e "s/^.*$2 == \"//" -e "s/\" .*$//"
+}
+
+
+do_readdirplus() {
+       if [ $2 != "1" ]; then
+               if [ $4 != "0" ]; then
+                       echo $TIMESTAMP READDIRPLUS3 \"$3\" >$DBDIR/$1
+               fi
+       else
+               CMD=`cat $DBDIR/$1`
+               # we only generate the operation if we saw the request
+               if [ "${CMD}x" != "x" ]; then
+                       echo $CMD $4
+               fi
+       fi
+}
+
+do_write() {
+       if [ $2 != "1" ]; then
+               echo $TIMESTAMP WRITE3 \"$3\" $4 $5 $6 >$DBDIR/$1
+       else
+               CMD=`cat $DBDIR/$1`
+               echo $CMD $7
+       fi
+}
+
+do_read() {
+       if [ $2 != "1" ]; then
+               echo $TIMESTAMP READ3 \"$3\" $4 $5 >$DBDIR/$1
+       else
+               CMD=`cat $DBDIR/$1`
+               echo $CMD $6
+       fi
+}
+
+do_create() {
+       if [ $2 != "1" ]; then
+               echo $TIMESTAMP CREATE3 \"$3\" $4 >$DBDIR/$1
+       else
+               CMD=`cat $DBDIR/$1`
+               echo $CMD $5
+       fi
+}
+
+do_lookup() {
+       if [ $2 != "1" ]; then
+               echo $TIMESTAMP LOOKUP3 \"$3\" >$DBDIR/$1
+       else
+               CMD=`cat $DBDIR/$1`
+               echo $CMD $4
+       fi
+}
+
+do_getattr() {
+       if [ $2 != "1" ]; then
+               echo $TIMESTAMP GETATTR3 \"$3\" >$DBDIR/$1
+       else
+               CMD=`cat $DBDIR/$1`
+               echo $CMD $4
+       fi
+}
+
+do_fsinfo() {
+       if [ $2 != "1" ]; then
+               echo $TIMESTAMP FSINFO3 >$DBDIR/$1
+       else
+               CMD=`cat $DBDIR/$1`
+               echo $CMD $3
+       fi
+}
+
+do_access() {
+       if [ $2 != "1" ]; then
+               echo $TIMESTAMP ACCESS3 \"$3\" 0 0 >$DBDIR/$1
+       else
+               CMD=`cat $DBDIR/$1`
+               echo $CMD $4
+       fi
+}
+
+
+
+
+rm -rf $DBDIR
+mkdir -p $DBDIR
+
+$TSHARK -n -r $1 -R "nfs" \
+       -z "proto,colinfo,rpc.xid,rpc.xid" \
+       -z "proto,colinfo,rpc.msgtyp,rpc.msgtyp" \
+       -z "proto,colinfo,nfs.nfsstat3,nfs.nfsstat3" \
+       -z "proto,colinfo,nfs.name,nfs.name" \
+       -z "proto,colinfo,nfs.full_name,nfs.full_name" \
+       -z "proto,colinfo,nfs.createmode,nfs.createmode" \
+       -z "proto,colinfo,nfs.offset3,nfs.offset3" \
+       -z "proto,colinfo,nfs.count3,nfs.count3" \
+       -z "proto,colinfo,nfs.cookie3,nfs.cookie3" \
+       -z "proto,colinfo,nfs.write.stable,nfs.write.stable" \
+       -z "proto,colinfo,nfs.procedure_v3,nfs.procedure_v3" \
+       -z "proto,colinfo,frame.time_relative,frame.time_relative" \
+| while read PACKET; do
+
+TIMESTAMP=`extract_field "$PACKET" "frame.time_relative"`
+
+#echo
+#echo 
+#echo packet:$PACKET
+
+case "$PACKET" in
+	# READDIRPLUS (NFSv3 procedure 17); request/reply are paired by RPC XID
+	*"nfs.procedure_v3 == 17"*)
+		XID=`extract_field "$PACKET" "rpc.xid"`
+		MSGTYP=`extract_field "$PACKET" "rpc.msgtyp"`
+		STATUS=`extract_field "$PACKET" "nfs.nfsstat3" | awk '{ printf "0x%08x", $1 }'`
+		FULLNAME=`extract_quoted_field "$PACKET" "nfs.full_name"`
+		COOKIE=`extract_field "$PACKET" "nfs.cookie3"`
+		do_readdirplus $XID $MSGTYP "$FULLNAME" $COOKIE $STATUS
+		
+		;;
+       # READ
+       *"nfs.procedure_v3 == 6"*)
+               XID=`extract_field "$PACKET" "rpc.xid"`
+               MSGTYP=`extract_field "$PACKET" "rpc.msgtyp"`
+               STATUS=`extract_field "$PACKET" "nfs.nfsstat3" | awk '{ printf "0x%08x", $1 }'`
+               NAME=`extract_quoted_field "$PACKET" "nfs.name"`
+               FULLNAME=`extract_quoted_field "$PACKET" "nfs.full_name"`
+               OFFSET=`extract_field "$PACKET" "nfs.offset3"`
+               COUNT=`extract_field "$PACKET" "nfs.count3"`
+               do_read $XID $MSGTYP "$FULLNAME" $OFFSET $COUNT $STATUS
+               
+               ;;
+       # WRITE
+       *"nfs.procedure_v3 == 7"*)
+               XID=`extract_field "$PACKET" "rpc.xid"`
+               MSGTYP=`extract_field "$PACKET" "rpc.msgtyp"`
+               STATUS=`extract_field "$PACKET" "nfs.nfsstat3" | awk '{ printf "0x%08x", $1 }'`
+               NAME=`extract_quoted_field "$PACKET" "nfs.name"`
+               FULLNAME=`extract_quoted_field "$PACKET" "nfs.full_name"`
+               OFFSET=`extract_field "$PACKET" "nfs.offset3"`
+               COUNT=`extract_field "$PACKET" "nfs.count3"`
+               STABLE=`extract_field "$PACKET" "nfs.write.stable"`
+               do_write $XID $MSGTYP "$FULLNAME" $OFFSET $COUNT $STABLE $STATUS
+               
+               ;;
+       # CREATE
+       *"nfs.procedure_v3 == 8"*)
+               XID=`extract_field "$PACKET" "rpc.xid"`
+               MSGTYP=`extract_field "$PACKET" "rpc.msgtyp"`
+               STATUS=`extract_field "$PACKET" "nfs.nfsstat3" | awk '{ printf "0x%08x", $1 }'`
+               NAME=`extract_quoted_field "$PACKET" "nfs.name"`
+               FULLNAME=`extract_quoted_field "$PACKET" "nfs.full_name"`
+               MODE=`extract_field "$PACKET" "nfs.createmode"`
+               do_create $XID $MSGTYP "$FULLNAME/$NAME" $MODE $STATUS
+               
+               ;;
+       # LOOKUP
+       *"nfs.procedure_v3 == 3"*)
+               XID=`extract_field "$PACKET" "rpc.xid"`
+               MSGTYP=`extract_field "$PACKET" "rpc.msgtyp"`
+               STATUS=`extract_field "$PACKET" "nfs.nfsstat3" | awk '{ printf "0x%08x", $1 }'`
+               NAME=`extract_quoted_field "$PACKET" "nfs.name"`
+               FULLNAME=`extract_quoted_field "$PACKET" "nfs.full_name"`
+               do_lookup $XID $MSGTYP "$FULLNAME/$NAME" $STATUS
+               
+               ;;
+       # FSINFO
+       *"nfs.procedure_v3 == 19 "*)
+               XID=`extract_field "$PACKET" "rpc.xid"`
+               MSGTYP=`extract_field "$PACKET" "rpc.msgtyp"`
+               STATUS=`extract_field "$PACKET" "nfs.nfsstat3" | awk '{ printf "0x%08x", $1 }'`
+               do_fsinfo $XID $MSGTYP $STATUS
+               ;;
+	# GETATTR (NFSv3 procedure 1); quote the path like the other arms do
+	*"nfs.procedure_v3 == 1 "*)
+		XID=`extract_field "$PACKET" "rpc.xid"`
+		MSGTYP=`extract_field "$PACKET" "rpc.msgtyp"`
+		STATUS=`extract_field "$PACKET" "nfs.nfsstat3" | awk '{ printf "0x%08x", $1 }'`
+		FULLNAME=`extract_quoted_field "$PACKET" "nfs.full_name"`
+		do_getattr $XID $MSGTYP "$FULLNAME" $STATUS
+		;;
+	# ACCESS (NFSv3 procedure 4); quote the path like the other arms do
+	*"nfs.procedure_v3 == 4"*)
+		XID=`extract_field "$PACKET" "rpc.xid"`
+		MSGTYP=`extract_field "$PACKET" "rpc.msgtyp"`
+		STATUS=`extract_field "$PACKET" "nfs.nfsstat3" | awk '{ printf "0x%08x", $1 }'`
+		FULLNAME=`extract_quoted_field "$PACKET" "nfs.full_name"`
+		do_access $XID $MSGTYP "$FULLNAME" $STATUS
+		;;
+       *)
+               echo "XXX unknown packet $PACKET"
+               ;;
+esac
+
+done
+
+rm -rf $DBDIR