Wednesday, December 3, 2014

[Xorp] How to use Xorp in RouteFlow

If you have used RouteFlow before, you will notice that it installs Quagga as the routing software by default. So, how do you switch to XORP in RouteFlow? Here is an example, based on the rftest2 case, of how to do this. I assume the current working directory is RouteFlow's top-level directory.

1. Add new configuration files in rftest/config for rfvmA to rfvmD:

rftest/config/rfvmA/rootfs/usr/etc/xorp.conf
    protocols {
        ospf4 {
            router-id: 1.1.1.1
            area 0.0.0.0 {
                interface eth2 {
                    vif eth2 {
                        address 10.0.0.1 {
                        }
                    }
                }
            }
            area 0.0.0.1 {
                interface eth3 {
                    vif eth3 {
                        address 30.0.0.1 {
                        }
                    }
                }
            }
            area 0.0.0.4 {
                interface eth4 {
                    vif eth4 {
                        address 50.0.0.1 {
                        }
                    }
                }
            }
            export: "redis.connect"
        }
    }
    policy {
        policy-statement "redis.connect" {
            term connect {
                from {
                    protocol: "connected"
                }
            }
        }
    }
    fea {
        unicast-forwarding4 {
        }
    }
    interfaces {
        interface eth1 {
            description: "eth1"
            vif eth1 {
                address 172.31.1.1 {
                    prefix-length: 24
                }
            }
        }
        interface eth2 {
            description: "eth2"
            vif eth2 {
                address 10.0.0.1 {
                    prefix-length: 24
                }
            }
        }
        interface eth3 {
            description: "eth3"
            vif eth3 {
                address 30.0.0.1 {
                    prefix-length: 24
                }
            }
        }
        interface eth4 {
            description: "eth4"
            vif eth4 {
                address 50.0.0.1 {
                    prefix-length: 24
                }
            }
        }
        interface lo {
            vif lo {
                address 1.1.1.1 {
                    prefix-length: 32
                }
            }
        }
    }

rftest/config/rfvmB/rootfs/usr/etc/xorp.conf
    protocols {
        ospf4 {
            router-id: 2.2.2.2
            area 0.0.0.0 {
                interface eth2 {
                    vif eth2 {
                        address 10.0.0.2 {
                        }
                    }
                }
            }
            area 0.0.0.2 {
                interface eth3 {
                    vif eth3 {
                        address 40.0.0.2 {
                        }
                    }
                }
            }
            export: "redis.connect"
        }
    }
    policy {
        policy-statement "redis.connect" {
            term connect {
                from {
                    protocol: "connected"
                }
            }
        }
    }
    fea {
        unicast-forwarding4 {
        }
    }
    interfaces {
        interface eth1 {
            description: "eth1"
            vif eth1 {
                address 172.31.2.1 {
                    prefix-length: 24
                }
            }
        }
        interface eth2 {
            description: "eth2"
            vif eth2 {
                address 10.0.0.2 {
                    prefix-length: 24
                }
            }
        }
        interface eth3 {
            description: "eth3"
            vif eth3 {
                address 40.0.0.2 {
                    prefix-length: 24
                }
            }
        }
        interface lo {
            vif lo {
                address 2.2.2.2 {
                    prefix-length: 32
                }
            }
        }

    }

rftest/config/rfvmC/rootfs/usr/etc/xorp.conf
    protocols {
        ospf4 {
            router-id: 3.3.3.3
            area 0.0.0.1 {
                interface eth3 {
                    vif eth3 {
                        address 30.0.0.3 {
                        }
                    }
                }
            }
            area 0.0.0.3 {
                interface eth2 {
                    vif eth2 {
                        address 20.0.0.3 {
                        }
                    }
                }
            }

            export: "redis.connect"
        }
    }
    policy {
        policy-statement "redis.connect" {
            term connect {
                from {
                    protocol: "connected"
                }
            }
        }
    }
    fea {
        unicast-forwarding4 {
        }
    }
    interfaces {
        interface eth1 {
            description: "eth1"
            vif eth1 {
                address 172.31.3.1 {
                    prefix-length: 24
                }
            }
        }
        interface eth2 {
            description: "eth2"
            vif eth2 {
                address 20.0.0.3 {
                    prefix-length: 24
                }
            }
        }
        interface eth3 {
            description: "eth3"
            vif eth3 {
                address 30.0.0.3 {
                    prefix-length: 24
                }
            }
        }
        interface lo {
            vif lo {
                address 3.3.3.3 {
                    prefix-length: 32
                }
            }
        }
    }

rftest/config/rfvmD/rootfs/usr/etc/xorp.conf
    protocols {
        ospf4 {
            router-id: 4.4.4.4
            area 0.0.0.2 {
                interface eth2 {
                    vif eth2 {
                        address 40.0.0.4 {
                        }
                    }
                }
            }
            area 0.0.0.3 {
                interface eth3 {
                    vif eth3 {
                        address 20.0.0.4 {
                        }
                    }
                }
            }
            area 0.0.0.4 {
                interface eth4 {
                    vif eth4 {
                        address 50.0.0.4 {
                        }
                    }
                }
            }
            export: "redis.connect"
        }
    }
    policy {
        policy-statement "redis.connect" {
            term connect {
                from {
                    protocol: "connected"
                }
            }
        }
    }
    fea {
        unicast-forwarding4 {
        }
    }
    interfaces {
        interface eth1 {
            description: "eth1"
            vif eth1 {
                address 172.31.4.1 {
                    prefix-length: 24
                }
            }
        }
        interface eth2 {
            description: "eth2"
            vif eth2 {
                address 40.0.0.4 {
                    prefix-length: 24
                }
            }
        }
        interface eth3 {
            description: "eth3"
            vif eth3 {
                address 20.0.0.4 {
                    prefix-length: 24
                }
            }
        }
        interface eth4 {
            description: "eth4"
            vif eth4 {
                address 50.0.0.4 {
                    prefix-length: 24
                }
            }
        }
        interface lo {
            vif lo {
                address 4.4.4.4 {
                    prefix-length: 32
                }
            }
        }
    }

2. Modify the "rftest/create" shell script to install XORP and add root to the xorp group:

chroot $LXCDIR/base/rootfs apt-get -y --force-yes install xorp quagga libboost-thread-dev libboost-system-dev libboost-filesystem-dev libboost-program-options-dev rsyslog vlan tcpdump

chroot $LXCDIR/base/rootfs usermod -a -G xorp root
chroot $LXCDIR/rfvmA/rootfs usermod -a -G xorp root
chroot $LXCDIR/rfvmB/rootfs usermod -a -G xorp root
chroot $LXCDIR/rfvmC/rootfs usermod -a -G xorp root
chroot $LXCDIR/rfvmD/rootfs usermod -a -G xorp root
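
A quick sanity check I like to add (purely optional; this loop is my own addition, reusing the $LXCDIR variable from the create script) is to confirm XORP really landed in every container:

for vm in base rfvmA rfvmB rfvmC rfvmD; do
    sudo chroot $LXCDIR/$vm/rootfs which xorp_rtrmgr > /dev/null || echo "xorp_rtrmgr missing in $vm"
done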

3. Modify the "rftest/rftest2" shell script so that each VM starts xorp_rtrmgr instead of Quagga:

SCRIPT_NAME="rftest_xorp"
...
...
echo "#!/bin/sh" > /var/lib/lxc/rfvmA/rootfs/root/run_rfclient.sh
echo "sleep 3" >> /var/lib/lxc/rfvmA/rootfs/root/run_rfclient.sh
if [ "$SCRIPT_NAME" == "rftest_xorp" ]
then
    echo "sudo xorp_rtrmgr" >> /var/lib/lxc/rfvmA/rootfs/root/run_rfclient.sh
else
    echo "/etc/init.d/quagga start" >> /var/lib/lxc/rfvmA/rootfs/root/run_rfclient.sh
fi
echo "/opt/rfclient/rfclient > /var/log/rfclient.log" >> /var/lib/lxc/rfvmA/rootfs/root/run_rfclient.sh

echo "#!/bin/sh" > /var/lib/lxc/rfvmB/rootfs/root/run_rfclient.sh
echo "sleep 3" >> /var/lib/lxc/rfvmB/rootfs/root/run_rfclient.sh
if [ "$SCRIPT_NAME" == "rftest_xorp" ]
then
    echo "sudo xorp_rtrmgr" >> /var/lib/lxc/rfvmB/rootfs/root/run_rfclient.sh
else
    echo "/etc/init.d/quagga start" >> /var/lib/lxc/rfvmB/rootfs/root/run_rfclient.sh
fi
echo "/opt/rfclient/rfclient > /var/log/rfclient.log" >> /var/lib/lxc/rfvmB/rootfs/root/run_rfclient.sh

echo "#!/bin/sh" > /var/lib/lxc/rfvmC/rootfs/root/run_rfclient.sh
echo "sleep 3" >> /var/lib/lxc/rfvmC/rootfs/root/run_rfclient.sh
if [ "$SCRIPT_NAME" == "rftest_xorp" ]
then
    echo "sudo xorp_rtrmgr" >> /var/lib/lxc/rfvmC/rootfs/root/run_rfclient.sh
else
    echo "/etc/init.d/quagga start" >> /var/lib/lxc/rfvmC/rootfs/root/run_rfclient.sh
fi
echo "/opt/rfclient/rfclient > /var/log/rfclient.log" >> /var/lib/lxc/rfvmC/rootfs/root/run_rfclient.sh

echo "#!/bin/sh" > /var/lib/lxc/rfvmD/rootfs/root/run_rfclient.sh
echo "sleep 3" >> /var/lib/lxc/rfvmD/rootfs/root/run_rfclient.sh
if [ "$SCRIPT_NAME" == "rftest_xorp" ]
then
    echo "sudo xorp_rtrmgr" >> /var/lib/lxc/rfvmD/rootfs/root/run_rfclient.sh
else
    echo "/etc/init.d/quagga start" >> /var/lib/lxc/rfvmD/rootfs/root/run_rfclient.sh
fi
echo "/opt/rfclient/rfclient > /var/log/rfclient.log" >> /var/lib/lxc/rfvmD/rootfs/root/run_rfclient.sh
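
The four blocks above differ only in the VM name, so if you prefer, a loop like the following sketch (same paths and variables as above, just refactored) generates all four run_rfclient.sh files:

SCRIPT_NAME="rftest_xorp"
for vm in rfvmA rfvmB rfvmC rfvmD; do
    RUNSH=/var/lib/lxc/$vm/rootfs/root/run_rfclient.sh
    echo "#!/bin/sh" > $RUNSH
    echo "sleep 3" >> $RUNSH
    if [ "$SCRIPT_NAME" = "rftest_xorp" ]
    then
        echo "sudo xorp_rtrmgr" >> $RUNSH
    else
        echo "/etc/init.d/quagga start" >> $RUNSH
    fi
    echo "/opt/rfclient/rfclient > /var/log/rfclient.log" >> $RUNSH
done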

4. Then, you can execute the "create" and "rftest2" scripts.

5. Verify ==> for instance, on rfvmD:

sudo lxc-console -n rfvmD

ubuntu@rfvmD:~$ sudo xorpsh
[sudo] password for ubuntu:
Welcome to XORP on rfvmD
root@rfvmD> show ospf4 neighbor
  Address         Interface             State      ID              Pri  Dead
40.0.0.2         eth2/eth2              Full      2.2.2.2          128    38
20.0.0.3         eth3/eth3              Full      3.3.3.3          128    39
50.0.0.1         eth4/eth4              Full      1.1.1.1          128    39

root@rfvmD> show interfaces
eth2/eth2: Flags:<ENABLED,BROADCAST,MULTICAST> mtu 1500 speed 1 Gbps
        inet 40.0.0.4 subnet 40.0.0.0/24 broadcast 40.0.0.255
        physical index 161
        ether 2:d2:d2:d2:d2:d2
eth3/eth3: Flags:<ENABLED,BROADCAST,MULTICAST> mtu 1500 speed 1 Gbps
        inet 20.0.0.4 subnet 20.0.0.0/24 broadcast 20.0.0.255
        physical index 163
        ether 2:d3:d3:d3:d3:d3
eth4/eth4: Flags:<ENABLED,BROADCAST,MULTICAST> mtu 1500 speed 1 Gbps
        inet 50.0.0.4 subnet 50.0.0.0/24 broadcast 50.0.0.255
        physical index 165
        ether 2:d4:d4:d4:d4:d4
lo/lo: Flags:<ENABLED,LOOPBACK> mtu 16436 speed unknown
        inet 4.4.4.4 subnet 4.4.4.4/32
        physical index 168
root@rfvmD>
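
You can also check that OSPF routes were actually installed. If I remember the xorpsh syntax correctly, the command is:

root@rfvmD> show route table ipv4 unicast ospf

The output should list the remote subnets learned from the other three VMs.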

Wednesday, November 5, 2014

[NETCONF] Try Netopeer ( NETCONF tools )

Netopeer is a set of NETCONF tools built on the libnetconf library. 

It provides a single-level server, a multi-level server, and a CLI.
Basically, I think that if we don't try it, we won't truly understand what NETCONF/YANG looks like and how it works. During compilation and setup I encountered some problems, but I found the following information, which saved me a lot of troubleshooting time.

Here is a URL about how to setup Netopeer server and to use it with cli.
http://seguesoft.com/how-to-set-up-netopeer-server-to-use-with-netconfc
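
After the server is up, a first session with netopeer-cli looks roughly like this (a sketch from memory; the exact command syntax may differ between Netopeer versions):

$ netopeer-cli
netconf> connect --login root localhost
netconf> get-config running
netconf> disconnect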

This URL provides a lot of YANG Modules and examples to refer:
http://www.netconfcentral.org/

Monday, November 3, 2014

[TTP] What is Table Type Patterns?

As we know (if you have to deal with the multiple flow tables of OpenFlow 1.1+), multiple flow tables provide scalability but increase the complexity of usage. A software switch, whether running on a hypervisor or directly on an operating system, can use memory to implement whatever table design is required. But for a hardware switch that uses an ASIC packet-processing pipeline, the forwarding pipeline and the flow-table definitions are specified and fixed. That raises a big issue: how does an application running on an SDN controller know how to deal with these fixed limitations? Recently I noticed "TTP", which stands for Table Type Patterns, a mechanism that allows an OpenFlow controller and an OpenFlow switch to agree on a set of functionality, to help manage the increased diversity made possible with OpenFlow versions 1.1+. I think that is very useful for a case like OF-DPA.

https://www.sdncentral.com/education/openflow-table-type-patterns-opendaylight-next-release-colin-dixon/2014/08/
This article mentions that these capabilities have made OpenFlow a much more interesting tool in two major ways:

1. Better for developers. Simply put, the new features allow developers to do more interesting things: handle IPv6 traffic, provide better multipathing, and separate logical concerns into different tables. Of course, this is all limited to software switches if hardware doesn’t expose these features via OpenFlow, and it can’t do that if OpenFlow provides poor abstractions for hardware.

2. Better mapping onto hardware. The original model of OpenFlow (a single, very flexible table) was actually a poor fit for real network hardware. The new model allows for an OpenFlow table pipeline that can much more closely match the pipelines in real networking ASICs. This allows hardware to both expose more of its capabilities and expose them to controllers (and thus developers) in a way they can efficiently take advantage of.


https://github.com/OpenNetworkingFoundation/TTP_Repository/blob/master/TTP-FAQ.md
This FAQ document gives us more detailed information:

TTPs will be particularly helpful in simplifying the coding of advanced OpenFlow datapath control (where many flow tables or optional functions are needed). Because TTPs describe the expected controller/switch messaging unambiguously, they will also improve interoperability.

TTPs will also be helpful in testing or benchmarking contexts where participants want advance notice to ensure conformance or optimize performance. Such participants expect precise descriptions of what messages will be used during testing or benchmarking.

To legitimately claim support for a TTP, a switch must implement all non-optional functionality described by the TTP. As mentioned above, a TTP may describe some functionality that is optional.


PS: OpenDaylight has supported TTP since the Helium release.
https://wiki.opendaylight.org/view/Table_Type_Patterns:Helium_Release_Notes

Monday, September 22, 2014

[OpenFlow] Install OpenFlow Dissector to Wireshark

It is for OpenFlow 1.1 and higher versions.

Get sources (for Debian-based):
    git clone git://github.com/CPqD/ofdissector
    sudo apt-get install wireshark wireshark-dev
    export WIRESHARK=/usr/include/wireshark

Install scons and build the OpenFlow library (the commands after sudo -s run in a root shell):
    cd ofdissector/src
    sudo -s
    apt-get install scons
    scons install
    mv openflow.so /usr/lib/wireshark/libwireshark1/plugins/
    cd /usr/lib/wireshark/libwireshark1/plugins/
    chown root:root openflow.so
    chmod 644 openflow.so

Run Wireshark:
    wireshark
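
To quickly confirm the dissector is loaded, you can grep Wireshark's protocol list (the exact protocol name registered by the plugin may vary):

tshark -G protocols | grep -i openflow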

Friday, September 12, 2014

[Linux Kernel] How to build Linux kernel on Ubuntu?

The general way to build Linux kernel:
  • apt-get install libncurses-dev kernel-package bzip2 make ncurses-dev fakeroot module-init-tools patch 
  • apt-get install fakeroot build-essential kernel-package libncurses5 libncurses5-dev
  • Find the stable version on https://www.kernel.org/, download it, and extract the tar.bz2 file into /usr/src/
  • cd /usr/src/linux-source-3.2.0/   <== in my case
  • make mrproper
  • make menuconfig
  • make -j 16
  • sudo make modules_install install
  • sudo update-grub2
  • reboot
Booting Menu:
/boot/grub/grub.cfg

Booting Linux Options:
/etc/default/grub
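
For example, the options I usually touch in /etc/default/grub look like this (the values are just placeholders; remember to run sudo update-grub2 afterwards):

GRUB_DEFAULT=0                        # which menu entry to boot by default
GRUB_TIMEOUT=10                       # seconds to show the boot menu
GRUB_CMDLINE_LINUX_DEFAULT="quiet splash"
GRUB_CMDLINE_LINUX=""                 # extra kernel parameters go here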


The Ubuntu way to do so:

  • make-kpkg clean
  • make-kpkg --initrd --append-to-version=danny4400 kernel_image kernel_headers
  • cd ..
  • dpkg -i linux-image-<press the Tab key to complete the package name>
  • ls -l /boot/
  • reboot

Sunday, August 31, 2014

[Lagopus] Install Lagopus software switch on Ubuntu 12.04

I attended the NTT (Ryu/Lagopus) seminar in August at NCTU, Taiwan, and noticed that Lagopus (an SDN/OpenFlow software switch) is amazing.
Its L2 switch performance with 10GbE x 2 (RFC 2889 test) is near 10 Gbps for most packet sizes, and the test platform is an
Intel Xeon E5-2660 (8 cores, 16 threads) with an Intel X520-DA2 NIC and 64 GB of DDR3-1600 memory.
For more information, please see the pictures I took at the seminar.

The following are its features:
  • Best OpenFlow 1.3 compliant software-based switch
    • Multiple flow tables and group tables support
    • MPLS, PBB, QinQ support
  • ONF standard specification support
    • OpenFlow Switch Specification 1.3.3
    • OF-CONFIG 1.1
  • Multiple data-plane configuration
    • High performance software data-plane on Intel x86 bare-metal server
      • Intel DPDK, Raw socket
    • Bare metal switch
  • Various management/configuration interfaces
    • OF-CONFIG, OVSDB, CLI
    • SNMP, Ethernet-OAM functionality

For installing Lagopus switch, you can refer to the following URL. It can give us a common installation guide for Lagopus switch.
https://github.com/lagopus/lagopus/blob/master/QUICKSTART.md


About my Lagopus environment:
sudo vi /usr/local/etc/lagopus/lagopus.conf
 interface {  
   ethernet {  
     eth0;  
     eth1;  
     eth2;  
   }  
 }  
 bridge-domains {  
   br0 {  
     port {  
       eth0;  
       eth1;  
       eth2;  
     }  
     controller {  
       127.0.0.1;  
     }  
   }  
 }  


In addition, I put together 2 shell scripts for quickly building and setting up DPDK. I use DPDK-1.6.0, so the scripts are based on that version.

compile_dpdk.sh
 #!/bin/sh  
 export RTE_SDK=/home/myname/git/DPDK-1.6.0  
 export RTE_TARGET="x86_64-default-linuxapp-gcc"  
 make config T=${RTE_TARGET}  
 make install T=${RTE_TARGET}  

install_dpdk.sh
 #!/bin/sh  
 export RTE_SDK=/home/myname/git/DPDK-1.6.0
 export RTE_TARGET="x86_64-default-linuxapp-gcc"  
 DPDK_NIC_PCIS="0000:00:08.0 0000:00:09.0 0000:00:0a.0"  
 HUGEPAGE_NOPAGES="1024"  
 set_numa_pages()  
 {  
     for d in /sys/devices/system/node/node? ; do  
         sudo sh -c "echo ${HUGEPAGE_NOPAGES} > $d/hugepages/hugepages-2048kB/nr_hugepages"  
     done  
 }  
 set_no_numa_pages()  
 {  
     sudo sh -c "echo ${HUGEPAGE_NOPAGES} > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages"  
 }  
 # install module  
 sudo modprobe uio  
 sudo insmod ${RTE_SDK}/${RTE_TARGET}/kmod/igb_uio.ko  
 sudo insmod ${RTE_SDK}/${RTE_TARGET}/kmod/rte_kni.ko  
 # unbind e1000 NICs from igb and bind igb_uio for DPDK  
 sudo ${RTE_SDK}/tools/pci_unbind.py --bind=igb_uio ${DPDK_NIC_PCIS}  
 sudo ${RTE_SDK}/tools/pci_unbind.py --status  
 # mount hugepagefs  
 echo "Set hugepagesize=${HUGEPAGE_NOPAGES} of 2MB page"  
 NCPUS=$(find /sys/devices/system/node/node? -maxdepth 0 -type d | wc -l)  
 if [ ${NCPUS} -gt 1 ] ; then  
     set_numa_pages  
 else  
     set_no_numa_pages  
 fi  
 echo "Creating /mnt/huge and mounting as hugetlbfs"  
 sudo mkdir -p /mnt/huge  
 grep -s '/mnt/huge' /proc/mounts > /dev/null  
 if [ $? -ne 0 ] ; then  
     sudo mount -t hugetlbfs nodev /mnt/huge  
 fi  
 unset RTE_SDK  
 unset RTE_TARGET  

Here is one thing that needs to be noticed.
The variable DPDK_NIC_PCIS contains the bus info of my Linux eth1, eth2, and eth3:
DPDK_NIC_PCIS="0000:00:08.0 0000:00:09.0 0000:00:0a.0"
You have to change these values; run ethtool to find your own NICs' bus info.

So, we need to use the command "ethtool" to find out each NIC's bus-info as follows:
# ethtool -i eth4
driver: igb
version: 5.0.5-k
firmware-version: 3.11, 0x8000046e
bus-info: 0000:02:00.0
supports-statistics: yes
supports-test: yes
supports-eeprom-access: yes
supports-register-dump: yes
supports-priv-flags: no
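
If you have several NICs to check, a small helper loop like this (my own convenience snippet; adjust the interface names to your machine) collects the bus-info values in one shot:

for i in 1 2 3; do
    echo -n "eth$i: "
    ethtool -i eth$i | awk '/bus-info/ {print $2}'
done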


After executing the 2 shell scripts, we can start the Lagopus switch like this:
sudo lagopus -d -- -c3 -n1 -- -p3
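
As far as I understand the options (they follow the DPDK EAL conventions, so double-check against the Lagopus documentation), the command breaks down roughly like this:

# sudo lagopus -d -- -c3 -n1 -- -p3
#   -d    run lagopus in debug mode
#   -c3   DPDK coremask 0x3  -> use CPU cores 0 and 1
#   -n1   number of memory channels
#   -p3   port mask 0x3      -> use the first two DPDK-bound ports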


Monday, August 4, 2014

[Indigo] The architecture of Indigo 2.0

After checking the source code of Indigo 2.0, OF-DPA (CDP), and IVS on GitHub, I drew a simple architecture diagram to show the idea of the hardware abstraction layer (HAL). I think the most important part is that Big Switch Networks uses this HAL concept to stay hardware-agnostic and to adopt the different forwarding-engine / port-management implementations of different hardware or platforms.


[RYU] Try the RYU Web GUI with Mininet

This post is about the RYU web GUI, so we can see what the GUI looks like. My environment consists of 2 virtual machines running on VirtualBox. I skip the installation steps for RYU and the GUI because there are already documents describing how to do so. If interested, please check these:
http://blog.linton.tw/posts/2014/02/15/note-install-ryu-36-sdn-framework
http://blog.linton.tw/posts/2014/02/11/note-how-to-set-up-ryu-controller-with-gui-component

P.S: You may need to do this first:
pip install --upgrade pip or pip install -U pip

First, I started with my RYU server and executed the command:

  • > ryu-manager --verbose --observe-links ryu.topology.switches ryu.app.rest_topology ryu.app.ofctl_rest ryu.app.simple_switch

P.S: Currently the GUI doesn't support OF1.3.

Second, open another console and execute this command under your ryu directory. It is a middleware between the web UI and the controller.

  • > ./ryu/gui/controller.py


For Mininet, I just downloaded the Mininet virtual machine and used it directly. The following command quickly generates a 3-tier tree topology.

>  sudo mn --controller=remote,ip=10.3.207.81 --topo tree,3


Back to the RYU server, open the browser with the URL: http://127.0.0.1:8000/   








Monday, June 23, 2014

[OF-DPA] Glance at the source code of OF-DPA on GitHub

I just took a quick glance at the code of OF-DPA and drew a skeleton of it. Actually, it only covers the integration of Indigo with the OF-DPA API (the blue part) and locks in with the SDK and the switch device. There is no OF-DPA/SDK source code (the red part). As the diagram describes, the OEM/ODM Development Package has the full source code, distributed under the Broadcom SLA.



Wednesday, June 18, 2014

Monday, May 19, 2014

[Ruby] Cross-Compile Ruby to MIPS platform

I have spent several days dealing with cross-compiling Ruby for the MIPS platform and encountered some problems that bothered me for a while. Happily, I worked through all of them and got the job done. Awesome!
To help you avoid these kinds of problems, here are the steps and scripts for how to do it:

  • Cross Compile OpenSSL

>./config --prefix=$PWD/build --cross-compile-prefix=/home/liudanny/git/NL/toolchains_bin/mipscross/linux/bin/mips64-nlm-linux-elf32btsmip- shared no-asm

We need to check that the Makefile contains "PLATFORM=mips" and does not contain "-m64".

>make 2>&1 | tee make.out; make install

  • Cross Compile zlib
>CC=your_cross_compile_gcc ./configure --prefix=$PWD/build
>make 2>&1 | tee make.out; make install


  • Cross Compile Berkeley DB

>CC=your_cross_compile_gcc ../dist/configure --prefix=$PWD/build
>make 2>&1 | tee make.out; make install


  • Cross Compile OpenLDAP

>CC=your_cross_compile_gcc LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/home/liudanny/git/db-6.0.30.NC/build_unix/build/lib:/home/liudanny/git/openssl-1.0.1e/build/ssl/lib LDFLAGS=" -L/home/liudanny/git/db-6.0.30.NC/build_unix/build/lib -L/home/liudanny/git/openssl-1.0.1e/build/ssl/lib" CPPFLAGS=" -I/usr/local/include -I/home/liudanny/git/db-6.0.30.NC/build_unix/build/include -I/home/liudanny/git/openssl-1.0.1e/build/ssl/include" ./configure --prefix=$PWD/mybuild/ --enable-bdb --enable-crypt --host=mips64-nlm-linux --with-yielding_select=yes –with-tls=openssl

Before calling make, comment out this line in include/portable.h:
// #define NEED_MEMCMP_REPLACEMENT 1

Modify build/shtool to avoid a stripping error.
Go to line 980 and find:

if [ ".$opt_s" = .yes ]; then
    if [ ".$opt_t" = .yes ]; then
        echo "strip $dsttmp" 1>&2
    fi
    strip $dsttmp || shtool_exit $?
fi

Change it to the following (use the strip from your own cross toolchain):

if [ ".$opt_s" = .yes ]; then
    if [ ".$opt_t" = .yes ]; then
        echo "arm-none-linux-gnueabi-strip $dsttmp" 1>&2
    fi
    arm-none-linux-gnueabi-strip $dsttmp || shtool_exit $?
fi

>make depend; make; make install


  • Cross Compile Ruby 1.8.7

Before cross-compiling Ruby, it must first be compiled and built natively on the build server, because cross-compiling uses that native Ruby to generate the Makefile and perform other important steps.
Because Ruby can have many extension libraries, we need to modify Setup.emx to enable the ext libs that we need, and also copy the needed ruby-ext-libs into the ext/ directory. In the picture, we add and enable the "shadow", "openssl", "socket", "zlib", and "ldap" ext libs as follows:

>export ac_cv_func_getpgrp_void=yes
>export ac_cv_func_setpgrp_void=yes
>export PATH=/home/liudanny/ruby-1.8.7-p352/build/bin:$PATH
>CC=your_cross_compile_gcc ./configure --prefix=$PWD/build/ --host=mips64-nlm-linux --with-openssl-dir=/home/liudanny/git/openssl-1.0.1e/build --disable-ipv6 --with-openssl-dir=/home/liudanny/git/openssl-1.0.1e/build --with-zlib-dir=/home/liudanny/git/zlib-1.2.5/build --with-ldap-dir=/home/liudanny/git/openldap-2.4.39/mybuild 2>&1 | tee config.out
>make 2>&1 | tee make.out; make install
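
After make install finishes, a quick sanity check (assuming the --prefix above) is to verify that the resulting binary really targets MIPS:

file build/bin/ruby
# should report something like "ELF 32-bit MSB executable, MIPS ..." rather than x86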

Friday, March 14, 2014

[Cross-Compile] What's the difference of `./configure` option `--build`, `--host` and `--target`?

When using ./configure, especially for cross-compiling, I was somewhat confused about the options --build and --host, so the following content is what I found while searching:

# some remarks on specifying --host=<host>, --target=<target> and --build=<build>
# kindly provided by Keith Marshall:
# 1) build
# this is *always* the platform on which you are running the build
# process; since we are building on Linux, this is unequivocally going to
# specify `linux', with the canonical form being `i686-pc-linux-gnu'.
#
# 2) host
# this is a tricky one: it specifies the platform on which whatever we
# are building is going to be run; for the cross-compiler itself, that's
# also `i686-pc-linux-gnu', but when we get to the stage of building the
# runtime support libraries to go with that cross-compiler, they must
# contain code which will run on the `i686-pc-mingw32' host, so the `host'
# specification should change to this, for the `runtime' and `w32api'
# stages of the build.
#
# 3) target
# this is probably the one which causes the most confusion; it is only
# relevant when building a cross-compiler, and it specifies where the code
# which is built by that cross-compiler itself will ultimately run; it
# should not need to be specified at all, for the `runtime' or `w32api',
# since these are already targetted to `i686-pc-mingw32' by a correct
# `host' specification.

And I found an answer after posting this question. Still posting it here in case it helps someone else in the future.
http://jingfenghanmax.blogspot.in/2010/09/configure-with-host-target-and-build.html

As per this blog, in my case:
build will be i686-pc-linux-gnu (my PC)
host will be mipsel-linux (the platform I am going to run my code on)
target would be used only if I were building a cross-compiling toolchain.
Since I am not building a toolchain, I didn't have to specify target.
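
So, for a case like the one above, a hypothetical cross-compile invocation would look like this (triplets taken from the quoted example):

./configure --build=i686-pc-linux-gnu --host=mipsel-linux --prefix=$PWD/build
make && make install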

You will have to cross-compile libusb and then copy the library and
header files to a location where your toolchain can locate them. In
the case of CodeSourcery, you can put them in
cs_root/arm-none-linux-gnueabi/lib and
cs_root/arm-none-linux-gnueabi/include for example. You will also need
the library on the target's root filesystem unless you link it
statically; please mind the licensing implications if you do, though.

Wednesday, March 12, 2014

[NETCONF] The summary of NETCONF Content

The following content is a summary of the material on the NETCONF Central web site:
http://www.netconfcentral.org/netconf_docs

Session Initiation For Clients

<?xml version="1.0" encoding="UTF-8"?>
<hello xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
  <capabilities>
    <capability>urn:ietf:params:netconf:base:1.0</capability>
  </capabilities>
</hello>]]>]]>

Protocol Capabilities

<capability>
    urn:ietf:params:netconf:capability:writable-running:1.0
</capability>

 <capability>urn:ietf:params:netconf:base:1.0</capability>

Standard Capabilities

:candidate
:confirmed-commit
:interleave
:notification
:partial-lock
:rollback-on-error
:startup
:url
:validate
:writable-running
:xpath

Configuration Databases

<running/>
<candidate/>
<startup/>

Protocol Operations

Once a NETCONF session is established, the client knows which capabilities the server supports. The client then can send RPC method requests and receive RPC replies from the server. The server's request queue is serialized, so requests will be processed in the order received.

Operation             Usage                   Description
close-session         :base                   Terminate this session
commit                :base AND :candidate    Commit the contents of the <candidate/> configuration database to the <running/> configuration database
copy-config           :base                   Copy a configuration database
create-subscription   :notification           Create a NETCONF notification subscription
delete-config         :base                   Delete a configuration database
discard-changes       :base AND :candidate    Clear all changes from the <candidate/> configuration database and make it match the <running/> configuration database
edit-config           :base                   Modify a configuration database
get                   :base                   Retrieve data from the running configuration database and/or device statistics
get-config            :base                   Retrieve data from the running configuration database
kill-session          :base                   Terminate another session
lock                  :base                   Lock a configuration database so only my session can write
unlock                :base                   Unlock a configuration database so any session can write
validate              :base AND :validate     Validate the entire contents of a configuration database
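
As a concrete example, a minimal <get-config> request for the <running/> database looks like this (framing and namespaces follow the NETCONF base specification):

<rpc message-id="101" xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
  <get-config>
    <source>
      <running/>
    </source>
  </get-config>
</rpc>]]>]]>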

Tuesday, February 25, 2014

[OpenStack] The resource list for studying OpenStack Neutron

The following is a list of resource documents for those who want to study OpenStack Neutron. It may reduce the time you spend searching. Here you go:

Introduction to OpenStack Quantum
  • To warm up for jumping into the network world of OpenStack
OpenStack Admin Guide Chapter 7. Networking
  • It is very important to take a look at this official document first

  • This contains the Quantum architecture in detail
  • Cisco's plugins are a little bit complicated because there are several versions that differ in configuration and prerequisites.
Cisco Nexus Plug-in for OpenStack Neutron 
  • Gives a data sheet listing the functionality and features it supports
What's new in Neutron?
  • Gives an overall look at Neutron
  • Another doc: http://www.slideshare.net/kamesh001/whats-new-in-neutron-for-open-stack-havana
  • What components are in Neutron: http://www.slideshare.net/emaganap/open-stack-overview-meetups-oct-2013
  • It is very technical, explaining the details of the code and messages.
  • we can see how Open vSwitch is implemented in Neutron 
  • It introduces VM booting workflow with Nova and Networking
  • Neutron deployment components
  • Explains why ML2 came out
Modular Layer 2 in OpenStack Neutron
  • New Feature: ToR Switch Control
  • The video: http://www.youtube.com/watch?v=whmcQ-vHams

ML2
  • Modular Layer 2 (ML2): The Modular Layer 2 (ML2) plugin is a new open-source plugin for Neutron. This plugin is a framework allowing OpenStack Networking to simultaneously utilize the variety of layer 2 networking technologies found in complex real-world data centers. It currently works with the existing Open vSwitch, Linux Bridge, and L2 agents. The ML2 plugin supports local, flat, VLAN, GRE and VXLAN network types via type drivers, and different mechanism drivers (see the configuration sketch below).
OpenStack-Network-ML2
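
For reference, the ML2 plugin is configured through ml2_conf.ini; an illustrative sketch (option names from the ML2 documentation, values are only placeholders) looks like this:

[ml2]
type_drivers = flat,vlan,gre,vxlan
tenant_network_types = vxlan
mechanism_drivers = openvswitch,linuxbridge

[ml2_type_vlan]
network_vlan_ranges = physnet1:1000:2999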


OVSDB User Guide
  • OpenDaylight has OVSDB project that is related with ML2 plugin
Cisco OpenStack Overview
  • Introduce Cisco Neutron Plugin
  • Cisco Virtual Switch: Nexus 1000v
  • Cisco UCSM

OpenStack RDO Deployment 
  1. http://blog.csdn.net/cloudtech/article/details/19936249
  2. http://blog.csdn.net/cloudtech/article/details/19936425
  3. http://blog.csdn.net/cloudtech/article/details/19936487
  • https://wiki.openstack.org/wiki/Arista-neutron-ml2-driver
  • Arista Related Information about Mechanism Driver
  • https://blueprints.launchpad.net/neutron/+spec/arista-ml2-mechanism-driver
  • http://www.bradreese.com/blog/4-1-2013-2.pdf
  • https://docs.google.com/document/d/1efFprzY69h-vaikRE8hoGQuLzOzVNtyVZZLa2GHbXLI/edit
Developer Guide

How to write a Neutron Plugin

Neutron/LBaaS/PluginDrivers

http://www.slideshare.net/MiguelLavalle/network-virtualization-with-open-stack-quantum


Monday, February 24, 2014

[Thoughts] Cumulus Networks and Big Switch Networks

These two companies, Cumulus Networks and Big Switch Networks, are two of my favorite network companies in the world because they have strong technical skills and the creativity to build their networking products. Unfortunately, they walk two different paths and directions: Big Switch Networks is based on OpenFlow, but Cumulus Networks is not. For more details, please see below:

http://www.jedelman.com/1/post/2014/02/big-switch-cumulus-and-openflow.html
http://vimeo.com/87216036

[SDN] SDN Migration Use Cases

This document provides three migration use cases. I think they are very useful for those who work in the networking field and are interested in SDN, and they are well worth a look. Here you go:
http://www.businesswire.com/news/home/20140211005653/en/Open-Networking-Foundation-Publishes-Open-SDN-Migration

Tuesday, January 14, 2014

[Thoughts] RESTful control of switches

OpenFlow is already the standard southbound API in the SDN field, but it is only one of many southbound approaches. In an SDN solution, we don't necessarily need to use the OpenFlow protocol to control the data plane. A RESTful API is another way to control or configure switches (the data plane) if they support it. Arista Networks provides Arista eAPI for RESTful control of switches. For more details, please refer to this article: http://blog.sflow.com/2013/08/restful-control-of-switches.html

[LXC] How to use LXC?

At first glimpse, I was amazed by the way LXC provides lightweight containers in a virtual environment. Combined with shell scripts, we can use it to build a convenient and powerful automation solution to test all kinds of programs that need multiple virtual machines within a server host (at least my focus is on automation testing...XD). There are already a bunch of articles introducing LXC, so here I only list some commonly used commands for quick reference:

# Install LXC
sudo apt-get install lxc

# Create a Linux Container named base ( -t: template, -n: namespace )
sudo lxc-create -t ubuntu -n base

# Start the Linux Container ( -d: daemon )
sudo lxc-start -n base -d

# Stop the Linux Container
sudo lxc-stop -n base

# List Linux Containers
lxc-ls --fancy

# Clone the Linux Container
lxc-clone -o base -n newvm1

# Access the container
lxc-console -n newvm1

# Shutdown
lxc-shutdown -n test-container

# Destroy
lxc-destroy -n test-container


LXC can be controlled via Libvirt:
http://blog.scottlowe.org/2013/11/27/linux-containers-via-lxc-and-libvirt/

Exploring LXC Networking:
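
A typical veth setup in a container's config file (/var/lib/lxc/<name>/config) looks roughly like this (a sketch using the LXC 1.x key names; adjust the bridge name to your host):

lxc.network.type = veth
lxc.network.link = lxcbr0
lxc.network.flags = up
lxc.network.hwaddr = 00:16:3e:xx:xx:xx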

Autostart
By default, containers will not be started after a reboot, even if they were running prior to the shutdown.
To make a container autostart, you simply need to symlink its config file into the /etc/lxc/auto directory:
ln -s /var/lib/lxc/test-container/config /etc/lxc/auto/test-container.conf

Reference:
https://www.digitalocean.com/community/articles/getting-started-with-lxc-on-an-ubuntu-13-04-vps
http://www.janoszen.com/2013/05/14/lxc-tutorial/

Tuesday, January 7, 2014

[Open vSwitch] The basic introduction of Open vSwitch

I posted a slide deck that gives a basic introduction to Open vSwitch. For more details, please check out the SlideShare URL as follows:
http://www.slideshare.net/teyenliu/the-basic-introduction-of-open-vswitch


Some useful examples of OVS commands:
sudo ovs-vsctl show
sudo ovs-vsctl add-br mybridge
sudo ovs-vsctl del-br mybridge
sudo ovs-vsctl add-port mybridge port-name
sudo ovs-vsctl del-port mybridge port-name
sudo ovs-vsctl list Bridge/Port/Interface/...
sudo ovs-appctl fdb/show mybridge
sudo ovs-ofctl show mybridge
sudo ovs-ofctl dump-flows mybridge
sudo ovs-ofctl add-flow mybridge dl_src=02:a2:a2:a2:a2:a2,dl_dst=02:b2:b2:b2:b2:b2,in_port=2,dl_type=0x0800,nw_src=10.0.0.1,nw_dst=10.0.0.2,actions=output:6
sudo ovs-ofctl del-flows mybridge dl_src=02:a2:a2:a2:a2:a2,dl_dst=02:b2:b2:b2:b2:b2,in_port=2,dl_type=0x0800,nw_src=10.0.0.1,nw_dst=10.0.0.2
sudo ovs-ofctl add-flow dp0 in_port=2,actions=output:6
# This will delete all the flow entries in the flow table
sudo ovs-ofctl del-flows mybridge

Friday, January 3, 2014

[GNS3] All the related URLs with GNS3 and Open vSwitch

To play with Open vSwitch in GNS3, here is Open vSwitch 1.2.2 installed on Microcore 4.0 Linux as a QEMU image.
http://brezular.com/2013/09/17/linux-core-qemu-and-virtualbox-appliances-download/

Here are the installation steps.
http://brezular.com/2011/09/03/part1-openvswich-creating-and-submitting-openvswitch-extension-to-microcore-upstream/

And GNS3 labs are available here.
http://brezular.com/2011/06/25/part2-openvswich-vlans-trunks-l3-vlan-interface-intervlan-routing-configuration-and-testing/

[KVM and OVS] Installing KVM and Open vSwitch on Ubuntu

These articles provide a very good explanation of how to install KVM and Open vSwitch on Ubuntu.
http://blog.scottlowe.org/2012/08/17/installing-kvm-and-open-vswitch-on-ubuntu/
http://networkstatic.net/installing-open-vswitch-with-kvm/

I summarize the scripts from the above URLs as follows:
  • ### Installing KVM and Open vSwitch on Ubuntu ###
sudo apt-get update && sudo apt-get dist-upgrade
sudo apt-get install kvm qemu-kvm libvirt-bin virtinst virt-manager
sudo virsh net-destroy default
sudo virsh net-autostart --disable default
sudo aptitude purge ebtables
  • ### Open vSwitch on Ubuntu ###
sudo apt-get install openvswitch-controller openvswitch-brcompat \
     openvswitch-switch openvswitch-datapath-source

# Edit this: /etc/default/openvswitch-switch and change this line:
  #BRCOMPAT=no ==> BRCOMPAT=yes

# to build and install the necessary module
sudo module-assistant auto-install openvswitch-datapath
  • ### Add Open vSwitch bridge ###
sudo ovs-vsctl add-br br0
sudo ovs-vsctl add-port br0 eth0
sudo ovs-vsctl list port
  • ### Change your eth0 IP to your new br0 interface ###
sudo ifconfig eth0 0
sudo ifconfig br0 192.168.1.x netmask 255.255.255.0
sudo route add default gw 192.168.1.1 br0

These two scripts bring up the KVM Tap interfaces into your
bridge from the CLI.

$ cat /etc/ovs-ifup
#!/bin/sh
switch='br0'
/sbin/ifconfig $1 0.0.0.0 up
ovs-vsctl add-port ${switch} $1

$ cat /etc/ovs-ifdown
#!/bin/sh
switch='br0'
/sbin/ifconfig $1 0.0.0.0 down
ovs-vsctl del-port ${switch} $1

#Then make executable
chmod +x /etc/ovs-ifup /etc/ovs-ifdown
  • ### BOOT HD IMG ###
Here are some KVM examples. Starting with the CD is easiest. No build is needed for testing.

Boot an existing HD image:
sudo kvm -m 1024 -hda /media/Storage/imgs/centos.kvm -net nic,macaddr=00:11:22:CC:CC:C5 -net tap,script=/etc/ovs-ifup,downscript=/etc/ovs-ifdown &

OS Install:
sudo kvm -m 512 -hda /media/Storage/imgs/centos.kvm -net nic,macaddr=00:11:22:CC:CC:C5 -net tap,script=/etc/ovs-ifup,downscript=/etc/ovs-ifdown -cdrom /media/Storage/vm-images/CentOS-6.2-x86_64-LiveCD.iso &

CD Boot:
sudo kvm -m 512 -net nic,macaddr=00:11:22:CC:CC:10 -net tap,script=/etc/ovs-ifup,downscript=/etc/ovs-ifdown -cdrom /HD/Storage/vm-images/ubuntu-11.10-desktop-amd64.iso &

Make an Image from CD:
sudo qemu-img create -f qcow2 /media/Storage/imgs/uCentOS-6.2.img 6G

Tuesday, December 3, 2013

[NSX] Network Virtualization Gets Physical

In VMware NSX, how does network virtualization get physical?
The answer is "Layer 2 Gateway Services". The physical switches have to implement OVSDB and the related APIs that communicate with the NSX Controller, so that they can provide the mapping between a segment in the virtual network and a VLAN in the physical network. For more details, you can check out the following URLs.

http://networkheresy.com/2013/08/15/network-virtualization-gets-physical/


https://www.youtube.com/watch?v=QDOlggwyrVA&feature=c4-overview-vl&list=PLdYldEmmLm2lz5Bd0bzGCDKSULL52ytJT

Ivan's NSX architecture introduction is very good:
http://demo.ipspace.net/get/NSXArch

Tuesday, November 19, 2013

[OpenVNet] The introduction of OpenVNet

The reason I pay attention to OpenVNet is that it uses Trema-Edge as an OpenFlow 1.3 controller to communicate with Open vSwitch 1.10. It gives us a great example of using Trema-Edge in a virtual network environment and of learning more about some use cases. Please check out the following links:

What is OpenVNet?
http://www.slideshare.net/akirayokokawa/openvnet

Official Web Site:
http://openvnet.com/

Github Source:
https://github.com/axsh/openvnet

[Quagga] How to compile and install Quagga on Ubuntu 12.04

The following steps cover the compilation and installation of Quagga on Ubuntu 12.04. Hopefully it is helpful for you.
  • Install all dependencies (packages):
    • sudo apt-get build-dep quagga
  • Give the path for all configuration files and libraries used by Quagga at startup. I use /opt/quagga, and this is done by this command:
    • ./configure --enable-vtysh --prefix=/opt/quagga --localstatedir=/opt/quagga --sysconfdir=/opt/quagga
  • Create new folder: 
    • sudo mkdir /opt/quagga
  • Add new user : 
    • sudo adduser quagga 
  • Give the user quagga ownership of the folder /opt/quagga, and give the folders and subfolders full rights: reading, writing and executing
    • sudo chown quagga:quagga /opt/quagga
    • sudo chmod 777 /opt/quagga 
  • Then you will be able to compile Quagga: 
    • make
    • sudo make install 
Quagga configuration
--------------------
quagga version          : 0.99.22
host operating system   : linux-gnu
source code location    : .
compiler                : gcc
compiler flags          : -Os -fno-omit-frame-pointer -g -std=gnu99 -Wall -Wsign-compare -Wpointer-arith -Wbad-function-cast -Wwrite-strings -Wmissing-prototypes -Wmissing-declarations -Wchar-subscripts -Wcast-qual
make                    : make
includes                : 
linker flags            :  -lcrypt   -lrt -lcap  -lm
state file directory    : /opt/quagga
config file directory   : /opt/quagga
example directory       : /opt/quagga
user to run as  : quagga
group to run as  : quagga
group for vty sockets : 
config file mask        : 0600
log file mask           : 0600

Now, let us try Quagga with some examples.
  • Zebra
    • Copy sample to conf file:
      • cd /opt/quagga
      • cp zebra.conf.sample zebra.conf
    • Start the zebra daemon with this command: 
      • sudo /opt/quagga/sbin/zebra &
    • Check the daemon zebra:
      • netstat -a | grep zebra 
    • To get into zebra, use: 
      • telnet localhost zebra
  • BGP
    • Copy sample to conf file:
      • cd /opt/quagga
      • cp bgpd.conf.sample bgpd.conf
    • Start bgpd: 
      • sudo /opt/quagga/sbin/bgpd &
    • Check the bgpd daemon:
      • netstat -a | grep bgpd
    • To get into bgpd, use: 
      • telnet localhost bgpd
  • vtysh
    • cd /opt/quagga
    • vi /opt/quagga/vtysh.conf
    • sudo /opt/quagga/bin/vtysh
! Sample configuration file for vtysh.
!
!service integrated-vtysh-config
hostname quagga-router
username root nopassword
!
Stop Quagga Daemon ( for example: bgpd ):
  • sudo kill `cat /opt/quagga/bgpd.pid`
Port Number:

  • zebra: 2601
  • ripd:  2602
  • ripng: 2603
  • ospfd: 2604
  • bgpd:  2605
  • ospf6d: 2606



Friday, November 8, 2013

[Puppet] The studying track of Puppet

After taking some time to study Puppet, I have come to believe more and more that it is a great auto-configuration tool. The following URLs are my study track for Puppet.

For beginners, it is recommended to walk through this to learn the basic concepts:
Learning Puppet
http://docs.puppetlabs.com/learning/index.html

Once you finish the above document, you will probably want to write your own type and provider. Here you go:
Custom Types
http://docs.puppetlabs.com/guides/custom_types.html
Provider Development
http://docs.puppetlabs.com/guides/provider_development.html

Juniper provides an open-source project for using Puppet to manage and configure switches that support the Puppet agent. It is a very good example for understanding how to define custom types and how to implement a provider.

To define custom types:
https://github.com/NetdevOps/puppet-netdev-stdlib
To implement provider:
https://github.com/Juniper/puppet-netdev-stdlib-junos

Monday, November 4, 2013

[OpenFlow 1.X] The Flow Table Usage

When OpenFlow 1.0 users try to move to version 1.1 or later, the first question that comes up is most likely "how do we use multiple flow tables?" Well, we can see an example in the RYU OpenFlow controller as follows: http://www.slideshare.net/yamahata/ryu-sdnframeworkupload
On page 33, there are 3 flow tables containing match conditions and actions. That gives an initial idea for answering the question.






P.S: Pica8 works with Broadcom to double flow table size in its OF1.3 Switch
http://searchsdn.techtarget.com/news/2240214709/Pica8-doubles-flow-rule-capacity-in-its-new-OpenFlow-13-switch

Wednesday, October 30, 2013

[Trema] A good example with Trema to build OpenFlow Controller ( use case )

Previously, I wondered how to use Trema to build a useful and practical solution, with a good architecture, that lets users leverage its functionality. Well, such an example does exist. The following URL presents a good use case with a well-designed architecture:
http://www.slideshare.net/chibayasunobu/developing-production-open-flow-controller-with-trema-201304160

Source Code:
https://github.com/trema/virtual-network-platform


Design:


Implementation:

Friday, October 25, 2013

[Network] EtherTypes Reference

802 EtherTypes Reference:

0x0000 - 0x05DC   IEEE 802.3 length.
0x0600            XEROX NS IDP.
0x0660 - 0x0661   DLOG.
0x0800            IP, Internet Protocol.
0x0801            X.75 Internet.
0x0802            NBS Internet.
0x0803            ECMA Internet.
0x0804            Chaosnet.
0x0805            X.25 Level 3.
0x0806            ARP, Address Resolution Protocol.
0x0807            XNS compatibility.
0x0808            Frame Relay ARP.
0x8035            DRARP, Dynamic RARP. RARP, Reverse Address Resolution Protocol.
0x80F3            AARP, AppleTalk Address Resolution Protocol.
0x8100            EAPS, Ethernet Automatic Protection Switching.
0x8137            IPX, Internet Packet Exchange.
0x814C            SNMP, Simple Network Management Protocol.
0x86DD            IPv6, Internet Protocol version 6.
0x8808            MPCP, Multi-Point Control Protocol.
0x880B            PPP, Point-to-Point Protocol.
0x880C            GSMP, General Switch Management Protocol.
0x8847            MPLS, Multi-Protocol Label Switching (unicast).
0x8848            MPLS, Multi-Protocol Label Switching (multicast).
0x8863            PPPoE, PPP Over Ethernet (Discovery Stage).
0x8864            PPPoE, PPP Over Ethernet (PPP Session Stage).
0x886F            Network Load Balancing.
0x888E            EAPOL, EAP over LAN.
0x88A2            AoE, ATA over Ethernet.
0x88A4            EtherCAT.
0x88CA            TIPC, Transparent Inter Process Communication Protocol.
0x88BB            LWAPP, Light Weight Access Point Protocol.
0x88CC            LLDP, Link Layer Discovery Protocol.
0x88DC            WSMP, WAVE S

Monday, October 21, 2013

[Static Analysis] Tools for static analysis ( C/C++ )

Here is a list of some software tools for static analysis (C/C++). FYI.
  • Klocwork Insight ( Commercial )
  • Cppcheck
  • LintProject Pro
  • scan-build