some refactoring of LinkCharacteristics
parent bdba7f3bf1
commit 894579b91c

core/topo.py: 184 changed lines
@@ -11,114 +11,108 @@ class NetemAt(object):
         self.delta = 0
 
     def __str__(self):
-        return "Netem... at " + str(self.at) + "(" + str(self.delta) + \
-            ") will be " + self.cmd
+        return "netem at {} ({}) will be {}".format(self.at, self.delta, self.cmd)
 
 
 class LinkCharacteristics(object):
-    tcNetemParent = "1:1"
-    tcHtbClassid = "10"
-    tcNetemHandle = "1:10"
-
-    def bandwidthDelayProductDividedByMTU(self):
-        rtt = 2 * float(self.delay)
-        """ Since bandwidth is in Mbps and rtt in ms """
-        bandwidthDelayProduct = (float(self.bandwidth) * 125000.0) * (rtt / 1000.0)
-        return int(math.ceil(bandwidthDelayProduct * 1.0 / 1500.0))
-
-    def bufferSize(self):
-        return (1500.0 * self.bandwidthDelayProductDividedByMTU()) + (float(self.bandwidth) * 1000.0 * float(self.queuingDelay) / 8)
-
-    def extractQueuingDelay(self, queueSize, bandwidth, delay, mtu=1500):
-        # rtt = 2 * float(delay)
-        # bdp_queue_size = int((float(rtt) * float(bandwidth) * 1024 * 1024) / (int(mtu) * 8 * 1000))
-        # if int(queueSize) <= bdp_queue_size:
-        #     Returning 0 seems to bypass everything, then only limited by CPU.
-        #     This is not what we want...
-        #     return 1
-
-        # queuingQueueSize = int(queueSize) - bdp_queue_size
-        queuingDelay = (int(queueSize) * int(mtu) * 8.0 * 1000.0) / (float(bandwidth) * 1024 * 1024)
-        return max(int(queuingDelay), 1)
-
-    def __init__(self, id, delay, queueSize, bandwidth, loss, back_up=False):
+    """
+    Network characteristics associated to a link
+
+    Attributes:
+        id              the identifier of the link
+        delay           the one-way delay introduced by the link in ms
+        queue_size      the size of the link buffer, in packets
+        bandwidth       the bandwidth of the link in Mbps
+        loss            the random loss rate in percentage
+        queuing_delay   the maximum time that a packet can stay in the link buffer (computed over queue_size)
+        netem_at        list of NetemAt instances applicable to the link
+        backup          integer indicating if this link is a backup one or not (useful for MPTCP)
+    """
+    def __init__(self, id, delay, queue_size, bandwidth, loss, backup=False):
         self.id = id
         self.delay = delay
-        self.queueSize = queueSize
+        self.queue_size = queue_size
         self.bandwidth = bandwidth
         self.loss = loss
-        self.queuingDelay = str(self.extractQueuingDelay(queueSize, bandwidth, delay))
-        self.netemAt = []
-        self.back_up = back_up
+        self.queuing_delay = str(self.extract_queuing_delay(queue_size, bandwidth, delay))
+        self.netem_at = []
+        self.backup = backup
 
-    def addNetemAt(self, n):
-        if len(self.netemAt) == 0:
+    def bandwidth_delay_product_divided_by_mtu(self):
+        """
+        Get the bandwidth-delay product in terms of packets (hence, dividing by the MTU)
+        """
+        rtt = 2 * float(self.delay)
+        """ Since bandwidth is in Mbps and rtt in ms """
+        bandwidth_delay_product = (float(self.bandwidth) * 125000.0) * (rtt / 1000.0)
+        return int(math.ceil(bandwidth_delay_product * 1.0 / 1500.0))
+
+    def buffer_size(self):
+        """
+        Return the buffer size in bytes
+        """
+        return (1500.0 * self.bandwidth_delay_product_divided_by_mtu()) + \
+            (float(self.bandwidth) * 1000.0 * float(self.queuing_delay) / 8)
+
+    def extract_queuing_delay(self, queue_size, bandwidth, delay, mtu=1500):
+        queuing_delay = (int(queue_size) * int(mtu) * 8.0 * 1000.0) / \
+            (float(bandwidth) * 1024 * 1024)
+        return max(int(queuing_delay), 1)
+
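To make the arithmetic above concrete, here is a small standalone sketch of the three helpers applied to an illustrative link (10 Mbps bandwidth, 20 ms one-way delay, 100-packet queue; these numbers are examples, not values taken from the repository):

    import math

    # Illustrative link: bandwidth in Mbps, one-way delay in ms, queue size in packets.
    bandwidth, delay, queue_size, mtu = 10.0, 20.0, 100, 1500

    rtt = 2 * delay                                     # 40 ms
    bdp_bytes = bandwidth * 125000.0 * (rtt / 1000.0)   # 10 Mbps over 40 ms = 50000 bytes
    bdp_packets = int(math.ceil(bdp_bytes / 1500.0))    # ceil(33.3) = 34 MTU-sized packets

    # Time (in ms) needed to drain a full queue of MTU-sized packets at the link rate.
    queuing_delay = max(int((queue_size * mtu * 8.0 * 1000.0) / (bandwidth * 1024 * 1024)), 1)  # 114 ms

    # tbf "limit": one BDP worth of MTU-sized packets plus what the link sends during queuing_delay.
    buffer_size = 1500.0 * bdp_packets + bandwidth * 1000.0 * queuing_delay / 8                 # 193500 bytes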
+    def add_netem_at(self, n):
+        if len(self.netem_at) == 0:
             n.delta = n.at
-            self.netemAt.append(n)
+            self.netem_at.append(n)
         else:
-            if n.at > self.netemAt[-1].at:
-                n.delta = n.at - self.netemAt[-1].at
-                self.netemAt.append(n)
+            if n.at > self.netem_at[-1].at:
+                n.delta = n.at - self.netem_at[-1].at
+                self.netem_at.append(n)
             else:
-                print("Do not take into account " + n.__str__() + \
-                    "because ooo !")
-                pass
+                logging.error("{}: not taken into account because not specified in order in the topo param file".format(n))
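The delta field computed here turns absolute event times into the sleep intervals used by the command builders below. A short sketch of the intended call pattern (link is assumed to be a LinkCharacteristics instance; the event times and netem options are made up for illustration):

    first = NetemAt(0.0, "loss 1%")     # NetemAt(at, cmd), as instantiated by TopoParameter further down
    second = NetemAt(30.0, "loss 5%")

    link.add_netem_at(first)    # first event: delta = at = 0.0
    link.add_netem_at(second)   # delta = 30.0 - 0.0 = 30.0, i.e. "sleep 30.0" before the second tc call

    link.add_netem_at(NetemAt(15.0, "loss 0%"))   # out of order: rejected and reported via logging.error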
 
-    def buildBwCmd(self, ifname):
-        cmd = ""
-        for n in self.netemAt:
-            cmd = cmd + "sleep {}".format(n.delta)
-            cmd = cmd + " && tc qdisc del dev {} root ".format(ifname)
-            cmd = cmd + " ; tc qdisc add dev {} root handle 5:0 tbf rate {}mbit burst 15000 limit {} &&".format(ifname, self.bandwidth, int(self.bufferSize()))
-
-        cmd = cmd + " true &"
-        return cmd
-
-    def buildNetemCmd(self, ifname):
-        cmd = ""
-        for n in self.netemAt:
-            cmd = cmd + "sleep " + str(n.delta)
-            cmd = cmd + " && tc qdisc del dev " + ifname + " root "
-            cmd = cmd + " ; tc qdisc add dev {} root handle 10: netem {} delay {}ms limit 50000 &&".format(ifname, n.cmd, self.delay)
-
-        cmd = cmd + " true &"
-        return cmd
-
-    def buildPolicingCmd(self, ifname):
-        cmd = ""
-        for n in self.netemAt:
-            cmd = cmd + "sleep {}".format(n.delta)
-            cmd = cmd + " && tc qdisc del dev {} ingress".format(ifname)
-            cmd = cmd + " ; tc qdisc add dev {} handle ffff: ingress".format(ifname)
-            cmd = cmd + " && tc filter add dev {} parent ffff: u32 match u32 0 0 police rate {}mbit burst {} drop && ".format(ifname, self.bandwidth, int(self.bufferSize() * 1.2))
-
-        cmd = cmd + " true &"
-        return cmd
+    def build_bandwidth_cmd(self, ifname):
+        return "&&".join(
+            ["sleep {} && tc qdisc del dev {} root ; tc qdisc add dev {} root handle 5:0 tbf rate {}mbit burst 15000 limit {} ".format(
+                n.delta, ifname, ifname, self.bandwidth, int(self.buffer_size())) for n in self.netem_at] + ["true &"]
+        )
+
+    def build_netem_cmd(self, ifname):
+        return "&&".join(
+            ["sleep {} && tc qdisc del dev {} root ; tc qdisc add dev {} root handle 10: netem {} delay {}ms limit 50000 ".format(
+                n.delta, ifname, ifname, n.cmd, self.delay) for n in self.netem_at] + ["true &"]
+        )
+
+    def build_policing_cmd(self, ifname):
+        return "&&".join(
+            ["sleep {} && tc qdisc del dev {} ingress ; tc qdisc add dev {} handle ffff: ingress && tc filter add dev {} parent ffff: u32 match u32 0 0 police rate {}mbit burst {} drop ".format(
+                n.delta, ifname, ifname, ifname, self.bandwidth, int(self.buffer_size() * 1.2)) for n in self.netem_at] + ["true &"]
+        )
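Each builder now returns a single shell one-liner instead of accumulating a string in a loop. For the illustrative link and events used above, build_netem_cmd on a hypothetical interface name would yield roughly the following (all on one line):

    cmd = link.build_netem_cmd("r0-eth1")   # "r0-eth1" is a made-up interface name
    # cmd is a single line along the lines of:
    #   sleep 0.0 && tc qdisc del dev r0-eth1 root ; tc qdisc add dev r0-eth1 root handle 10: netem loss 1% delay 20.0ms limit 50000
    #   &&sleep 30.0 && tc qdisc del dev r0-eth1 root ; tc qdisc add dev r0-eth1 root handle 10: netem loss 5% delay 20.0ms limit 50000
    #   &&true &
    # The trailing "true &" element keeps the "&&" chain valid and backgrounds the whole sequence.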
cmd = cmd + " true &"
|
def as_dict(self):
|
||||||
return cmd
|
return {
|
||||||
|
"bw": float(self.bandwidth),
|
||||||
def buildPolicingCmd(self, ifname):
|
"delay": "{}ms".format(self.delay),
|
||||||
cmd = ""
|
"loss": float(self.loss),
|
||||||
for n in self.netemAt:
|
"max_queue_size": int(self.queue_size)
|
||||||
cmd = cmd + "sleep {}".format(n.delta)
|
}
|
||||||
cmd = cmd + " && tc qdisc del dev {} ingress".format(ifname)
|
|
||||||
cmd = cmd + " ; tc qdisc add dev {} handle ffff: ingress".format(ifname)
|
|
||||||
cmd = cmd + " && tc filter add dev {} parent ffff: u32 match u32 0 0 police rate {}mbit burst {} drop && ".format(ifname, self.bandwidth, int(self.bufferSize() * 1.2))
|
|
||||||
|
|
||||||
cmd = cmd + " true &"
|
|
||||||
return cmd
|
|
||||||
|
|
||||||
def asDict(self):
|
|
||||||
d = {}
|
|
||||||
d['bw'] = float(self.bandwidth)
|
|
||||||
d['delay'] = self.delay + "ms"
|
|
||||||
d['loss'] = float(self.loss)
|
|
||||||
d['max_queue_size'] = int(self.queueSize)
|
|
||||||
return d
|
|
||||||
|
|
||||||
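The keys returned by as_dict are the keyword arguments understood by Mininet's TCLink/TCIntf, which is why the topology classes further down can splat the result straight into addLink:

    params = link.as_dict()   # e.g. {"bw": 10.0, "delay": "20ms", "loss": 0.0, "max_queue_size": 100}
    topo.addLink(left_switch, right_switch, **params)   # placeholder names; mirrors the calls below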
 
     def __str__(self):
-        s = "Link id : " + str(self.id) + "\n"
-        s = s + "\tDelay : " + str(self.delay) + "\n"
-        s = s + "\tQueue Size : " + str(self.queueSize) + "\n"
-        s = s + "\tBandwidth : " + str(self.bandwidth) + "\n"
-        s = s + "\tLoss : " + str(self.loss) + "\n"
-        s = s + "\tBack up : " + str(self.back_up) + "\n"
-        for l in self.netemAt:
-            s = s + "\t" + l.__str__() + "\n"
-        return s
+        return """
+        Link id: {}
+        Delay: {}
+        Queue Size: {}
+        Bandwidth: {}
+        Loss: {}
+        Backup: {}
+        """.format(self.id, self.delay, self.queue_size, self.bandwidth, self.loss, self.backup) + \
+            "".join(["\t {} \n".format(n) for n in self.netem_at])
 
 
 class TopoParameter(Parameter):
     LSUBNET = "leftSubnet"
     RSUBNET = "rightSubnet"
-    netemAt = "netemAt_"
+    netem_at = "netem_at_"
     changeNetem = "changeNetem"
     DEFAULT_PARAMETERS = {}
     DEFAULT_PARAMETERS[LSUBNET] = "10.1."
@@ -136,8 +130,8 @@ class TopoParameter(Parameter):
         if not self.get(TopoParameter.changeNetem) == "yes":
             return
         for k in sorted(self.parameters):
-            if k.startswith(TopoParameter.netemAt):
-                i = int(k[len(TopoParameter.netemAt):])
+            if k.startswith(TopoParameter.netem_at):
+                i = int(k[len(TopoParameter.netem_at):])
                 val = self.parameters[k]
                 if not isinstance(val, list):
                     tmp = val
@@ -151,12 +145,12 @@ class TopoParameter(Parameter):
             if len(tab)==2:
                 o = NetemAt(float(tab[0]), tab[1])
                 if id < len(self.linkCharacteristics):
-                    self.linkCharacteristics[id].addNetemAt(o)
+                    self.linkCharacteristics[id].add_netem_at(o)
                 else:
                     print("Error can't set netem for link " + str(id))
             else:
                 print("Netem wrong line : " + n)
-        print(self.linkCharacteristics[id].netemAt)
+        print(self.linkCharacteristics[id].netem_at)
 
     def loadLinkCharacteristics(self):
         i = 0
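For context, the keys matched here have the form netem_at_<link id>, and each value is split into an event time and a netem option string (the float(tab[0]) / tab[1] pair above). A hypothetical entry and what the loop turns it into, assuming a comma-separated value (the exact parameter-file syntax is outside this excerpt):

    # Hypothetical value carried by a "netem_at_0" key in the topo parameter file.
    tab = "30,loss 5%".split(",", 1)          # -> ["30", "loss 5%"]  (split shown here only for illustration)
    event = NetemAt(float(tab[0]), tab[1])    # NetemAt(30.0, "loss 5%")
    topo_param.linkCharacteristics[0].add_netem_at(event)   # topo_param: a TopoParameter instance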
@@ -334,24 +328,24 @@ class TopoConfig(object):
             print(str(lname) + " " + str(lif))
             print(str(rname) + " " + str(rif))
             print("boxes " + str(lbox) + " " + str(rbox))
-            cmd = l.buildBwCmd(lif)
+            cmd = l.build_bandwidth_cmd(lif)
             print(cmd)
             self.topo.command_to(lbox, cmd)
-            cmd = l.buildBwCmd(rif)
+            cmd = l.build_bandwidth_cmd(rif)
             print(cmd)
             self.topo.command_to(rbox, cmd)
             ilif = self.getMidL2RIncomingInterface(i)
             irif = self.getMidR2LIncomingInterface(i)
-            cmd = l.buildPolicingCmd(ilif)
+            cmd = l.build_policing_cmd(ilif)
             print(cmd)
             self.topo.command_to(lbox, cmd)
-            cmd = l.buildPolicingCmd(irif)
+            cmd = l.build_policing_cmd(irif)
             print(cmd)
             self.topo.command_to(rbox, cmd)
-            cmd = l.buildNetemCmd(irif)
+            cmd = l.build_netem_cmd(irif)
             print(cmd)
             self.topo.command_to(rbox, cmd)
-            cmd = l.buildNetemCmd(ilif)
+            cmd = l.build_netem_cmd(ilif)
             print(cmd)
             self.topo.command_to(lbox, cmd)
 
@@ -22,7 +22,7 @@ class ECMPSingleInterfaceTopo(Topo):
             self.routers.append(self.addOneRouterPerLink(l))
             print("added : " + self.routers[-1])
             self.addLink(self.lswitch, self.routers[-1])
-            self.addLink(self.rswitch, self.routers[-1], **l.asDict())
+            self.addLink(self.rswitch, self.routers[-1], **l.as_dict())
 
     def addOneRouterPerLink(self, link):
         return self.addHost(Topo.routerNamePrefix +
@@ -15,7 +15,7 @@ class MultiInterfaceTopo(Topo):
             self.switchClient.append(self.addSwitch1ForLink(l))
             self.addLink(self.client,self.switchClient[-1])
             self.switchServer.append(self.addSwitch2ForLink(l))
-            self.addLink(self.switchClient[-1], self.switchServer[-1], **l.asDict())
+            self.addLink(self.switchClient[-1], self.switchServer[-1], **l.as_dict())
             self.addLink(self.switchServer[-1],self.router)
         self.addLink(self.router, self.server)
 
@ -90,7 +90,7 @@ class MultiInterfaceConfig(TopoConfig):
|
|||||||
clientIntfMac = self.client.intf(self.get_client_interface(i)).MAC()
|
clientIntfMac = self.client.intf(self.get_client_interface(i)).MAC()
|
||||||
self.topo.command_to(self.router, "arp -s " + self.getClientIP(i) + " " + clientIntfMac)
|
self.topo.command_to(self.router, "arp -s " + self.getClientIP(i) + " " + clientIntfMac)
|
||||||
|
|
||||||
if(links[i].back_up):
|
if(links[i].backup):
|
||||||
cmd = self.interface_backup_command(
|
cmd = self.interface_backup_command(
|
||||||
self.get_client_interface(i))
|
self.get_client_interface(i))
|
||||||
self.topo.command_to(self.client, cmd)
|
self.topo.command_to(self.client, cmd)
|
||||||
|
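The renamed backup attribute feeds interface_backup_command, whose body is not part of this diff; presumably it marks the client interface as an MPTCP backup path. A hedged sketch of what such a helper typically issues on an out-of-tree MPTCP kernel (an assumption, not code from this repository):

    def interface_backup_command(ifname):
        # Assumed behaviour: flag the interface as a backup subflow path for MPTCP.
        return "ip link set dev {} multipath backup".format(ifname)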
@@ -21,7 +21,7 @@ class MultiInterfaceCongTopo(Topo):
             self.addLink(self.client,self.switch[-1])
             self.cong_clients.append(self.addHost(MultiInterfaceCongTopo.congClientName + str(len(self.cong_clients))))
             self.addLink(self.cong_clients[-1], self.switch[-1])
-            self.addLink(self.switch[-1],self.router, **l.asDict())
+            self.addLink(self.switch[-1],self.router, **l.as_dict())
         self.addLink(self.router, self.server)
         for i in range(len(self.cong_clients)):
             self.cong_servers.append(self.addHost(MultiInterfaceCongTopo.congServerName + str(len(self.cong_servers))))
@@ -143,7 +143,7 @@ class MultiInterfaceCongConfig(TopoConfig):
             clientIntfMac = self.client.intf(self.get_client_interface(i)).MAC()
             self.topo.command_to(self.router, "arp -s " + self.getClientIP(i) + " " + clientIntfMac)
 
-            if(links[i].back_up):
+            if(links[i].backup):
                 cmd = self.interface_backup_command(
                     self.get_client_interface(i))
                 self.topo.command_to(self.client, cmd)
@@ -25,7 +25,7 @@ class TwoInterfaceCongestionTopo(Topo):
         # Link between c1 and r2
         self.switch.append(self.addOneSwitchPerLink(self.topoParam.linkCharacteristics[0]))
         self.addLink(self.client, self.switch[-1])
-        self.addLink(self.switch[-1], self.router, **self.topoParam.linkCharacteristics[0].asDict())
+        self.addLink(self.switch[-1], self.router, **self.topoParam.linkCharacteristics[0].as_dict())
 
         # Link between c1 and r1
         self.addLink(self.client, self.routerCong)
@@ -33,12 +33,12 @@ class TwoInterfaceCongestionTopo(Topo):
         # Link between c2 and r1
         self.switch.append(self.addOneSwitchPerLink(self.topoParam.linkCharacteristics[2]))
         self.addLink(self.clientCong, self.switch[-1])
-        self.addLink(self.switch[-1], self.routerCong, **self.topoParam.linkCharacteristics[2].asDict())
+        self.addLink(self.switch[-1], self.routerCong, **self.topoParam.linkCharacteristics[2].as_dict())
 
         # Link between r1 and r2
         self.switch.append(self.addOneSwitchPerLink(self.topoParam.linkCharacteristics[1]))
         self.addLink(self.routerCong, self.switch[-1])
-        self.addLink(self.switch[-1], self.router, **self.topoParam.linkCharacteristics[1].asDict())
+        self.addLink(self.switch[-1], self.router, **self.topoParam.linkCharacteristics[1].as_dict())
 
         # Link between r2 and s1
         self.addLink(self.router, self.server)
@@ -150,7 +150,7 @@ class TwoInterfaceCongestionConfig(TopoConfig):
         # Link 0: Client - Router
        self.configureInterface(self.client, self.router, Topo.clientName + "-eth0", "10.0.0.1", netmask)
 
-        if(links[0].back_up):
+        if(links[0].backup):
            cmd = self.interface_backup_command(Topo.clientName + "-eth0")
            self.topo.command_to(self.client, cmd)
 
@@ -160,7 +160,7 @@ class TwoInterfaceCongestionConfig(TopoConfig):
        # Client - Router cong
        self.configureInterface(self.client, self.routerCong, Topo.clientName + "-eth1", "10.0.1.1", netmask)
 
-        if(links[1].back_up):
+        if(links[1].backup):
            cmd = self.interface_backup_command(Topo.clientName + "-eth1")
           self.topo.command_to(self.client, cmd)