class TopoONOS2 :

    def __init__(self) :
        self.default = ''

    #**************************************************************************************
    # Test startup
    # Tests the startup of Zookeeper1, RamCloud1, and ONOS1 to be certain that all started up successfully
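    # Startup sequence used below (summary of the code in this case): copy the reactive
    # onos.properties onto each node, stop all ONOS instances, start ZooKeeper on all four
    # nodes, stop the RAMCloud coordinator/servers and wipe their databases, pull (and if
    # needed compile) the latest code, then start the RAMCloud coordinator, the RAMCloud
    # servers, the four ONOS instances, and the REST server on ONOS1 before the health checks.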
    def CASE1(self,main) :  # Check to be sure ZK, RamCloud, and ONOS are up, then get the ONOS version
        import time
        main.ONOS1.handle.sendline("cp ~/onos.properties.reactive ~/ONOS/conf/onos.properties")
        main.ONOS2.handle.sendline("cp ~/onos.properties.reactive ~/ONOS/conf/onos.properties")
        main.ONOS3.handle.sendline("cp ~/onos.properties.reactive ~/ONOS/conf/onos.properties")
        main.ONOS4.handle.sendline("cp ~/onos.properties.reactive ~/ONOS/conf/onos.properties")

        main.ONOS1.stop_all()
        main.ONOS2.stop_all()
        main.ONOS3.stop_all()
        main.ONOS4.stop_all()
        main.Zookeeper1.start()
        main.Zookeeper2.start()
        main.Zookeeper3.start()
        main.Zookeeper4.start()
        main.RamCloud1.stop_coor()
        main.RamCloud1.stop_serv()
        main.RamCloud2.stop_serv()
        main.RamCloud3.stop_serv()
        main.RamCloud4.stop_serv()
        time.sleep(10)
        main.RamCloud1.del_db()
        main.RamCloud2.del_db()
        main.RamCloud3.del_db()
        main.RamCloud4.del_db()
        time.sleep(10)
        main.log.report("Pulling latest code from github to all nodes")
        for i in range(2):
            uptodate = main.ONOS1.git_pull()
            main.ONOS2.git_pull()
            main.ONOS3.git_pull()
            main.ONOS4.git_pull()
            ver1 = main.ONOS1.get_version()
            ver2 = main.ONOS4.get_version()
            if ver1 == ver2:
                break
            elif i == 1:
                main.ONOS2.git_pull("ONOS1 master")
                main.ONOS3.git_pull("ONOS1 master")
                main.ONOS4.git_pull("ONOS1 master")
        if uptodate == 0:
            main.ONOS1.git_compile()
            main.ONOS2.git_compile()
            main.ONOS3.git_compile()
            main.ONOS4.git_compile()
        main.ONOS1.print_version()
        # main.RamCloud1.git_pull()
        # main.RamCloud2.git_pull()
        # main.RamCloud3.git_pull()
        # main.RamCloud4.git_pull()
        # main.ONOS1.get_version()
        # main.ONOS2.get_version()
        # main.ONOS3.get_version()
        # main.ONOS4.get_version()
        main.RamCloud1.start_coor()
        time.sleep(1)
        main.RamCloud1.start_serv()
        main.RamCloud2.start_serv()
        main.RamCloud3.start_serv()
        main.RamCloud4.start_serv()
        main.ONOS1.start("env JVM_OPTS=\"-Xmx2g -Xms2g -Xmn800m\" ")
        time.sleep(5)
        main.ONOS2.start("env JVM_OPTS=\"-Xmx2g -Xms2g -Xmn800m\" ")
        main.ONOS3.start("env JVM_OPTS=\"-Xmx2g -Xms2g -Xmn800m\" ")
        main.ONOS4.start("env JVM_OPTS=\"-Xmx2g -Xms2g -Xmn800m\" ")
        main.ONOS1.start_rest()
        time.sleep(10)
        test = main.ONOS1.rest_status()
        if test == main.FALSE:
            main.ONOS1.start_rest()
        main.ONOS1.get_version()
        main.log.report("Startup check Zookeeper1, RamCloud1, and ONOS1 connections")
        main.case("Checking if the startup was clean...")
        main.step("Testing startup Zookeeper")
        data = main.Zookeeper1.isup()
        utilities.assert_equals(expect=main.TRUE,actual=data,onpass="Zookeeper is up!",onfail="Zookeeper is down...")
        main.step("Testing startup RamCloud")
        data = main.RamCloud1.status_serv()
        if data == main.FALSE:
            main.RamCloud1.stop_coor()
            main.RamCloud1.stop_serv()
            main.RamCloud2.stop_serv()
            main.RamCloud3.stop_serv()
            main.RamCloud4.stop_serv()

            time.sleep(5)
            main.RamCloud1.start_coor()
            main.RamCloud1.start_serv()
            main.RamCloud2.start_serv()
            main.RamCloud3.start_serv()
            main.RamCloud4.start_serv()
        utilities.assert_equals(expect=main.TRUE,actual=data,onpass="RamCloud is up!",onfail="RamCloud is down...")
        main.step("Testing startup ONOS")
        data = main.ONOS1.isup()
        for i in range(3):
            if data == main.FALSE:
                # main.log.report("Something is funny... restarting ONOS")
                # main.ONOS1.stop()
                time.sleep(3)
                # main.ONOS1.start()
                # time.sleep(5)
                data = main.ONOS1.isup()
            else:
                break
        utilities.assert_equals(expect=main.TRUE,actual=data,onpass="ONOS is up and running!",onfail="ONOS didn't start...")
        time.sleep(10)


    #**************************************************************************************
    # Assign controllers
    # This case first checks the IP address of a Mininet host to be certain that Mininet is
    # running (the host is defined in params as <CASE1><destination>).
    # Each switch is then assigned a single ONOS instance as its initial master, and after
    # that all four controllers are assigned to it.
    # NOTE: All four controllers are assigned even though one was already set as the master
    # because 'ovs-vsctl set-controller' erases any controllers already assigned to a switch
    # if they are not included in the command.
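    # For reference, a rough sketch of the underlying Open vSwitch commands this corresponds
    # to (illustrative only; the exact invocation is handled inside assign_sw_controller):
    #   ovs-vsctl set-controller s1 tcp:<ip1>:<port1>
    #   ovs-vsctl set-controller s1 tcp:<ip1>:<port1> tcp:<ip2>:<port2> tcp:<ip3>:<port3> tcp:<ip4>:<port4>
    # The second call must list every controller because set-controller replaces the switch's
    # entire controller list rather than appending to it.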

    def CASE2(self,main) :  # Make sure mininet exists, then assign controllers to switches
        import time
        main.log.report("Check if mininet started properly, then assign controllers ONOS 1,2,3 and 4")
        main.case("Checking if one MN host exists")
        main.step("Host IP Checking using checkIP")
        result = main.Mininet1.checkIP(main.params['CASE1']['destination'])
        main.step("Verifying the result")
        utilities.assert_equals(expect=main.TRUE,actual=result,onpass="Host IP address configured",onfail="Host IP address not configured")
        main.step("assigning ONOS controllers to switches")
        for i in range(25):
            if i < 3:
                j = i + 1
                main.Mininet1.assign_sw_controller(sw=str(j),ip1=main.params['CTRL']['ip1'],port1=main.params['CTRL']['port1'])
                time.sleep(1)
                main.Mininet1.assign_sw_controller(sw=str(j),count=4,ip1=main.params['CTRL']['ip1'],port1=main.params['CTRL']['port1'],ip2=main.params['CTRL']['ip2'],port2=main.params['CTRL']['port2'],ip3=main.params['CTRL']['ip3'],port3=main.params['CTRL']['port3'],ip4=main.params['CTRL']['ip4'],port4=main.params['CTRL']['port4'])
            elif i >= 3 and i < 5:
                j = i + 1
                main.Mininet1.assign_sw_controller(sw=str(j),ip1=main.params['CTRL']['ip2'],port1=main.params['CTRL']['port2'])
                time.sleep(1)
                main.Mininet1.assign_sw_controller(sw=str(j),count=4,ip1=main.params['CTRL']['ip1'],port1=main.params['CTRL']['port1'],ip2=main.params['CTRL']['ip2'],port2=main.params['CTRL']['port2'],ip3=main.params['CTRL']['ip3'],port3=main.params['CTRL']['port3'],ip4=main.params['CTRL']['ip4'],port4=main.params['CTRL']['port4'])
            elif i >= 5 and i < 15:
                j = i + 1
                main.Mininet1.assign_sw_controller(sw=str(j),ip1=main.params['CTRL']['ip3'],port1=main.params['CTRL']['port3'])
                time.sleep(1)
                main.Mininet1.assign_sw_controller(sw=str(j),count=4,ip1=main.params['CTRL']['ip1'],port1=main.params['CTRL']['port1'],ip2=main.params['CTRL']['ip2'],port2=main.params['CTRL']['port2'],ip3=main.params['CTRL']['ip3'],port3=main.params['CTRL']['port3'],ip4=main.params['CTRL']['ip4'],port4=main.params['CTRL']['port4'])
            else:
                j = i + 16
                main.Mininet1.assign_sw_controller(sw=str(j),ip1=main.params['CTRL']['ip4'],port1=main.params['CTRL']['port4'])
                time.sleep(1)
                main.Mininet1.assign_sw_controller(sw=str(j),count=4,ip1=main.params['CTRL']['ip1'],port1=main.params['CTRL']['port1'],ip2=main.params['CTRL']['ip2'],port2=main.params['CTRL']['port2'],ip3=main.params['CTRL']['ip3'],port3=main.params['CTRL']['port3'],ip4=main.params['CTRL']['ip4'],port4=main.params['CTRL']['port4'])
        result = main.Mininet1.get_sw_controller("s1")
        if result:
            result = main.TRUE
        utilities.assert_equals(expect=main.TRUE,actual=result,onpass="S1 assigned to controller",onfail="S1 not assigned to controller")

        for i in range(9):
            if result == main.FALSE:
                time.sleep(3)
                result = main.ONOS1.check_status_report(main.params['RestIP'],main.params['NR_Switches'],main.params['NR_Links'])
            else:
                main.ONOS2.check_status_report(main.params['RestIP'],main.params['NR_Switches'],main.params['NR_Links'])
                main.ONOS3.check_status_report(main.params['RestIP'],main.params['NR_Switches'],main.params['NR_Links'])
                main.ONOS4.check_status_report(main.params['RestIP'],main.params['NR_Switches'],main.params['NR_Links'])
                break

    #**************************************************************************************
    # Add flows
    # Deletes any remnant flows from previous tests, adds flows from the file labeled <FLOWDEF>, then runs the check-flow test.
    # NOTE: The FLOWDEF file must be present on the TestON VM. TestON copies the file from its home machine into /tmp/flowtmp on the machine hosting the ONOS instance.
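    # Sequence used below (summary): wait for the topology to be fully discovered, remove and
    # purge any intents left over from earlier runs, push the new intents, then ping the host
    # pairs h6-h31 through h15-h40 until the resulting flows are installed or the retry budget
    # is exhausted.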

    def CASE3(self,main) :  # Delete any remnant flows, then add flows, and time how long it takes flow tables to update
        main.log.report("Delete any flows from previous tests, then add flows using intents and wait for switch flow tables to update")
        import time

        result = main.ONOS1.check_status_report(main.params['RestIP'],main.params['NR_Switches'],main.params['NR_Links'])
        for counter in range(9):
            if result == main.FALSE:
                time.sleep(3)
                result = main.ONOS1.check_status_report(main.params['RestIP'],main.params['NR_Switches'],main.params['NR_Links'])
            else:
                main.ONOS2.check_status_report(main.params['RestIP'],main.params['NR_Switches'],main.params['NR_Links'])
                main.ONOS3.check_status_report(main.params['RestIP'],main.params['NR_Switches'],main.params['NR_Links'])
                main.ONOS4.check_status_report(main.params['RestIP'],main.params['NR_Switches'],main.params['NR_Links'])
                break
        utilities.assert_equals(expect=main.TRUE,actual=result,onpass="Topology check pass",onfail="Topology check FAIL")

        main.case("Taking care of these flows!")
        main.step("Cleaning out any leftover flows...")
        # main.ONOS1.delete_flow("all")
        main.ONOS1.rm_intents()
        time.sleep(5)
        main.ONOS1.purge_intents()
        strtTime = time.time()
        main.ONOS1.add_intents()
        main.case("Checking flows with pings")

        pingAttempts = main.params['pingAttempts']
        pingSleep = main.params['pingSleep']

        count = 1
        i = 6
        while i < 16 :
            main.log.info("\n\t\t\t\th"+str(i)+" IS PINGING h"+str(i+25) )
            ping = main.Mininet1.pingHost(src="h"+str(i),target="h"+str(i+25))
            if ping == main.FALSE and count < int(pingAttempts):
                count = count + 1
                # i = 6
                main.log.report("Ping failed, making attempt number "+str(count)+" in "+str(pingSleep)+" seconds")
                time.sleep(int(pingSleep))
            elif ping == main.FALSE and count == int(pingAttempts):
                main.log.error("Ping test failed")
                i = 17
                result2 = main.FALSE
            elif ping == main.TRUE:
                i = i + 1
                result2 = main.TRUE
        endTime = time.time()
        result = result and result2
        if result == main.TRUE:
            main.log.report("\n\t\t\t\tTime from pushing intents to successful ping test: "+str(round(endTime-strtTime,2))+" seconds")
        else:
            main.log.report("\tFlows failed check")

        main.step("Verifying the result")
        utilities.assert_equals(expect=main.TRUE,actual=result2,onpass="NO PACKET LOSS, HOST IS REACHABLE",onfail="PACKET LOST, HOST IS NOT REACHABLE")
        utilities.assert_equals(expect=main.TRUE,actual=result,onpass="Testcase passed",onfail="Testcase failed")

    #**************************************************************************************
    # This case assigns all switches to a single, randomly chosen ONOS instance and then runs a ping test.
    # assign_sw_controller is used because the ovs-vsctl module deletes all current controllers when a new controller is assigned.
    # The ping test runs single pings between hosts on opposite sides of the topology. If a ping fails, the test waits
    # (pingSleep seconds) before trying again; if every attempt (pingAttempts, both taken from params) fails, the case returns false.
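    # A minimal helper sketching the retry loop described above (not called by the cases
    # below, which inline the same logic; the name and signature are illustrative only):
    def _pingUntilReachable(self, main, src, dst, attempts, sleep):
        import time
        for _ in range(int(attempts)):
            # One ping per attempt; stop as soon as the destination is reachable
            if main.Mininet1.pingHost(src=src, target=dst) == main.TRUE:
                return main.TRUE
            main.log.report("Ping from " + src + " to " + dst + " failed, retrying in " + str(sleep) + " seconds")
            time.sleep(int(sleep))
        return main.FALSE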

    def CASE4(self,main) :
        main.log.report("Assign all switches to just one ONOS instance then ping until all hosts are reachable or fail after 6 attempts")
        import time
        import random

        random.seed(None)

        num = random.randint(1,4)
        if num == 1:
            ip = main.params['CTRL']['ip1']
            port = main.params['CTRL']['port1']
        elif num == 2:
            ip = main.params['CTRL']['ip2']
            port = main.params['CTRL']['port2']
        elif num == 3:
            ip = main.params['CTRL']['ip3']
            port = main.params['CTRL']['port3']
        else:
            ip = main.params['CTRL']['ip4']
            port = main.params['CTRL']['port4']

        main.log.report("ONOS"+str(num)+" will be the sole controller")
        for i in range(25):
            if i < 15:
                j = i + 1
                main.Mininet1.assign_sw_controller(sw=str(j),ip1=ip,port1=port)  # Assigning a single controller removes all other controllers
            else:
                j = i + 16
                main.Mininet1.assign_sw_controller(sw=str(j),ip1=ip,port1=port)

        strtTime = time.time()
        result = main.ONOS1.check_status_report(main.params['RestIP'],main.params['NR_Switches'],main.params['NR_Links'])
        for counter in range(9):
            if result == main.FALSE:
                time.sleep(3)
                result = main.ONOS1.check_status_report(main.params['RestIP'],main.params['NR_Switches'],main.params['NR_Links'])
            else:
                main.ONOS2.check_status_report(main.params['RestIP'],main.params['NR_Switches'],main.params['NR_Links'])
                main.ONOS3.check_status_report(main.params['RestIP'],main.params['NR_Switches'],main.params['NR_Links'])
                main.ONOS4.check_status_report(main.params['RestIP'],main.params['NR_Switches'],main.params['NR_Links'])
                break
        utilities.assert_equals(expect=main.TRUE,actual=result,onpass="Topology check pass",onfail="Topology check FAIL")

        pingAttempts = main.params['pingAttempts']
        pingSleep = main.params['pingSleep']

        count = 1
        i = 6
        while i < 16 :
            main.log.info("\n\t\t\t\th"+str(i)+" IS PINGING h"+str(i+25) )
            ping = main.Mininet1.pingHost(src="h"+str(i),target="h"+str(i+25))
            if ping == main.FALSE and count < int(pingAttempts):
                count = count + 1
                # i = 6
                main.log.report("Ping failed, making attempt number "+str(count)+" in "+str(pingSleep)+" seconds")
                time.sleep(int(pingSleep))
            elif ping == main.FALSE and count == int(pingAttempts):
                main.log.error("Ping test failed")
                i = 17
                result2 = main.FALSE
            elif ping == main.TRUE:
                i = i + 1
                result2 = main.TRUE
        endTime = time.time()
        result = result and result2
        if result == main.TRUE:
            main.log.report("\tTime to complete ping test: "+str(round(endTime-strtTime,2))+" seconds")
        else:
            main.log.report("\tPING TEST FAIL")
            main.ONOS1.show_intent(main.params['RestIP'])
        utilities.assert_equals(expect=main.TRUE,actual=result2,onpass="NO PACKET LOSS, HOST IS REACHABLE",onfail="PACKET LOST, HOST IS NOT REACHABLE")
        utilities.assert_equals(expect=main.TRUE,actual=result,onpass="Testcase passed",onfail="Testcase failed")

    #**************************************************************************************
    # This test case restores the controllers removed by Case 4 then performs a ping test.

    def CASE5(self,main) :
        main.log.report("Restore switch assignments to all 4 ONOS instances then ping until all hosts are reachable or fail after 6 attempts")
        import time

        # Wait as a workaround for a known bug where a topology change right after a switch mastership change causes intents not to reroute
        time.sleep(10)

        for i in range(25):
            if i < 15:
                j = i + 1
                main.Mininet1.assign_sw_controller(sw=str(j),count=4,ip1=main.params['CTRL']['ip1'],port1=main.params['CTRL']['port1'],ip2=main.params['CTRL']['ip2'],port2=main.params['CTRL']['port2'],ip3=main.params['CTRL']['ip3'],port3=main.params['CTRL']['port3'],ip4=main.params['CTRL']['ip4'],port4=main.params['CTRL']['port4'])
            else:
                j = i + 16
                main.Mininet1.assign_sw_controller(sw=str(j),count=4,ip1=main.params['CTRL']['ip1'],port1=main.params['CTRL']['port1'],ip2=main.params['CTRL']['ip2'],port2=main.params['CTRL']['port2'],ip3=main.params['CTRL']['ip3'],port3=main.params['CTRL']['port3'],ip4=main.params['CTRL']['ip4'],port4=main.params['CTRL']['port4'])

        strtTime = time.time()
        result = main.ONOS1.check_status_report(main.params['RestIP'],main.params['NR_Switches'],main.params['NR_Links'])
        for counter in range(9):
            if result == main.FALSE:
                time.sleep(3)
                result = main.ONOS1.check_status_report(main.params['RestIP'],main.params['NR_Switches'],main.params['NR_Links'])
            else:
                main.ONOS2.check_status_report(main.params['RestIP'],main.params['NR_Switches'],main.params['NR_Links'])
                main.ONOS3.check_status_report(main.params['RestIP'],main.params['NR_Switches'],main.params['NR_Links'])
                main.ONOS4.check_status_report(main.params['RestIP'],main.params['NR_Switches'],main.params['NR_Links'])
                break
        utilities.assert_equals(expect=main.TRUE,actual=result,onpass="Topology check pass",onfail="Topology check FAIL")

        pingAttempts = main.params['pingAttempts']
        pingSleep = main.params['pingSleep']

        count = 1
        i = 6
        while i < 16 :
            main.log.info("\n\t\t\t\th"+str(i)+" IS PINGING h"+str(i+25) )
            ping = main.Mininet1.pingHost(src="h"+str(i),target="h"+str(i+25))
            if ping == main.FALSE and count < int(pingAttempts):
                count = count + 1
                # i = 6
                main.log.report("Ping failed, making attempt number "+str(count)+" in "+str(pingSleep)+" seconds")
                time.sleep(int(pingSleep))
            elif ping == main.FALSE and count == int(pingAttempts):
                main.log.error("Ping test failed")
                i = 17
                result2 = main.FALSE
            elif ping == main.TRUE:
                i = i + 1
                result2 = main.TRUE
        endTime = time.time()
        result = result and result2
        if result == main.TRUE:
            main.log.report("\tTime to complete ping test: "+str(round(endTime-strtTime,2))+" seconds")
        else:
            main.log.report("\tPING TEST FAILED")
            main.ONOS1.show_intent(main.params['RestIP'])
        utilities.assert_equals(expect=main.TRUE,actual=result2,onpass="NO PACKET LOSS, HOST IS REACHABLE",onfail="PACKET LOST, HOST IS NOT REACHABLE")
        utilities.assert_equals(expect=main.TRUE,actual=result,onpass="Testcase passed",onfail="Testcase failed")

    #**************************************************************************************
    # Brings down a link in Mininet that all flows pass through, then runs a ping test to measure reroute time
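    # Note: ONOS reports each link as two unidirectional links, so with the s1-s2 link down
    # the topology checks below expect NR_Links - 2 links.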

    def CASE6(self,main) :
        main.log.report("Bring Link between s1 and s2 down, then ping until all hosts are reachable or fail after 10 attempts")
        import time

        # Wait as a workaround for a known bug where a topology change right after a switch mastership change causes intents not to reroute
        time.sleep(10)

        main.case("Bringing Link down... ")
        result = main.Mininet1.link(END1=main.params['LINK']['begin'],END2=main.params['LINK']['end'],OPTION="down")
        utilities.assert_equals(expect=main.TRUE,actual=result,onpass="Link DOWN!",onfail="Link not brought down...")

        strtTime = time.time()
        result1 = main.ONOS1.check_status_report(main.params['RestIP'],main.params['NR_Switches'],str(int(main.params['NR_Links'])-2))
        for counter in range(9):
            if result1 == main.FALSE:
                time.sleep(3)
                result1 = main.ONOS1.check_status_report(main.params['RestIP'],main.params['NR_Switches'],str(int(main.params['NR_Links'])-2))
            else:
                main.ONOS2.check_status_report(main.params['RestIP'],main.params['NR_Switches'],str(int(main.params['NR_Links'])-2))
                main.ONOS3.check_status_report(main.params['RestIP'],main.params['NR_Switches'],str(int(main.params['NR_Links'])-2))
                main.ONOS4.check_status_report(main.params['RestIP'],main.params['NR_Switches'],str(int(main.params['NR_Links'])-2))
                break
        utilities.assert_equals(expect=main.TRUE,actual=result1,onpass="Topology check pass",onfail="Topology check FAIL")

        pingAttempts = main.params['pingAttempts']
        pingSleep = main.params['pingSleep']

        count = 1
        i = 6
        while i < 16 :
            main.log.info("\n\t\t\t\th"+str(i)+" IS PINGING h"+str(i+25) )
            ping = main.Mininet1.pingHost(src="h"+str(i),target="h"+str(i+25))
            if ping == main.FALSE and count < int(pingAttempts):
                count = count + 1
                main.log.report("Ping failed, making attempt number "+str(count)+" in "+str(pingSleep)+" seconds")
                # i = 6
                time.sleep(int(pingSleep))
            elif ping == main.FALSE and count == int(pingAttempts):
                main.log.error("Ping test failed")
                i = 17
                result2 = main.FALSE
            elif ping == main.TRUE:
                i = i + 1
                result2 = main.TRUE
        endTime = time.time()
        result = result and result2 and result1
        if result == main.TRUE:
            main.log.report("\tTime to complete ping test: "+str(round(endTime-strtTime,2))+" seconds")
        else:
            main.log.report("\tPING TEST FAILED")
            main.ONOS1.show_intent(main.params['RestIP'])
        utilities.assert_equals(expect=main.TRUE,actual=result2,onpass="NO PACKET LOSS, HOST IS REACHABLE",onfail="PACKET LOST, HOST IS NOT REACHABLE")
        utilities.assert_equals(expect=main.TRUE,actual=result,onpass="Testcase passed",onfail="Testcase failed")

    #**************************************************************************************
    # Brings the link that Case 6 took down back up, then runs a ping test to measure reroute time

    def CASE7(self,main) :
        main.log.report("Bring Link between s1 and s2 up, then ping until all hosts are reachable or fail after 10 attempts")
        import time
        main.case("Bringing Link up... ")

        # Wait as a workaround for a known bug where a topology change right after a switch mastership change causes intents not to reroute
        time.sleep(10)

        result = main.Mininet1.link(END1=main.params['LINK']['begin'],END2=main.params['LINK']['end'],OPTION="up")
        utilities.assert_equals(expect=main.TRUE,actual=result,onpass="Link UP!",onfail="Link not brought up...")

        strtTime = time.time()
        result1 = main.ONOS1.check_status_report(main.params['RestIP'],main.params['NR_Switches'],main.params['NR_Links'])
        for counter in range(9):
            if result1 == main.FALSE:
                time.sleep(3)
                result1 = main.ONOS1.check_status_report(main.params['RestIP'],main.params['NR_Switches'],main.params['NR_Links'])
            else:
                main.ONOS2.check_status_report(main.params['RestIP'],main.params['NR_Switches'],main.params['NR_Links'])
                main.ONOS3.check_status_report(main.params['RestIP'],main.params['NR_Switches'],main.params['NR_Links'])
                main.ONOS4.check_status_report(main.params['RestIP'],main.params['NR_Switches'],main.params['NR_Links'])
                break
        utilities.assert_equals(expect=main.TRUE,actual=result1,onpass="Topology check pass",onfail="Topology check FAIL")

        pingAttempts = main.params['pingAttempts']
        pingSleep = main.params['pingSleep']

        strtTime = time.time()
        count = 1
        i = 6
        while i < 16 :
            main.log.info("\n\t\t\t\th"+str(i)+" IS PINGING h"+str(i+25) )
            ping = main.Mininet1.pingHost(src="h"+str(i),target="h"+str(i+25))
            if ping == main.FALSE and count < int(pingAttempts):
                count = count + 1
                main.log.report("Ping failed, making attempt number "+str(count)+" in " +str(pingSleep)+" seconds")
                # i = 6
                time.sleep(int(pingSleep))
            elif ping == main.FALSE and count == int(pingAttempts):
                main.log.error("Ping test failed")
                i = 17
                result2 = main.FALSE
            elif ping == main.TRUE:
                i = i + 1
                result2 = main.TRUE
        endTime = time.time()
        result = result and result2 and result1
        if result == main.TRUE:
            main.log.report("\tTime to complete ping test: "+str(round(endTime-strtTime,2))+" seconds")
        else:
            main.log.report("\tPING TESTS FAILED")
            main.ONOS1.show_intent(main.params['RestIP'])

        print main.ONOS1.check_exceptions()
        print main.ONOS2.check_exceptions()
        print main.ONOS3.check_exceptions()
        print main.ONOS4.check_exceptions()

        utilities.assert_equals(expect=main.TRUE,actual=result2,onpass="NO PACKET LOSS, HOST IS REACHABLE",onfail="PACKET LOST, HOST IS NOT REACHABLE")
        utilities.assert_equals(expect=main.TRUE,actual=result,onpass="Testcase passed",onfail="Testcase failed")


    #**************************************************************************************
    # Runs reactive ping test
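    # In the reactive cases the hosts are paired as h<i> -> h<46-i> (h6->h40 ... h15->h31),
    # unlike the intent-based cases above which ping h<i> -> h<i+25>; forwarding paths are
    # expected to be set up reactively by the first packets rather than by pre-installed intents.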
    def CASE8(self,main) :
        main.log.report("Reactive flow ping test: ping until the routes are active or fail after 10 attempts")
        import time

        strtTime = time.time()
        result = main.ONOS1.check_status_report(main.params['RestIP'],main.params['NR_Switches'],main.params['NR_Links'])
        for counter in range(9):
            if result == main.FALSE:
                time.sleep(3)
                result = main.ONOS1.check_status_report(main.params['RestIP'],main.params['NR_Switches'],main.params['NR_Links'])
            else:
                main.ONOS2.check_status_report(main.params['RestIP'],main.params['NR_Switches'],main.params['NR_Links'])
                main.ONOS3.check_status_report(main.params['RestIP'],main.params['NR_Switches'],main.params['NR_Links'])
                main.ONOS4.check_status_report(main.params['RestIP'],main.params['NR_Switches'],main.params['NR_Links'])
                break
        utilities.assert_equals(expect=main.TRUE,actual=result,onpass="Topology check pass",onfail="Topology check FAIL")

        pingAttempts = main.params['pingAttempts']
        pingSleep = main.params['pingSleep']

        strtTime = time.time()
        count = 1
        i = 6
        while i < 16 :
            main.log.info("\n\t\t\t\th"+str(i)+" IS PINGING h"+str(46-i) )
            ping = main.Mininet1.pingHost(src="h"+str(i),target="h"+str(46-i))
            if ping == main.FALSE and count < int(pingAttempts):
                count = count + 1
                main.log.report("Ping failed, making attempt number "+str(count)+" in " +str(pingSleep)+" seconds")
                # i = 6
                time.sleep(int(pingSleep))
            elif ping == main.FALSE and count == int(pingAttempts):
                main.log.error("Ping test failed")
                i = 17
                result2 = main.FALSE
            elif ping == main.TRUE:
                i = i + 1
                result2 = main.TRUE
        endTime = time.time()
        result = result and result2
        if result == main.TRUE:
            main.log.report("\tTime to complete ping test: "+str(round(endTime-strtTime,2))+" seconds")
        else:
            main.log.report("\tPING TESTS FAILED")
            main.ONOS1.show_intent(main.params['RestIP'])

        print main.ONOS1.check_exceptions()
        print main.ONOS2.check_exceptions()
        print main.ONOS3.check_exceptions()
        print main.ONOS4.check_exceptions()

        utilities.assert_equals(expect=main.TRUE,actual=result2,onpass="NO PACKET LOSS, HOST IS REACHABLE",onfail="PACKET LOST, HOST IS NOT REACHABLE")
        utilities.assert_equals(expect=main.TRUE,actual=result,onpass="Testcase passed",onfail="Testcase failed")

    #**************************************************************************************
    # Brings down a link in Mininet that all flows pass through, then runs a ping test to measure reroute time.
    # This is the same as Case 6, but specifically for the reactive tests.

    def CASE61(self,main) :
        main.log.report("Bring Link between s1 and s2 down, then ping until all hosts are reachable or fail after 10 attempts")
        import time

        # Wait as a workaround for a known bug where a topology change right after a switch mastership change causes intents not to reroute
        time.sleep(10)

        main.case("Bringing Link down... ")
        result = main.Mininet1.link(END1=main.params['LINK']['begin'],END2=main.params['LINK']['end'],OPTION="down")
        utilities.assert_equals(expect=main.TRUE,actual=result,onpass="Link DOWN!",onfail="Link not brought down...")

        strtTime = time.time()
        result1 = main.ONOS1.check_status_report(main.params['RestIP'],main.params['NR_Switches'],str(int(main.params['NR_Links'])-2))
        for counter in range(9):
            if result1 == main.FALSE:
                time.sleep(3)
                result1 = main.ONOS1.check_status_report(main.params['RestIP'],main.params['NR_Switches'],str(int(main.params['NR_Links'])-2))
            else:
                main.ONOS2.check_status_report(main.params['RestIP'],main.params['NR_Switches'],str(int(main.params['NR_Links'])-2))
                main.ONOS3.check_status_report(main.params['RestIP'],main.params['NR_Switches'],str(int(main.params['NR_Links'])-2))
                main.ONOS4.check_status_report(main.params['RestIP'],main.params['NR_Switches'],str(int(main.params['NR_Links'])-2))
                break
        utilities.assert_equals(expect=main.TRUE,actual=result1,onpass="Topology check pass",onfail="Topology check FAIL")

        pingAttempts = main.params['pingAttempts']
        pingSleep = main.params['pingSleep']

        count = 1
        i = 6
        while i < 16 :
            main.log.info("\n\t\t\t\th"+str(i)+" IS PINGING h"+str(46-i) )
            ping = main.Mininet1.pingHost(src="h"+str(i),target="h"+str(46-i))
            if ping == main.FALSE and count < int(pingAttempts):
                count = count + 1
                main.log.report("Ping failed, making attempt number "+str(count)+" in "+str(pingSleep)+" seconds")
                # i = 6
                time.sleep(int(pingSleep))
            elif ping == main.FALSE and count == int(pingAttempts):
                main.log.error("Ping test failed")
                i = 17
                result2 = main.FALSE
            elif ping == main.TRUE:
                i = i + 1
                result2 = main.TRUE
        endTime = time.time()
        result = result and result2 and result1
        if result == main.TRUE:
            main.log.report("\tTime to complete ping test: "+str(round(endTime-strtTime,2))+" seconds")
        else:
            main.log.report("\tPING TEST FAILED")
            main.ONOS1.show_intent(main.params['RestIP'])
        utilities.assert_equals(expect=main.TRUE,actual=result2,onpass="NO PACKET LOSS, HOST IS REACHABLE",onfail="PACKET LOST, HOST IS NOT REACHABLE")
        utilities.assert_equals(expect=main.TRUE,actual=result,onpass="Testcase passed",onfail="Testcase failed")


    #**************************************************************************************
    # Brings the link that Case 61 took down back up, then runs a ping test to measure reroute time.
    # Specifically for the reactive tests.

    def CASE71(self,main) :
        main.log.report("Bring Link between s1 and s2 up, then ping until all hosts are reachable or fail after 10 attempts")
        import time
        main.case("Bringing Link up... ")

        # Wait as a workaround for a known bug where a topology change right after a switch mastership change causes intents not to reroute
        time.sleep(10)

        result = main.Mininet1.link(END1=main.params['LINK']['begin'],END2=main.params['LINK']['end'],OPTION="up")
        utilities.assert_equals(expect=main.TRUE,actual=result,onpass="Link UP!",onfail="Link not brought up...")

        strtTime = time.time()
        result1 = main.ONOS1.check_status_report(main.params['RestIP'],main.params['NR_Switches'],main.params['NR_Links'])
        for counter in range(9):
            if result1 == main.FALSE:
                time.sleep(3)
                result1 = main.ONOS1.check_status_report(main.params['RestIP'],main.params['NR_Switches'],main.params['NR_Links'])
            else:
                main.ONOS2.check_status_report(main.params['RestIP'],main.params['NR_Switches'],main.params['NR_Links'])
                main.ONOS3.check_status_report(main.params['RestIP'],main.params['NR_Switches'],main.params['NR_Links'])
                main.ONOS4.check_status_report(main.params['RestIP'],main.params['NR_Switches'],main.params['NR_Links'])
                break
        utilities.assert_equals(expect=main.TRUE,actual=result1,onpass="Topology check pass",onfail="Topology check FAIL")

        pingAttempts = main.params['pingAttempts']
        pingSleep = main.params['pingSleep']

        strtTime = time.time()
        count = 1
        i = 6
        while i < 16 :
            main.log.info("\n\t\t\t\th"+str(i)+" IS PINGING h"+str(46-i) )
            ping = main.Mininet1.pingHost(src="h"+str(i),target="h"+str(46-i))
            if ping == main.FALSE and count < int(pingAttempts):
                count = count + 1
                main.log.report("Ping failed, making attempt number "+str(count)+" in " +str(pingSleep)+" seconds")
                # i = 6
                time.sleep(int(pingSleep))
            elif ping == main.FALSE and count == int(pingAttempts):
                main.log.error("Ping test failed")
                i = 17
                result2 = main.FALSE
            elif ping == main.TRUE:
                i = i + 1
                result2 = main.TRUE
        endTime = time.time()
        result = result and result2 and result1
        if result == main.TRUE:
            main.log.report("\tTime to complete ping test: "+str(round(endTime-strtTime,2))+" seconds")
        else:
            main.log.report("\tPING TESTS FAILED")
            main.ONOS1.show_intent(main.params['RestIP'])

        print main.ONOS1.check_exceptions()
        print main.ONOS2.check_exceptions()
        print main.ONOS3.check_exceptions()
        print main.ONOS4.check_exceptions()

        utilities.assert_equals(expect=main.TRUE,actual=result2,onpass="NO PACKET LOSS, HOST IS REACHABLE",onfail="PACKET LOST, HOST IS NOT REACHABLE")
        utilities.assert_equals(expect=main.TRUE,actual=result,onpass="Testcase passed",onfail="Testcase failed")


    #**************************************************************************************
    # Check for ONOS Components health
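    # Components checked below: ZooKeeper on all four nodes, the RAMCloud coordinator on node 1
    # plus a RAMCloud server on each node, the ONOS core on each node, and the simple REST/GUI
    # server on ONOS1.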

    def CASE9(self,main) :
        main.case("Checking component status")
        result = main.TRUE

        main.step("Checking Zookeeper status")
        result1 = main.Zookeeper1.status()
        if not result1:
            main.log.report("Zookeeper1 encountered a tragic death!")
        result2 = main.Zookeeper2.status()
        if not result2:
            main.log.report("Zookeeper2 encountered a tragic death!")
        result3 = main.Zookeeper3.status()
        if not result3:
            main.log.report("Zookeeper3 encountered a tragic death!")
        result4 = main.Zookeeper4.status()
        if not result4:
            main.log.report("Zookeeper4 encountered a tragic death!")
        result = result and result1 and result2 and result3 and result4

        main.step("Checking RamCloud status")
        result5 = main.RamCloud1.status_coor()
        if not result5:
            main.log.report("RamCloud Coordinator1 encountered a tragic death!")
        result6 = main.RamCloud1.status_serv()
        if not result6:
            main.log.report("RamCloud Server1 encountered a tragic death!")
        result7 = main.RamCloud2.status_serv()
        if not result7:
            main.log.report("RamCloud Server2 encountered a tragic death!")
        result8 = main.RamCloud3.status_serv()
        if not result8:
            main.log.report("RamCloud Server3 encountered a tragic death!")
        result9 = main.RamCloud4.status_serv()
        if not result9:
            main.log.report("RamCloud Server4 encountered a tragic death!")
        result = result and result5 and result6 and result7 and result8 and result9

        main.step("Checking ONOS status")
        result10 = main.ONOS1.status()
        if not result10:
            main.log.report("ONOS1 core encountered a tragic death!")
        result11 = main.ONOS2.status()
        if not result11:
            main.log.report("ONOS2 core encountered a tragic death!")
        result12 = main.ONOS3.status()
        if not result12:
            main.log.report("ONOS3 core encountered a tragic death!")
        result13 = main.ONOS4.status()
        if not result13:
            main.log.report("ONOS4 core encountered a tragic death!")
        result = result and result10 and result11 and result12 and result13

        rest_result = main.ONOS1.rest_status()
        if not rest_result:
            main.log.report("Simple Rest GUI server is not running on ONOS1")

        utilities.assert_equals(expect=main.TRUE,actual=result,onpass="All Components are running",onfail="One or more components failed")

    #**************************************************************************************
    # Test the device discovery function by yanking the s1-eth1 interface, re-plugging it into another switch, and moving it back
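    # Steps below: confirm the host (MAC taken from params) is currently discovered by ONOS,
    # yank s1-eth1 and confirm the host disappears, plug the interface into s6 and confirm it
    # is rediscovered, move it back to s1 and confirm it is discovered again, then wait and
    # check that the host entry ages out once the host stops generating traffic.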

    def CASE21(self,main) :
        import time
        import json
        main.log.report("Test the device discovery function by attaching, detaching, and moving host h1 from s1 -> s6 -> s1. Per Mininet naming, the switch port the host attaches to will remain 's1-eth1' throughout the test.")
        main.log.report("Check that the host MAC/IP initially exists in Mininet...")
        host = main.params['YANK']['hostname']
        mac = main.params['YANK']['hostmac']
        RestIP1 = main.params['RESTCALL']['restIP1']
        RestPort = main.params['RESTCALL']['restPort']
        url = main.params['RESTCALL']['restURL']

        t_topowait = 5
        t_restwait = 0
        main.log.report("Wait time from topo change to ping set to " + str(t_topowait))
        main.log.report("Wait time from ping to rest call set to " + str(t_restwait))
        # print "host=" + host + "; RestIP=" + RestIP1 + "; RestPort=" + str(RestPort)
        time.sleep(t_topowait)
        main.log.info("\n\t\t\t\tIssue one ping from " + str(host) + " to generate an ARP to the switch. The ping result is not important")
        ping = main.Mininet1.pingHost(src = str(host),target = "10.0.0.254")
        time.sleep(t_restwait)
        Reststatus, Switch, Port = main.ONOS1.find_host(RestIP1,RestPort,url,mac)
        main.log.report("Number of hosts with MAC address = " + mac + " found by ONOS: " + str(Reststatus))
        if Reststatus == 1:
            main.log.report("\t PASSED - Found host MAC = " + mac + "; attached to switchDPID = " + "".join(Switch) + "; at port = " + str(Port[0]))
            result1 = main.TRUE
        elif Reststatus > 1:
            main.log.report("\t FAILED - Host " + host + " with MAC:" + mac + " has " + str(Reststatus) + " duplicated MAC addresses. FAILED")
            main.log.report("Switches are: " + "; ".join(Switch))
            main.log.report("Ports are: " + "; ".join(Port))
            result1 = main.FALSE
        elif Reststatus == 0 and Switch == []:
            main.log.report("\t FAILED - Host " + host + " with MAC:" + mac + " does not exist. FAILED")
            result1 = main.FALSE
        else:  # check if the rest server is working
            main.log.error("Issue with find host")
            result1 = main.FALSE


        ##### Step: yank "s1-eth1" out of s1, which is assigned to ONOS1 #####

        main.log.report("Yank out s1-eth1")
        main.case("Yank s1-eth1 (the link to h1) out of s1")
        result = main.Mininet1.yank(SW=main.params['YANK']['sw1'],INTF=main.params['YANK']['intf'])
        time.sleep(t_topowait)
        utilities.assert_equals(expect=main.TRUE,actual=result,onpass="Yank command succeeded",onfail="Yank command failed...")

        main.log.info("\n\t\t\t\tIssue one ping from " + str(host) + " to generate an ARP to the switch. The ping result is not important")
        ping = main.Mininet1.pingHost(src = str(host),target = "10.0.0.254")
        time.sleep(t_restwait)
        Reststatus, Switch, Port = main.ONOS1.find_host(RestIP1,RestPort,url,mac)

        main.log.report("Number of hosts with MAC = " + mac + " found by ONOS: " + str(Reststatus))
        if Reststatus == 1:
            main.log.report("\t FAILED - Found host MAC = " + mac + "; attached to switchDPID = " + "".join(Switch) + "; at port = " + str(Port))
            result2 = main.FALSE
        elif Reststatus > 1:
            main.log.report("\t FAILED - Host " + host + " with MAC:" + str(mac) + " has " + str(Reststatus) + " duplicated MAC addresses. FAILED")
            main.log.report("Switches are: " + "; ".join(Switch))
            main.log.report("Ports are: " + "; ".join(Port))
            main.log.report("MAC is: " + str(mac))
            result2 = main.FALSE
        elif Reststatus == 0 and Switch == []:
            main.log.report("\t PASSED - Host " + host + " with MAC:" + str(mac) + " does not exist. PASSED - the host is not supposed to be attached to the switch.")
            result2 = main.TRUE
        else:  # check if the rest server is working
            main.log.error("Issue with find host")
            result2 = main.FALSE

        ##### Step: plug "s1-eth1" into s6, which is assigned to ONOS3 #####
        main.log.report("Plug s1-eth1 into s6")
        main.case("Plug s1-eth1 into s6")
        result = main.Mininet1.plug(SW=main.params['PLUG']['sw6'],INTF=main.params['PLUG']['intf'])
        time.sleep(t_topowait)
        utilities.assert_equals(expect=main.TRUE,actual=result,onpass="Plug command succeeded",onfail="Plug command failed...")
        main.log.info("\n\t\t\t\tIssue one ping from " + str(host) + " to generate an ARP to the switch. The ping result is not important")

        ping = main.Mininet1.pingHost(src = str(host),target = "10.0.0.254")
        time.sleep(t_restwait)
        Reststatus, Switch, Port = main.ONOS1.find_host(RestIP1,RestPort,url,mac)

        main.log.report("Number of hosts with MAC " + mac + " found by ONOS: " + str(Reststatus))
        if Reststatus == 1:
            main.log.report("\t PASSED - Found host MAC = " + mac + "; attached to switchDPID = " + "".join(Switch) + "; at port = " + str(Port[0]))
            result3 = main.TRUE
        elif Reststatus > 1:
            main.log.report("\t FAILED - Host " + host + " with MAC:" + str(mac) + " has " + str(Reststatus) + " duplicated MAC addresses. FAILED")
            main.log.report("Switches are: " + "; ".join(Switch))
            main.log.report("Ports are: " + "; ".join(Port))
            main.log.report("MAC is: " + str(mac))
            result3 = main.FALSE
        elif Reststatus == 0 and Switch == []:
            main.log.report("\t FAILED - Host " + host + " with MAC:" + str(mac) + " does not exist. FAILED")
            result3 = main.FALSE
        else:  # check if the rest server is working
            main.log.error("Issue with find host")
            result3 = main.FALSE

        ###### Step: put interface "s1-eth1" back on s1 #####
        main.log.report("Move s1-eth1 back onto s1")
        main.case("Move s1-eth1 back to s1")
        result = main.Mininet1.yank(SW=main.params['YANK']['sw6'],INTF=main.params['YANK']['intf'])
        time.sleep(t_topowait)
        result = main.Mininet1.plug(SW=main.params['PLUG']['sw1'],INTF=main.params['PLUG']['intf'])
        utilities.assert_equals(expect=main.TRUE,actual=result,onpass="Yank/Plug command succeeded",onfail="Yank/Plug command failed...")
        main.log.info("\n\t\t\t\tIssue one ping from " + str(host) + " to generate an ARP to the switch. The ping result is not important")

        ping = main.Mininet1.pingHost(src = str(host),target = "10.0.0.254")
        time.sleep(t_restwait)
        Reststatus, Switch, Port = main.ONOS1.find_host(RestIP1,RestPort,url,mac)

        main.log.report("Number of hosts with MAC = " + mac + " found by ONOS: " + str(Reststatus))
        if Reststatus == 1:
            main.log.report("\t PASSED - Found host MAC = " + mac + "; attached to switchDPID = " + "".join(Switch) + "; at port = " + str(Port[0]))
            result4 = main.TRUE
        elif Reststatus > 1:
            main.log.report("\t FAILED - Host " + host + " with MAC:" + str(mac) + " has " + str(Reststatus) + " duplicated MAC addresses. FAILED")
            main.log.report("Switches are: " + "; ".join(Switch))
            main.log.report("Ports are: " + "; ".join(Port))
            main.log.report("MAC is: " + str(mac))
            result4 = main.FALSE
        elif Reststatus == 0 and Switch == []:
            main.log.report("\t FAILED - Host " + host + " with MAC:" + str(mac) + " does not exist. FAILED")
            result4 = main.FALSE
        else:  # check if the rest server is working
            main.log.error("Issue with find host")
            result4 = main.FALSE
        time.sleep(20)
        Reststatus, Switch, Port = main.ONOS1.find_host(RestIP1,RestPort,url,mac)
        main.log.report("Number of hosts with MAC = " + mac + " found by ONOS: " + str(Reststatus))
        if Reststatus == 1:
            main.log.report("\t FAILED - Host " + host + " with MAC:" + str(mac) + " was still found after the expected timeout")
        elif Reststatus > 1:
            main.log.report("\t FAILED - Host " + host + " with MAC:" + str(mac) + " was still found after the expected timeout (multiple entries found)")
        elif Reststatus == 0:
            main.log.report("\t PASSED - Device cleared after timeout")

        result = result1 and result2 and result3 and result4
        utilities.assert_equals(expect=main.TRUE,actual=result,onpass="DEVICE DISCOVERY TEST PASSED PLUG/UNPLUG/MOVE TEST",onfail="DEVICE DISCOVERY TEST FAILED")


    def CASE100(self,main):
        import time
        from sts.topology.teston_topology import TestONTopology  # assumes that sts is already in your PYTHONPATH
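        # Note: the import above assumes sts is importable. If it is not, its checkout directory
        # can be prepended to sys.path before the import; the path here is only an example:
        #   import sys
        #   sys.path.insert(0, "/home/admin/sts")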

        topo = TestONTopology(main.Mininet1, onos_controllers=
                [(main.ONOS1, 'ONOS1', main.params['CTRL']['ip1'], main.params['CTRL']['port1']),
                 (main.ONOS2, 'ONOS2', main.params['CTRL']['ip2'], main.params['CTRL']['port2']),
                 (main.ONOS3, 'ONOS3', main.params['CTRL']['ip3'], main.params['CTRL']['port3']),
                 (main.ONOS4, 'ONOS4', main.params['CTRL']['ip4'], main.params['CTRL']['port4'])])

        # for switch in topo.graph.switches: print "Switch name: %s, dpid: %s, ports: %s" % (switch.name, switch.dpid, [p.hw_addr for p in switch.ports.values()])
        print
        print
        print
        import json
        '''
        output = '{"Switches":['
        for switch in topo.graph.switches:
            ports = '%s' % [p.hw_addr for p in switch.ports.values()]
            ports = ports.replace('\'','"')
            output += '{"name": "%s", "dpid": "%s", "ports": %s},' % (switch.name, switch.dpid, ports)
        output = output[:-1]
        output += ']}'
        '''
        output = {"Switches":[]}
        for switch in topo.graph.switches:
            print [p.hw_addr for p in switch.ports.values()]
            ports = '%s' % [p.hw_addr for p in switch.ports.values()]
            ports = ports.replace('\'','')
            output['Switches'].append({"name": switch.name, "dpid": switch.dpid, "ports": ports})
        print output
        # mn_json = json.loads(output)
        print json.dumps(output, sort_keys=True,indent=4,separators=(',', ': '))
        print type(output)
        print output.items()
        mnDPIDs = []
        for switch in output['Switches']:
            mnDPIDs.append(switch['dpid'])
        mnDPIDs.append('mn1')  # NOTE: 'mn1' is not a real DPID, so the comparison below will always report it as missing from ONOS
        mnDPIDs.sort()
        print mnDPIDs
        print
        print "Dumping ONOS view of Switches"
        onos = main.ONOS1.get_json("10.128.11.1:8080/wm/onos/topology/switches")
        onosDPIDs = []
        for switch in onos:
            print switch
            onosDPIDs.append(switch['dpid'].replace(":",''))
        onosDPIDs.append(00121)  # NOTE: this extra (octal) entry is not a real DPID, so it will always show up as missing from MN
        onosDPIDs.sort()
        print onosDPIDs
        if mnDPIDs != onosDPIDs:
            print "Switches in MN but not in ONOS:"
            print [switch for switch in mnDPIDs if switch not in onosDPIDs]
            print "Switches in ONOS but not in MN:"
            print [switch for switch in onosDPIDs if switch not in mnDPIDs]
        print
        print
        print
        '''
        for host in topo.graph.hosts: print "Host: %s, interfaces: %s" % (host.name, [iface.hw_addr for iface in host.interfaces])
        print()
        print()
        print()
        for link in topo.graph.links: print "Link: %s" % link
        print()
        print()
        print()
        # To print just internal network links (connecting switches to each other)
        print "Just printing network links"
        for link in topo.patch_panel.network_links: print "Link: %s" % link
        '''
        '''
        # Bring links up and down
        for link in topo.patch_panel.network_links:
            topo.patch_panel.sever_network_link(link)
        for link in topo.patch_panel.access_links:
            topo.patch_panel.sever_access_link(link)
        for link in topo.patch_panel.network_links:
            topo.patch_panel.repair_network_link(link)
        for link in topo.patch_panel.access_links:
            topo.patch_panel.repair_access_link(link)
        '''

        time.sleep(40)