| 1 | #TopoPerfNext
| 2 | # |
| 3 | #Topology Performance test for ONOS-next |
| 4 | # |
| 5 | #andrew@onlab.us |
| 6 | # |
| 7 | #If your machine does not come with numpy |
| 8 | #run the following command: |
| 9 | #sudo apt-get install python-numpy python-scipy |
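#A quick sanity check that the dependency is present (illustrative
#one-liner; adjust for your environment):
#  python -c "import numpy; print numpy.__version__"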
| 10 | |
| 11 | import time |
| 12 | import sys |
| 13 | import os |
| 14 | import re |
| 15 | |
| 16 | class TopoPerfNext: |
| 17 | def __init__(self): |
| 18 | self.default = '' |
| 19 | |
| 20 | def CASE1(self, main): |
| 21 | ''' |
| 22 | ONOS startup sequence |
| 23 | ''' |
| 24 | import time |
| 25 | |
| 26 | ## Global cluster count for scale-out purposes |
| 27 | global cluster_count |
| 28 | #Set initial cluster count |
| 29 | cluster_count = 1 |
| 30 | ## |
| 31 | |
| 32 | cell_name = main.params['ENV']['cellName'] |
| 33 | |
| 34 | git_pull = main.params['GIT']['autoPull'] |
| 35 | checkout_branch = main.params['GIT']['checkout'] |
| 36 | |
| 37 | ONOS1_ip = main.params['CTRL']['ip1'] |
| 38 | ONOS2_ip = main.params['CTRL']['ip2'] |
| 39 | ONOS3_ip = main.params['CTRL']['ip3'] |
| 40 | |
| 41 | #### Hardcoded ONOS nodes particular to my env #### |
| 42 | ONOS4_ip = "10.128.174.4" |
| 43 | ONOS5_ip = "10.128.174.5" |
| 44 | ONOS6_ip = "10.128.174.6" |
| 45 | ONOS7_ip = "10.128.174.7" |
| 46 | #### #### |
| 47 | |
| 48 | MN1_ip = main.params['MN']['ip1'] |
| 49 | BENCH_ip = main.params['BENCH']['ip'] |
| 50 | |
| 51 | topo_cfg_file = main.params['TEST']['topo_config_file'] |
| 52 | topo_cfg_name = main.params['TEST']['topo_config_name'] |
| 53 | |
| 54 | main.case("Setting up test environment") |
| 55 | main.log.info("Copying topology event accumulator config"+\ |
| 56 | " to ONOS /package/etc") |
| 57 | main.ONOSbench.handle.sendline("cp ~/"+\ |
| 58 | topo_cfg_file+\ |
| 59 | " ~/ONOS/tools/package/etc/"+\ |
| 60 | topo_cfg_name) |
| 61 | main.ONOSbench.handle.expect("\$") |
| 62 | |
| 63 | main.log.report("Setting up test environment") |
| 64 | |
| 65 | main.step("Cleaning previously installed ONOS if any") |
| 66 | main.ONOSbench.onos_uninstall(node_ip=ONOS2_ip) |
| 67 | main.ONOSbench.onos_uninstall(node_ip=ONOS3_ip) |
| 68 | main.ONOSbench.onos_uninstall(node_ip=ONOS4_ip) |
| 69 | main.ONOSbench.onos_uninstall(node_ip=ONOS5_ip) |
| 70 | main.ONOSbench.onos_uninstall(node_ip=ONOS6_ip) |
| 71 | main.ONOSbench.onos_uninstall(node_ip=ONOS7_ip) |
| 72 | |
| 73 | main.step("Creating cell file") |
| 74 | cell_file_result = main.ONOSbench.create_cell_file( |
| 75 | BENCH_ip, cell_name, MN1_ip, "onos-core,onos-app-metrics", |
| 76 | ONOS1_ip) |
| 77 | |
| 78 | main.step("Applying cell file to environment") |
| 79 | cell_apply_result = main.ONOSbench.set_cell(cell_name) |
| 80 | verify_cell_result = main.ONOSbench.verify_cell() |
| 81 | |
| 82 | #NOTE: This step may be removed once proper
| 83 | # copycat log handling is in place
| 84 | main.step("Removing raft/copy-cat logs from ONOS nodes") |
| 85 | main.ONOSbench.onos_remove_raft_logs() |
| 86 | time.sleep(30) |
| 87 | |
| 88 | main.step("Git checkout and pull "+checkout_branch) |
| 89 | if git_pull == 'on': |
| 90 | checkout_result = \ |
| 91 | main.ONOSbench.git_checkout(checkout_branch) |
| 92 | pull_result = main.ONOSbench.git_pull() |
| 93 | else: |
| 94 | checkout_result = main.TRUE |
| 95 | pull_result = main.TRUE |
| 96 | main.log.info("Skipped git checkout and pull") |
| 97 | |
| 98 | main.log.report("Commit information - ") |
| 99 | main.ONOSbench.get_version(report=True) |
| 100 | |
| 101 | main.step("Using mvn clean & install") |
| 102 | mvn_result = main.ONOSbench.clean_install() |
| 103 | mvn_result = main.TRUE |
| 104 | |
| 105 | main.step("Set cell for ONOS cli env") |
| 106 | main.ONOS1cli.set_cell(cell_name) |
| 107 | #main.ONOS2cli.set_cell(cell_name) |
| 108 | #main.ONOS3cli.set_cell(cell_name) |
| 109 | |
| 110 | main.step("Creating ONOS package") |
| 111 | package_result = main.ONOSbench.onos_package() |
| 112 | |
| 113 | main.step("Installing ONOS package") |
| 114 | install1_result = main.ONOSbench.onos_install(node=ONOS1_ip) |
| 115 | #install2_result = main.ONOSbench.onos_install(node=ONOS2_ip) |
| 116 | #install3_result = main.ONOSbench.onos_install(node=ONOS3_ip) |
| 117 | |
| 118 | time.sleep(10) |
| 119 | |
| 120 | main.step("Start onos cli") |
| 121 | cli1 = main.ONOS1cli.start_onos_cli(ONOS1_ip) |
| 122 | #cli2 = main.ONOS2cli.start_onos_cli(ONOS2_ip) |
| 123 | #cli3 = main.ONOS3cli.start_onos_cli(ONOS3_ip) |
| 124 | |
| 125 | utilities.assert_equals(expect=main.TRUE, |
| 126 | actual= cell_file_result and cell_apply_result and\ |
| 127 | verify_cell_result and checkout_result and\ |
| 128 | pull_result and mvn_result and\ |
| 129 | install1_result, #and install2_result and\ |
| 130 | #install3_result, |
| 131 | onpass="Test Environment setup successful", |
| 132 | onfail="Failed to setup test environment") |
| 133 | |
| 134 | def CASE2(self, main): |
| 135 | ''' |
| 136 | Assign s1 to ONOS1 and measure latency |
| 137 | |
| 138 | There are 4 levels of latency measurements in this test:
| 139 | 1) End-to-end measurement: Complete end-to-end measurement |
| 140 | from TCP (SYN/ACK) handshake to Graph change |
| 141 | 2) OFP-to-graph measurement: 'ONOS processing' snippet of |
| 142 | measurement from OFP Vendor message to Graph change |
| 143 | 3) OFP-to-device measurement: 'ONOS processing without |
| 144 | graph change' snippet of measurement from OFP vendor |
| 145 | message to Device change timestamp |
| 146 | 4) T0-to-device measurement: Measurement that includes |
| 147 | the switch handshake to devices timestamp without |
| 148 | the graph view change. (TCP handshake -> Device |
| 149 | change) |
| 150 | ''' |
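#Worked example of the four measurements above, using hypothetical
#timestamps (epoch ms) purely to illustrate the arithmetic done
#later in this case:
#  t0_tcp  (TCP SYN/ACK seen by tshark)        = 1000000
#  t0_ofp  (OFP vendor message seen by tshark) = 1000003
#  device event timestamp (from metrics)       = 1000010
#  graph event timestamp  (from metrics)       = 1000012
#
#  1) end-to-end    = graph  - t0_tcp = 12 ms
#  2) OFP-to-graph  = graph  - t0_ofp =  9 ms
#  3) OFP-to-device = device - t0_ofp =  7 ms
#  4) t0-to-device  = device - t0_tcp = 10 ms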
| 151 | import time |
| 152 | import subprocess |
| 153 | import json |
| 154 | import requests |
| 155 | import os |
| 156 | import numpy |
| 157 | global cluster_count |
| 158 | |
| 159 | ONOS1_ip = main.params['CTRL']['ip1'] |
| 160 | ONOS2_ip = main.params['CTRL']['ip2'] |
| 161 | ONOS3_ip = main.params['CTRL']['ip3'] |
| 162 | ONOS4_ip = main.params['CTRL']['ip4'] |
| 163 | ONOS5_ip = main.params['CTRL']['ip5'] |
| 164 | ONOS6_ip = main.params['CTRL']['ip6'] |
| 165 | ONOS7_ip = main.params['CTRL']['ip7'] |
| 166 | |
| 167 | ONOS_user = main.params['CTRL']['user'] |
| 168 | |
| 169 | default_sw_port = main.params['CTRL']['port1'] |
| 170 | |
| 171 | #Number of iterations of case |
| 172 | num_iter = main.params['TEST']['numIter'] |
| 173 | #Number of first 'x' iterations to ignore: |
| 174 | iter_ignore = int(main.params['TEST']['iterIgnore']) |
| 175 | |
| 176 | #Timestamp 'keys' for json metrics output. |
| 177 | #These are subject to change, hence moved into params |
| 178 | deviceTimestamp = main.params['JSON']['deviceTimestamp'] |
| 179 | graphTimestamp = main.params['JSON']['graphTimestamp'] |
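#The metrics call returns JSON in which each key configured above
#maps to an object whose 'value' field is an epoch-ms timestamp.
#Minimal parsing sketch (the key name below is only an example;
#the real names come from the params file):
#  metrics = json.loads(
#      '{"topologyGraphEventTimestamp": {"value": 1421955200123}}')
#  graph_ts = metrics["topologyGraphEventTimestamp"]["value"]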
| 180 | |
| 181 | debug_mode = main.params['TEST']['debugMode'] |
| 182 | onos_log = main.params['TEST']['onosLogFile'] |
| 183 | |
| 184 | #Threshold for the test |
| 185 | threshold_str = main.params['TEST']['singleSwThreshold'] |
| 186 | threshold_obj = threshold_str.split(",") |
| 187 | threshold_min = int(threshold_obj[0]) |
| 188 | threshold_max = int(threshold_obj[1]) |
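#Example: a params value of "0,1000" yields threshold_min = 0 and
#threshold_max = 1000 (both in milliseconds).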
| 189 | |
| 190 | #List of switch add latency collected from |
| 191 | #all iterations |
| 192 | latency_end_to_end_list = [] |
| 193 | latency_ofp_to_graph_list = [] |
| 194 | latency_ofp_to_device_list = [] |
| 195 | latency_t0_to_device_list = [] |
| 196 | latency_tcp_to_ofp_list = [] |
| 197 | |
| 198 | #Directory/file to store tshark results |
| 199 | tshark_of_output = "/tmp/tshark_of_topo.txt" |
| 200 | tshark_tcp_output = "/tmp/tshark_tcp_topo.txt" |
| 201 | |
| 202 | #String to grep in tshark output |
| 203 | tshark_tcp_string = "TCP 74 "+default_sw_port |
| 204 | tshark_of_string = "OFP 86 Vendor" |
| 205 | |
| 206 | #Initialize assertion to TRUE |
| 207 | assertion = main.TRUE |
| 208 | |
| 209 | local_time = time.strftime('%x %X') |
| 210 | local_time = local_time.replace("/","") |
| 211 | local_time = local_time.replace(" ","_") |
| 212 | local_time = local_time.replace(":","") |
| 213 | if debug_mode == 'on': |
| 214 | main.ONOS1.tshark_pcap("eth0", |
| 215 | "/tmp/single_sw_lat_pcap_"+local_time) |
| 216 | |
| 217 | main.log.info("Debug mode is on") |
| 218 | |
| 219 | main.log.report("Latency of adding one switch to controller") |
| 220 | main.log.report("First "+str(iter_ignore)+" iterations ignored"+ |
| 221 | " for jvm warmup time") |
| 222 | main.log.report("Total iterations of test: "+str(num_iter)) |
| 223 | |
| 224 | for i in range(0, int(num_iter)): |
| 225 | main.log.info("Starting tshark capture") |
| 226 | |
| 227 | #* TCP [ACK, SYN] is used as t0_a, the |
| 228 | # very first "exchange" between ONOS and |
| 229 | # the switch for end-to-end measurement |
| 230 | #* OFP [Vendor] is used as t0_b, the
| 231 | # OFP message grepped from the capture that marks the
| 232 | # start of the ONOS processing measurement
| 233 | main.ONOS1.tshark_grep(tshark_tcp_string, |
| 234 | tshark_tcp_output) |
| 235 | main.ONOS1.tshark_grep(tshark_of_string, |
| 236 | tshark_of_output) |
| 237 | |
| 238 | #Wait and ensure tshark is started and |
| 239 | #capturing |
| 240 | time.sleep(10) |
| 241 | |
| 242 | main.log.info("Assigning s1 to controller") |
| 243 | |
| 244 | main.Mininet1.assign_sw_controller(sw="1", |
| 245 | ip1=ONOS1_ip, port1=default_sw_port) |
| 246 | |
| 247 | #Wait and ensure switch is assigned |
| 248 | #before stopping tshark |
| 249 | time.sleep(30) |
| 250 | |
| 251 | main.log.info("Stopping all Tshark processes") |
| 252 | main.ONOS1.stop_tshark() |
| 253 | |
| 254 | #tshark output is saved on the ONOS node. Use scp
| 255 | #to copy the files over to TestON for parsing
| 256 | main.log.info("Copying over tshark files") |
| 257 | |
| 258 | #TCP CAPTURE **** |
| 259 | #Copy the tshark output from ONOS machine to |
| 260 | #TestON machine in tshark_tcp_output directory>file |
| 261 | os.system("scp "+ONOS_user+"@"+ONOS1_ip+":"+ |
| 262 | tshark_tcp_output+" /tmp/") |
| 263 | tcp_file = open(tshark_tcp_output, 'r') |
| 264 | temp_text = tcp_file.readline() |
| 265 | temp_text = temp_text.split(" ") |
| 266 | |
| 267 | main.log.info("Object read in from TCP capture: "+ |
| 268 | str(temp_text)) |
| 269 | if len(temp_text) > 1: |
| 270 | t0_tcp = float(temp_text[1])*1000.0 |
| 271 | else: |
| 272 | main.log.error("Tshark output file for TCP"+ |
| 273 | " returned unexpected results") |
| 274 | t0_tcp = 0 |
| 275 | assertion = main.FALSE |
| 276 | |
| 277 | tcp_file.close() |
| 278 | #**************** |
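#Parsing sketch for the block above (the exact line layout is an
#assumption about the tshark output; the key point is that field
#[1] holds an absolute timestamp in seconds, scaled here to ms):
#  fields = "1 1421955200.123456 10.128.10.90 ...".split(" ")
#  t0_tcp = float(fields[1]) * 1000.0   # -> 1421955200123.456 ms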
| 279 | |
| 280 | #OF CAPTURE **** |
| 281 | os.system("scp "+ONOS_user+"@"+ONOS1_ip+":"+ |
| 282 | tshark_of_output+" /tmp/") |
| 283 | of_file = open(tshark_of_output, 'r') |
| 284 | |
| 285 | line_ofp = "" |
| 286 | #Read until last line of file |
| 287 | while True: |
| 288 | temp_text = of_file.readline() |
| 289 | if temp_text !='': |
| 290 | line_ofp = temp_text |
| 291 | else: |
| 292 | break |
| 293 | obj = line_ofp.split(" ") |
| 294 | |
| 295 | main.log.info("Object read in from OFP capture: "+ |
| 296 | str(line_ofp)) |
| 297 | |
| 298 | if len(obj) > 1:
| 299 | t0_ofp = float(obj[1])*1000.0 |
| 300 | else: |
| 301 | main.log.error("Tshark output file for OFP"+ |
| 302 | " returned unexpected results") |
| 303 | t0_ofp = 0 |
| 304 | assertion = main.FALSE |
| 305 | |
| 306 | of_file.close() |
| 307 | #**************** |
| 308 | |
| 309 | json_str_1 = main.ONOS1cli.topology_events_metrics()
| 310 | json_str_2 = main.ONOS2cli.topology_events_metrics()
| 311 | json_str_3 = main.ONOS3cli.topology_events_metrics()
| 312 | #Initialize scale-out variables
| 313 | json_str_4 = ""
| 314 | json_str_5 = ""
| 315 | json_str_6 = ""
| 316 | json_str_7 = ""
| 317 | |
| 318 | json_obj_1 = json.loads(json_str_1) |
| 319 | json_obj_2 = json.loads(json_str_2) |
| 320 | json_obj_3 = json.loads(json_str_3) |
| 321 | #Initialize scale-out variables |
| 322 | json_obj_4 = "" |
| 323 | json_obj_5 = "" |
| 324 | json_obj_6 = "" |
| 325 | json_obj_7 = "" |
| 326 | |
| 327 | #Include scale-out measurements when applicable |
| 328 | if cluster_count == 5: |
| 329 | json_str_4 = main.ONOS4cli.topology_events_metrics() |
| 330 | json_str_5 = main.ONOS5cli.topology_events_metrics() |
| 331 | |
| 332 | json_obj_4 = json.loads(json_str_4) |
| 333 | json_obj_5 = json.loads(json_str_5) |
| 334 | elif cluster_count == 6: |
| 335 | main.log.info("TODO: create even number cluster events") |
| 336 | elif cluster_count == 7: |
| 337 | json_str_6 = main.ONOS6cli.topology_events_metrics() |
| 338 | json_str_7 = main.ONOS7cli.topology_events_metrics() |
| 339 | |
| 340 | json_obj_6 = json.loads(json_str_6) |
| 341 | json_obj_7 = json.loads(json_str_7) |
| 342 | |
| 343 | #Obtain graph timestamp. This timestamp captures
| 344 | #the epoch time at which the topology graph was updated. |
| 345 | graph_timestamp_1 = \ |
| 346 | json_obj_1[graphTimestamp]['value'] |
| 347 | graph_timestamp_2 = \ |
| 348 | json_obj_2[graphTimestamp]['value'] |
| 349 | graph_timestamp_3 = \ |
| 350 | json_obj_3[graphTimestamp]['value'] |
| 351 | |
| 352 | #Obtain device timestamp. This timestamp captures |
| 353 | #the epoch time at which the device event happened |
| 354 | device_timestamp_1 = \ |
| 355 | json_obj_1[deviceTimestamp]['value'] |
| 356 | device_timestamp_2 = \ |
| 357 | json_obj_2[deviceTimestamp]['value'] |
| 358 | device_timestamp_3 = \ |
| 359 | json_obj_3[deviceTimestamp]['value'] |
| 360 | |
| 361 | #t0 to device processing latency |
| 362 | delta_device_1 = int(device_timestamp_1) - int(t0_tcp) |
| 363 | delta_device_2 = int(device_timestamp_2) - int(t0_tcp) |
| 364 | delta_device_3 = int(device_timestamp_3) - int(t0_tcp) |
| 365 | |
| 366 | #Get average of delta from all instances |
| 367 | avg_delta_device = \ |
| 368 | (int(delta_device_1)+\ |
| 369 | int(delta_device_2)+\ |
| 370 | int(delta_device_3)) / 3 |
| 371 | |
| 372 | #Ensure avg delta meets the threshold before appending |
| 373 | if avg_delta_device > 0.0 and avg_delta_device < 10000\ |
| 374 | and int(i) > iter_ignore: |
| 375 | latency_t0_to_device_list.append(avg_delta_device) |
| 376 | else: |
| 377 | main.log.info("Results for t0-to-device ignored"+\ |
| 378 | "due to excess in threshold / warmup iteration.") |
| 379 | |
| 380 | #t0 to graph processing latency (end-to-end) |
| 381 | delta_graph_1 = int(graph_timestamp_1) - int(t0_tcp) |
| 382 | delta_graph_2 = int(graph_timestamp_2) - int(t0_tcp) |
| 383 | delta_graph_3 = int(graph_timestamp_3) - int(t0_tcp) |
| 384 | |
| 385 | #Get average of delta from all instances |
| 386 | #TODO: use max delta graph |
| 387 | #max_delta_graph = max(three) |
| 388 | avg_delta_graph = \ |
| 389 | (int(delta_graph_1)+\ |
| 390 | int(delta_graph_2)+\ |
| 391 | int(delta_graph_3)) / 3 |
| 392 | |
| 393 | #Ensure avg delta meets the threshold before appending |
| 394 | if avg_delta_graph > 0.0 and avg_delta_graph < 10000\ |
| 395 | and int(i) > iter_ignore: |
| 396 | latency_end_to_end_list.append(avg_delta_graph) |
| 397 | else: |
| 398 | main.log.info("Results for end-to-end ignored"+\ |
| 399 | "due to excess in threshold") |
| 400 | |
| 401 | #ofp to graph processing latency (ONOS processing) |
| 402 | delta_ofp_graph_1 = int(graph_timestamp_1) - int(t0_ofp) |
| 403 | delta_ofp_graph_2 = int(graph_timestamp_2) - int(t0_ofp) |
| 404 | delta_ofp_graph_3 = int(graph_timestamp_3) - int(t0_ofp) |
| 405 | |
| 406 | avg_delta_ofp_graph = \ |
| 407 | (int(delta_ofp_graph_1)+\ |
| 408 | int(delta_ofp_graph_2)+\ |
| 409 | int(delta_ofp_graph_3)) / 3 |
| 410 | |
| 411 | if avg_delta_ofp_graph > threshold_min \ |
| 412 | and avg_delta_ofp_graph < threshold_max\ |
| 413 | and int(i) > iter_ignore: |
| 414 | latency_ofp_to_graph_list.append(avg_delta_ofp_graph) |
| 415 | elif avg_delta_ofp_graph > (-10) and \ |
| 416 | avg_delta_ofp_graph < 0.0 and\ |
| 417 | int(i) > iter_ignore: |
| 418 | main.log.info("Sub-millisecond result likely; "+ |
| 419 | "negative result was rounded to 0") |
| 420 | #NOTE: Current metrics framework does not |
| 421 | #support sub-millisecond accuracy. Therefore, |
| 422 | #if the result is negative, we can reasonably |
| 423 | #conclude sub-millisecond results and just |
| 424 | #append the best rounded effort - 0 ms. |
| 425 | latency_ofp_to_graph_list.append(0) |
| 426 | else: |
| 427 | main.log.info("Results for ofp-to-graph "+\ |
| 428 | "ignored due to excess in threshold") |
| 429 | |
| 430 | #ofp to device processing latency (ONOS processing) |
| 431 | delta_ofp_device_1 = float(device_timestamp_1) - float(t0_ofp) |
| 432 | delta_ofp_device_2 = float(device_timestamp_2) - float(t0_ofp) |
| 433 | delta_ofp_device_3 = float(device_timestamp_3) - float(t0_ofp) |
| 434 | |
| 435 | avg_delta_ofp_device = \ |
| 436 | (float(delta_ofp_device_1)+\ |
| 437 | float(delta_ofp_device_2)+\ |
| 438 | float(delta_ofp_device_3)) / 3 |
| 439 | |
| 440 | #NOTE: OFP-to-device delta measurements are occasionally negative
| 441 | # due to system time misalignment. |
| 442 | latency_ofp_to_device_list.append(avg_delta_ofp_device) |
| 443 | |
| 444 | delta_ofp_tcp = int(t0_ofp) - int(t0_tcp) |
| 445 | if delta_ofp_tcp > threshold_min \ |
| 446 | and delta_ofp_tcp < threshold_max and\ |
| 447 | int(i) > iter_ignore: |
| 448 | latency_tcp_to_ofp_list.append(delta_ofp_tcp) |
| 449 | else: |
| 450 | main.log.info("Results fo tcp-to-ofp "+\ |
| 451 | "ignored due to excess in threshold") |
| 452 | |
| 453 | #TODO: |
| 454 | #Fetch logs upon threshold excess |
| 455 | |
| 456 | main.log.info("ONOS1 delta end-to-end: "+ |
| 457 | str(delta_graph_1) + " ms") |
| 458 | main.log.info("ONOS2 delta end-to-end: "+ |
| 459 | str(delta_graph_2) + " ms") |
| 460 | main.log.info("ONOS3 delta end-to-end: "+ |
| 461 | str(delta_graph_3) + " ms") |
| 462 | |
| 463 | main.log.info("ONOS1 delta OFP - graph: "+ |
| 464 | str(delta_ofp_graph_1) + " ms") |
| 465 | main.log.info("ONOS2 delta OFP - graph: "+ |
| 466 | str(delta_ofp_graph_2) + " ms") |
| 467 | main.log.info("ONOS3 delta OFP - graph: "+ |
| 468 | str(delta_ofp_graph_3) + " ms") |
| 469 | |
| 470 | main.log.info("ONOS1 delta device - t0: "+ |
| 471 | str(delta_device_1) + " ms") |
| 472 | main.log.info("ONOS2 delta device - t0: "+ |
| 473 | str(delta_device_2) + " ms") |
| 474 | main.log.info("ONOS3 delta device - t0: "+ |
| 475 | str(delta_device_3) + " ms") |
| 476 | |
| 477 | main.log.info("TCP to OFP delta: "+ |
| 478 | str(delta_ofp_tcp) + " ms") |
| 479 | #main.log.info("ONOS1 delta OFP - device: "+ |
| 480 | # str(delta_ofp_device_1) + " ms") |
| 481 | #main.log.info("ONOS2 delta OFP - device: "+ |
| 482 | # str(delta_ofp_device_2) + " ms") |
| 483 | #main.log.info("ONOS3 delta OFP - device: "+ |
| 484 | # str(delta_ofp_device_3) + " ms") |
| 485 | |
| 486 | main.step("Remove switch from controller") |
| 487 | main.Mininet1.delete_sw_controller("s1") |
| 488 | |
| 489 | time.sleep(5) |
| 490 | |
| 491 | #END of for loop iteration |
| 492 | |
| 493 | #If there is at least 1 element in each list, |
| 494 | #pass the test case |
| 495 | if len(latency_end_to_end_list) > 0 and\ |
| 496 | len(latency_ofp_to_graph_list) > 0 and\ |
| 497 | len(latency_ofp_to_device_list) > 0 and\ |
| 498 | len(latency_t0_to_device_list) > 0 and\ |
| 499 | len(latency_tcp_to_ofp_list) > 0: |
| 500 | assertion = main.TRUE |
| 501 | elif len(latency_end_to_end_list) == 0: |
| 502 | #The appending of 0 here is to prevent |
| 503 | #the min,max,sum functions from failing |
| 504 | #below |
| 505 | latency_end_to_end_list.append(0) |
| 506 | assertion = main.FALSE |
| 507 | elif len(latency_ofp_to_graph_list) == 0: |
| 508 | latency_ofp_to_graph_list.append(0) |
| 509 | assertion = main.FALSE |
| 510 | elif len(latency_ofp_to_device_list) == 0: |
| 511 | latency_ofp_to_device_list.append(0) |
| 512 | assertion = main.FALSE |
| 513 | elif len(latency_t0_to_device_list) == 0: |
| 514 | latency_t0_to_device_list.append(0) |
| 515 | assertion = main.FALSE |
| 516 | elif len(latency_tcp_to_ofp_list) == 0: |
| 517 | latency_tcp_to_ofp_list.append(0) |
| 518 | assertion = main.FALSE |
| 519 | |
| 520 | #Calculate min, max, avg of latency lists |
| 521 | latency_end_to_end_max = \ |
| 522 | int(max(latency_end_to_end_list)) |
| 523 | latency_end_to_end_min = \ |
| 524 | int(min(latency_end_to_end_list)) |
| 525 | latency_end_to_end_avg = \ |
| 526 | (int(sum(latency_end_to_end_list)) / \ |
| 527 | len(latency_end_to_end_list)) |
| 528 | latency_end_to_end_std_dev = \ |
| 529 | str(round(numpy.std(latency_end_to_end_list),1)) |
| 530 | |
| 531 | latency_ofp_to_graph_max = \ |
| 532 | int(max(latency_ofp_to_graph_list)) |
| 533 | latency_ofp_to_graph_min = \ |
| 534 | int(min(latency_ofp_to_graph_list)) |
| 535 | latency_ofp_to_graph_avg = \ |
| 536 | (int(sum(latency_ofp_to_graph_list)) / \ |
| 537 | len(latency_ofp_to_graph_list)) |
| 538 | latency_ofp_to_graph_std_dev = \ |
| 539 | str(round(numpy.std(latency_ofp_to_graph_list),1)) |
| 540 | |
| 541 | latency_ofp_to_device_max = \ |
| 542 | int(max(latency_ofp_to_device_list)) |
| 543 | latency_ofp_to_device_min = \ |
| 544 | int(min(latency_ofp_to_device_list)) |
| 545 | latency_ofp_to_device_avg = \ |
| 546 | (int(sum(latency_ofp_to_device_list)) / \ |
| 547 | len(latency_ofp_to_device_list)) |
| 548 | latency_ofp_to_device_std_dev = \ |
| 549 | str(round(numpy.std(latency_ofp_to_device_list),1)) |
| 550 | |
| 551 | latency_t0_to_device_max = \ |
| 552 | int(max(latency_t0_to_device_list)) |
| 553 | latency_t0_to_device_min = \ |
| 554 | int(min(latency_t0_to_device_list)) |
| 555 | latency_t0_to_device_avg = \ |
| 556 | (int(sum(latency_t0_to_device_list)) / \ |
| 557 | len(latency_t0_to_device_list)) |
| 558 | latency_t0_to_device_std_dev = \
| 559 | str(round(numpy.std(latency_t0_to_device_list),1)) |
| 560 | |
| 561 | latency_tcp_to_ofp_max = \ |
| 562 | int(max(latency_tcp_to_ofp_list)) |
| 563 | latency_tcp_to_ofp_min = \ |
| 564 | int(min(latency_tcp_to_ofp_list)) |
| 565 | latency_tcp_to_ofp_avg = \ |
| 566 | (int(sum(latency_tcp_to_ofp_list)) / \ |
| 567 | len(latency_tcp_to_ofp_list)) |
| 568 | latency_tcp_to_ofp_std_dev = \ |
| 569 | str(round(numpy.std(latency_tcp_to_ofp_list),1)) |
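#The five blocks above repeat the same reduction. An equivalent
#helper is sketched here for reference only (it is not called by
#the test):
def summarize_latency(lat_list):
    #Returns (min, max, avg, std-dev) in ms, mirroring the
    #calculations above
    return (int(min(lat_list)), int(max(lat_list)),
            int(sum(lat_list)) / len(lat_list),
            round(numpy.std(lat_list), 1))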
| 570 | |
| 571 | main.log.report("Switch add - End-to-end latency: "+\ |
| 572 | "Avg: "+str(latency_end_to_end_avg)+" ms "+ |
| 573 | "Std Deviation: "+latency_end_to_end_std_dev+" ms") |
| 574 | main.log.report("Switch add - OFP-to-Graph latency: "+\ |
| 575 | "Note: results are not accurate to sub-millisecond. "+ |
| 576 | "Any sub-millisecond results are rounded to 0 ms. ") |
| 577 | main.log.report("Avg: "+str(latency_ofp_to_graph_avg)+" ms "+ |
| 578 | "Std Deviation: "+latency_ofp_to_graph_std_dev+" ms") |
| 579 | main.log.report("Switch add - TCP-to-OFP latency: "+\ |
| 580 | "Avg: "+str(latency_tcp_to_ofp_avg)+" ms "+ |
| 581 | "Std Deviation: "+latency_tcp_to_ofp_std_dev+" ms") |
| 582 | |
| 583 | if debug_mode == 'on': |
| 584 | main.ONOS1.cp_logs_to_dir("/opt/onos/log/karaf.log", |
| 585 | "/tmp/", copy_file_name="sw_lat_karaf") |
| 586 | |
| 587 | utilities.assert_equals(expect=main.TRUE, actual=assertion, |
| 588 | onpass="Switch latency test successful", |
| 589 | onfail="Switch latency test failed") |
| 590 | |
| 591 | def CASE3(self, main): |
| 592 | ''' |
| 593 | Bring port up / down and measure latency. |
| 594 | Port enable / disable is simulated by ifconfig up / down |
| 595 | |
| 596 | In ONOS-next, we must ensure that the port we are |
| 597 | manipulating is connected to another switch with a valid |
| 598 | connection. Otherwise, graph view will not be updated. |
| 599 | ''' |
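#The port flap itself is driven from the Mininet CLI later in this
#case, e.g.:
#  mininet> sh ifconfig s1-eth1 down
#  mininet> sh ifconfig s1-eth1 up
#(s1-eth1 is the interface_config value set below.)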
| 600 | import time |
| 601 | import subprocess |
| 602 | import os |
| 603 | import requests |
| 604 | import json |
| 605 | import numpy |
| 606 | |
| 607 | ONOS1_ip = main.params['CTRL']['ip1'] |
| 608 | ONOS2_ip = main.params['CTRL']['ip2'] |
| 609 | ONOS3_ip = main.params['CTRL']['ip3'] |
| 610 | ONOS_user = main.params['CTRL']['user'] |
| 611 | |
| 612 | default_sw_port = main.params['CTRL']['port1'] |
| 613 | |
| 614 | assertion = main.TRUE |
| 615 | #Number of iterations of case |
| 616 | num_iter = main.params['TEST']['numIter'] |
| 617 | |
| 618 | #Timestamp 'keys' for json metrics output. |
| 619 | #These are subject to change, hence moved into params |
| 620 | deviceTimestamp = main.params['JSON']['deviceTimestamp'] |
| 621 | graphTimestamp = main.params['JSON']['graphTimestamp'] |
| 622 | |
| 623 | debug_mode = main.params['TEST']['debugMode'] |
| 624 | |
| 625 | local_time = time.strftime('%x %X') |
| 626 | local_time = local_time.replace("/","") |
| 627 | local_time = local_time.replace(" ","_") |
| 628 | local_time = local_time.replace(":","") |
| 629 | if debug_mode == 'on': |
| 630 | main.ONOS1.tshark_pcap("eth0", |
| 631 | "/tmp/port_lat_pcap_"+local_time) |
| 632 | |
| 633 | #Threshold for this test case |
| 634 | up_threshold_str = main.params['TEST']['portUpThreshold'] |
| 635 | down_threshold_str = main.params['TEST']['portDownThreshold'] |
| 636 | |
| 637 | up_threshold_obj = up_threshold_str.split(",") |
| 638 | down_threshold_obj = down_threshold_str.split(",") |
| 639 | |
| 640 | up_threshold_min = int(up_threshold_obj[0]) |
| 641 | up_threshold_max = int(up_threshold_obj[1]) |
| 642 | |
| 643 | down_threshold_min = int(down_threshold_obj[0]) |
| 644 | down_threshold_max = int(down_threshold_obj[1]) |
| 645 | |
| 646 | #NOTE: Some hardcoded variables you may need to configure |
| 647 | # besides the params |
| 648 | |
| 649 | tshark_port_status = "OFP 130 Port Status" |
| 650 | |
| 651 | tshark_port_up = "/tmp/tshark_port_up.txt" |
| 652 | tshark_port_down = "/tmp/tshark_port_down.txt" |
| 653 | interface_config = "s1-eth1" |
| 654 | |
| 655 | main.log.report("Port enable / disable latency") |
| 656 | main.log.report("Simulated by ifconfig up / down") |
| 657 | main.log.report("Total iterations of test: "+str(num_iter)) |
| 658 | |
| 659 | main.step("Assign switches s1 and s2 to controller 1") |
| 660 | main.Mininet1.assign_sw_controller(sw="1",ip1=ONOS1_ip, |
| 661 | port1=default_sw_port) |
| 662 | main.Mininet1.assign_sw_controller(sw="2",ip1=ONOS1_ip, |
| 663 | port1=default_sw_port) |
| 664 | |
| 665 | #Give enough time for metrics to propagate the |
| 666 | #assign controller event. Otherwise, these events may |
| 667 | #carry over to our measurements |
| 668 | time.sleep(15) |
| 669 | |
| 670 | port_up_device_to_ofp_list = [] |
| 671 | port_up_graph_to_ofp_list = [] |
| 672 | port_down_device_to_ofp_list = [] |
| 673 | port_down_graph_to_ofp_list = [] |
| 674 | |
| 675 | for i in range(0, int(num_iter)): |
| 676 | main.step("Starting wireshark capture for port status down") |
| 677 | main.ONOS1.tshark_grep(tshark_port_status, |
| 678 | tshark_port_down) |
| 679 | |
| 680 | time.sleep(5) |
| 681 | |
| 682 | #Disable interface that is connected to switch 2 |
| 683 | main.step("Disable port: "+interface_config) |
| 684 | main.Mininet1.handle.sendline("sh ifconfig "+ |
| 685 | interface_config+" down") |
| 686 | main.Mininet1.handle.expect("mininet>") |
| 687 | |
| 688 | time.sleep(3) |
| 689 | main.ONOS1.tshark_stop() |
| 690 | |
| 691 | main.step("Obtain t1 by metrics call") |
| 692 | json_str_up_1 = main.ONOS1cli.topology_events_metrics() |
| 693 | json_str_up_2 = main.ONOS2cli.topology_events_metrics() |
| 694 | json_str_up_3 = main.ONOS3cli.topology_events_metrics() |
| 695 | |
| 696 | json_obj_1 = json.loads(json_str_up_1) |
| 697 | json_obj_2 = json.loads(json_str_up_2) |
| 698 | json_obj_3 = json.loads(json_str_up_3) |
| 699 | |
| 700 | #Copy tshark output file from ONOS to TestON instance |
| 701 | #/tmp directory |
| 702 | os.system("scp "+ONOS_user+"@"+ONOS1_ip+":"+ |
| 703 | tshark_port_down+" /tmp/") |
| 704 | |
| 705 | f_port_down = open(tshark_port_down, 'r') |
| 706 | #Get first line of port down event from tshark |
| 707 | f_line = f_port_down.readline() |
| 708 | obj_down = f_line.split(" ") |
| 709 | if len(f_line) > 0: |
| 710 | timestamp_begin_pt_down = int(float(obj_down[1])*1000) |
| 711 | main.log.info("Port down begin timestamp: "+ |
| 712 | str(timestamp_begin_pt_down)) |
| 713 | else: |
| 714 | main.log.info("Tshark output file returned unexpected"+ |
| 715 | " results: "+str(obj_down)) |
| 716 | timestamp_begin_pt_down = 0 |
| 717 | |
| 718 | f_port_down.close() |
| 719 | |
| 720 | main.log.info("TEST tshark obj: "+str(obj_down)) |
| 721 | |
| 722 | time.sleep(3) |
| 723 | |
| 724 | #Obtain graph timestamp. This timestamp captures
| 725 | #the epoch time at which the topology graph was updated. |
| 726 | graph_timestamp_1 = \ |
| 727 | json_obj_1[graphTimestamp]['value'] |
| 728 | graph_timestamp_2 = \ |
| 729 | json_obj_2[graphTimestamp]['value'] |
| 730 | graph_timestamp_3 = \ |
| 731 | json_obj_3[graphTimestamp]['value'] |
| 732 | |
| 733 | main.log.info("TEST graph timestamp ONOS1: "+ |
| 734 | str(graph_timestamp_1)) |
| 735 | |
| 736 | #Obtain device timestamp. This timestamp captures |
| 737 | #the epoch time at which the device event happened |
| 738 | device_timestamp_1 = \ |
| 739 | json_obj_1[deviceTimestamp]['value'] |
| 740 | device_timestamp_2 = \ |
| 741 | json_obj_2[deviceTimestamp]['value'] |
| 742 | device_timestamp_3 = \ |
| 743 | json_obj_3[deviceTimestamp]['value'] |
| 744 | |
| 745 | #Get delta between graph event and OFP |
| 746 | pt_down_graph_to_ofp_1 = int(graph_timestamp_1) -\ |
| 747 | int(timestamp_begin_pt_down) |
| 748 | pt_down_graph_to_ofp_2 = int(graph_timestamp_2) -\ |
| 749 | int(timestamp_begin_pt_down) |
| 750 | pt_down_graph_to_ofp_3 = int(graph_timestamp_3) -\ |
| 751 | int(timestamp_begin_pt_down) |
| 752 | |
| 753 | #Get delta between device event and OFP |
| 754 | pt_down_device_to_ofp_1 = int(device_timestamp_1) -\ |
| 755 | int(timestamp_begin_pt_down) |
| 756 | pt_down_device_to_ofp_2 = int(device_timestamp_2) -\ |
| 757 | int(timestamp_begin_pt_down) |
| 758 | pt_down_device_to_ofp_3 = int(device_timestamp_3) -\ |
| 759 | int(timestamp_begin_pt_down) |
| 760 | |
| 761 | #Calculate average across cluster nodes
| 762 | pt_down_graph_to_ofp_avg =\ |
| 763 | (int(pt_down_graph_to_ofp_1) + |
| 764 | int(pt_down_graph_to_ofp_2) + |
| 765 | int(pt_down_graph_to_ofp_3)) / 3 |
| 766 | pt_down_device_to_ofp_avg = \ |
| 767 | (int(pt_down_device_to_ofp_1) + |
| 768 | int(pt_down_device_to_ofp_2) + |
| 769 | int(pt_down_device_to_ofp_3)) / 3 |
| 770 | |
| 771 | if pt_down_graph_to_ofp_avg > down_threshold_min and \ |
| 772 | pt_down_graph_to_ofp_avg < down_threshold_max: |
| 773 | port_down_graph_to_ofp_list.append( |
| 774 | pt_down_graph_to_ofp_avg) |
| 775 | main.log.info("Port down: graph to ofp avg: "+ |
| 776 | str(pt_down_graph_to_ofp_avg) + " ms") |
| 777 | else: |
| 778 | main.log.info("Average port down graph-to-ofp result" + |
| 779 | " exceeded the threshold: "+ |
| 780 | str(pt_down_graph_to_ofp_avg)) |
| 781 | |
| 782 | if pt_down_device_to_ofp_avg > 0 and \ |
| 783 | pt_down_device_to_ofp_avg < 1000: |
| 784 | port_down_device_to_ofp_list.append( |
| 785 | pt_down_device_to_ofp_avg) |
| 786 | main.log.info("Port down: device to ofp avg: "+ |
| 787 | str(pt_down_device_to_ofp_avg) + " ms") |
| 788 | else: |
| 789 | main.log.info("Average port down device-to-ofp result" + |
| 790 | " exceeded the threshold: "+ |
| 791 | str(pt_down_device_to_ofp_avg)) |
| 792 | |
| 793 | #Port up events |
| 794 | main.step("Enable port and obtain timestamp") |
| 795 | main.step("Starting wireshark capture for port status up") |
| 796 | main.ONOS1.tshark_grep(tshark_port_status, tshark_port_up) |
| 797 | time.sleep(5) |
| 798 | |
| 799 | main.Mininet1.handle.sendline("sh ifconfig "+ |
| 800 | interface_config+" up") |
| 801 | main.Mininet1.handle.expect("mininet>") |
| 802 | |
| 803 | #Allow time for tshark to capture event |
| 804 | time.sleep(3) |
| 805 | main.ONOS1.tshark_stop() |
| 806 | |
| 807 | #Obtain metrics shortly afterwards |
| 808 | #This timestamp captures
| 809 | #the epoch time at which the topology graph was updated. |
| 810 | main.step("Obtain t1 by REST call") |
| 811 | json_str_up_1 = main.ONOS1cli.topology_events_metrics() |
| 812 | json_str_up_2 = main.ONOS2cli.topology_events_metrics() |
| 813 | json_str_up_3 = main.ONOS3cli.topology_events_metrics() |
| 814 | |
| 815 | json_obj_1 = json.loads(json_str_up_1) |
| 816 | json_obj_2 = json.loads(json_str_up_2) |
| 817 | json_obj_3 = json.loads(json_str_up_3) |
| 818 | |
| 819 | os.system("scp "+ONOS_user+"@"+ONOS1_ip+":"+ |
| 820 | tshark_port_up+" /tmp/") |
| 821 | |
| 822 | f_port_up = open(tshark_port_up, 'r') |
| 823 | f_line = f_port_up.readline() |
| 824 | obj_up = f_line.split(" ") |
| 825 | if len(f_line) > 0: |
| 826 | timestamp_begin_pt_up = int(float(obj_up[1])*1000) |
| 827 | main.log.info("Port up begin timestamp: "+ |
| 828 | str(timestamp_begin_pt_up)) |
| 829 | else: |
| 830 | main.log.info("Tshark output file returned unexpected"+ |
| 831 | " results.") |
| 832 | timestamp_begin_pt_up = 0 |
| 833 | |
| 834 | f_port_up.close() |
| 835 | |
| 836 | graph_timestamp_1 = \ |
| 837 | json_obj_1[graphTimestamp]['value'] |
| 838 | graph_timestamp_2 = \ |
| 839 | json_obj_2[graphTimestamp]['value'] |
| 840 | graph_timestamp_3 = \ |
| 841 | json_obj_3[graphTimestamp]['value'] |
| 842 | |
| 843 | #Obtain device timestamp. This timestamp captures |
| 844 | #the epoch time at which the device event happened |
| 845 | device_timestamp_1 = \ |
| 846 | json_obj_1[deviceTimestamp]['value'] |
| 847 | device_timestamp_2 = \ |
| 848 | json_obj_2[deviceTimestamp]['value'] |
| 849 | device_timestamp_3 = \ |
| 850 | json_obj_3[deviceTimestamp]['value'] |
| 851 | |
| 852 | #Get delta between graph event and OFP |
| 853 | pt_up_graph_to_ofp_1 = int(graph_timestamp_1) -\ |
| 854 | int(timestamp_begin_pt_up) |
| 855 | pt_up_graph_to_ofp_2 = int(graph_timestamp_2) -\ |
| 856 | int(timestamp_begin_pt_up) |
| 857 | pt_up_graph_to_ofp_3 = int(graph_timestamp_3) -\ |
| 858 | int(timestamp_begin_pt_up) |
| 859 | |
| 860 | #Get delta between device event and OFP |
| 861 | pt_up_device_to_ofp_1 = int(device_timestamp_1) -\ |
| 862 | int(timestamp_begin_pt_up) |
| 863 | pt_up_device_to_ofp_2 = int(device_timestamp_2) -\ |
| 864 | int(timestamp_begin_pt_up) |
| 865 | pt_up_device_to_ofp_3 = int(device_timestamp_3) -\ |
| 866 | int(timestamp_begin_pt_up) |
| 867 | |
| 868 | main.log.info("ONOS1 delta G2O: "+str(pt_up_graph_to_ofp_1)) |
| 869 | main.log.info("ONOS2 delta G2O: "+str(pt_up_graph_to_ofp_2)) |
| 870 | main.log.info("ONOS3 delta G2O: "+str(pt_up_graph_to_ofp_3)) |
| 871 | |
| 872 | main.log.info("ONOS1 delta D2O: "+str(pt_up_device_to_ofp_1)) |
| 873 | main.log.info("ONOS2 delta D2O: "+str(pt_up_device_to_ofp_2)) |
| 874 | main.log.info("ONOS3 delta D2O: "+str(pt_up_device_to_ofp_3)) |
| 875 | |
| 876 | pt_up_graph_to_ofp_avg = \ |
| 877 | (int(pt_up_graph_to_ofp_1) + |
| 878 | int(pt_up_graph_to_ofp_2) + |
| 879 | int(pt_up_graph_to_ofp_3)) / 3 |
| 880 | |
| 881 | pt_up_device_to_ofp_avg = \ |
| 882 | (int(pt_up_device_to_ofp_1) + |
| 883 | int(pt_up_device_to_ofp_2) + |
| 884 | int(pt_up_device_to_ofp_3)) / 3 |
| 885 | |
| 886 | if pt_up_graph_to_ofp_avg > up_threshold_min and \ |
| 887 | pt_up_graph_to_ofp_avg < up_threshold_max: |
| 888 | port_up_graph_to_ofp_list.append( |
| 889 | pt_up_graph_to_ofp_avg) |
| 890 | main.log.info("Port down: graph to ofp avg: "+ |
| 891 | str(pt_up_graph_to_ofp_avg) + " ms") |
| 892 | else: |
| 893 | main.log.info("Average port up graph-to-ofp result"+ |
| 894 | " exceeded the threshold: "+ |
| 895 | str(pt_up_graph_to_ofp_avg)) |
| 896 | |
| 897 | if pt_up_device_to_ofp_avg > up_threshold_min and \ |
| 898 | pt_up_device_to_ofp_avg < up_threshold_max: |
| 899 | port_up_device_to_ofp_list.append( |
| 900 | pt_up_device_to_ofp_avg) |
| 901 | main.log.info("Port up: device to ofp avg: "+ |
| 902 | str(pt_up_device_to_ofp_avg) + " ms") |
| 903 | else: |
| 904 | main.log.info("Average port up device-to-ofp result"+ |
| 905 | " exceeded the threshold: "+ |
| 906 | str(pt_up_device_to_ofp_avg)) |
| 907 | |
| 908 | #END ITERATION FOR LOOP |
| 909 | |
| 910 | #Check all list for latency existence and set assertion |
| 911 | if (port_down_graph_to_ofp_list and port_down_device_to_ofp_list\ |
| 912 | and port_up_graph_to_ofp_list and port_up_device_to_ofp_list): |
| 913 | assertion = main.TRUE |
| 914 | |
| 915 | #Calculate and report latency measurements |
| 916 | port_down_graph_to_ofp_min = min(port_down_graph_to_ofp_list) |
| 917 | port_down_graph_to_ofp_max = max(port_down_graph_to_ofp_list) |
| 918 | port_down_graph_to_ofp_avg = \ |
| 919 | (sum(port_down_graph_to_ofp_list) / |
| 920 | len(port_down_graph_to_ofp_list)) |
| 921 | port_down_graph_to_ofp_std_dev = \ |
| 922 | str(round(numpy.std(port_down_graph_to_ofp_list),1)) |
| 923 | |
| 924 | main.log.report("Port down graph-to-ofp "+ |
| 925 | "Avg: "+str(port_down_graph_to_ofp_avg)+" ms "+ |
| 926 | "Std Deviation: "+port_down_graph_to_ofp_std_dev+" ms") |
| 927 | |
| 928 | port_down_device_to_ofp_min = min(port_down_device_to_ofp_list) |
| 929 | port_down_device_to_ofp_max = max(port_down_device_to_ofp_list) |
| 930 | port_down_device_to_ofp_avg = \ |
| 931 | (sum(port_down_device_to_ofp_list) /\ |
| 932 | len(port_down_device_to_ofp_list)) |
| 933 | port_down_device_to_ofp_std_dev = \ |
| 934 | str(round(numpy.std(port_down_device_to_ofp_list),1)) |
| 935 | |
| 936 | main.log.report("Port down device-to-ofp "+ |
| 937 | "Avg: "+str(port_down_device_to_ofp_avg)+" ms "+ |
| 938 | "Std Deviation: "+port_down_device_to_ofp_std_dev+" ms") |
| 939 | |
| 940 | port_up_graph_to_ofp_min = min(port_up_graph_to_ofp_list) |
| 941 | port_up_graph_to_ofp_max = max(port_up_graph_to_ofp_list) |
| 942 | port_up_graph_to_ofp_avg = \ |
| 943 | (sum(port_up_graph_to_ofp_list) /\ |
| 944 | len(port_up_graph_to_ofp_list)) |
| 945 | port_up_graph_to_ofp_std_dev = \ |
| 946 | str(round(numpy.std(port_up_graph_to_ofp_list),1)) |
| 947 | |
| 948 | main.log.report("Port up graph-to-ofp "+ |
| 949 | "Avg: "+str(port_up_graph_to_ofp_avg)+" ms "+ |
| 950 | "Std Deviation: "+port_up_graph_to_ofp_std_dev+" ms") |
| 951 | |
| 952 | port_up_device_to_ofp_min = min(port_up_device_to_ofp_list) |
| 953 | port_up_device_to_ofp_max = max(port_up_device_to_ofp_list) |
| 954 | port_up_device_to_ofp_avg = \ |
| 955 | (sum(port_up_device_to_ofp_list) /\ |
| 956 | len(port_up_device_to_ofp_list)) |
| 957 | port_up_device_to_ofp_std_dev = \ |
| 958 | str(round(numpy.std(port_up_device_to_ofp_list),1)) |
| 959 | |
| 960 | main.log.report("Port up device-to-ofp "+ |
| 961 | "Avg: "+str(port_up_device_to_ofp_avg)+" ms "+ |
| 962 | "Std Deviation: "+port_up_device_to_ofp_std_dev+" ms") |
| 963 | |
| 964 | utilities.assert_equals(expect=main.TRUE, actual=assertion, |
| 965 | onpass="Port discovery latency calculation successful", |
| 966 | onfail="Port discovery test failed") |
| 967 | |
| 968 | def CASE4(self, main): |
| 969 | ''' |
| 970 | Link down event using loss rate 100% |
| 971 | |
| 972 | Important: |
| 973 | Use a simple 2 switch topology with 1 link between |
| 974 | the two switches. Ensure that mac addresses of the |
| 975 | switches are 1 / 2 respectively |
| 976 | ''' |
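#The link flap is driven from the Mininet CLI with traffic control,
#as sent later in this case:
#  mininet> sh tc qdisc add dev s1-eth1 root netem loss 100%   (link down)
#  mininet> sh tc qdisc del dev s1-eth1 root                   (link up)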
| 977 | import time |
| 978 | import subprocess |
| 979 | import os |
| 980 | import requests |
| 981 | import json |
| 982 | import numpy |
| 983 | |
| 984 | ONOS1_ip = main.params['CTRL']['ip1'] |
| 985 | ONOS2_ip = main.params['CTRL']['ip2'] |
| 986 | ONOS3_ip = main.params['CTRL']['ip3'] |
| 987 | ONOS_user = main.params['CTRL']['user'] |
| 988 | |
| 989 | default_sw_port = main.params['CTRL']['port1'] |
| 990 | |
| 991 | #Number of iterations of case |
| 992 | num_iter = main.params['TEST']['numIter'] |
| 993 | |
| 994 | #Timestamp 'keys' for json metrics output. |
| 995 | #These are subject to change, hence moved into params |
| 996 | deviceTimestamp = main.params['JSON']['deviceTimestamp'] |
| 997 | linkTimestamp = main.params['JSON']['linkTimestamp'] |
| 998 | graphTimestamp = main.params['JSON']['graphTimestamp'] |
| 999 | |
| 1000 | debug_mode = main.params['TEST']['debugMode'] |
| 1001 | |
| 1002 | local_time = time.strftime('%x %X') |
| 1003 | local_time = local_time.replace("/","") |
| 1004 | local_time = local_time.replace(" ","_") |
| 1005 | local_time = local_time.replace(":","") |
| 1006 | if debug_mode == 'on': |
| 1007 | main.ONOS1.tshark_pcap("eth0", |
| 1008 | "/tmp/link_lat_pcap_"+local_time) |
| 1009 | |
| 1010 | #Threshold for this test case |
| 1011 | up_threshold_str = main.params['TEST']['linkUpThreshold'] |
| 1012 | down_threshold_str = main.params['TEST']['linkDownThreshold'] |
| 1013 | |
| 1014 | up_threshold_obj = up_threshold_str.split(",") |
| 1015 | down_threshold_obj = down_threshold_str.split(",") |
| 1016 | |
| 1017 | up_threshold_min = int(up_threshold_obj[0]) |
| 1018 | up_threshold_max = int(up_threshold_obj[1]) |
| 1019 | |
| 1020 | down_threshold_min = int(down_threshold_obj[0]) |
| 1021 | down_threshold_max = int(down_threshold_obj[1]) |
| 1022 | |
| 1023 | assertion = main.TRUE |
| 1024 | #Link event timestamp to system time list |
| 1025 | link_down_link_to_system_list = [] |
| 1026 | link_up_link_to_system_list = [] |
| 1027 | #Graph event timestamp to system time list |
| 1028 | link_down_graph_to_system_list = [] |
| 1029 | link_up_graph_to_system_list = [] |
| 1030 | |
| 1031 | main.log.report("Link up / down discovery latency between "+ |
| 1032 | "two switches") |
| 1033 | main.log.report("Simulated by setting loss-rate 100%") |
| 1034 | main.log.report("'tc qdisc add dev <intfs> root netem loss 100%'") |
| 1035 | main.log.report("Total iterations of test: "+str(num_iter)) |
| 1036 | |
| 1037 | main.step("Assign all switches") |
| 1038 | main.Mininet1.assign_sw_controller(sw="1", |
| 1039 | ip1=ONOS1_ip, port1=default_sw_port) |
| 1040 | main.Mininet1.assign_sw_controller(sw="2", |
| 1041 | ip1=ONOS1_ip, port1=default_sw_port) |
| 1042 | |
| 1043 | main.step("Verifying switch assignment") |
| 1044 | result_s1 = main.Mininet1.get_sw_controller(sw="s1") |
| 1045 | result_s2 = main.Mininet1.get_sw_controller(sw="s2") |
| 1046 | |
| 1047 | #Allow time for events to finish before taking measurements |
| 1048 | time.sleep(10) |
| 1049 | |
| 1050 | link_down1 = False |
| 1051 | link_down2 = False |
| 1052 | link_down3 = False |
| 1053 | #Start iteration of link event test |
| 1054 | for i in range(0, int(num_iter)): |
| 1055 | main.step("Getting initial system time as t0") |
| 1056 | |
| 1057 | #System time in epoch ms |
| 1058 | timestamp_link_down_t0 = time.time() * 1000 |
| 1059 | #Link down is simulated by 100% loss rate using traffic |
| 1060 | #control command |
| 1061 | main.Mininet1.handle.sendline( |
| 1062 | "sh tc qdisc add dev s1-eth1 root netem loss 100%") |
| 1063 | |
| 1064 | #TODO: Iterate through 'links' command to verify that |
| 1065 | # link s1 -> s2 went down (loop timeout 30 seconds) |
| 1066 | # on all 3 ONOS instances |
| 1067 | main.log.info("Checking ONOS for link update") |
| 1068 | loop_count = 0 |
| 1069 | while( not (link_down1 and link_down2 and link_down3)\ |
| 1070 | and loop_count < 30 ): |
| 1071 | json_str1 = main.ONOS1cli.links() |
| 1072 | json_str2 = main.ONOS2cli.links() |
| 1073 | json_str3 = main.ONOS3cli.links() |
| 1074 | |
| 1075 | if not (json_str1 and json_str2 and json_str3): |
| 1076 | main.log.error("CLI command returned error ") |
| 1077 | break |
| 1078 | else: |
| 1079 | json_obj1 = json.loads(json_str1) |
| 1080 | json_obj2 = json.loads(json_str2) |
| 1081 | json_obj3 = json.loads(json_str3) |
| 1082 | for obj1 in json_obj1: |
| 1083 | if '01' not in obj1['src']['device']: |
| 1084 | link_down1 = True |
| 1085 | main.log.info("Link down from "+ |
| 1086 | "s1 -> s2 on ONOS1 detected") |
| 1087 | for obj2 in json_obj2: |
| 1088 | if '01' not in obj2['src']['device']: |
| 1089 | link_down2 = True |
| 1090 | main.log.info("Link down from "+ |
| 1091 | "s1 -> s2 on ONOS2 detected") |
| 1092 | for obj3 in json_obj3: |
| 1093 | if '01' not in obj3['src']['device']: |
| 1094 | link_down3 = True |
| 1095 | main.log.info("Link down from "+ |
| 1096 | "s1 -> s2 on ONOS3 detected") |
| 1097 | |
| 1098 | loop_count += 1 |
| 1099 | #If CLI doesn't like the continuous requests |
| 1100 | #and exits in this loop, increase the sleep here. |
| 1101 | #Consequently, while loop timeout will increase |
| 1102 | time.sleep(1) |
| 1103 | |
| 1104 | #Give time for metrics measurement to catch up |
| 1105 | #NOTE: May need to be configured more accurately |
| 1106 | time.sleep(10) |
| 1107 | #If we exited the while loop and link down 1,2,3 are still |
| 1108 | #false, then ONOS has failed to discover link down event |
| 1109 | if not (link_down1 and link_down2 and link_down3): |
| 1110 | main.log.info("Link down discovery failed") |
| 1111 | |
| 1112 | link_down_lat_graph1 = 0 |
| 1113 | link_down_lat_graph2 = 0 |
| 1114 | link_down_lat_graph3 = 0 |
| 1115 | link_down_lat_device1 = 0 |
| 1116 | link_down_lat_device2 = 0 |
| 1117 | link_down_lat_device3 = 0 |
| 1118 | |
| 1119 | assertion = main.FALSE |
| 1120 | else: |
| 1121 | json_topo_metrics_1 =\ |
| 1122 | main.ONOS1cli.topology_events_metrics() |
| 1123 | json_topo_metrics_2 =\ |
| 1124 | main.ONOS2cli.topology_events_metrics() |
| 1125 | json_topo_metrics_3 =\ |
| 1126 | main.ONOS3cli.topology_events_metrics() |
| 1127 | json_topo_metrics_1 = json.loads(json_topo_metrics_1) |
| 1128 | json_topo_metrics_2 = json.loads(json_topo_metrics_2) |
| 1129 | json_topo_metrics_3 = json.loads(json_topo_metrics_3) |
| 1130 | |
| 1131 | main.log.info("Obtaining graph and device timestamp") |
| 1132 | graph_timestamp_1 = \ |
| 1133 | json_topo_metrics_1[graphTimestamp]['value'] |
| 1134 | graph_timestamp_2 = \ |
| 1135 | json_topo_metrics_2[graphTimestamp]['value'] |
| 1136 | graph_timestamp_3 = \ |
| 1137 | json_topo_metrics_3[graphTimestamp]['value'] |
| 1138 | |
| 1139 | link_timestamp_1 = \ |
| 1140 | json_topo_metrics_1[linkTimestamp]['value'] |
| 1141 | link_timestamp_2 = \ |
| 1142 | json_topo_metrics_2[linkTimestamp]['value'] |
| 1143 | link_timestamp_3 = \ |
| 1144 | json_topo_metrics_3[linkTimestamp]['value'] |
| 1145 | |
| 1146 | if graph_timestamp_1 and graph_timestamp_2 and\ |
| 1147 | graph_timestamp_3 and link_timestamp_1 and\ |
| 1148 | link_timestamp_2 and link_timestamp_3: |
| 1149 | link_down_lat_graph1 = int(graph_timestamp_1) -\ |
| 1150 | int(timestamp_link_down_t0) |
| 1151 | link_down_lat_graph2 = int(graph_timestamp_2) -\ |
| 1152 | int(timestamp_link_down_t0) |
| 1153 | link_down_lat_graph3 = int(graph_timestamp_3) -\ |
| 1154 | int(timestamp_link_down_t0) |
| 1155 | |
| 1156 | link_down_lat_link1 = int(link_timestamp_1) -\ |
| 1157 | int(timestamp_link_down_t0) |
| 1158 | link_down_lat_link2 = int(link_timestamp_2) -\ |
| 1159 | int(timestamp_link_down_t0) |
| 1160 | link_down_lat_link3 = int(link_timestamp_3) -\ |
| 1161 | int(timestamp_link_down_t0) |
| 1162 | else: |
| 1163 | main.log.error("There was an error calculating"+ |
| 1164 | " the delta for link down event") |
| 1165 | link_down_lat_graph1 = 0 |
| 1166 | link_down_lat_graph2 = 0 |
| 1167 | link_down_lat_graph3 = 0 |
| 1168 | |
| 1169 | link_down_lat_device1 = 0 |
| 1170 | link_down_lat_device2 = 0 |
| 1171 | link_down_lat_device3 = 0 |
| 1172 | |
| 1173 | main.log.info("Link down latency ONOS1 iteration "+ |
| 1174 | str(i)+" (end-to-end): "+ |
| 1175 | str(link_down_lat_graph1)+" ms") |
| 1176 | main.log.info("Link down latency ONOS2 iteration "+ |
| 1177 | str(i)+" (end-to-end): "+ |
| 1178 | str(link_down_lat_graph2)+" ms") |
| 1179 | main.log.info("Link down latency ONOS3 iteration "+ |
| 1180 | str(i)+" (end-to-end): "+ |
| 1181 | str(link_down_lat_graph3)+" ms") |
| 1182 | |
| 1183 | main.log.info("Link down latency ONOS1 iteration "+ |
| 1184 | str(i)+" (link-event-to-system-timestamp): "+ |
| 1185 | str(link_down_lat_link1)+" ms") |
| 1186 | main.log.info("Link down latency ONOS2 iteration "+ |
| 1187 | str(i)+" (link-event-to-system-timestamp): "+ |
| 1188 | str(link_down_lat_link2)+" ms") |
| 1189 | main.log.info("Link down latency ONOS3 iteration "+ |
| 1190 | str(i)+" (link-event-to-system-timestamp): "+ |
| 1191 | str(link_down_lat_link3)+" ms")
| 1192 | |
| 1193 | #Calculate avg of node calculations |
| 1194 | link_down_lat_graph_avg =\ |
| 1195 | (link_down_lat_graph1 + |
| 1196 | link_down_lat_graph2 + |
| 1197 | link_down_lat_graph3) / 3 |
| 1198 | link_down_lat_link_avg =\ |
| 1199 | (link_down_lat_link1 + |
| 1200 | link_down_lat_link2 + |
| 1201 | link_down_lat_link3) / 3 |
| 1202 | |
| 1203 | #Set threshold and append latency to list |
| 1204 | if link_down_lat_graph_avg > down_threshold_min and\ |
| 1205 | link_down_lat_graph_avg < down_threshold_max: |
| 1206 | link_down_graph_to_system_list.append( |
| 1207 | link_down_lat_graph_avg) |
| 1208 | else: |
| 1209 | main.log.info("Link down latency exceeded threshold") |
| 1210 | main.log.info("Results for iteration "+str(i)+ |
| 1211 | "have been omitted") |
| 1212 | if link_down_lat_link_avg > down_threshold_min and\ |
| 1213 | link_down_lat_link_avg < down_threshold_max: |
| 1214 | link_down_link_to_system_list.append( |
| 1215 | link_down_lat_link_avg) |
| 1216 | else: |
| 1217 | main.log.info("Link down latency exceeded threshold") |
| 1218 | main.log.info("Results for iteration "+str(i)+ |
| 1219 | "have been omitted") |
| 1220 | |
| 1221 | #NOTE: To remove loss rate and measure latency: |
| 1222 | # 'sh tc qdisc del dev s1-eth1 root' |
| 1223 | timestamp_link_up_t0 = time.time() * 1000 |
| 1224 | main.Mininet1.handle.sendline("sh tc qdisc del dev "+ |
| 1225 | "s1-eth1 root") |
| 1226 | main.Mininet1.handle.expect("mininet>") |
| 1227 | |
| 1228 | main.log.info("Checking ONOS for link update") |
| 1229 | |
| 1230 | link_down1 = True |
| 1231 | link_down2 = True |
| 1232 | link_down3 = True |
| 1233 | loop_count = 0 |
| 1234 | while( (link_down1 and link_down2 and link_down3)\ |
| 1235 | and loop_count < 30 ): |
| 1236 | json_str1 = main.ONOS1cli.links() |
| 1237 | json_str2 = main.ONOS2cli.links() |
| 1238 | json_str3 = main.ONOS3cli.links() |
| 1239 | if not (json_str1 and json_str2 and json_str3): |
| 1240 | main.log.error("CLI command returned error ") |
| 1241 | break |
| 1242 | else: |
| 1243 | json_obj1 = json.loads(json_str1) |
| 1244 | json_obj2 = json.loads(json_str2) |
| 1245 | json_obj3 = json.loads(json_str3) |
| 1246 | |
| 1247 | for obj1 in json_obj1: |
| 1248 | if '01' in obj1['src']['device']: |
| 1249 | link_down1 = False |
| 1250 | main.log.info("Link up from "+ |
| 1251 | "s1 -> s2 on ONOS1 detected") |
| 1252 | for obj2 in json_obj2: |
| 1253 | if '01' in obj2['src']['device']: |
| 1254 | link_down2 = False |
| 1255 | main.log.info("Link up from "+ |
| 1256 | "s1 -> s2 on ONOS2 detected") |
| 1257 | for obj3 in json_obj3: |
| 1258 | if '01' in obj3['src']['device']: |
| 1259 | link_down3 = False |
| 1260 | main.log.info("Link up from "+ |
| 1261 | "s1 -> s2 on ONOS3 detected") |
| 1262 | |
| 1263 | loop_count += 1 |
| 1264 | time.sleep(1) |
| 1265 | |
| 1266 | if (link_down1 and link_down2 and link_down3): |
| 1267 | main.log.info("Link up discovery failed") |
| 1268 | |
| 1269 | link_up_lat_graph1 = 0 |
| 1270 | link_up_lat_graph2 = 0 |
| 1271 | link_up_lat_graph3 = 0 |
| 1272 | link_up_lat_device1 = 0 |
| 1273 | link_up_lat_device2 = 0 |
| 1274 | link_up_lat_device3 = 0 |
| 1275 | |
| 1276 | assertion = main.FALSE |
| 1277 | else: |
| 1278 | json_topo_metrics_1 =\ |
| 1279 | main.ONOS1cli.topology_events_metrics() |
| 1280 | json_topo_metrics_2 =\ |
| 1281 | main.ONOS2cli.topology_events_metrics() |
| 1282 | json_topo_metrics_3 =\ |
| 1283 | main.ONOS3cli.topology_events_metrics() |
| 1284 | json_topo_metrics_1 = json.loads(json_topo_metrics_1) |
| 1285 | json_topo_metrics_2 = json.loads(json_topo_metrics_2) |
| 1286 | json_topo_metrics_3 = json.loads(json_topo_metrics_3) |
| 1287 | |
| 1288 | main.log.info("Obtaining graph and device timestamp") |
| 1289 | graph_timestamp_1 = \ |
| 1290 | json_topo_metrics_1[graphTimestamp]['value'] |
| 1291 | graph_timestamp_2 = \ |
| 1292 | json_topo_metrics_2[graphTimestamp]['value'] |
| 1293 | graph_timestamp_3 = \ |
| 1294 | json_topo_metrics_3[graphTimestamp]['value'] |
| 1295 | |
| 1296 | link_timestamp_1 = \ |
| 1297 | json_topo_metrics_1[linkTimestamp]['value'] |
| 1298 | link_timestamp_2 = \ |
| 1299 | json_topo_metrics_2[linkTimestamp]['value'] |
| 1300 | link_timestamp_3 = \ |
| 1301 | json_topo_metrics_3[linkTimestamp]['value'] |
| 1302 | |
| 1303 | if graph_timestamp_1 and graph_timestamp_2 and\ |
| 1304 | graph_timestamp_3 and link_timestamp_1 and\ |
| 1305 | link_timestamp_2 and link_timestamp_3: |
| 1306 | link_up_lat_graph1 = int(graph_timestamp_1) -\ |
| 1307 | int(timestamp_link_up_t0) |
| 1308 | link_up_lat_graph2 = int(graph_timestamp_2) -\ |
| 1309 | int(timestamp_link_up_t0) |
| 1310 | link_up_lat_graph3 = int(graph_timestamp_3) -\ |
| 1311 | int(timestamp_link_up_t0) |
| 1312 | |
| 1313 | link_up_lat_link1 = int(link_timestamp_1) -\ |
| 1314 | int(timestamp_link_up_t0) |
| 1315 | link_up_lat_link2 = int(link_timestamp_2) -\ |
| 1316 | int(timestamp_link_up_t0) |
| 1317 | link_up_lat_link3 = int(link_timestamp_3) -\ |
| 1318 | int(timestamp_link_up_t0) |
| 1319 | else: |
| 1320 | main.log.error("There was an error calculating"+ |
| 1321 | " the delta for link down event") |
| 1322 | link_up_lat_graph1 = 0 |
| 1323 | link_up_lat_graph2 = 0 |
| 1324 | link_up_lat_graph3 = 0 |
| 1325 | |
| 1326 | link_up_lat_device1 = 0 |
| 1327 | link_up_lat_device2 = 0 |
| 1328 | link_up_lat_device3 = 0 |
| 1329 | |
| 1330 | if debug_mode == 'on': |
| 1331 | main.log.info("Link up latency ONOS1 iteration "+ |
| 1332 | str(i)+" (end-to-end): "+ |
| 1333 | str(link_up_lat_graph1)+" ms") |
| 1334 | main.log.info("Link up latency ONOS2 iteration "+ |
| 1335 | str(i)+" (end-to-end): "+ |
| 1336 | str(link_up_lat_graph2)+" ms") |
| 1337 | main.log.info("Link up latency ONOS3 iteration "+ |
| 1338 | str(i)+" (end-to-end): "+ |
| 1339 | str(link_up_lat_graph3)+" ms") |
| 1340 | |
| 1341 | main.log.info("Link up latency ONOS1 iteration "+ |
| 1342 | str(i)+" (link-event-to-system-timestamp): "+ |
| 1343 | str(link_up_lat_link1)+" ms") |
| 1344 | main.log.info("Link up latency ONOS2 iteration "+ |
| 1345 | str(i)+" (link-event-to-system-timestamp): "+ |
| 1346 | str(link_up_lat_link2)+" ms") |
| 1347 | main.log.info("Link up latency ONOS3 iteration "+ |
| 1348 | str(i)+" (link-event-to-system-timestamp): "+ |
| 1349 | str(link_up_lat_link3)+" ms")
| 1350 | |
| 1351 | #Calculate avg of node calculations |
| 1352 | link_up_lat_graph_avg =\ |
| 1353 | (link_up_lat_graph1 + |
| 1354 | link_up_lat_graph2 + |
| 1355 | link_up_lat_graph3) / 3 |
| 1356 | link_up_lat_link_avg =\ |
| 1357 | (link_up_lat_link1 + |
| 1358 | link_up_lat_link2 + |
| 1359 | link_up_lat_link3) / 3 |
| 1360 | |
| 1361 | #Set threshold and append latency to list |
| 1362 | if link_up_lat_graph_avg > up_threshold_min and\ |
| 1363 | link_up_lat_graph_avg < up_threshold_max: |
| 1364 | link_up_graph_to_system_list.append( |
| 1365 | link_up_lat_graph_avg) |
| 1366 | else: |
| 1367 | main.log.info("Link up latency exceeded threshold") |
| 1368 | main.log.info("Results for iteration "+str(i)+ |
| 1369 | "have been omitted") |
| 1370 | if link_up_lat_link_avg > up_threshold_min and\ |
| 1371 | link_up_lat_link_avg < up_threshold_max: |
| 1372 | link_up_link_to_system_list.append( |
| 1373 | link_up_lat_link_avg) |
| 1374 | else: |
| 1375 | main.log.info("Link up latency exceeded threshold") |
| 1376 | main.log.info("Results for iteration "+str(i)+ |
| 1377 | "have been omitted") |
| 1378 | |
| 1379 | #Calculate min, max, avg of list and report |
| 1380 | link_down_min = min(link_down_graph_to_system_list) |
| 1381 | link_down_max = max(link_down_graph_to_system_list) |
| 1382 | link_down_avg = sum(link_down_graph_to_system_list) / \ |
| 1383 | len(link_down_graph_to_system_list) |
| 1384 | link_up_min = min(link_up_graph_to_system_list) |
| 1385 | link_up_max = max(link_up_graph_to_system_list) |
| 1386 | link_up_avg = sum(link_up_graph_to_system_list) / \ |
| 1387 | len(link_up_graph_to_system_list) |
| 1388 | link_down_std_dev = \ |
| 1389 | str(round(numpy.std(link_down_graph_to_system_list),1)) |
| 1390 | link_up_std_dev = \ |
| 1391 | str(round(numpy.std(link_up_graph_to_system_list),1)) |
| 1392 | |
| 1393 | main.log.report("Link down latency " + |
| 1394 | "Avg: "+str(link_down_avg)+" ms "+ |
| 1395 | "Std Deviation: "+link_down_std_dev+" ms") |
| 1396 | main.log.report("Link up latency "+ |
| 1397 | "Avg: "+str(link_up_avg)+" ms "+ |
| 1398 | "Std Deviation: "+link_up_std_dev+" ms") |
| 1399 | |
| 1400 | utilities.assert_equals(expect=main.TRUE, actual=assertion, |
| 1401 | onpass="Link discovery latency calculation successful", |
| 1402 | onfail="Link discovery latency case failed") |
| 1403 | |
| 1404 | def CASE5(self, main): |
| 1405 | ''' |
| 1406 | 100 Switch discovery latency |
| 1407 | |
| 1408 | Important: |
| 1409 | This test case can be dangerous if your |
| 1410 | machine has pre-existing iptables rules: |
| 1411 | one of the steps of the test case flushes |
| 1412 | all existing iptables rules. |
| 1413 | Note: |
| 1414 | You can specify the number of switches in the |
| 1415 | params file to adjust the switch discovery size |
| 1416 | (and specify the corresponding topology in the |
| 1417 | Mininet1 .topo file) |
| 1418 | ''' |
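| | #The parameter names below are the ones this case reads; the values |
| | #shown here are illustrative placeholders only (TestON .params files |
| | #are XML-like): |
| | #   <TEST> |
| | #       <numIter>3</numIter> |
| | #       <numSwitch>100</numSwitch> |
| | #       <swDisc100Threshold>0,5000</swDisc100Threshold> |
| | #       <debugMode>off</debugMode> |
| | #   </TEST> |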
| 1419 | import time |
| 1420 | import subprocess |
| 1421 | import os |
| 1422 | import requests |
| 1423 | import json |
| 1424 | |
| 1425 | ONOS1_ip = main.params['CTRL']['ip1'] |
| 1426 | ONOS2_ip = main.params['CTRL']['ip2'] |
| 1427 | ONOS3_ip = main.params['CTRL']['ip3'] |
| 1428 | MN1_ip = main.params['MN']['ip1'] |
| 1429 | ONOS_user = main.params['CTRL']['user'] |
| 1430 | |
| 1431 | default_sw_port = main.params['CTRL']['port1'] |
| 1432 | |
| 1433 | #Number of iterations of case |
| 1434 | num_iter = main.params['TEST']['numIter'] |
| 1435 | num_sw = main.params['TEST']['numSwitch'] |
| 1436 | |
| 1437 | #Timestamp 'keys' for json metrics output. |
| 1438 | #These are subject to change, hence moved into params |
| 1439 | deviceTimestamp = main.params['JSON']['deviceTimestamp'] |
| 1440 | graphTimestamp = main.params['JSON']['graphTimestamp'] |
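| | #Based on how these keys are used later in this case, each metrics |
| | #entry is expected to look roughly like |
| | #   {<timestampKey>: {'value': <epoch ms>, ...}, ...} |
| | #(assumed shape; only the 'value' field is read here) |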
| 1441 | |
| 1442 | debug_mode = main.params['TEST']['debugMode'] |
| 1443 | |
| 1444 | local_time = time.strftime('%X') |
| 1445 | local_time = local_time.replace("/","") |
| 1446 | local_time = local_time.replace(" ","_") |
| 1447 | local_time = local_time.replace(":","") |
| 1448 | if debug_mode == 'on': |
| 1449 | main.ONOS1.tshark_pcap("eth0", |
| 1450 | "/tmp/100_sw_lat_pcap_"+local_time) |
| 1451 | |
| 1452 | #Threshold for this test case |
| 1453 | sw_disc_threshold_str = main.params['TEST']['swDisc100Threshold'] |
| 1454 | sw_disc_threshold_obj = sw_disc_threshold_str.split(",") |
| 1455 | sw_disc_threshold_min = int(sw_disc_threshold_obj[0]) |
| 1456 | sw_disc_threshold_max = int(sw_disc_threshold_obj[1]) |
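| | #The threshold param is expected as "min,max" in ms, |
| | #e.g. "0,5000" (illustrative values) |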
| 1457 | |
| 1458 | tshark_ofp_output = "/tmp/tshark_ofp_"+num_sw+"sw.txt" |
| 1459 | tshark_tcp_output = "/tmp/tshark_tcp_"+num_sw+"sw.txt" |
| 1460 | |
| 1461 | tshark_ofp_result_list = [] |
| 1462 | tshark_tcp_result_list = [] |
| 1463 | |
| 1464 | sw_discovery_lat_list = [] |
| 1465 | |
| 1466 | main.case(num_sw+" Switch discovery latency") |
| 1467 | main.step("Assigning all switches to ONOS1") |
| 1468 | for i in range(1, int(num_sw)+1): |
| 1469 | main.Mininet1.assign_sw_controller( |
| 1470 | sw=str(i), |
| 1471 | ip1=ONOS1_ip, |
| 1472 | port1=default_sw_port) |
| 1473 | |
| 1474 | #Ensure that nodes are configured with ptpd |
| 1475 | #Just a warning message |
| 1476 | main.log.info("Please check ptpd configuration to ensure"+\ |
| 1477 | " All nodes' system times are in sync") |
| 1478 | time.sleep(5) |
| 1479 | |
| 1480 | for i in range(0, int(num_iter)): |
| 1481 | |
| 1482 | main.step("Set iptables rule to block incoming sw connections") |
| 1483 | #Set iptables rule to block incoming switch connections |
| 1484 | #The rule description is as follows: |
| 1485 | #   Append to the INPUT chain a DROP rule |
| 1486 | #   that matches the following: |
| 1487 | #   * protocol: tcp |
| 1488 | #   * source IP: MN1_ip |
| 1489 | #   * destination port: default_sw_port (the OpenFlow port, typically 6633) |
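| | #e.g. with MN1_ip = 10.0.0.2 and default_sw_port = 6633 (illustrative |
| | #values only), the command sent below expands to: |
| | #   sudo iptables -A INPUT -p tcp -s 10.0.0.2 --dport 6633 -j DROP |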
| 1490 | main.ONOS1.handle.sendline( |
| 1491 | "sudo iptables -A INPUT -p tcp -s "+MN1_ip+ |
| 1492 | " --dport "+default_sw_port+" -j DROP") |
| 1493 | main.ONOS1.handle.expect("\$") |
| 1494 | #   Append to the OUTPUT chain a DROP rule |
| 1495 | #   that matches the following: |
| 1496 | #   * protocol: tcp |
| 1497 | #   * source IP: MN1_ip |
| 1498 | #   * destination port: default_sw_port (the OpenFlow port, typically 6633) |
| 1499 | main.ONOS1.handle.sendline( |
| 1500 | "sudo iptables -A OUTPUT -p tcp -s "+MN1_ip+ |
| 1501 | " --dport "+default_sw_port+" -j DROP") |
| 1502 | main.ONOS1.handle.expect("\$") |
| 1503 | #Give time to allow rule to take effect |
| 1504 | #NOTE: Sleep period may need to be configured |
| 1505 | # based on the number of switches in the topology |
| 1506 | main.log.info("Please wait for switch connection to "+ |
| 1507 | "time out") |
| 1508 | time.sleep(60) |
| 1509 | |
| 1510 | #Gather vendor OFP with tshark |
| 1511 | main.ONOS1.tshark_grep("OFP 86 Vendor", |
| 1512 | tshark_ofp_output) |
| 1513 | main.ONOS1.tshark_grep("TCP 74 ", |
| 1514 | tshark_tcp_output) |
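| | #These filters are intended to catch the first OpenFlow vendor/experimenter |
| | #messages and the initial 74-byte TCP packets (typically the switches' SYNs) |
| | #once the rules are flushed; the captures are copied off and printed in |
| | #debug mode for manual analysis (see the TODO below) |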
| 1515 | |
| 1516 | #NOTE: Remove all iptables rule quickly (flush) |
| 1517 | # Before removal, obtain TestON timestamp at which |
| 1518 | # removal took place |
| 1519 | # (ensuring nodes are configured via ptp) |
| 1520 | # sudo iptables -F |
| 1521 | |
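| | #t0 is taken in milliseconds immediately before the flush so that it can be |
| | #compared directly against the ONOS graph event timestamps gathered later |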
| 1522 | t0_system = time.time() * 1000 |
| 1523 | main.ONOS1.handle.sendline( |
| 1524 | "sudo iptables -F") |
| 1525 | |
| 1526 | #Counter to track loop count |
| 1527 | counter_loop = 0 |
| 1528 | counter_avail1 = 0 |
| 1529 | counter_avail2 = 0 |
| 1530 | counter_avail3 = 0 |
| 1531 | onos1_dev = False |
| 1532 | onos2_dev = False |
| 1533 | onos3_dev = False |
| 1534 | while counter_loop < 60: |
| | #Reset availability counters at the start of each poll so a |
| | #partial count from a previous poll cannot carry over |
| | counter_avail1 = 0 |
| | counter_avail2 = 0 |
| | counter_avail3 = 0 |
| 1535 | #Continue to check devices for all device |
| 1536 | #availability. When all devices in all 3 |
| 1537 | #ONOS instances indicate that devices are available, |
| 1538 | #obtain the graph event timestamp for t1. |
| 1539 | device_str_obj1 = main.ONOS1cli.devices() |
| 1540 | device_str_obj2 = main.ONOS2cli.devices() |
| 1541 | device_str_obj3 = main.ONOS3cli.devices() |
| 1542 | |
| 1543 | device_json1 = json.loads(device_str_obj1) |
| 1544 | device_json2 = json.loads(device_str_obj2) |
| 1545 | device_json3 = json.loads(device_str_obj3) |
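| | #Each parsed device entry is expected to carry an 'available' boolean; |
| | #that is the only field inspected in the loops below |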
| 1546 | |
| 1547 | for device1 in device_json1: |
| 1548 | if device1['available'] == True: |
| 1549 | counter_avail1 += 1 |
| 1550 | if counter_avail1 == int(num_sw): |
| 1551 | onos1_dev = True |
| 1552 | main.log.info("All devices have been "+ |
| 1553 | "discovered on ONOS1") |
| 1554 | else: |
| 1555 | counter_avail1 = 0 |
| 1556 | for device2 in device_json2: |
| 1557 | if device2['available'] == True: |
| 1558 | counter_avail2 += 1 |
| 1559 | if counter_avail2 == int(num_sw): |
| 1560 | onos2_dev = True |
| 1561 | main.log.info("All devices have been "+ |
| 1562 | "discovered on ONOS2") |
| 1563 | else: |
| 1564 | counter_avail2 = 0 |
| 1565 | for device3 in device_json3: |
| 1566 | if device3['available'] == True: |
| 1567 | counter_avail3 += 1 |
| 1568 | if counter_avail3 == int(num_sw): |
| 1569 | onos3_dev = True |
| 1570 | main.log.info("All devices have been "+ |
| 1571 | "discovered on ONOS3") |
| 1572 | else: |
| 1573 | counter_avail3 = 0 |
| 1574 | |
| 1575 | if onos1_dev and onos2_dev and onos3_dev: |
| 1576 | main.log.info("All devices have been discovered "+ |
| 1577 | "on all ONOS instances") |
| 1578 | json_str_topology_metrics_1 =\ |
| 1579 | main.ONOS1cli.topology_events_metrics() |
| 1580 | json_str_topology_metrics_2 =\ |
| 1581 | main.ONOS2cli.topology_events_metrics() |
| 1582 | json_str_topology_metrics_3 =\ |
| 1583 | main.ONOS3cli.topology_events_metrics() |
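| | #The metrics snapshots are parsed after the capture is stopped |
| | #(below) to extract each node's graph event timestamp |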
| 1584 | |
| 1585 | #Exit while loop if all devices discovered |
| 1586 | break |
| 1587 | |
| 1588 | counter_loop += 1 |
| 1589 | #Give some time in between CLI calls |
| 1590 | #(will not affect measurement) |
| 1591 | time.sleep(3) |
| 1592 | |
| 1593 | main.ONOS1.tshark_stop() |
| 1594 | |
| 1595 | os.system("scp "+ONOS_user+"@"+ONOS1_ip+":"+ |
| 1596 | tshark_ofp_output+" /tmp/") |
| 1597 | os.system("scp "+ONOS_user+"@"+ONOS1_ip+":"+ |
| 1598 | tshark_tcp_output+" /tmp/") |
| 1599 | |
| 1600 | #TODO: Automate OFP output analysis |
| 1601 | #Debug mode - print out packets captured at runtime |
| 1602 | if debug_mode == 'on': |
| 1603 | ofp_file = open(tshark_ofp_output, 'r') |
| 1604 | main.log.info("Tshark OFP Vendor output: ") |
| 1605 | for line in ofp_file: |
| 1606 | tshark_ofp_result_list.append(line) |
| 1607 | main.log.info(line) |
| 1608 | ofp_file.close() |
| 1609 | |
| 1610 | tcp_file = open(tshark_tcp_output, 'r') |
| 1611 | main.log.info("Tshark TCP 74 output: ") |
| 1612 | for line in tcp_file: |
| 1613 | tshark_tcp_result_list.append(line) |
| 1614 | main.log.info(line) |
| 1615 | tcp_file.close() |
| 1616 | |
| | #If the while loop above timed out before every node reported all |
| | #devices, the metrics strings were never captured for this iteration; |
| | #skip it rather than raising a NameError below |
| | if not (onos1_dev and onos2_dev and onos3_dev): |
| | main.log.error("Not all devices were discovered; "+ |
| | "omitting iteration "+str(i)) |
| | continue |
| | |
| 1617 | json_obj_1 = json.loads(json_str_topology_metrics_1) |
| 1618 | json_obj_2 = json.loads(json_str_topology_metrics_2) |
| 1619 | json_obj_3 = json.loads(json_str_topology_metrics_3) |
| 1620 | |
| 1621 | graph_timestamp_1 = \ |
| 1622 | json_obj_1[graphTimestamp]['value'] |
| 1623 | graph_timestamp_2 = \ |
| 1624 | json_obj_2[graphTimestamp]['value'] |
| 1625 | graph_timestamp_3 = \ |
| 1626 | json_obj_3[graphTimestamp]['value'] |
| 1627 | |
| 1628 | graph_lat_1 = int(graph_timestamp_1) - int(t0_system) |
| 1629 | graph_lat_2 = int(graph_timestamp_2) - int(t0_system) |
| 1630 | graph_lat_3 = int(graph_timestamp_3) - int(t0_system) |
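| | #Per-node discovery latency = ONOS graph event timestamp (ms) minus the |
| | #local time at which the iptables rules were flushed; this assumes the |
| | #ONOS and TestON clocks are kept in sync (see the ptpd note above) |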
| 1631 | |
| 1632 | avg_graph_lat = \ |
| 1633 | (int(graph_lat_1) +\ |
| 1634 | int(graph_lat_2) +\ |
| 1635 | int(graph_lat_3)) / 3 |
| 1636 | |
| 1637 | if avg_graph_lat > sw_disc_threshold_min \ |
| 1638 | and avg_graph_lat < sw_disc_threshold_max: |
| 1639 | sw_discovery_lat_list.append( |
| 1640 | avg_graph_lat) |
| 1641 | else: |
| 1642 | main.log.info("100 Switch discovery latency "+ |
| 1643 | "exceeded the threshold.") |
| 1644 | |
| 1645 | #END ITERATION FOR LOOP |
| 1646 | |
| 1647 | sw_lat_min = min(sw_discovery_lat_list) |
| 1648 | sw_lat_max = max(sw_discovery_lat_list) |
| 1649 | sw_lat_avg = sum(sw_discovery_lat_list) /\ |
| 1650 | len(sw_discovery_lat_list) |
| 1651 | |
| 1652 | main.log.report("100 Switch discovery lat "+\ |
| 1653 | "Min: "+str(sw_lat_min)+" ms"+\ |
| 1654 | "Max: "+str(sw_lat_max)+" ms"+\ |
| 1655 | "Avg: "+str(sw_lat_avg)+" ms") |
| 1656 | |
| 1657 | def CASE6(self, main): |
| 1658 | ''' |
| 1659 | Increase number of nodes and initiate CLI |
| 1660 | ''' |
| 1661 | import time |
| 1662 | |
| 1663 | ONOS1_ip = main.params['CTRL']['ip1'] |
| 1664 | ONOS2_ip = main.params['CTRL']['ip2'] |
| 1665 | ONOS3_ip = main.params['CTRL']['ip3'] |
| 1666 | ONOS4_ip = main.params['CTRL']['ip4'] |
| 1667 | ONOS5_ip = main.params['CTRL']['ip5'] |
| 1668 | ONOS6_ip = main.params['CTRL']['ip6'] |
| 1669 | ONOS7_ip = main.params['CTRL']['ip7'] |
| 1670 | |
| 1671 | cell_name = main.params['ENV']['cellName'] |
| 1672 | |
| 1673 | global cluster_count |
| 1674 | |
| 1675 | #Cluster size is increased by 2 every time this case is run |
| 1676 | cluster_count += 2 |
| 1677 | |
| 1678 | main.log.report("Increasing cluster size to "+ |
| 1679 | str(cluster_count)) |
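| | #Scale-out pattern: each run of this case adds two nodes, so the cluster |
| | #is expected to grow 3 -> 5 -> 7; the first three nodes are assumed to |
| | #have been installed earlier in the test |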
| 1680 | |
| 1681 | install_result = main.FALSE |
| 1682 | if cluster_count == 5: |
| 1683 | main.log.info("Installing nodes 4 and 5") |
| 1684 | node4_result = \ |
| 1685 | main.ONOSbench.onos_install(node=ONOS4_ip) |
| 1686 | node5_result = \ |
| 1687 | main.ONOSbench.onos_install(node=ONOS5_ip) |
| 1688 | install_result = node4_result and node5_result |
| 1689 | |
| 1690 | time.sleep(5) |
| 1691 | |
| 1692 | main.ONOS4cli.start_onos_cli(ONOS4_ip) |
| 1693 | main.ONOS5cli.start_onos_cli(ONOS5_ip) |
| 1694 | |
| 1695 | elif cluster_count == 7: |
| 1696 | main.log.info("Installing nodes 4 and 5") |
| 1697 | node6_result = \ |
| 1698 | main.ONOSbench.onos_install(node=ONOS6_ip) |
| 1699 | node7_result = \ |
| 1700 | main.ONOSbench.onos_install(node=ONOS7_ip) |
| 1701 | install_result = node6_result and node7_result |
| 1702 | |
| 1703 | time.sleep(5) |
| 1704 | |
| 1705 | main.ONOS6cli.start_onos_cli(ONOS6_ip) |
| 1706 | main.ONOS7cli.start_onos_cli(ONOS7_ip) |
| 1707 | |
| 1708 | |
| 1709 | |
| 1710 | |