experiment_run copy.py 3.98 KB
Seven committed on 2024-08-13 23:16 · Update: :test
from experiment_func import *
import multiprocessing
import time
start_time = time.time()
# Define a timed wrapper function for each experiment
def run_for_scale():
    experiment_start_time = time.time()
    print("running for scale")
    write_for_scale()
    experiment_end_time = time.time()
    print(f"scale experiment took {experiment_end_time - experiment_start_time} seconds")

def run_for_workload():
    experiment_start_time = time.time()
    print("running for workload")
    write_for_workload()
    experiment_end_time = time.time()
    print(f"workload experiment took {experiment_end_time - experiment_start_time} seconds")

def run_for_successPro():
    experiment_start_time = time.time()
    print("running for successPro")
    write_for_successPro()
    experiment_end_time = time.time()
    print(f"successPro experiment took {experiment_end_time - experiment_start_time} seconds")

def run_for_connectivity():
    experiment_start_time = time.time()
    print("running for connectivity")
    write_for_connectivity()
    experiment_end_time = time.time()
    print(f"connectivity experiment took {experiment_end_time - experiment_start_time} seconds")

def run_for_el_remain():
    experiment_start_time = time.time()
    print("running for el_remain")
    write_for_el_remain()
    experiment_end_time = time.time()
    print(f"el_remain experiment took {experiment_end_time - experiment_start_time} seconds")
# run_for_scale()
node_num = 200
sd_num = 100
# Generate test networks, increasing the scale after each pass.
# Loop-local timers are used so the overall start_time above is not overwritten.
for i in range(1):
    gen_start_time = time.time()
    GenerateNetwork("./data/generate_data.txt", node_num, sd_num, .55, .12, 1, 80)
    gen_end_time = time.time()
    print(f"node count {node_num}, sd count {sd_num}, took {gen_end_time - gen_start_time} seconds")
    node_num += 50
    sd_num += 25
# Create the process list
# processes = []
# # Create and start the processes
# processes.append(multiprocessing.Process(target=run_for_scale))
# processes.append(multiprocessing.Process(target=run_for_workload))
# processes.append(multiprocessing.Process(target=run_for_successPro))
# processes.append(multiprocessing.Process(target=run_for_connectivity))
# processes.append(multiprocessing.Process(target=run_for_el_remain))
# for process in processes:
#     process.start()
# # Wait for all processes to finish
# for process in processes:
#     process.join()
print("All experiments have finished.")
end_time = time.time()
print(f"所有实验总耗时 {end_time - start_time}秒")
# Commented-out draft methods below (they reference self, so they presumably belong
# to a network class elsewhere); kept for reference.
# def updateSelectNodeID(self, el_success_before_loop):
#     # Use multiprocessing to parallelize node updates
#     with multiprocessing.Pool(processes=2) as pool:
#         pool.starmap(self.update_node_select_mlp, [(node, el_success_before_loop, ) for node in self.nodeList])

# def update_node_select_mlp(self, node, el_success_before_loop):
#     if node.epsFlag == 1:
#         # print(f"node{node.nodeID} is eps")
#         ec_counter = {}
#         if node.adjacentNodes:
#             excluded_values = self.get_excluded_values(node.nodeID)
#             for neighbor in node.adjacentNodes:
#                 # self.el_success_looping = copy.deepcopy(self.el_success)
#                 el_success_looping_neighbor = copy.deepcopy(el_success_before_loop)
#                 if neighbor not in excluded_values:
#                     # self.el_success_looping[remaining_num-1][node.nodeID-1] = neighbor
#                     el_success_looping_neighbor[remaining_num-1][node.nodeID-1] = neighbor
#                     ec_counter[neighbor] = self.calculateEcCount_mlp(el_success_looping_neighbor)
#                     # print(f"node {node.nodeID} with neighbor {neighbor} has payoff {ec_counter[neighbor]}")
#             if ec_counter:
#                 node.selectNodeID = max(ec_counter, key=ec_counter.get)
#                 print(f"max is {max(ec_counter, key=ec_counter.get)} with {ec_counter[node.selectNodeID]}")
#                 print(f"node{node.nodeID} has selected neighbor {node.selectNodeID}")
#                 self.el_success[remaining_num-1][node.nodeID-1] = node.selectNodeID