@@ -137,52 +137,51 @@ if __name__ == '__main__':
keylist = list(user_groups.keys())
print("keylist: ", keylist)
# ======= Splitting into clusters. FL groups =======
- # cluster_size = int(args.num_users / args.num_clusters)
- cluster_size = 50
+ cluster_size = int(args.num_users / args.num_clusters)
+ # cluster_size = 50
print("Each cluster size: ", cluster_size)

# Cluster 1
- # A1 = keylist[:cluster_size]
- A1 = np.random.choice(keylist, cluster_size, replace=False)
+ A1 = keylist[:cluster_size]
+ # A1 = np.random.choice(keylist, cluster_size, replace=False)
print("A1: ", A1)
user_groupsA = {k:user_groups[k] for k in A1 if k in user_groups}
print("Size of cluster 1: ", len(user_groupsA))
# Cluster 2
- # B1 = keylist[cluster_size:2*cluster_size]
- B1 = np.random.choice(keylist, cluster_size, replace=False)
+ B1 = keylist[cluster_size:2*cluster_size]
+ # B1 = np.random.choice(keylist, cluster_size, replace=False)
print("B1: ", B1)
user_groupsB = {k:user_groups[k] for k in B1 if k in user_groups}
print("Size of cluster 2: ", len(user_groupsB))
# Cluster 3
- # C1 = keylist[2*cluster_size:3*cluster_size]
- C1 = np.random.choice(keylist, cluster_size, replace=False)
+ C1 = keylist[2*cluster_size:3*cluster_size]
+ # C1 = np.random.choice(keylist, cluster_size, replace=False)
print("C1: ", C1)
user_groupsC = {k:user_groups[k] for k in C1 if k in user_groups}
print("Size of cluster 3: ", len(user_groupsC))
# Cluster 4
- # D1 = keylist[3*cluster_size:4*cluster_size]
- D1 = np.random.choice(keylist, cluster_size, replace=False)
+ D1 = keylist[3*cluster_size:4*cluster_size]
+ # D1 = np.random.choice(keylist, cluster_size, replace=False)
print("D1: ", D1)
user_groupsD = {k:user_groups[k] for k in D1 if k in user_groups}
print("Size of cluster 4: ", len(user_groupsD))
-
# Cluster 5
- E1 = np.random.choice(keylist, cluster_size, replace=False)
+ E1 = keylist[4*cluster_size:5*cluster_size]  # np.random.choice(keylist, cluster_size, replace=False)
print("E1: ", E1)
user_groupsE = {k:user_groups[k] for k in E1 if k in user_groups}
print("Size of cluster 5: ", len(user_groupsE))
# Cluster 6
- F1 = np.random.choice(keylist, cluster_size, replace=False)
+ F1 = keylist[5*cluster_size:6*cluster_size]  # np.random.choice(keylist, cluster_size, replace=False)
print("F1: ", F1)
user_groupsF = {k:user_groups[k] for k in F1 if k in user_groups}
print("Size of cluster 6: ", len(user_groupsF))
# Cluster 7
- G1 = np.random.choice(keylist, cluster_size, replace=False)
+ G1 = keylist[6*cluster_size:7*cluster_size]  # np.random.choice(keylist, cluster_size, replace=False)
print("G1: ", G1)
user_groupsG = {k:user_groups[k] for k in G1 if k in user_groups}
- print("Size of cluster 7: ", len(user_groupsC))
+ print("Size of cluster 7: ", len(user_groupsG))
# Cluster 8
- H1 = np.random.choice(keylist, cluster_size, replace=False)
+ H1 = keylist[7*cluster_size:]  # np.random.choice(keylist, cluster_size, replace=False)
print("H1: ", H1)
user_groupsH = {k:user_groups[k] for k in H1 if k in user_groups}
print("Size of cluster 8: ", len(user_groupsH))
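With the `+` lines above, each cluster now takes a contiguous, non-overlapping slice of `keylist` instead of an independent `np.random.choice` draw, which could assign the same user to several clusters. A minimal sketch of the same partitioning written as a loop; `keylist` and `user_groups` are the objects from the hunk above, while the helper name `split_into_clusters` is hypothetical:

    def split_into_clusters(keylist, user_groups, num_clusters):
        # Contiguous, non-overlapping slices; the last slice absorbs any
        # remainder, matching keylist[7*cluster_size:] for cluster 8.
        cluster_size = int(len(keylist) / num_clusters)
        clusters = []
        for i in range(num_clusters):
            start = i * cluster_size
            end = None if i == num_clusters - 1 else start + cluster_size
            ids = keylist[start:end]
            clusters.append({k: user_groups[k] for k in ids if k in user_groups})
        return clusters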
@@ -260,55 +259,54 @@ if __name__ == '__main__':

# for epoch in tqdm(range(args.epochs)):
# for epoch in range(args.epochs):
- # while testacc_check < args.test_acc or epoch < args.epochs:
- while epoch < args.epochs:
+ while testacc_check < args.test_acc or epoch < args.epochs:
+ # while epoch < args.epochs:
local_weights, local_losses, local_accuracies= [], [], []
print(f'\n | Global Training Round : {epoch+1} |\n')

# ============== TRAIN ==============
global_model.train()

- # Cluster A
+ # ===== Cluster A =====
A_model, A_weights, A_losses = fl_train(args, train_dataset, cluster_modelA, A1, user_groupsA, args.Cepochs)
local_weights.append(copy.deepcopy(A_weights))
local_losses.append(copy.deepcopy(A_losses))
- cluster_modelA = A_model
- # Cluster B
+ cluster_modelA = global_model  # = A_model
+ # ===== Cluster B =====
B_model, B_weights, B_losses = fl_train(args, train_dataset, cluster_modelB, B1, user_groupsB, args.Cepochs)
local_weights.append(copy.deepcopy(B_weights))
local_losses.append(copy.deepcopy(B_losses))
- cluster_modelB = B_model
- # Cluster C
+ cluster_modelB = global_model  # = B_model
+ # ===== Cluster C =====
C_model, C_weights, C_losses = fl_train(args, train_dataset, cluster_modelC, C1, user_groupsC, args.Cepochs)
local_weights.append(copy.deepcopy(C_weights))
local_losses.append(copy.deepcopy(C_losses))
- cluster_modelC = C_model
- # Cluster D
+ cluster_modelC = global_model  # = C_model
+ # ===== Cluster D =====
D_model, D_weights, D_losses = fl_train(args, train_dataset, cluster_modelD, D1, user_groupsD, args.Cepochs)
local_weights.append(copy.deepcopy(D_weights))
local_losses.append(copy.deepcopy(D_losses))
- cluster_modelD = D_model
-
- # Cluster E
+ cluster_modelD = global_model  # = D_model
+ # ===== Cluster E =====
E_model, E_weights, E_losses = fl_train(args, train_dataset, cluster_modelE, E1, user_groupsE, args.Cepochs)
local_weights.append(copy.deepcopy(E_weights))
local_losses.append(copy.deepcopy(E_losses))
- cluster_modelE = E_model
- # Cluster F
+ cluster_modelE = global_model  # = E_model
+ # ===== Cluster F =====
F_model, F_weights, F_losses = fl_train(args, train_dataset, cluster_modelF, F1, user_groupsF, args.Cepochs)
local_weights.append(copy.deepcopy(F_weights))
local_losses.append(copy.deepcopy(F_losses))
- cluster_modelF = F_model
- # Cluster G
+ cluster_modelF = global_model  # = F_model
+ # ===== Cluster G =====
G_model, G_weights, G_losses = fl_train(args, train_dataset, cluster_modelG, G1, user_groupsG, args.Cepochs)
local_weights.append(copy.deepcopy(G_weights))
local_losses.append(copy.deepcopy(G_losses))
- cluster_modelG = G_model
- # Cluster H
+ cluster_modelG = global_model  # = G_model
+ # ===== Cluster H =====
H_model, H_weights, H_losses = fl_train(args, train_dataset, cluster_modelH, H1, user_groupsH, args.Cepochs)
local_weights.append(copy.deepcopy(H_weights))
local_losses.append(copy.deepcopy(H_losses))
- cluster_modelH = H_model
+ cluster_modelH = global_model  # = H_model


# averaging global weights
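Two behavioural changes land in this hunk. The new `while` uses `or`, so the loop runs while either condition holds: training lasts at least `args.epochs` rounds and then keeps going until `testacc_check` reaches `args.test_acc`. And each `cluster_modelX` is now re-pointed at `global_model` after its cluster trains, so every cluster starts the next round from the federated average rather than from its own last weights; note that plain assignment aliases all eight names to one object. A sketch of one round under this scheme, assuming a `clusters` list of `(ids, groups)` pairs and the `average_weights` helper implied by the "# averaging global weights" comment; the loop form is illustrative, not the committed code:

    while testacc_check < args.test_acc or epoch < args.epochs:
        local_weights, local_losses = [], []
        for ids, groups in clusters:  # e.g. (A1, user_groupsA), ..., (H1, user_groupsH)
            # deepcopy avoids the aliasing that cluster_modelX = global_model creates
            _, w, loss = fl_train(args, train_dataset, copy.deepcopy(global_model),
                                  ids, groups, args.Cepochs)
            local_weights.append(copy.deepcopy(w))
            local_losses.append(copy.deepcopy(loss))
        global_model.load_state_dict(average_weights(local_weights))  # FedAvg step
        epoch += 1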
@@ -358,7 +356,7 @@ if __name__ == '__main__':
print("|---- Test Accuracy: {:.2f}%".format(100*test_acc))

# Saving the objects train_loss and train_accuracy:
- file_name = '../save/objects/HFL4_{}_{}_{}_lr[{}]_C[{}]_iid[{}]_E[{}]_B[{}].pkl'.\
+ file_name = '../save/objects/HFL8_{}_{}_{}_lr[{}]_C[{}]_iid[{}]_E[{}]_B[{}].pkl'.\
format(args.dataset, args.model, epoch, args.lr, args.frac, args.iid,
args.local_ep, args.local_bs)
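Renaming the tag from HFL4 to HFL8 keeps the saved object in step with the eight-cluster split above. For context, a minimal sketch of the dump this `file_name` typically feeds, assuming `train_loss` and `train_accuracy` are the per-round lists the comment names (the write itself sits outside the hunk):

    import pickle

    # Persist the training curves under the HFL8-tagged path built above.
    with open(file_name, 'wb') as f:
        pickle.dump([train_loss, train_accuracy], f)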