Browse Source

change CNNCifar model to 917350 params and update evaluation results

wesleyjtann 5 years ago
parent
commit
59ebd7820b
42 changed files with 512 additions and 104 deletions
  1. BIN
      save/MNIST_CNN_IID_acc.png
  2. BIN
      save/MNIST_CNN_IID_loss.png
  3. BIN
      save/MNIST_MLP_IID_acc.png
  4. BIN
      save/MNIST_MLP_IID_loss.png
  5. BIN
      save/MNIST_MLP_NONIID_acc.png
  6. BIN
      save/MNIST_MLP_NONIID_loss.png
  7. 0 0
      save/objects/Bad/HFL4_mnist_mlp_100_lr[0.05]_C[0.1]_iid[0]_E[1]_B[10].pkl
  8. 0 0
      save/objects/Bad/HFL4_mnist_mlp_150_lr[0.05]_C[0.1]_iid[0]_E[1]_B[10].pkl
  9. BIN
      save/objects/HFL2_mnist_mlp_100_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10].pkl
  10. 0 0
      save/objects/Old/FL_mnist_mlp_141_lr[0.1]_C[0.1]_iid[1]_E[1]_B[10].pkl
  11. 0 0
      save/objects/Old/FL_mnist_mlp_302_lr[0.1]_C[0.1]_iid[0]_E[1]_B[10].pkl
  12. 0 0
      save/objects/Old/[1]FL_mnist_mlp_200_lr[0.05]_C[0.1]_iid[1]_E[1]_B[10].pkl
  13. 0 0
      save/objects/Old/[2]FL_mnist_mlp_302_lr[0.05]_C[0.1]_iid[0]_E[1]_B[10].pkl
  14. 0 0
      save/objects/Old/[3]HFL2_mnist_mlp_101_lr[0.05]_C[0.1]_iid[1]_E[1]_B[10].pkl
  15. 0 0
      save/objects/Old/[4]HFL2_mnist_mlp_101_lr[0.05]_C[0.1]_iid[0]_E[1]_B[10].pkl
  16. BIN
      save/objects/Old/clustersize50HFL4_mnist_mlp_100_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10].pkl
  17. BIN
      save/objects/[10]FL_mnist_cnn_261_lr[0.01]_C[0.1]_iid[0]_E[1]_B[10].pkl
  18. BIN
      save/objects/[11]HFL2_mnist_cnn_100_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10].pkl
  19. BIN
      save/objects/[12]HFL2_mnist_cnn_100_lr[0.01]_C[0.1]_iid[0]_E[1]_B[10].pkl
  20. BIN
      save/objects/[13]HFL4_mnist_cnn_100_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10].pkl
  21. BIN
      save/objects/[15]HFL8_mnist_cnn_30_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10].pkl
  22. BIN
      save/objects/[16]HFL8_mnist_cnn_30_lr[0.01]_C[0.1]_iid[0]_E[1]_B[10].pkl
  23. 0 0
      save/objects/[1]FL_mnist_mlp_468_C[0.1]_iid[1]_E[1]_B[10].pkl
  24. 0 0
      save/objects/[2]FL_mnist_mlp_1196_lr[0.01]_C[0.1]_iid[0]_E[1]_B[10].pkl
  25. BIN
      save/objects/[3]HFL2_mnist_mlp_100_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10].pkl
  26. BIN
      save/objects/[4]HFL2_mnist_mlp_100_lr[0.01]_C[0.1]_iid[0]_E[1]_B[10].pkl
  27. BIN
      save/objects/[5]HFL4_mnist_mlp_100_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10].pkl
  28. BIN
      save/objects/[6]HFL4_mnist_mlp_150_lr[0.01]_C[0.1]_iid[0]_E[1]_B[10].pkl
  29. BIN
      save/objects/[7]HFL4_mnist_mlp_30_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10].pkl
  30. BIN
      save/objects/[9]FL_mnist_cnn_100_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10].pkl
  31. BIN
      save/objects/clustersize50HFL4_mnist_mlp_100_lr[0.01]_C[0.1]_iid[0]_E[1]_B[10].pkl
  32. 143 0
      src/.ipynb_checkpoints/Eval-checkpoint.ipynb
  33. 16 12
      src/.ipynb_checkpoints/federated-hierarchical_v1_twoclusters-changeEval-checkpoint.ipynb
  34. 209 0
      src/Eval.ipynb
  35. BIN
      src/__pycache__/models.cpython-37.pyc
  36. 25 8
      src/baseline_main.py
  37. 6 6
      src/federated-hierarchical2_main.py
  38. 19 19
      src/federated-hierarchical4_main.py
  39. 33 35
      src/federated-hierarchical8_main.py
  40. 33 17
      src/federated-hierarchical_v1_twoclusters-changeEval.ipynb
  41. 2 1
      src/federated_main.py
  42. 26 6
      src/models.py

BIN
save/MNIST_CNN_IID_acc.png


BIN
save/MNIST_CNN_IID_loss.png


BIN
save/MNIST_MLP_IID_acc.png


BIN
save/MNIST_MLP_IID_loss.png


BIN
save/MNIST_MLP_NONIID_acc.png


BIN
save/MNIST_MLP_NONIID_loss.png


+ 0 - 0
save/objects/HFL4_mnist_mlp_100_lr[0.05]_C[0.1]_iid[0]_E[1]_B[10].pkl → save/objects/Bad/HFL4_mnist_mlp_100_lr[0.05]_C[0.1]_iid[0]_E[1]_B[10].pkl


+ 0 - 0
save/objects/HFL4_mnist_mlp_150_lr[0.05]_C[0.1]_iid[0]_E[1]_B[10].pkl → save/objects/Bad/HFL4_mnist_mlp_150_lr[0.05]_C[0.1]_iid[0]_E[1]_B[10].pkl


BIN
save/objects/HFL2_mnist_mlp_100_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10].pkl


+ 0 - 0
save/objects/FL_mnist_mlp_141_lr[0.1]_C[0.1]_iid[1]_E[1]_B[10].pkl → save/objects/Old/FL_mnist_mlp_141_lr[0.1]_C[0.1]_iid[1]_E[1]_B[10].pkl


+ 0 - 0
save/objects/FL_mnist_mlp_302_lr[0.1]_C[0.1]_iid[0]_E[1]_B[10].pkl → save/objects/Old/FL_mnist_mlp_302_lr[0.1]_C[0.1]_iid[0]_E[1]_B[10].pkl


+ 0 - 0
save/objects/[1]FL_mnist_mlp_200_lr[0.05]_C[0.1]_iid[1]_E[1]_B[10].pkl → save/objects/Old/[1]FL_mnist_mlp_200_lr[0.05]_C[0.1]_iid[1]_E[1]_B[10].pkl


+ 0 - 0
save/objects/[2]FL_mnist_mlp_302_lr[0.05]_C[0.1]_iid[0]_E[1]_B[10].pkl → save/objects/Old/[2]FL_mnist_mlp_302_lr[0.05]_C[0.1]_iid[0]_E[1]_B[10].pkl


+ 0 - 0
save/objects/[3]HFL2_mnist_mlp_101_lr[0.05]_C[0.1]_iid[1]_E[1]_B[10].pkl → save/objects/Old/[3]HFL2_mnist_mlp_101_lr[0.05]_C[0.1]_iid[1]_E[1]_B[10].pkl


+ 0 - 0
save/objects/[4]HFL2_mnist_mlp_101_lr[0.05]_C[0.1]_iid[0]_E[1]_B[10].pkl → save/objects/Old/[4]HFL2_mnist_mlp_101_lr[0.05]_C[0.1]_iid[0]_E[1]_B[10].pkl


BIN
save/objects/Old/clustersize50HFL4_mnist_mlp_100_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10].pkl


BIN
save/objects/[10]FL_mnist_cnn_261_lr[0.01]_C[0.1]_iid[0]_E[1]_B[10].pkl


BIN
save/objects/[11]HFL2_mnist_cnn_100_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10].pkl


BIN
save/objects/[12]HFL2_mnist_cnn_100_lr[0.01]_C[0.1]_iid[0]_E[1]_B[10].pkl


BIN
save/objects/[13]HFL4_mnist_cnn_100_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10].pkl


BIN
save/objects/[15]HFL8_mnist_cnn_30_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10].pkl


BIN
save/objects/[16]HFL8_mnist_cnn_30_lr[0.01]_C[0.1]_iid[0]_E[1]_B[10].pkl


+ 0 - 0
save/objects/FL_mnist_mlp_468_C[0.1]_iid[1]_E[1]_B[10].pkl → save/objects/[1]FL_mnist_mlp_468_C[0.1]_iid[1]_E[1]_B[10].pkl


+ 0 - 0
save/objects/FL_mnist_mlp_1196_lr[0.01]_C[0.1]_iid[0]_E[1]_B[10].pkl → save/objects/[2]FL_mnist_mlp_1196_lr[0.01]_C[0.1]_iid[0]_E[1]_B[10].pkl


BIN
save/objects/[3]HFL2_mnist_mlp_100_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10].pkl


BIN
save/objects/[4]HFL2_mnist_mlp_100_lr[0.01]_C[0.1]_iid[0]_E[1]_B[10].pkl


BIN
save/objects/[5]HFL4_mnist_mlp_100_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10].pkl


BIN
save/objects/[6]HFL4_mnist_mlp_150_lr[0.01]_C[0.1]_iid[0]_E[1]_B[10].pkl


BIN
save/objects/[7]HFL4_mnist_mlp_30_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10].pkl


BIN
save/objects/[9]FL_mnist_cnn_100_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10].pkl


BIN
save/objects/clustersize50HFL4_mnist_mlp_100_lr[0.01]_C[0.1]_iid[0]_E[1]_B[10].pkl


File diff suppressed because it is too large
+ 143 - 0
src/.ipynb_checkpoints/Eval-checkpoint.ipynb


File diff suppressed because it is too large
+ 16 - 12
src/.ipynb_checkpoints/federated-hierarchical_v1_twoclusters-changeEval-checkpoint.ipynb


+ 209 - 0
src/Eval.ipynb

@@ -0,0 +1,209 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "raw",
+   "metadata": {},
+   "source": [
+    "import pickle\n",
+    "\n",
+    "filename1 = \"[16]HFL8_mnist_cnn_30_lr[0.01]_C[0.1]_iid[0]_E[1]_B[10]\"\n",
+    "with open(r\"../save/objects/\" + filename1 + \".pkl\", \"rb\") as input_file: data = pickle.load(input_file)\n",
+    "        \n",
+    "# print(data)\n",
+    "trloss = data[0]\n",
+    "tracc = data[1]\n",
+    "# testloss = data[2]\n",
+    "# print(len(trloss))\n",
+    "# (len(tracc))"
+   ]
+  },
+  {
+   "cell_type": "raw",
+   "metadata": {},
+   "source": [
+    "# using enumerate() + next() to find index of first element just greater than 80%\n",
+    "testacc = 0.97\n",
+    "res = next(x for x, val in enumerate(tracc) if val >= testacc) \n",
+    "\n",
+    "# printing result \n",
+    "print (\"The number of global training round just greater than \" + str(testacc*100) + \"% : \" + str(res+1))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# ===== MNIST MLP IID ===== \n",
+    "datamodelset = \"MNIST_MLP_IID\"\n",
+    "filename1 = \"[1]FL_mnist_mlp_468_C[0.1]_iid[1]_E[1]_B[10]\"\n",
+    "filename2 = \"[3]HFL2_mnist_mlp_100_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10]\"\n",
+    "filename3 = \"[5]HFL4_mnist_mlp_100_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10]\"\n",
+    "filename4 = \"[7]HFL4_mnist_mlp_30_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10]\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# ===== MNIST MLP NON-IID ===== \n",
+    "datamodelset = \"MNIST_MLP_NONIID\"\n",
+    "filename1 = \"[2]FL_mnist_mlp_1196_lr[0.01]_C[0.1]_iid[0]_E[1]_B[10]\"\n",
+    "filename2 = \"[4]HFL2_mnist_mlp_100_lr[0.01]_C[0.1]_iid[0]_E[1]_B[10]\"\n",
+    "filename3 = \"[6]HFL4_mnist_mlp_150_lr[0.01]_C[0.1]_iid[0]_E[1]_B[10]\"\n",
+    "# filename4 = \"\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# ===== MNIST CNN IID ===== \n",
+    "datamodelset = \"MNIST_CNN_IID\"\n",
+    "filename1 = \"[9]FL_mnist_cnn_100_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10]\"\n",
+    "filename2 = \"[11]HFL2_mnist_cnn_100_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10]\"\n",
+    "filename3 = \"[13]HFL4_mnist_cnn_100_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10]\"\n",
+    "filename4 = \"[15]HFL8_mnist_cnn_30_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10]\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# ===== MNIST CNN NON-IID ===== \n",
+    "datamodelset = \"MNIST_CNN_IID\"\n",
+    "filename1 = \"[10]FL_mnist_cnn_261_lr[0.01]_C[0.1]_iid[0]_E[1]_B[10]\"\n",
+    "filename2 = \"[12]HFL2_mnist_cnn_100_lr[0.01]_C[0.1]_iid[0]_E[1]_B[10]\"\n",
+    "filename3 = \"\"\n",
+    "filename4 = \"[16]HFL8_mnist_cnn_30_lr[0.01]_C[0.1]_iid[0]_E[1]_B[10]\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import pickle\n",
+    "\n",
+    "with open(r\"../save/objects/\" + filename1 + \".pkl\", \"rb\") as input_file: data1 = pickle.load(input_file)\n",
+    "with open(r\"../save/objects/\" + filename2 + \".pkl\", \"rb\") as input_file: data2 = pickle.load(input_file)\n",
+    "with open(r\"../save/objects/\" + filename3 + \".pkl\", \"rb\") as input_file: data3 = pickle.load(input_file)\n",
+    "with open(r\"../save/objects/\" + filename4 + \".pkl\", \"rb\") as input_file: data4 = pickle.load(input_file)    \n",
+    "\n",
+    "trloss1 = data1[0]    \n",
+    "trloss2 = data2[0]    \n",
+    "trloss3 = data3[0]    \n",
+    "trloss4 = data4[0]    \n",
+    "    \n",
+    "tracc1 = data1[1]\n",
+    "tracc2 = data2[1]\n",
+    "tracc3 = data3[1]\n",
+    "tracc4 = data4[1]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# PLOTTING (optional)\n",
+    "import matplotlib\n",
+    "import matplotlib.pyplot as plt\n",
+    "matplotlib.use('Agg')\n",
+    "\n",
+    "# Plot Loss curve\n",
+    "plt.figure()\n",
+    "plt.title('MNIST CNN IID')\n",
+    "\n",
+    "# plt.plot(range(len(trloss)), trloss, color='r', label=\"FL\", linewidth=0.9)\n",
+    "\n",
+    "plt.plot(range(len(trloss1)), trloss1, color='r', label=\"FL\", linewidth=0.9)\n",
+    "plt.plot(range(len(trloss2)), trloss2, '-.', color='b', label=\"HFL2\", linewidth=0.9)\n",
+    "plt.plot(range(len(trloss3)), trloss3, '--', color='k', label=\"HFL4\", linewidth=0.9)\n",
+    "plt.plot(range(len(trloss4)), trloss4, color='k', label=\"HFL8\")\n",
+    "plt.legend(loc=\"upper right\")\n",
+    "\n",
+    "plt.ylabel('Training loss')\n",
+    "plt.xlabel('Communication Rounds')\n",
+    "# plt.savefig('../save/fed_{}_{}_{}_C[{}]_iid[{}]_E[{}]_B[{}]_loss.png'.\n",
+    "#             format(args.dataset, args.model, args.epochs, args.frac,\n",
+    "#                    args.iid, args.local_ep, args.local_bs))\n",
+    "\n",
+    "plt.savefig('../save/' + datamodelset + '_loss.png')\n",
+    "plt.show"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Plot Average Accuracy vs Communication rounds\n",
+    "plt.figure()\n",
+    "plt.title('MNIST CNN IID')\n",
+    "# plt.plot(range(len(tracc)), tracc, color='k')\n",
+    "\n",
+    "plt.plot(range(len(tracc1)), tracc1, color='r', label=\"FL\", linewidth=0.9)\n",
+    "plt.plot(range(len(tracc2)), tracc2, '-.', color='b', label=\"HFL2\", linewidth=0.9)\n",
+    "plt.plot(range(len(tracc3)), tracc3, '--', color='k', label=\"HFL4\", linewidth=0.9)\n",
+    "plt.plot(range(len(tracc4)), tracc4, color='k', label=\"HFL8\")\n",
+    "plt.legend(loc=\"lower right\")\n",
+    "\n",
+    "\n",
+    "plt.ylabel('Average Accuracy')\n",
+    "plt.xlabel('Communication Rounds')\n",
+    "# plt.savefig('../save/fed_{}_{}_{}_C[{}]_iid[{}]_E[{}]_B[{}]_acc.png'.\n",
+    "#             format(args.dataset, args.model, args.epochs, args.frac,\n",
+    "#                    args.iid, args.local_ep, args.local_bs))\n",
+    "\n",
+    "plt.savefig('../save/' + datamodelset + '_acc.png')\n",
+    "plt.show"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "fl_pytorch",
+   "language": "python",
+   "name": "fl_pytorch"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.7.3"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}

BIN
src/__pycache__/models.cpython-37.pyc


+ 25 - 8
src/baseline_main.py

@@ -13,7 +13,8 @@ from utils import get_dataset
 from options import args_parser
 from update import test_inference
 from models import MLP, CNNMnist, CNNFashion_Mnist, CNNCifar
-
+import pickle
+import time
 
 if __name__ == '__main__':
     args = args_parser()
@@ -21,6 +22,8 @@ if __name__ == '__main__':
         torch.cuda.set_device(args.gpu)
     device = 'cuda' if args.gpu else 'cpu'
 
+    start_time = time.time()
+
     # load datasets
     train_dataset, test_dataset, _ = get_dataset(args)
 
@@ -84,15 +87,29 @@ if __name__ == '__main__':
         print('\nTrain loss:', loss_avg)
         epoch_loss.append(loss_avg)
 
-    # Plot loss
-    plt.figure()
-    plt.plot(range(len(epoch_loss)), epoch_loss)
-    plt.xlabel('epochs')
-    plt.ylabel('Train loss')
-    plt.savefig('../save/nn_{}_{}_{}.png'.format(args.dataset, args.model,
-                                                 args.epochs))
 
     # testing
     test_acc, test_loss = test_inference(args, global_model, test_dataset)
     print('Test on', len(test_dataset), 'samples')
     print("Test Accuracy: {:.2f}%".format(100*test_acc))
+
+
+    # Saving the objects train_loss, test_acc, test_loss:
+    file_name = '../save/objects/BaseSGD_{}_{}_epoch[{}]_lr[{}]_iid[{}].pkl'.\
+        format(args.dataset, args.model, epoch, args.lr, args.iid)
+
+    with open(file_name, 'wb') as f:
+        pickle.dump([epoch_loss, test_acc, test_loss], f)
+
+    print('\n Total Run Time: {0:0.4f}'.format(time.time()-start_time))
+
+
+    # # Plot loss
+    # plt.figure()
+    # plt.plot(range(len(epoch_loss)), epoch_loss)
+    # plt.xlabel('epochs')
+    # plt.ylabel('Train loss')
+    # plt.savefig('../save/nn_{}_{}_{}.png'.format(args.dataset, args.model,
+    #                                              args.epochs))
+
+

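For context, a minimal sketch (not part of this commit) of how the objects saved by baseline_main.py above could be read back; the file name here is a hypothetical example of what the format string would produce:

```python
import pickle

# Hypothetical example name; the real one is built by the format string in baseline_main.py.
file_name = '../save/objects/BaseSGD_mnist_mlp_epoch[9]_lr[0.01]_iid[1].pkl'

with open(file_name, 'rb') as f:
    # Stored as [epoch_loss, test_acc, test_loss] in the snippet above.
    epoch_loss, test_acc, test_loss = pickle.load(f)

print(len(epoch_loss), test_acc, test_loss)
```
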
+ 6 - 6
src/federated-hierarchical2_main.py

@@ -221,24 +221,24 @@ if __name__ == '__main__':
 
     # for epoch in tqdm(range(args.epochs)):
     # for epoch in range(args.epochs):
-    # while testacc_check < args.test_acc or epoch < args.epochs:
-    while epoch < args.epochs: 
+    while testacc_check < args.test_acc or epoch < args.epochs:
+    # while epoch < args.epochs: 
         local_weights, local_losses, local_accuracies= [], [], []
         print(f'\n | Global Training Round : {epoch+1} |\n')
         
         # ============== TRAIN ==============
         global_model.train()
         
-        # Cluster A
+        # ===== Cluster A ===== 
         A_model, A_weights, A_losses = fl_train(args, train_dataset, cluster_modelA, A1, user_groupsA, args.Cepochs)        
         local_weights.append(copy.deepcopy(A_weights))
         local_losses.append(copy.deepcopy(A_losses))    
-        cluster_modelA = A_model    
-        # Cluster B
+        cluster_modelA = global_model# = A_model    
+        # ===== Cluster B ===== 
         B_model, B_weights, B_losses = fl_train(args, train_dataset, cluster_modelB, B1, user_groupsB, args.Cepochs)
         local_weights.append(copy.deepcopy(B_weights))
         local_losses.append(copy.deepcopy(B_losses))
-        cluster_modelB = B_model 
+        cluster_modelB = global_model# = B_model 
 
         # # Cluster C
         # C_weights, C_losses = fl_train(args, train_dataset, cluster_modelC, C1, user_groupsC, args.Cepochs)

+ 19 - 19
src/federated-hierarchical4_main.py

@@ -137,31 +137,31 @@ if __name__ == '__main__':
     keylist = list(user_groups.keys())
     print("keylist: ", keylist)
     # ======= Splitting into clusters. FL groups ======= 
-    # cluster_size = int(args.num_users / args.num_clusters)    
-    cluster_size = 50
+    cluster_size = int(args.num_users / args.num_clusters)    
+    # cluster_size = 50
     print("Each cluster size: ", cluster_size)
 
     # Cluster 1
-    # A1 = keylist[:cluster_size]
-    A1 = np.random.choice(keylist, cluster_size, replace=False)
+    A1 = keylist[:cluster_size]
+    # A1 = np.random.choice(keylist, cluster_size, replace=False)
     print("A1: ", A1)
     user_groupsA = {k:user_groups[k] for k in A1 if k in user_groups}
     print("Size of cluster 1: ", len(user_groupsA))
     # Cluster 2
-    # B1 = keylist[cluster_size:2*cluster_size]
-    B1 = np.random.choice(keylist, cluster_size, replace=False)    
+    B1 = keylist[cluster_size:2*cluster_size]
+    # B1 = np.random.choice(keylist, cluster_size, replace=False)    
     print("B1: ", B1)
     user_groupsB = {k:user_groups[k] for k in B1 if k in user_groups}
     print("Size of cluster 2: ", len(user_groupsB))
     # Cluster 3
-    # C1 = keylist[2*cluster_size:3*cluster_size]
-    C1 = np.random.choice(keylist, cluster_size, replace=False)
+    C1 = keylist[2*cluster_size:3*cluster_size]
+    # C1 = np.random.choice(keylist, cluster_size, replace=False)
     print("C1: ", C1)
     user_groupsC = {k:user_groups[k] for k in C1 if k in user_groups}
     print("Size of cluster 3: ", len(user_groupsC))
     # Cluster 4
-    # D1 = keylist[3*cluster_size:4*cluster_size]
-    D1 = np.random.choice(keylist, cluster_size, replace=False)
+    D1 = keylist[3*cluster_size:4*cluster_size]
+    # D1 = np.random.choice(keylist, cluster_size, replace=False)
     print("D1: ", D1)
     user_groupsD = {k:user_groups[k] for k in D1 if k in user_groups}
     print("Size of cluster 4: ", len(user_groupsD))
@@ -229,26 +229,26 @@ if __name__ == '__main__':
         # ============== TRAIN ==============
         global_model.train()
         
-        # Cluster A
-        A_model, A_weights, A_losses = fl_train(args, train_dataset, cluster_modelA, A1, user_groupsA, args.Cepochs)        
+        # ===== Cluster A ===== 
+        _, A_weights, A_losses = fl_train(args, train_dataset, cluster_modelA, A1, user_groupsA, args.Cepochs)        
         local_weights.append(copy.deepcopy(A_weights))
         local_losses.append(copy.deepcopy(A_losses))    
-        cluster_modelA = A_model    
-        # Cluster B
+        cluster_modelA = global_model #= A_model        
+        # ===== Cluster B ===== 
         B_model, B_weights, B_losses = fl_train(args, train_dataset, cluster_modelB, B1, user_groupsB, args.Cepochs)
         local_weights.append(copy.deepcopy(B_weights))
         local_losses.append(copy.deepcopy(B_losses))
-        cluster_modelB = B_model 
-        # Cluster C
+        cluster_modelB = global_model #= B_model 
+        # ===== Cluster C ===== 
         C_model, C_weights, C_losses = fl_train(args, train_dataset, cluster_modelC, C1, user_groupsC, args.Cepochs)
         local_weights.append(copy.deepcopy(C_weights))
         local_losses.append(copy.deepcopy(C_losses))   
-        cluster_modelC = C_model      
-        # Cluster D
+        cluster_modelC = global_model #= C_model      
+        # ===== Cluster D ===== 
         D_model, D_weights, D_losses = fl_train(args, train_dataset, cluster_modelD, D1, user_groupsD, args.Cepochs)
         local_weights.append(copy.deepcopy(D_weights))
         local_losses.append(copy.deepcopy(D_losses))
-        cluster_modelD = D_model 
+        cluster_modelD= global_model #= D_model 
         
         
         # averaging global weights

+ 33 - 35
src/federated-hierarchical8_main.py

@@ -137,52 +137,51 @@ if __name__ == '__main__':
     keylist = list(user_groups.keys())
     print("keylist: ", keylist)
     # ======= Splitting into clusters. FL groups ======= 
-    # cluster_size = int(args.num_users / args.num_clusters)    
-    cluster_size = 50
+    cluster_size = int(args.num_users / args.num_clusters)    
+    # cluster_size = 50
     print("Each cluster size: ", cluster_size)
 
     # Cluster 1
-    # A1 = keylist[:cluster_size]
-    A1 = np.random.choice(keylist, cluster_size, replace=False)
+    A1 = keylist[:cluster_size]
+    # A1 = np.random.choice(keylist, cluster_size, replace=False)
     print("A1: ", A1)
     user_groupsA = {k:user_groups[k] for k in A1 if k in user_groups}
     print("Size of cluster 1: ", len(user_groupsA))
     # Cluster 2
-    # B1 = keylist[cluster_size:2*cluster_size]
-    B1 = np.random.choice(keylist, cluster_size, replace=False)    
+    B1 = keylist[cluster_size:2*cluster_size]
+    # B1 = np.random.choice(keylist, cluster_size, replace=False)    
     print("B1: ", B1)
     user_groupsB = {k:user_groups[k] for k in B1 if k in user_groups}
     print("Size of cluster 2: ", len(user_groupsB))
     # Cluster 3
-    # C1 = keylist[2*cluster_size:3*cluster_size]
-    C1 = np.random.choice(keylist, cluster_size, replace=False)
+    C1 = keylist[2*cluster_size:3*cluster_size]
+    # C1 = np.random.choice(keylist, cluster_size, replace=False)
     print("C1: ", C1)
     user_groupsC = {k:user_groups[k] for k in C1 if k in user_groups}
     print("Size of cluster 3: ", len(user_groupsC))
     # Cluster 4
-    # D1 = keylist[3*cluster_size:4*cluster_size]
-    D1 = np.random.choice(keylist, cluster_size, replace=False)
+    D1 = keylist[3*cluster_size:4*cluster_size]
+    # D1 = np.random.choice(keylist, cluster_size, replace=False)
     print("D1: ", D1)
     user_groupsD = {k:user_groups[k] for k in D1 if k in user_groups}
     print("Size of cluster 4: ", len(user_groupsD))
-
     # Cluster 5    
-    E1 = np.random.choice(keylist, cluster_size, replace=False)
+    E1 = keylist[4*cluster_size:5*cluster_size] #np.random.choice(keylist, cluster_size, replace=False)
     print("E1: ", E1)
     user_groupsE = {k:user_groups[k] for k in E1 if k in user_groups}
     print("Size of cluster 5: ", len(user_groupsE))
     # Cluster 6
-    F1 = np.random.choice(keylist, cluster_size, replace=False)    
+    F1 = keylist[5*cluster_size:6*cluster_size] #np.random.choice(keylist, cluster_size, replace=False)    
     print("F1: ", F1)
     user_groupsF = {k:user_groups[k] for k in F1 if k in user_groups}
     print("Size of cluster 6: ", len(user_groupsF))
     # Cluster 7    
-    G1 = np.random.choice(keylist, cluster_size, replace=False)
+    G1 = keylist[6*cluster_size:7*cluster_size] #np.random.choice(keylist, cluster_size, replace=False)
     print("G1: ", G1)
     user_groupsG = {k:user_groups[k] for k in G1 if k in user_groups}
     print("Size of cluster 7: ", len(user_groupsC))
     # Cluster 8
-    H1 = np.random.choice(keylist, cluster_size, replace=False)
+    H1 = keylist[7*cluster_size:] #np.random.choice(keylist, cluster_size, replace=False)
     print("H1: ", H1)
     user_groupsH = {k:user_groups[k] for k in H1 if k in user_groups}
     print("Size of cluster 8: ", len(user_groupsH))
@@ -260,55 +259,54 @@ if __name__ == '__main__':
 
     # for epoch in tqdm(range(args.epochs)):
     # for epoch in range(args.epochs):
-    # while testacc_check < args.test_acc or epoch < args.epochs:
-    while epoch < args.epochs:        
+    while testacc_check < args.test_acc or epoch < args.epochs:
+    # while epoch < args.epochs:        
         local_weights, local_losses, local_accuracies= [], [], []
         print(f'\n | Global Training Round : {epoch+1} |\n')
         
         # ============== TRAIN ==============
         global_model.train()
         
-        # Cluster A
+        # ===== Cluster A =====
         A_model, A_weights, A_losses = fl_train(args, train_dataset, cluster_modelA, A1, user_groupsA, args.Cepochs)        
         local_weights.append(copy.deepcopy(A_weights))
         local_losses.append(copy.deepcopy(A_losses))    
-        cluster_modelA = A_model    
-        # Cluster B
+        cluster_modelA = global_model# = A_model    
+        # ===== Cluster B ===== 
         B_model, B_weights, B_losses = fl_train(args, train_dataset, cluster_modelB, B1, user_groupsB, args.Cepochs)
         local_weights.append(copy.deepcopy(B_weights))
         local_losses.append(copy.deepcopy(B_losses))
-        cluster_modelB = B_model 
-        # Cluster C
+        cluster_modelB = global_model# = B_model 
+        # ===== Cluster C ===== 
         C_model, C_weights, C_losses = fl_train(args, train_dataset, cluster_modelC, C1, user_groupsC, args.Cepochs)
         local_weights.append(copy.deepcopy(C_weights))
         local_losses.append(copy.deepcopy(C_losses))   
-        cluster_modelC = C_model      
-        # Cluster D
+        cluster_modelC = global_model# = C_model      
+        # ===== Cluster D ===== 
         D_model, D_weights, D_losses = fl_train(args, train_dataset, cluster_modelD, D1, user_groupsD, args.Cepochs)
         local_weights.append(copy.deepcopy(D_weights))
         local_losses.append(copy.deepcopy(D_losses))
-        cluster_modelD = D_model 
-
-        # Cluster E
+        cluster_modelD = global_model# = D_model 
+        # ===== Cluster E ===== 
         E_model, E_weights, E_losses = fl_train(args, train_dataset, cluster_modelE, E1, user_groupsE, args.Cepochs)        
         local_weights.append(copy.deepcopy(E_weights))
         local_losses.append(copy.deepcopy(E_losses))    
-        cluster_modelE = E_model    
-        # Cluster F
+        cluster_modelE = global_model# = E_model    
+        # ===== Cluster F ===== 
         F_model, F_weights, F_losses = fl_train(args, train_dataset, cluster_modelF, F1, user_groupsF, args.Cepochs)
         local_weights.append(copy.deepcopy(F_weights))
         local_losses.append(copy.deepcopy(F_losses))
-        cluster_modelF = F_model 
-        # Cluster G
+        cluster_modelF = global_model# = F_model 
+        # ===== Cluster G ===== 
         G_model, G_weights, G_losses = fl_train(args, train_dataset, cluster_modelG, G1, user_groupsG, args.Cepochs)
         local_weights.append(copy.deepcopy(G_weights))
         local_losses.append(copy.deepcopy(G_losses))   
-        cluster_modelG = G_model      
-        # Cluster H
+        cluster_modelG = global_model# = G_model      
+        # ===== Cluster H ===== 
         H_model, H_weights, H_losses = fl_train(args, train_dataset, cluster_modelH, H1, user_groupsH, args.Cepochs)
         local_weights.append(copy.deepcopy(H_weights))
         local_losses.append(copy.deepcopy(H_losses))
-        cluster_modelH = H_model 
+        cluster_modelH = global_model# = H_model 
         
         
         # averaging global weights
@@ -358,7 +356,7 @@ if __name__ == '__main__':
     print("|---- Test Accuracy: {:.2f}%".format(100*test_acc))
 
     # Saving the objects train_loss and train_accuracy:
-    file_name = '../save/objects/HFL4_{}_{}_{}_lr[{}]_C[{}]_iid[{}]_E[{}]_B[{}].pkl'.\
+    file_name = '../save/objects/HFL8_{}_{}_{}_lr[{}]_C[{}]_iid[{}]_E[{}]_B[{}].pkl'.\
     format(args.dataset, args.model, epoch, args.lr, args.frac, args.iid,
            args.local_ep, args.local_bs)
 

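For illustration, a minimal sketch of the contiguous split introduced above, using hypothetical values (num_users=100, num_clusters=8) rather than anything taken from this commit; note that the last cluster H1 absorbs the remainder:

```python
num_users, num_clusters = 100, 8             # hypothetical values, for illustration only
keylist = list(range(num_users))
cluster_size = num_users // num_clusters     # int(args.num_users / args.num_clusters) -> 12

A1 = keylist[:cluster_size]                  # users 0..11
B1 = keylist[cluster_size:2 * cluster_size]  # users 12..23, and so on for C1..G1
H1 = keylist[7 * cluster_size:]              # remaining users, 16 of them in this example

print(len(A1), len(B1), len(H1))             # 12 12 16
```
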
File diff suppressed because it is too large
+ 33 - 17
src/federated-hierarchical_v1_twoclusters-changeEval.ipynb


+ 2 - 1
src/federated_main.py

@@ -17,7 +17,7 @@ from options import args_parser
 from update import LocalUpdate, test_inference
 from models import MLP, CNNMnist, CNNFashion_Mnist, CNNCifar
 from utils import get_dataset, average_weights, exp_details
-
+# os.environ['CUDA_VISIBLE_DEVICES'] ='0'
 
 if __name__ == '__main__':
     start_time = time.time()
@@ -31,6 +31,7 @@ if __name__ == '__main__':
 
     if args.gpu:
         torch.cuda.set_device(args.gpu)
+        # torch.cuda.set_device(0)
     device = 'cuda' if args.gpu else 'cpu'
 
     # load dataset and user groups

+ 26 - 6
src/models.py

@@ -109,21 +109,41 @@ class CNNFashion_Mnist(nn.Module):
         return out
 
 
+# class CNNCifar(nn.Module):
+#     def __init__(self, args):
+#         super(CNNCifar, self).__init__()
+#         self.conv1 = nn.Conv2d(3, 6, 5)
+#         self.pool = nn.MaxPool2d(2, 2)
+#         self.conv2 = nn.Conv2d(6, 16, 5)
+#         self.fc1 = nn.Linear(16 * 5 * 5, 120)
+#         self.fc2 = nn.Linear(120, 84)
+#         self.fc3 = nn.Linear(84, args.num_classes)
+
+#     def forward(self, x):
+#         x = self.pool(F.relu(self.conv1(x)))
+#         x = self.pool(F.relu(self.conv2(x)))
+#         x = x.view(-1, 16 * 5 * 5)
+#         x = F.relu(self.fc1(x))
+#         x = F.relu(self.fc2(x))
+#         x = self.fc3(x)
+#         return F.log_softmax(x, dim=1)
+
+# Change CNNCifar model to 917350 params
 class CNNCifar(nn.Module):
     def __init__(self, args):
         super(CNNCifar, self).__init__()
-        self.conv1 = nn.Conv2d(3, 6, 5)
+        self.conv1 = nn.Conv2d(3, 32, 5)
         self.pool = nn.MaxPool2d(2, 2)
-        self.conv2 = nn.Conv2d(6, 16, 5)
-        self.fc1 = nn.Linear(16 * 5 * 5, 120)
-        self.fc2 = nn.Linear(120, 84)
+        self.conv2 = nn.Conv2d(32, 64, 5)
+        self.fc1 = nn.Linear(64 * 5 * 5, 512)
+        self.fc2 = nn.Linear(512, 84)
         self.fc3 = nn.Linear(84, args.num_classes)
 
     def forward(self, x):
         x = self.pool(F.relu(self.conv1(x)))
         x = self.pool(F.relu(self.conv2(x)))
-        x = x.view(-1, 16 * 5 * 5)
+        x = x.view(-1, 64 * 5 * 5)
         x = F.relu(self.fc1(x))
         x = F.relu(self.fc2(x))
         x = self.fc3(x)
-        return F.log_softmax(x, dim=1)
+        return F.log_softmax(x, dim=1)

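
As a sanity check on the commit message, a small sketch that counts the parameters of the updated CNNCifar layers (assuming args.num_classes == 10, as for CIFAR-10):

```python
import torch.nn as nn

# Same layers as the new CNNCifar above, with num_classes assumed to be 10.
layers = [
    nn.Conv2d(3, 32, 5),         # 3*32*5*5 + 32   =   2,432
    nn.Conv2d(32, 64, 5),        # 32*64*5*5 + 64  =  51,264
    nn.Linear(64 * 5 * 5, 512),  # 1600*512 + 512  = 819,712
    nn.Linear(512, 84),          # 512*84 + 84     =  43,092
    nn.Linear(84, 10),           # 84*10 + 10      =     850
]
total = sum(p.numel() for layer in layers for p in layer.parameters())
print(total)  # 917350, matching "917350 params" in the commit message
```
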
Some files were not shown because too many files changed in this diff