#!/bin/bash
# Run federated-learning (FL) and hierarchical-FL (HFL) experiments on CIFAR,
# first in 32-bit floating point, then in 16-bit floating point.
# Bash scripting reference: https://hackernoon.com/know-shell-scripting-202b2fbe03a8

# GPU device passed to every training script.
GPU_ID="cuda:0"

# The fp16 runs pipe through `tee -a ../logs/...`; make sure the directory exists.
mkdir -p ../logs

# ================ 32-bit ================
# These runs execute sequentially (no '&'): each blocks until it finishes.

# FL (single cluster), 32-bit floating point
python federated_main.py --local_ep=5 --local_bs=50 --frac=0.1 --model=cnn --dataset=cifar --iid=1 --gpu=1 --gpu_id="$GPU_ID" --lr=0.01 --epochs=300

# 2-cluster HFL, 32-bit floating point
python federated-hierarchical2_main.py --local_ep=5 --local_bs=50 --frac=0.1 --Cepochs=10 --model=cnn --dataset=cifar --iid=1 --num_cluster=2 --gpu=1 --gpu_id="$GPU_ID" --lr=0.01 --epochs=100

# 4-cluster HFL, 32-bit floating point
python federated-hierarchical4_main.py --local_ep=5 --local_bs=50 --frac=0.1 --Cepochs=10 --model=cnn --dataset=cifar --iid=1 --gpu=1 --gpu_id="$GPU_ID" --lr=0.01 --epochs=100 --num_cluster=4

# 8-cluster HFL, 32-bit floating point (NOTE: no --frac here, matching the original script)
python federated-hierarchical8_main.py --local_ep=5 --local_bs=50 --Cepochs=10 --model=cnn --dataset=cifar --iid=1 --gpu=1 --gpu_id="$GPU_ID" --lr=0.01 --epochs=100 --num_cluster=8

# ================ 16-bit ================
# These runs are launched in the background ('&') and their terminal output is
# appended to per-run log files under ../logs/.

# Baseline without FL, 16-bit floating point
python ./baseline_main_fp16.py --epochs=10 --model=cnn --dataset=cifar --num_classes=10 --gpu=1 --gpu_id="$GPU_ID" | tee -a ../logs/terminaloutput_cifar_fp16_baseline.txt &

# 1-cluster FL, 16-bit floating point
python ./federated_main_fp16.py --local_ep=5 --local_bs=50 --frac=0.1 --model=cnn --dataset=cifar --iid=1 --gpu=1 --gpu_id="$GPU_ID" --lr=0.01 --test_acc=85 --epochs=100 | tee -a ../logs/terminaloutput_cifar_fp16_1c_10ep_ta85.txt &
python ./federated_main_fp16.py --local_ep=5 --local_bs=50 --frac=0.1 --model=cnn --dataset=cifar --iid=1 --gpu=1 --gpu_id="$GPU_ID" --lr=0.01 --epochs=200 | tee -a ../logs/terminaloutput_cifar_fp16_1c_200ep_ta95.txt &
python ./federated_main_fp16.py --local_ep=5 --local_bs=50 --frac=0.1 --model=cnn --dataset=cifar --iid=1 --gpu=1 --gpu_id="$GPU_ID" --lr=0.01 --epochs=300 | tee -a ../logs/terminaloutput_cifar_fp16_1c_300ep_ta95.txt &

# 2-cluster HFL, 16-bit floating point
python ./federated-hierarchical2_main_fp16.py --local_ep=5 --local_bs=50 --frac=0.1 --Cepochs=10 --model=cnn --dataset=cifar --iid=1 --num_cluster=2 --gpu=1 --gpu_id="$GPU_ID" --lr=0.01 --epochs=100 --test_acc=85 | tee -a ../logs/terminaloutput_cifar_fp16_2c_100ep_ta85.txt &
python ./federated-hierarchical2_main_fp16.py --local_ep=5 --local_bs=50 --frac=0.1 --Cepochs=10 --model=cnn --dataset=cifar --iid=1 --num_cluster=2 --gpu=1 --gpu_id="$GPU_ID" --lr=0.01 --epochs=100 | tee -a ../logs/terminaloutput_cifar_fp16_2c_100ep_t95.txt &

# 4-cluster HFL, 16-bit floating point
python ./federated-hierarchical4_main_fp16.py --local_ep=5 --local_bs=50 --frac=0.1 --Cepochs=10 --model=cnn --dataset=cifar --iid=1 --gpu=1 --gpu_id="$GPU_ID" --lr=0.01 --epochs=100 --num_cluster=4 | tee -a ../logs/terminaloutput_cifar_fp16_4c_100ep_t95.txt &

# 8-cluster HFL, 16-bit floating point (no --frac, matching the original script)
python ./federated-hierarchical8_main_fp16.py --local_ep=5 --local_bs=50 --Cepochs=10 --model=cnn --dataset=cifar --iid=1 --gpu=1 --gpu_id="$GPU_ID" --lr=0.01 --epochs=100 --num_cluster=8 | tee -a ../logs/terminaloutput_cifar_fp16_8c_100ep_t95.txt &

# Block until all backgrounded fp16 runs finish; otherwise the script exits
# while the jobs are still running.
wait
  29. python ./federated-hierarchical8_main_fp16.py --local_ep=5 --local_bs=50 --Cepochs=10 --model=cnn --dataset=cifar --iid=1 --gpu=1 --gpu_id=$GPU_ID --lr=0.01 --epochs=100 --num_cluster=8 | tee -a ../logs/terminaloutput_cifar_fp16_8c_100ep_t95.txt &