@@ -3,18 +3,21 @@ import opacus
 from typing import List, Union
 import os
 
-def generate_noise(max_norm, parameter, sigma, noise_type, device):
-    if sigma > 0:
+def generate_noise(max_norm, parameter, noise_multiplier, noise_type, device):
+    """
+    A noise generation function that can utilize different distributions for noise generation.
+    """
+    if noise_multiplier > 0:
         mean = 0
-        scale_scalar = sigma * max_norm
+        scale_scalar = noise_multiplier * max_norm
 
         scale = torch.full(size=parameter.shape, fill_value=scale_scalar, dtype=torch.float32, device=device)
 
-        if noise_type == "gaussian":
+        if noise_type.lower() in ["normal", "gauss", "gaussian"]:
             dist = torch.distributions.normal.Normal(mean, scale)
-        elif noise_type == "laplacian":
+        elif noise_type.lower() in ["laplace", "laplacian"]:
             dist = torch.distributions.laplace.Laplace(mean, scale)
-        elif noise_type == "exponential":
+        elif noise_type.lower() in ["exponential"]:
             rate = 1 / scale
             dist = torch.distributions.exponential.Exponential(rate)
         else:
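
To see the renamed helper in action, here is a minimal usage sketch that is not part of the patch: the tensor dummy_param, the noise_multiplier value, and the device string are assumptions for illustration, and the alias spellings rely on the noise_type.lower() matching introduced above.

# Minimal sketch (hypothetical values): Gaussian and Laplacian noise scaled by
# noise_multiplier * max_norm, shaped like the given parameter tensor.
import torch

dummy_param = torch.zeros(10, 5)                                     # stand-in for a model parameter
gauss = generate_noise(1.0, dummy_param, 1.1, "Gaussian", "cpu")     # "normal"/"gauss" also match
laplace = generate_noise(1.0, dummy_param, 1.1, "laplace", "cpu")    # or "laplacian"
assert gauss.shape == dummy_param.shape == laplace.shape
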
@@ -25,14 +28,23 @@ def generate_noise(max_norm, parameter, sigma, noise_type, device):
         return noise
     return 0.0
 
-def apply_noise(weights, batch_size, sigma, noise_type, device, loss_reduction="mean"):
+# Server side Noise
+def apply_noise(weights, batch_size, noise_multiplier, noise_type, device, loss_reduction="mean"):
+    """
+    A function for applying noise to weights on the (intermediate) server side that utilizes the generate_noise function above.
+    """
     for p in weights.values():
-        noise = generate_noise(0, p, sigma, noise_type, device)
+        noise = generate_noise(0, p, noise_multiplier, noise_type, device)
         if loss_reduction == "mean":
             noise /= batch_size
         p += noise
 
+# Client side Noise
 class PrivacyEngineXL(opacus.PrivacyEngine):
+    """
+    A privacy engine that can utilize different distributions for noise generation, based on opacus' privacy engine.
+    It gets attached to the optimizer just like the privacy engine from opacus.
+    """
 
     def __init__(
         self,
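
For the server-side path, the following sketch spells out the same flow that apply_noise implements: draw per-tensor noise, divide it by the batch size under mean loss reduction, and add it to the aggregated weights in place. The model, batch size, and the nonzero clipping norm passed to generate_noise here are assumed values for illustration only.

# Illustrative server-side sketch (assumed model and hyperparameters): perturb
# each aggregated tensor in place, mirroring apply_noise with loss_reduction="mean".
import torch

model = torch.nn.Linear(4, 2)
aggregated = model.state_dict()          # aggregated weights held by the server
batch_size = 32                          # assumed batch size
clip_norm = 1.0                          # assumed clipping norm, for illustration

for p in aggregated.values():
    noise = generate_noise(clip_norm, p, 1.1, "gaussian", "cpu")
    noise /= batch_size                  # rescale for mean loss reduction
    p += noise                           # in-place perturbation of the weight tensor

model.load_state_dict(aggregated)        # push the noised weights back into the model
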
@@ -75,23 +87,4 @@ class PrivacyEngineXL(opacus.PrivacyEngine):
         self.noise_type = noise_type
 
     def _generate_noise(self, max_norm, parameter):
-        if self.noise_multiplier > 0:
-            mean = 0
-            scale_scalar = self.noise_multiplier * max_norm
-
-            scale = torch.full(size=parameter.grad.shape, fill_value=scale_scalar, dtype=torch.float32, device=self.device)
-
-            if self.noise_type == "gaussian":
-                dist = torch.distributions.normal.Normal(mean, scale)
-            elif self.noise_type == "laplacian":
-                dist = torch.distributions.laplace.Laplace(mean, scale)
-            elif self.noise_type == "exponential":
-                rate = 1 / scale
-                dist = torch.distributions.exponential.Exponential(rate)
-            else:
-                dist = torch.distributions.normal.Normal(mean, scale)
-
-            noise = dist.sample()
-
-            return noise
-            return 0.0
+        return generate_noise(max_norm, parameter, self.noise_multiplier, self.noise_type, self.device)
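
Finally, a client-side usage sketch. The constructor arguments other than noise_type fall outside this diff, so the keyword names below are assumptions based on the opacus 0.x PrivacyEngine that the class extends; only the attach-to-optimizer pattern and the noise_type argument are taken from the code above.

# Hypothetical client-side usage, assuming PrivacyEngineXL keeps the opacus 0.x
# PrivacyEngine constructor and simply adds the noise_type argument stored above.
import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

privacy_engine = PrivacyEngineXL(
    model,
    batch_size=32,                                   # assumed opacus 0.x keyword names
    sample_size=60_000,
    alphas=[1 + x / 10.0 for x in range(1, 100)],
    noise_multiplier=1.1,
    max_grad_norm=1.0,
    noise_type="laplacian",
)
privacy_engine.attach(optimizer)                     # gradients are clipped and noised in optimizer.step()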