diff --git a/research/pate_2018/core.py b/research/pate_2018/core.py
index 4abc9d0..36317c0 100644
--- a/research/pate_2018/core.py
+++ b/research/pate_2018/core.py
@@ -152,7 +152,7 @@ def rdp_gaussian(logq, sigma, orders):
     if np.isscalar(orders):
       return 0.
     else:
-      return np.full_like(orders, 0., dtype=np.float)
+      return np.full_like(orders, 0., dtype=float)
 
   variance = sigma**2
 
diff --git a/tensorflow_privacy/privacy/optimizers/dp_optimizer_test.py b/tensorflow_privacy/privacy/optimizers/dp_optimizer_test.py
index 6230ea1..6cea99c 100644
--- a/tensorflow_privacy/privacy/optimizers/dp_optimizer_test.py
+++ b/tensorflow_privacy/privacy/optimizers/dp_optimizer_test.py
@@ -43,7 +43,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
         np.reshape(
             per_example_gradients,
             [num_microbatches,
-             np.int(batch_size / num_microbatches), num_vars]),
+             int(batch_size / num_microbatches), num_vars]),
        axis=1)
    microbatch_gradients_norms = np.linalg.norm(microbatch_gradients, axis=1)
 
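Note (not part of the patch): a minimal sketch of why the substitution matters. NumPy deprecated the np.float and np.int scalar aliases in 1.20 and removed them in 1.24, so the old calls raise AttributeError on current NumPy, while the Python builtins are drop-in replacements in both spots touched here. The concrete orders, batch_size, and num_microbatches values below are illustrative only.

import numpy as np

# Before the patch these calls used np.float / np.int, which NumPy >= 1.24
# no longer provides (AttributeError). The builtins give identical results.
orders = np.array([1.5, 2.0, 4.0])                     # illustrative RDP orders
zeros = np.full_like(orders, 0., dtype=float)          # was dtype=np.float
print(zeros)                                           # [0. 0. 0.]

batch_size, num_microbatches = 8, 4                    # illustrative test sizes
microbatch_size = int(batch_size / num_microbatches)   # was np.int(...)
print(microbatch_size)                                 # 2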