diff --git a/privacy/bolt_on/__init__.py b/privacy/bolt_on/__init__.py index 52b1b29..075edf9 100644 --- a/privacy/bolt_on/__init__.py +++ b/privacy/bolt_on/__init__.py @@ -19,7 +19,7 @@ import tensorflow as tf if LooseVersion(tf.__version__) < LooseVersion("2.0.0"): raise ImportError("Please upgrade your version " "of tensorflow from: {0} to at least 2.0.0 to " - "use privacy/bolton".format(LooseVersion(tf.__version__))) + "use privacy/bolt_on".format(LooseVersion(tf.__version__))) if hasattr(sys, "skip_tf_privacy_import"): # Useful for standalone scripts. pass else: diff --git a/privacy/bolt_on/losses.py b/privacy/bolt_on/losses.py index c742326..81bd0c3 100644 --- a/privacy/bolt_on/losses.py +++ b/privacy/bolt_on/losses.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -"""Loss functions for bolton method.""" +"""Loss functions for BoltOn method.""" from __future__ import absolute_import from __future__ import division diff --git a/privacy/bolt_on/models.py b/privacy/bolt_on/models.py index 98f2167..7cdcccd 100644 --- a/privacy/bolt_on/models.py +++ b/privacy/bolt_on/models.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -"""BoltOn model for bolton method of differentially private ML.""" +"""BoltOn model for BoltOn method of differentially private ML.""" from __future__ import absolute_import from __future__ import division @@ -134,7 +134,7 @@ class BoltOnModel(Model): # pylint: disable=abstract-method whose dim == n_classes. n_samples: the number of individual samples in x. epsilon: privacy parameter, which trades off between utility an privacy. - See the bolton paper for more description. + See the BoltOn paper for more description. 
noise_distribution: the distribution to pull noise from. steps_per_epoch: **kwargs: kwargs to keras Model.fit. See super. diff --git a/privacy/bolt_on/optimizers.py b/privacy/bolt_on/optimizers.py index 97d1aba..3536450 100644 --- a/privacy/bolt_on/optimizers.py +++ b/privacy/bolt_on/optimizers.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -"""BoltOn Optimizer for bolton method.""" +"""BoltOn Optimizer for BoltOn method.""" from __future__ import absolute_import from __future__ import division @@ -91,7 +91,7 @@ class BoltOn(optimizer_v2.OptimizerV2): BoltOn optimizer wraps another tf optimizer to be used as the visible optimizer to the tf model. No matter the optimizer - passed, "BoltOn" enables the bolton model to control the learning rate + passed, "BoltOn" enables the BoltOn model to control the learning rate based on the strongly convex loss. To use the BoltOn method, you must: @@ -100,7 +100,7 @@ class BoltOn(optimizer_v2.OptimizerV2): This can be accomplished by the following: optimizer = tf.optimizers.SGD() - loss = privacy.bolton.losses.StrongConvexBinaryCrossentropy() + loss = privacy.bolt_on.losses.StrongConvexBinaryCrossentropy() bolton = BoltOn(optimizer, loss) with bolton(*args) as _: model.fit() diff --git a/tutorials/bolton_tutorial.py b/tutorials/bolton_tutorial.py index 5a3b748..fdfe338 100644 --- a/tutorials/bolton_tutorial.py +++ b/tutorials/bolton_tutorial.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-"""Tutorial for bolton module, the model and the optimizer.""" +"""Tutorial for bolt_on module, the model and the optimizer.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function @@ -48,7 +48,7 @@ bolt = models.BoltOnModel(n_outputs) # tell the model how many outputs we have. # ------- # Now, we will pick our optimizer and Strongly Convex Loss function. The loss # must extend from StrongConvexMixin and implement the associated methods.Some -# existing loss functions are pre - implemented in bolton.loss +# existing loss functions are pre-implemented in bolt_on.losses # ------- optimizer = tf.optimizers.SGD() reg_lambda = 1 @@ -132,7 +132,7 @@ bolt.fit(generator, noise_distribution=noise_distribution, verbose=0) # ------- -# You don't have to use the bolton model to use the BoltOn method. +# You don't have to use the BoltOn model to use the BoltOn method. # There are only a few requirements: # 1. make sure any requirements from the loss are implemented in the model. # 2. instantiate the optimizer and use it as a context around the fit operation.