From 6fb9a6ce987e23dea39a2a20156293267fc2862b Mon Sep 17 00:00:00 2001
From: Aitik Gupta
Date: Sat, 30 Jan 2021 01:31:54 +0530
Subject: [PATCH] REF: Remove annotations for Python 3.6
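The `annotations` future-import (PEP 563) was only added in Python 3.7, so
any module containing it fails to even compile on 3.6. A rough sketch of the
failure mode (the exact traceback wording may vary across 3.6 patch
releases):

    $ python3.6 -c "from __future__ import annotations"
      File "<string>", line 1
    SyntaxError: future feature annotations is not defined

The bare `-> None` return annotations are valid 3.6 syntax on their own;
they are removed here only to keep the signatures uniformly unannotated.
Parameter annotations such as `shape: tuple` stay, since 3.6 supports those
natively.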
---
 README.md                                    |  2 +-
 swi_ml/classification/logistic_regression.py |  2 +-
 swi_ml/regression/linear_regression.py       | 34 +++++++++-----------
 swi_ml/svm/svm.py                            |  6 ++--
 swi_ml/utils/regularisers.py                 |  8 ++---
 5 files changed, 25 insertions(+), 27 deletions(-)

diff --git a/README.md b/README.md
index 40b65c8..a058692 100644
--- a/README.md
+++ b/README.md
@@ -156,7 +156,7 @@ Aitik Gupta - [Personal Website][website-url]
 [linkedin-shield]: https://img.shields.io/badge/-LinkedIn-black.svg?style=flat-square&logo=linkedin&colorB=555
 [linkedin-url]: https://linkedin.com/in/aitik-gupta
 [product-screenshot]: images/screenshot.png
-[python-shield]: https://img.shields.io/badge/python-3.7+-blue.svg
+[python-shield]: https://img.shields.io/badge/python-3.6+-blue.svg
 [python-url]: https://www.python.org/
 [website-shield]: https://img.shields.io/badge/website-aitikgupta.ml-blue?style=flat-square
 [website-url]: https://aitikgupta.github.io/
\ No newline at end of file
diff --git a/swi_ml/classification/logistic_regression.py b/swi_ml/classification/logistic_regression.py
index 33988b7..2bd309d 100644
--- a/swi_ml/classification/logistic_regression.py
+++ b/swi_ml/classification/logistic_regression.py
@@ -15,7 +15,7 @@ def __init__(
         normalize=False,
         initialiser="uniform",
         verbose=None,
-    ) -> None:
+    ):
         self.activation = activations.Sigmoid()
         regularisation = L1_L2Regularisation(
             multiply_factor=multiply_factor, l1_ratio=l1_ratio
         )
diff --git a/swi_ml/regression/linear_regression.py b/swi_ml/regression/linear_regression.py
index 8c27935..0b90b1e 100644
--- a/swi_ml/regression/linear_regression.py
+++ b/swi_ml/regression/linear_regression.py
@@ -1,5 +1,3 @@
-from __future__ import annotations
-
 import logging
 import math
 import time
@@ -32,7 +30,7 @@ def __init__(
         regularisation=None,
         initialiser="uniform",
         verbose=None,
-    ) -> None:
+    ):
         if verbose is not None:
             logger.setLevel(verbose)
         else:
@@ -45,7 +43,7 @@ def __init__(
         self.history = []
         self.backend = super().get_backend()
 
-    def _initialise_uniform_weights(self, shape: tuple) -> None:
+    def _initialise_uniform_weights(self, shape: tuple):
         self.num_samples, self.num_features = shape
         limit = 1 / math.sqrt(self.num_features)
         self.W = self.backend.asarray(
@@ -55,7 +53,7 @@ def _initialise_uniform_weights(self, shape: tuple) -> None:
             1,
         )
 
-    def _initialise_zeros_weights(self, shape: tuple) -> None:
+    def _initialise_zeros_weights(self, shape: tuple):
         self.num_samples, self.num_features = shape
         self.W = self.backend.asarray(
             self.backend.zeros(
@@ -66,7 +64,7 @@ def _initialise_zeros_weights(self, shape: tuple) -> None:
             1,
         )
 
-    def _update_history(self) -> None:
+    def _update_history(self):
         self.history.append(self.curr_loss)
 
     def _update_weights(self):
@@ -82,7 +80,7 @@ def MSE_loss(self, Y_true, Y_pred):
             0.5 * (Y_true - Y_pred) ** 2
         ) + self.regularisation.add_cost_regularisation(self.W)
 
-    def initialise_weights(self, X) -> None:
+    def initialise_weights(self, X):
         """
         Initialises weights with correct dimensions
         """
@@ -154,7 +152,7 @@ def predict(self, X):
     def _predict(self, X):
         return X.dot(self.W) + self.b
 
-    def plot_loss(self) -> None:
+    def plot_loss(self):
         """
         Plots the loss history curve during the training period.
         NOTE: This function just plots the graph, to display it
@@ -174,7 +172,7 @@ def __init__(
         normalize=False,
         initialiser="uniform",
         verbose=None,
-    ) -> None:
+    ):
         # regularisation of alpha 0 (essentially NIL)
         regularisation = _BaseRegularisation(multiply_factor=0, l1_ratio=0)
         super().__init__(
@@ -186,7 +184,7 @@ def __init__(
             verbose,
         )
 
-    def plot_loss(self) -> None:
+    def plot_loss(self):
         plt.plot(self.history, label="Linear Regression")
         super().plot_loss()
 
@@ -200,7 +198,7 @@ def __init__(
         normalize=False,
         initialiser="uniform",
         verbose=None,
-    ) -> None:
+    ):
         regularisation = L1Regularisation(l1_cost=l1_cost)
         super().__init__(
             num_iterations,
@@ -211,7 +209,7 @@ def __init__(
             verbose,
         )
 
-    def plot_loss(self) -> None:
+    def plot_loss(self):
         plt.plot(self.history, label="Lasso Regression")
         super().plot_loss()
 
@@ -226,7 +224,7 @@ def __init__(
         initialiser="uniform",
         backend="cupy",
         verbose=None,
-    ) -> None:
+    ):
         regularisation = L2Regularisation(l2_cost=l2_cost)
         super().__init__(
             num_iterations,
@@ -237,7 +235,7 @@ def __init__(
             verbose,
         )
 
-    def plot_loss(self) -> None:
+    def plot_loss(self):
         plt.plot(self.history, label="Ridge Regression")
         super().plot_loss()
 
@@ -252,7 +250,7 @@ def __init__(
         normalize=False,
         initialiser="uniform",
         verbose=None,
-    ) -> None:
+    ):
         regularisation = L1_L2Regularisation(
             multiply_factor=multiply_factor, l1_ratio=l1_ratio
         )
@@ -265,7 +263,7 @@ def __init__(
             verbose,
         )
 
-    def plot_loss(self) -> None:
+    def plot_loss(self):
         plt.plot(self.history, label="Elastic Net Regression")
         super().plot_loss()
 
@@ -281,7 +279,7 @@ def __init__(
         normalize=False,
         initialiser="uniform",
         verbose=None,
-    ) -> None:
+    ):
         self.degree = degree
         regularisation = L1_L2Regularisation(
             multiply_factor=multiply_factor, l1_ratio=l1_ratio
@@ -303,7 +301,7 @@ def _predict_preprocess(self, data):
         poly_data = transform_polynomial(data, self.degree)
         return super()._predict_preprocess(poly_data)
 
-    def plot_loss(self) -> None:
+    def plot_loss(self):
         plt.plot(
             self.history, label=f"Polynomial Regression, degree={self.degree}"
         )
diff --git a/swi_ml/svm/svm.py b/swi_ml/svm/svm.py
index 7ff1a00..999110d 100644
--- a/swi_ml/svm/svm.py
+++ b/swi_ml/svm/svm.py
@@ -33,7 +33,7 @@ def __init__(
         self.history = []
         self.backend = super().get_backend()
 
-    def _initialise_uniform_weights(self, shape: tuple) -> None:
+    def _initialise_uniform_weights(self, shape: tuple):
         self.num_samples, self.num_features = shape
         limit = 1 / math.sqrt(self.num_features)
         self.W = self.backend.asarray(
@@ -46,7 +46,7 @@ def _initialise_uniform_weights(self, shape: tuple) -> None:
             self.hinge_constant,
         )
 
-    def _initialise_zeros_weights(self, shape: tuple) -> None:
+    def _initialise_zeros_weights(self, shape: tuple):
         self.num_samples, self.num_features = shape
         self.W = self.backend.asarray(
             self.backend.zeros(
@@ -57,7 +57,7 @@ def _initialise_zeros_weights(self, shape: tuple) -> None:
             1,
         )
 
-    def initialise_weights(self, X) -> None:
+    def initialise_weights(self, X):
         """
         Initialises weights with correct dimensions
         """
diff --git a/swi_ml/utils/regularisers.py b/swi_ml/utils/regularisers.py
index 146b035..820a73f 100644
--- a/swi_ml/utils/regularisers.py
+++ b/swi_ml/utils/regularisers.py
@@ -7,7 +7,7 @@ class _BaseRegularisation(_Backend):
     NOTE: Can be used directly as a L1_L2 (ElasticNet) Regularisation
     """
 
-    def __init__(self, multiply_factor: float, l1_ratio: float) -> None:
+    def __init__(self, multiply_factor: float, l1_ratio: float):
         self.multiply_factor = (
             multiply_factor if multiply_factor is not None else 1
         )
@@ -30,7 +30,7 @@ class L1Regularisation(_BaseRegularisation):
     Lasso Regression Regularisation
     """
 
-    def __init__(self, l1_cost: float) -> None:
+    def __init__(self, l1_cost: float):
         multiply_factor = l1_cost
         l1_ratio = 1
         super().__init__(multiply_factor, l1_ratio)
@@ -41,7 +41,7 @@ class L2Regularisation(_BaseRegularisation):
     Ridge Regression Regularisation
     """
 
-    def __init__(self, l2_cost: float) -> None:
+    def __init__(self, l2_cost: float):
         multiply_factor = l2_cost
         l1_ratio = 0
         super().__init__(multiply_factor, l1_ratio)
@@ -52,5 +52,5 @@ class L1_L2Regularisation(_BaseRegularisation):
     ElasticNet Regression Regularisation
     """
 
-    def __init__(self, multiply_factor: float, l1_ratio: float) -> None:
+    def __init__(self, multiply_factor: float, l1_ratio: float):
         super().__init__(multiply_factor, l1_ratio)