diff --git a/.github/.keep b/.github/.keep
new file mode 100644
index 00000000..e69de29b
diff --git a/.github/workflows/classroom.yml b/.github/workflows/classroom.yml
new file mode 100644
index 00000000..76974213
--- /dev/null
+++ b/.github/workflows/classroom.yml
@@ -0,0 +1,67 @@
+name: Autograding Tests
+'on':
+- workflow_dispatch
+- repository_dispatch
+permissions:
+  checks: write
+  actions: read
+  contents: read
+jobs:
+  run-autograding-tests:
+    runs-on: ubuntu-latest
+    if: github.actor != 'github-classroom[bot]'
+    steps:
+    - name: Checkout code
+      uses: actions/checkout@v4
+    - name: Setup
+      id: setup
+      uses: classroom-resources/autograding-command-grader@v1
+      with:
+        test-name: Setup
+        setup-command: sudo -H pip3 install -qr requirements.txt; sudo -H pip3 install
+          flake8==5.0.4 mypy
+        command: flake8 --ignore "N801, E203, E266, E501, W503, F812, E741, N803,
+          N802, N806" minitorch/ tests/ project/; mypy minitorch/
+        timeout: 10
+    - name: Task 0.1
+      id: task-0-1
+      uses: classroom-resources/autograding-command-grader@v1
+      with:
+        test-name: Task 0.1
+        setup-command: sudo -H pip3 install -qr requirements.txt
+        command: pytest -m task0_1
+        timeout: 10
+    - name: Task 0.2
+      id: task-0-2
+      uses: classroom-resources/autograding-command-grader@v1
+      with:
+        test-name: Task 0.2
+        setup-command: sudo -H pip3 install -qr requirements.txt
+        command: pytest -m task0_2
+        timeout: 10
+    - name: Task 0.3
+      id: task-0-3
+      uses: classroom-resources/autograding-command-grader@v1
+      with:
+        test-name: Task 0.3
+        setup-command: sudo -H pip3 install -qr requirements.txt
+        command: pytest -m task0_3
+        timeout: 10
+    - name: Task 0.4
+      id: task-0-4
+      uses: classroom-resources/autograding-command-grader@v1
+      with:
+        test-name: Task 0.4
+        setup-command: sudo -H pip3 install -qr requirements.txt
+        command: pytest -m task0_4
+        timeout: 10
+    - name: Autograding Reporter
+      uses: classroom-resources/autograding-grading-reporter@v1
+      env:
+        SETUP_RESULTS: "${{steps.setup.outputs.result}}"
+        TASK-0-1_RESULTS: "${{steps.task-0-1.outputs.result}}"
+        TASK-0-2_RESULTS: "${{steps.task-0-2.outputs.result}}"
+        TASK-0-3_RESULTS: "${{steps.task-0-3.outputs.result}}"
+        TASK-0-4_RESULTS: "${{steps.task-0-4.outputs.result}}"
+      with:
+        runners: setup,task-0-1,task-0-2,task-0-3,task-0-4
diff --git a/.gitignore b/.gitignore
index 9f521073..d4f3ef85 100644
--- a/.gitignore
+++ b/.gitignore
@@ -129,4 +129,7 @@ dmypy.json
 .pyre/
 *.\#*
 data/
-pyodide
\ No newline at end of file
+pyodide
+
+# VS Code configuration
+.vscode/
diff --git a/README.md b/README.md
index 62e4d6ba..00d3d013 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,7 @@
+[![Open in Visual Studio Code](https://classroom.github.com/assets/open-in-vscode-2e0aaae1b6195c2367325f4f02e2d04e9abb55f0b24a779b69b11b9e10269abc.svg)](https://classroom.github.com/online_ide?assignment_repo_id=23504068&assignment_repo_type=AssignmentRepo)
 # MiniTorch Module 0
-
+![Result](images/result.png)
 
 * Docs: https://minitorch.github.io/
 
diff --git a/images/result.png b/images/result.png
new file mode 100644
index 00000000..6dcbd1b9
Binary files /dev/null and b/images/result.png differ
diff --git a/minitorch/datasets.py b/minitorch/datasets.py
index b3bd9faa..9dea8cd9 100644
--- a/minitorch/datasets.py
+++ b/minitorch/datasets.py
@@ -4,7 +4,8 @@
 from typing import List, Tuple
 
 
-def make_pts(N):
+def make_pts(N: int) -> List[Tuple[float, float]]:
+    """Generate N random 2D points in the unit square."""
     X = []
     for i in range(N):
         x_1 = random.random()
@@ -20,7 +21,8 @@ class Graph:
     y: List[int]
 
 
-def simple(N):
+def simple(N: int) -> Graph:
+    """Generate a dataset split by the vertical line x_1 = 0.5."""
     X = make_pts(N)
     y = []
     for x_1, x_2 in X:
@@ -29,7 +31,8 @@ def simple(N):
     return Graph(N, X, y)
 
 
-def diag(N):
+def diag(N: int) -> Graph:
+    """Generate a dataset split by the diagonal x_1 + x_2 = 0.5."""
     X = make_pts(N)
     y = []
     for x_1, x_2 in X:
@@ -38,7 +41,8 @@ def diag(N):
     return Graph(N, X, y)
 
 
-def split(N):
+def split(N: int) -> Graph:
+    """Generate a dataset with the positive class in two outer vertical bands."""
     X = make_pts(N)
     y = []
     for x_1, x_2 in X:
@@ -47,7 +51,8 @@ def split(N):
     return Graph(N, X, y)
 
 
-def xor(N):
+def xor(N: int) -> Graph:
+    """Generate an XOR dataset: opposite quadrants share a label."""
     X = make_pts(N)
     y = []
     for x_1, x_2 in X:
@@ -56,7 +61,8 @@ def xor(N):
     return Graph(N, X, y)
 
 
-def circle(N):
+def circle(N: int) -> Graph:
+    """Generate a dataset with the positive class outside a centered circle."""
     X = make_pts(N)
     y = []
     for x_1, x_2 in X:
@@ -66,20 +72,32 @@ def circle(N):
     return Graph(N, X, y)
 
 
-def spiral(N):
+def spiral(N: int) -> Graph:
+    """Generate a two-armed spiral dataset."""
-    def x(t):
+    def x(t: float) -> float:
         return t * math.cos(t) / 20.0
 
-    def y(t):
+    def y(t: float) -> float:
         return t * math.sin(t) / 20.0
 
-    X = [(x(10.0 * (float(i) / (N // 2))) + 0.5, y(10.0 * (float(i) / (N //
-        2))) + 0.5) for i in range(5 + 0, 5 + N // 2)]
-    X = X + [(y(-10.0 * (float(i) / (N // 2))) + 0.5, x(-10.0 * (float(i) /
-        (N // 2))) + 0.5) for i in range(5 + 0, 5 + N // 2)]
+
+    X = [
+        (x(10.0 * (float(i) / (N // 2))) + 0.5, y(10.0 * (float(i) / (N // 2))) + 0.5)
+        for i in range(5 + 0, 5 + N // 2)
+    ]
+    X = X + [
+        (y(-10.0 * (float(i) / (N // 2))) + 0.5, x(-10.0 * (float(i) / (N // 2))) + 0.5)
+        for i in range(5 + 0, 5 + N // 2)
+    ]
     y2 = [0] * (N // 2) + [1] * (N // 2)
     return Graph(N, X, y2)
 
 
-datasets = {'Simple': simple, 'Diag': diag, 'Split': split, 'Xor': xor,
-    'Circle': circle, 'Spiral': spiral}
+datasets = {
+    "Simple": simple,
+    "Diag": diag,
+    "Split": split,
+    "Xor": xor,
+    "Circle": circle,
+    "Spiral": spiral,
+}
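The `datasets` mapping above is the entry point the rest of the course code uses. A minimal usage sketch, assuming `minitorch` is importable from the repo root and that `Graph` exposes the `N`, `X`, and `y` fields declared above:

```python
from minitorch.datasets import datasets

# Build a 50-point XOR dataset; the key selects one of the generators above.
graph = datasets["Xor"](50)
assert graph.N == 50
x_1, x_2 = graph.X[0]  # a random point in the unit square
label = graph.y[0]     # its 0/1 class label
```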
"""Generate a simple dataset.""" X = make_pts(N) y = [] for x_1, x_2 in X: @@ -29,7 +31,8 @@ def simple(N): return Graph(N, X, y) -def diag(N): +def diag(N: int) -> Graph: + """Generate a diagonal dataset.""" X = make_pts(N) y = [] for x_1, x_2 in X: @@ -38,7 +41,8 @@ def diag(N): return Graph(N, X, y) -def split(N): +def split(N: int) -> Graph: + """Generate a split dataset.""" X = make_pts(N) y = [] for x_1, x_2 in X: @@ -47,7 +51,8 @@ def split(N): return Graph(N, X, y) -def xor(N): +def xor(N: int) -> Graph: + """Generate an XOR dataset.""" X = make_pts(N) y = [] for x_1, x_2 in X: @@ -56,7 +61,8 @@ def xor(N): return Graph(N, X, y) -def circle(N): +def circle(N: int) -> Graph: + """Generate a circular dataset.""" X = make_pts(N) y = [] for x_1, x_2 in X: @@ -66,20 +72,32 @@ def circle(N): return Graph(N, X, y) -def spiral(N): +def spiral(N: int) -> Graph: + """Generate a spiral dataset.""" - def x(t): + def x(t: float) -> float: return t * math.cos(t) / 20.0 - def y(t): + def y(t: float) -> float: return t * math.sin(t) / 20.0 - X = [(x(10.0 * (float(i) / (N // 2))) + 0.5, y(10.0 * (float(i) / (N // - 2))) + 0.5) for i in range(5 + 0, 5 + N // 2)] - X = X + [(y(-10.0 * (float(i) / (N // 2))) + 0.5, x(-10.0 * (float(i) / - (N // 2))) + 0.5) for i in range(5 + 0, 5 + N // 2)] + + X = [ + (x(10.0 * (float(i) / (N // 2))) + 0.5, y(10.0 * (float(i) / (N // 2))) + 0.5) + for i in range(5 + 0, 5 + N // 2) + ] + X = X + [ + (y(-10.0 * (float(i) / (N // 2))) + 0.5, x(-10.0 * (float(i) / (N // 2))) + 0.5) + for i in range(5 + 0, 5 + N // 2) + ] y2 = [0] * (N // 2) + [1] * (N // 2) return Graph(N, X, y2) -datasets = {'Simple': simple, 'Diag': diag, 'Split': split, 'Xor': xor, - 'Circle': circle, 'Spiral': spiral} +datasets = { + "Simple": simple, + "Diag": diag, + "Split": split, + "Xor": xor, + "Circle": circle, + "Spiral": spiral, +} diff --git a/minitorch/module.py b/minitorch/module.py index 0a66058c..fbac16d7 100644 --- a/minitorch/module.py +++ b/minitorch/module.py @@ -31,13 +31,15 @@ def modules(self) -> Sequence[Module]: def train(self) -> None: """Set the mode of this module and all descendent modules to `train`.""" - # TODO: Implement for Task 0.4. - raise NotImplementedError("Need to implement for Task 0.4") + self.training = True + for m in self._modules.values(): + m.train() def eval(self) -> None: """Set the mode of this module and all descendent modules to `eval`.""" - # TODO: Implement for Task 0.4. - raise NotImplementedError("Need to implement for Task 0.4") + self.training = False + for m in self._modules.values(): + m.eval() def named_parameters(self) -> Sequence[Tuple[str, Parameter]]: """Collect all the parameters of this module and its descendents. @@ -47,13 +49,19 @@ def named_parameters(self) -> Sequence[Tuple[str, Parameter]]: The name and `Parameter` of each ancestor parameter. """ - # TODO: Implement for Task 0.4. - raise NotImplementedError("Need to implement for Task 0.4") + result = [] + for k, v in self._parameters.items(): + result.append((k, v)) + + for mod_name, mod in self._modules.items(): + for sub_name, param in mod.named_parameters(): + result.append((f"{mod_name}.{sub_name}", param)) + + return result def parameters(self) -> Sequence[Parameter]: """Enumerate over all the parameters of this module and its descendents.""" - # TODO: Implement for Task 0.4. - raise NotImplementedError("Need to implement for Task 0.4") + return [p for _, p in self.named_parameters()] def add_parameter(self, k: str, v: Any) -> Parameter: """Manually add a parameter. 
diff --git a/minitorch/operators.py b/minitorch/operators.py
index 37cc7c09..2d514f10 100644
--- a/minitorch/operators.py
+++ b/minitorch/operators.py
@@ -35,6 +35,278 @@
 # TODO: Implement for Task 0.1.
 
 
+def mul(x: float, y: float) -> float:
+    """Calculates $x * y$.
+
+    Args:
+    ----
+        x: First value x.
+        y: Second value y.
+
+    Returns:
+    -------
+        $x * y$.
+
+    """
+    return x * y
+
+
+def id(x: float) -> float:
+    """Preserve the input value unchanged.
+
+    Args:
+    ----
+        x: Input value x.
+
+    Returns:
+    -------
+        The input value x, unchanged.
+
+    """
+    return x
+
+
+def add(x: float, y: float) -> float:
+    """Add two floats $x + y$.
+
+    Args:
+    ----
+        x: First value x.
+        y: Second value y.
+
+    Returns:
+    -------
+        $x + y$.
+
+    """
+    return x + y
+
+
+def neg(x: float) -> float:
+    """Negate the input value.
+
+    Args:
+    ----
+        x: Input value x.
+
+    Returns:
+    -------
+        $-x$.
+
+    """
+    return -1.0 * x
+
+
+def lt(x: float, y: float) -> float:
+    """Checks if one number is less than another.
+
+    Args:
+    ----
+        x: First value x.
+        y: Second value y.
+
+    Returns:
+    -------
+        1.0 if x is less than y else 0.0.
+
+    """
+    return 1.0 if x < y else 0.0
+
+
+def eq(x: float, y: float) -> float:
+    """Checks if two numbers are equal.
+
+    Args:
+    ----
+        x: First value x.
+        y: Second value y.
+
+    Returns:
+    -------
+        1.0 if x is equal to y else 0.0.
+
+    """
+    return 1.0 if x == y else 0.0
+
+
+def max(x: float, y: float) -> float:
+    """Returns the larger of two numbers.
+
+    Args:
+    ----
+        x: First value x.
+        y: Second value y.
+
+    Returns:
+    -------
+        x if $x > y$ else y.
+
+    """
+    return x if x > y else y
+
+
+def is_close(x: float, y: float) -> float:
+    """Check if two numbers are close in value.
+
+    Args:
+    ----
+        x: First value x.
+        y: Second value y.
+
+    Returns:
+    -------
+        1.0 if $|x - y| < 1e-2$, else 0.0.
+
+    """
+    return 1.0 if abs(x - y) < 1e-2 else 0.0
+
+
+def sigmoid(x: float) -> float:
+    """Calculates the sigmoid function.
+
+    The sigmoid function is defined as $f(x) = 1 / (1 + e^{-x})$.
+    It maps any real number to a value between 0.0 and 1.0.
+
+    Args:
+    ----
+        x: Input value.
+
+    Returns:
+    -------
+        The sigmoid of x.
+
+    """
+    if x >= 0:
+        return 1.0 / (1.0 + math.exp(-x))
+    else:
+        # For x < 0 use exp(x) / (1 + exp(x)) to avoid overflow in exp(-x).
+        return math.exp(x) / (1.0 + math.exp(x))
+
+
+def relu(x: float) -> float:
+    r"""Applies the ReLU activation function.
+
+    The Rectified Linear Unit (ReLU) is defined as $f(x) = \max(0, x)$.
+    It returns the input value if it is positive, and 0.0 otherwise.
+
+    Args:
+    ----
+        x: Input value x.
+
+    Returns:
+    -------
+        The ReLU of x.
+
+    """
+    return x if x > 0.0 else 0.0
+
+
+def log(x: float) -> float:
+    """Calculates the natural logarithm.
+
+    The natural logarithm is the inverse of the exponential function,
+    defined for $x > 0$.
+
+    Args:
+    ----
+        x: Input value x.
+
+    Returns:
+    -------
+        The natural logarithm of x, clamped to a minimum positive epsilon.
+
+    """
+    return math.log(max(x, 1e-9))
+
+
+def exp(x: float) -> float:
+    """Calculates the exponential function.
+
+    The exponential function is defined as $f(x) = e^x$.
+
+    Args:
+    ----
+        x: Input value.
+
+    Returns:
+    -------
+        The exponential of x.
+
+    """
+    return math.exp(x)
+
+
+def inv(x: float) -> float:
+    """Calculates the reciprocal.
+
+    The reciprocal is defined as $f(x) = 1 / x$.
+
+    Args:
+    ----
+        x: Input value.
+
+    Returns:
+    -------
+        The reciprocal of x ($1.0 / x$).
+
+    """
+    return 1.0 / x
+
+
+def log_back(x: float, d: float) -> float:
+    r"""Computes the derivative of log times a second arg.
+
+    $f(x) = \log(x)$, so $f'(x) = 1/x$.
+    The backward pass computes $d \times f'(x)$.
+
+    Args:
+    ----
+        x: The input value to the forward function.
+        d: The upstream gradient (gradient flowing back).
+
+    Returns:
+    -------
+        The gradient with respect to x.
+
+    """
+    return d / x
+
+
+def inv_back(x: float, d: float) -> float:
+    r"""Computes the derivative of reciprocal times a second arg.
+
+    $f(x) = 1/x \implies f'(x) = -1/x^2$.
+    The backward pass computes $d \times f'(x)$.
+
+    Args:
+    ----
+        x: The input value to the reciprocal function.
+        d: The upstream gradient (gradient flowing back).
+
+    Returns:
+    -------
+        The derivative $-d / x^2$.
+
+    """
+    return -(d / (x**2))
+
+
+def relu_back(x: float, d: float) -> float:
+    """Computes the derivative of ReLU times a second arg.
+
+    Args:
+    ----
+        x: The input value to the ReLU function during forward pass.
+        d: The gradient flowing back from the upper layer.
+
+    Returns:
+    -------
+        d if x is greater than 0, else 0.0.
+
+    """
+    return d if x > 0.0 else 0.0
+
+
 # ## Task 0.3
 
 # Small practice library of elementary higher-order functions.
@@ -52,3 +324,57 @@
 
 
 # TODO: Implement for Task 0.3.
+
+
+def map(fn: Callable[[float], float]) -> Callable[[Iterable[float]], Iterable[float]]:
+    """Higher-order map that applies a function to a list."""
+
+    def _map(ls: Iterable[float]) -> Iterable[float]:
+        return [fn(x) for x in ls]
+
+    return _map
+
+
+def zipWith(
+    fn: Callable[[float, float], float],
+) -> Callable[[Iterable[float], Iterable[float]], Iterable[float]]:
+    """Higher-order zipWith that combines two lists using a function."""
+
+    def _zipWith(ls1: Iterable[float], ls2: Iterable[float]) -> Iterable[float]:
+        return [fn(x, y) for x, y in zip(ls1, ls2)]
+
+    return _zipWith
+
+
+def reduce(
+    fn: Callable[[float, float], float], start: float
+) -> Callable[[Iterable[float]], float]:
+    """Higher-order reduce that reduces a list to a single value."""
+
+    def _reduce(ls: Iterable[float]) -> float:
+        val = start
+        for x in ls:
+            val = fn(val, x)
+        return val
+
+    return _reduce
+
+
+def negList(ls: Iterable[float]) -> Iterable[float]:
+    """Negate each element in a list using map."""
+    return map(neg)(ls)
+
+
+def addLists(ls1: Iterable[float], ls2: Iterable[float]) -> Iterable[float]:
+    """Add corresponding elements from two lists using zipWith."""
+    return zipWith(add)(ls1, ls2)
+
+
+def sum(ls: Iterable[float]) -> float:
+    """Sum a list of numbers using reduce."""
+    return reduce(add, 0.0)(ls)
+
+
+def prod(ls: Iterable[float]) -> float:
+    """Product of a list of numbers using reduce."""
+    return reduce(mul, 1.0)(ls)
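Two quick checks of the code above, runnable once `minitorch.operators` is importable. The first shows why `sigmoid` branches on the sign of `x`; the second exercises the higher-order helpers:

```python
import math

from minitorch.operators import negList, prod, sigmoid, sum

# The naive 1 / (1 + exp(-x)) overflows for large negative x,
# since math.exp(1000.0) raises OverflowError.
try:
    1.0 / (1.0 + math.exp(1000.0))
except OverflowError:
    pass
print(sigmoid(-1000.0))  # 0.0 via the stable exp(x) / (1 + exp(x)) branch

print(negList([1.0, -2.0]))  # [-1.0, 2.0]  (map(neg))
print(sum([1.0, 2.0, 3.0]))  # 6.0          (reduce(add, 0.0))
print(prod([3.0, 4.0]))      # 12.0         (reduce(mul, 1.0))
```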
diff --git a/requirements.txt b/requirements.txt
index c9cd8a02..ab3113ca 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-colorama==0.4.3
+colorama==0.4.6
 hypothesis == 6.54
 numba == 0.60
 numpy == 2.0.0
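The tests below lean on Hypothesis (pinned above): each `@given` property is checked against many generated floats rather than hand-picked cases. A standalone illustration of the pattern — `small_floats` here is a stand-in for the bounded-floats strategy defined in `tests/strategies.py`:

```python
from hypothesis import given
from hypothesis.strategies import floats

# Stand-in strategy; the repo's `small_floats` is assumed to be similar.
small_floats = floats(min_value=-100.0, max_value=100.0)


@given(small_floats, small_floats)
def test_add_commutes(a: float, b: float) -> None:
    # IEEE-754 addition is commutative, so this holds exactly.
    assert a + b == b + a
```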
diff --git a/tests/test_operators.py b/tests/test_operators.py
index f6e555af..94e0185d 100644
--- a/tests/test_operators.py
+++ b/tests/test_operators.py
@@ -1,3 +1,4 @@
+import math
 from typing import Callable, List, Tuple
 
 import pytest
@@ -23,6 +24,7 @@
     relu,
     relu_back,
     sigmoid,
+    log,
 )
 
 from .strategies import assert_close, small_floats
@@ -104,44 +106,58 @@ def test_sigmoid(a: float) -> None:
     """Check properties of the sigmoid function, specifically
     * It is always between 0.0 and 1.0.
     * one minus sigmoid is the same as sigmoid of the negative
-    * It crosses 0 at 0.5
+    * It crosses 0.5 at 0
     * It is strictly increasing.
     """
-    # TODO: Implement for Task 0.2.
-    raise NotImplementedError("Need to implement for Task 0.2")
+    s = sigmoid(a)
+    # The output always lies in [0.0, 1.0].
+    assert 0.0 <= s <= 1.0
+    # Symmetry: 1 - sigmoid(a) equals sigmoid(-a).
+    assert_close(1.0 - s, sigmoid(-a))
+    # Central point: sigmoid(0) is 0.5.
+    assert_close(sigmoid(0.0), 0.5)
+    # Monotonicity (non-strict, to tolerate float saturation at the tails).
+    assert sigmoid(a + 1.0) >= s
 
 
 @pytest.mark.task0_2
 @given(small_floats, small_floats, small_floats)
 def test_transitive(a: float, b: float, c: float) -> None:
     """Test the transitive property of less-than (a < b and b < c implies a < c)"""
-    # TODO: Implement for Task 0.2.
-    raise NotImplementedError("Need to implement for Task 0.2")
+    # If a < b and b < c, then a < c.
+    if lt(a, b) and lt(b, c):
+        assert lt(a, c)
 
 
 @pytest.mark.task0_2
-def test_symmetric() -> None:
+@given(small_floats, small_floats)
+def test_symmetric(a: float, b: float) -> None:
     """Write a test that ensures that :func:`minitorch.operators.mul` is symmetric, i.e.
     gives the same value regardless of the order of its input.
     """
-    # TODO: Implement for Task 0.2.
-    raise NotImplementedError("Need to implement for Task 0.2")
+    # mul is commutative: a * b == b * a.
+    assert_close(mul(a, b), mul(b, a))
 
 
 @pytest.mark.task0_2
-def test_distribute() -> None:
+@given(small_floats, small_floats, small_floats)
+def test_distribute(x: float, y: float, z: float) -> None:
     r"""Write a test that ensures that your operators distribute, i.e.
     :math:`z \times (x + y) = z \times x + z \times y`
     """
-    # TODO: Implement for Task 0.2.
-    raise NotImplementedError("Need to implement for Task 0.2")
+    # Distribution: z * (x + y) == z * x + z * y.
+    left_side = mul(z, add(x, y))
+    right_side = add(mul(z, x), mul(z, y))
+    assert_close(left_side, right_side)
 
 
 @pytest.mark.task0_2
-def test_other() -> None:
+@given(small_floats)
+def test_other(a: float) -> None:
     """Write a test that ensures some other property holds for your functions."""
-    # TODO: Implement for Task 0.2.
-    raise NotImplementedError("Need to implement for Task 0.2")
+    # Use abs(a) + 0.1 so log's argument is strictly positive; exp inverts log.
+    val = abs(a) + 0.1
+    assert_close(math.exp(log(val)), val)
 
 
 # ## Task 0.3 - Higher-order functions
@@ -168,8 +184,11 @@ def test_sum_distribute(ls1: List[float], ls2: List[float]) -> None:
     """Write a test that ensures that the sum of `ls1` plus the sum of `ls2`
     is the same as the sum of each element of `ls1` plus each element of `ls2`.
     """
-    # TODO: Implement for Task 0.3.
-    raise NotImplementedError("Need to implement for Task 0.3")
+    left_side = minitorch.operators.sum(ls1) + minitorch.operators.sum(ls2)
+    sum_of_pairs = [a + b for a, b in zip(ls1, ls2)]
+    right_side = minitorch.operators.sum(sum_of_pairs)
+
+    assert_close(left_side, right_side)
 
 
 @pytest.mark.task0_3