# here is a mathematical expression that takes 3 inputs and produces one output
from math import sin, cos
def f(a, b, c):
    return -a**3 + sin(3*b) - 1.0/c + b**2.5 - a**0.5
print(f(2, 3, 4))
6.336362190988558
\[ \frac{\partial f}{\partial a} = -3a^2 - 0.5a^{-0.5} \]
\[ \frac{\partial f}{\partial b} = 3\cos(3b) + 2.5b^{1.5} \]
\[ \frac{\partial f}{\partial c} = \frac{1}{c^2} \]
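As an optional sanity check on the hand-derived formulas above (assuming sympy is available; it is not used anywhere else in this notebook), the same partial derivatives can be obtained symbolically:

import sympy as sp
# symbolic version of f; positive=True keeps the square root and fractional power real-valued
sa, sb, sc = sp.symbols('a b c', positive=True)
f_sym = -sa**3 + sp.sin(3*sb) - 1/sc + sb**sp.Rational(5, 2) - sp.sqrt(sa)
print(sp.diff(f_sym, sa))  # should match -3a^2 - 0.5a^(-0.5)
print(sp.diff(f_sym, sb))  # should match 3cos(3b) + 2.5b^(1.5)
print(sp.diff(f_sym, sc))  # should match 1/c^2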
# write the function df that returns the analytical gradient of f
# i.e. use your skills from calculus to take the derivative, then implement the formula
# if you do not know calculus then feel free to ask wolframalpha, e.g.:
# https://www.wolframalpha.com/input?i=d%2Fda%28sin%283*a%29%29%29
def gradf(a, b, c):
    return [-3*a**2 - 0.5*a**-0.5,   # df/da
            3*cos(3*b) + 2.5*b**1.5, # df/db
            1.0/c**2]                # df/dc
# expected answer is the list below
ans = [-12.353553390593273, 10.25699027111255, 0.0625]
yours = gradf(2, 3, 4)
for dim in range(3):
    ok = 'OK' if abs(yours[dim] - ans[dim]) < 1e-5 else 'WRONG!'
    print(f"{ok} for dim {dim}: expected {ans[dim]}, yours returns {yours[dim]}")
OK for dim 0: expected -12.353553390593273, yours returns -12.353553390593273
OK for dim 1: expected 10.25699027111255, yours returns 10.25699027111255
OK for dim 2: expected 0.0625, yours returns 0.0625
# now estimate the gradient numerically without any calculus, using
# the approximation we used in the video.
# you should not call the function gradf from the last cell
# -----------
h = 0.000001
numerical_grad = [0, 0, 0] # TODO
a, b, c = 2, 3, 4
numerical_grad[0] = (f(a + h, b, c) - f(a, b, c))/h
numerical_grad[1] = (f(a, b + h, c) - f(a, b, c))/h
numerical_grad[2] = (f(a, b, c + h) - f(a, b, c))/h
# -----------
divergence = 0
for dim in range(3):
    ok = 'OK' if abs(numerical_grad[dim] - ans[dim]) < 1e-5 else 'WRONG!'
    print(f"{ok} for dim {dim}: expected {ans[dim]}, yours returns {numerical_grad[dim]}")
    divergence += abs(numerical_grad[dim] - ans[dim])
print(divergence)
OK for dim 0: expected -12.353553390593273, yours returns -12.353559348809995
OK for dim 1: expected 10.25699027111255, yours returns 10.256991666679482
OK for dim 2: expected 0.0625, yours returns 0.062499984743169534
7.369040485372125e-06
# there is an alternative formula that provides a much better numerical
# approximation to the derivative of a function.
# learn about it here: https://en.wikipedia.org/wiki/Symmetric_derivative
# implement it. confirm that for the same step size h this version gives a
# better approximation.
# -----------
numerical_grad2 = [0, 0, 0] # TODO
numerical_grad2[0] = (f(a + h, b, c) - f(a - h, b, c))/(2*h)
numerical_grad2[1] = (f(a, b + h, c) - f(a, b - h, c))/(2*h)
numerical_grad2[2] = (f(a, b, c + h) - f(a, b, c - h))/(2*h)
# -----------
divergence = 0
for dim in range(3):
    ok = 'OK' if abs(numerical_grad2[dim] - ans[dim]) < 1e-5 else 'WRONG!'
    print(f"{ok} for dim {dim}: expected {ans[dim]}, yours returns {numerical_grad2[dim]}")
    divergence += abs(numerical_grad2[dim] - ans[dim])
print(divergence)
OK for dim 0: expected -12.353553390593273, yours returns -12.353553391353245
OK for dim 1: expected 10.25699027111255, yours returns 10.256990273571631
OK for dim 2: expected 0.0625, yours returns 0.06250000028629188
3.505345347321054e-09
Yes, the symmetric (centered) difference gives a better approximation: for the same step size h, the total divergence of numerical_grad2 (about 3.5e-9) is several orders of magnitude smaller than that of numerical_grad (about 7.4e-6).
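An optional aside (not part of the original exercise): the improvement is systematic. The forward-difference error shrinks roughly in proportion to h, while the centered-difference error shrinks roughly like h squared, which a quick sweep of step sizes on the df/da component makes visible:

# compare forward vs centered differences for df/da at (a, b, c) = (2, 3, 4)
for step in [1e-2, 1e-3, 1e-4]:
    fwd = (f(2 + step, 3, 4) - f(2, 3, 4)) / step
    ctr = (f(2 + step, 3, 4) - f(2 - step, 3, 4)) / (2 * step)
    print(f"h={step:g}: forward error {abs(fwd - ans[0]):.1e}, centered error {abs(ctr - ans[0]):.1e}")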
# Value class starter code, with many functions taken out
from math import exp, log
class Value:

    def __init__(self, data, _children=(), _op='', label=''):
        self.data = data
        self.grad = 0.0
        self._backward = lambda: None
        self._prev = set(_children)
        self._op = _op
        self.label = label

    def __repr__(self):
        return f"Value(data={self.data})"

    def __add__(self, other): # exactly as in the video
        other = other if isinstance(other, Value) else Value(other)
        out = Value(self.data + other.data, (self, other), '+')

        def _backward():
            self.grad += 1.0 * out.grad
            other.grad += 1.0 * out.grad
        out._backward = _backward

        return out

    # ------
    # re-implement all the other functions needed for the exercises below
    # your code here
    # TODO
    # ------
    def __radd__(self, other): # other + self, so that sum() over a list of Values works
        return self + other

    def exp(self): # e**self; d/dx e**x = e**x, i.e. the output itself
        out = Value(exp(self.data), (self,), 'exp')

        def _backward():
            self.grad += out.data * out.grad
        out._backward = _backward

        return out

    def log(self): # natural log; d/dx log(x) = 1/x
        out = Value(log(self.data), (self,), 'log')

        def _backward():
            self.grad += (1.0/self.data) * out.grad
        out._backward = _backward

        return out

    def __truediv__(self, other): # self / other; d/dx (x/y) = 1/y, d/dy (x/y) = -x/y**2
        out = Value(self.data/other.data, (self, other), 'div')

        def _backward():
            self.grad += (1/other.data) * out.grad
            other.grad += -(self.data/(other.data ** 2)) * out.grad
        out._backward = _backward

        return out

    def __neg__(self): # -self
        out = Value(self.data * -1, (self,), 'neg')

        def _backward():
            self.grad += -1 * out.grad
        out._backward = _backward

        return out
    def backward(self): # exactly as in video
        topo = []
        visited = set()
        def build_topo(v):
            if v not in visited:
                visited.add(v)
                for child in v._prev:
                    build_topo(child)
                topo.append(v)
        build_topo(self)

        self.grad = 1.0
        for node in reversed(topo):
            node._backward()
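A quick optional sanity check of the newly added operations (not part of the exercise): each backward pass below should reproduce the known derivative of the corresponding one-variable function.

# d/dx log(x) at x = 2.0 is 0.5
x = Value(2.0); y = x.log(); y.backward(); print(x.grad)
# d/dx exp(x) at x = 1.0 is e ~ 2.718
x = Value(1.0); y = x.exp(); y.backward(); print(x.grad)
# d/dx (x / 3) at x = 6.0 is 1/3; the divisor is wrapped in a Value here,
# since this minimal __truediv__ does not wrap plain numbers
x = Value(6.0); y = x / Value(3.0); y.backward(); print(x.grad)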
# without referencing our code/video __too__ much, make this cell work
# you'll have to implement (in some cases re-implement) a number of functions
# of the Value object, similar to what we've seen in the video.
# instead of the squared error loss this implements the negative log likelihood
# loss, which is very often used in classification.
# this is the softmax function
# https://en.wikipedia.org/wiki/Softmax_function
def softmax(logits):
    counts = [logit.exp() for logit in logits]
    denominator = sum(counts)
    out = [c / denominator for c in counts]
    return out
# this is the negative log likelihood loss function, pervasive in classification
logits = [Value(0.0), Value(3.0), Value(-2.0), Value(1.0)]
probs = softmax(logits)
loss = -probs[3].log() # dim 3 acts as the label for this input example
loss.backward()
ans = [0.041772570515350445, 0.8390245074625319, 0.005653302662216329, -0.8864503806400986]
for dim in range(4):
    ok = 'OK' if abs(logits[dim].grad - ans[dim]) < 1e-5 else 'WRONG!'
    print(f"{ok} for dim {dim}: expected {ans[dim]}, yours returns {logits[dim].grad}")
OK for dim 0: expected 0.041772570515350445, yours returns 0.041772570515350445
OK for dim 1: expected 0.8390245074625319, yours returns 0.8390245074625319
OK for dim 2: expected 0.005653302662216329, yours returns 0.005653302662216329
OK for dim 3: expected -0.8864503806400986, yours returns -0.886450380640099
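As an optional cross-check (not part of the exercise): for the negative log likelihood on softmax outputs, the gradient with respect to logit i has the well-known closed form p_i - 1 for the labeled index and p_i otherwise, which can be evaluated with plain floats:

# closed-form gradient p_i - 1[i == label] for label 3, using math.exp imported above
raw = [0.0, 3.0, -2.0, 1.0]
exps = [exp(v) for v in raw]
total = sum(exps)
p = [e / total for e in exps]
print([p[i] - (1.0 if i == 3 else 0.0) for i in range(4)])  # should match ans above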
# verify the gradient using the torch library
# torch should give you essentially the same gradient (up to float32 precision)
import torch
logits = torch.Tensor([0.0, 3.0, -2.0, 1.0]); logits.requires_grad = True
probs = torch.softmax(logits, dim=0)
loss = -probs[3].log()
loss.backward()
ans = [0.041772570515350445, 0.8390245074625319, 0.005653302662216329, -0.8864503806400986]
for dim in range(4):
    ok = 'OK' if abs(logits.grad[dim] - ans[dim]) < 1e-5 else 'WRONG!'
    print(f"{ok} for dim {dim}: expected {ans[dim]}, yours returns {logits.grad[dim]}")
OK for dim 0: expected 0.041772570515350445, yours returns 0.041772566735744476
OK for dim 1: expected 0.8390245074625319, yours returns 0.8390244841575623
OK for dim 2: expected 0.005653302662216329, yours returns 0.005653302650898695
OK for dim 3: expected -0.8864503806400986, yours returns -0.8864504098892212
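The tiny mismatches above come from torch.Tensor defaulting to float32, while the Value class works in Python's float64. A minimal optional sketch with a double-precision tensor closes the gap:

# same computation with an explicit float64 tensor
logits64 = torch.tensor([0.0, 3.0, -2.0, 1.0], dtype=torch.float64, requires_grad=True)
loss64 = -torch.softmax(logits64, dim=0)[3].log()
loss64.backward()
print(logits64.grad)  # should now agree with ans far beyond the ~1e-8 float32 mismatch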