# -*- coding: utf-8 -*-
"""
Created on Sat Oct  3 18:58:46 2020

@author: jbobowsk
"""

# In this Python script we attempt to evaluate numerical derivatives of a
# list of data.

import numpy as np
import matplotlib.pyplot as plt

# First, we introduce the 'diff' function.  diff(x) will take list x and
# create a new list whose elements are equal to the difference between
# adjacent elements in x.  For example, let's make x a list from -10 to 10 in
# steps of 1.
# Build a sample grid from -10 to 10 in unit steps and apply np.diff,
# which returns the element-to-element differences of its input.
xdata = np.arange(-10, 11)
dx = np.diff(xdata)

print('x:', xdata)
print('diff(x):', dx)


# For a uniform grid every difference is 1.  Because each entry of dx is
# formed from a *pair* of neighbours, dx has one fewer element than xdata.
print('length of x:', len(xdata))
print('length of dx:', len(dx))

# Now let's make y = x^2 and plot y vs x.  The result is clearly a
# quadratic.
# Generate y = x^2 and plot it; the curve is an obvious parabola.
y = xdata**2
plt.plot(xdata, y, 'go', fillstyle='none')

# Approximate the derivative by the ratio of adjacent differences:
# dy/dx ~ (y[i+1] - y[i]) / (x[i+1] - x[i]).
dydx = np.diff(y) / np.diff(xdata)
print('dy/dx:', dydx)

# dydx vs x should be a straight line of slope 2.  np.diff shortens the
# arrays by one element, so drop the first x value to match the lengths.
x1 = xdata[1:]
print('length of x1:', len(x1))

# We will fit the data to confirm the slope is about right.
def linearFunc(x, slope, intercept):
    """Straight-line model y = slope*x + intercept used by curve_fit."""
    return slope * x + intercept
from scipy.optimize import curve_fit

# Fit the numerical derivative to a straight line; a_fit holds the
# best-fit (slope, intercept) and cov their covariance matrix.
a_fit, cov = curve_fit(linearFunc, x1, dydx)
print('The best-fit parameters are: (1) slope =', a_fit[0], 'and (2) intercept',
      a_fit[1])

# Plot the numerical derivative and our fit line.
# Plot the numerical derivative together with the fitted line.
plt.figure()
plt.plot(x1, dydx, 'o', fillstyle='none')
# np.polyval takes coefficients highest power first, which matches
# a_fit = [slope, intercept] for a degree-1 polynomial.
plt.plot(x1, np.polyval(a_fit, x1), 'k-')

# Notice from the fit that the slope is indeed 2, but the y-intercept is -1
# instead of the expected zero (each forward difference equals the true
# derivative at the interval midpoint, half a step to the left of the
# plotted x).  This is an artifact of taking derivatives of a discrete set
# of data.  We can improve our results if we reduce the spacing between the
# x data.

# Just for fun, let's add some noise to our data.  Let's suppose that the
# uncertainty in x is 10% and that dy = 2*x*dx.  'np.random.uniform()' 
# generates a random number uniformly distributed between 0 and 1.  
# You can confirm for yourself that a + (b - a)*np.random.uniform() generates 
# a random number between a and b.
# Add the noise in one vectorized step rather than an index loop that
# appends to lists: the original expression -1 + (1 - (-1))*uniform() is
# exactly a draw from the uniform distribution on [-1, 1), which
# np.random.uniform provides directly.  Each x gets up to +/-10% jitter
# and y picks up the propagated uncertainty dy = 2*x*dx.
noise_x = np.random.uniform(-1, 1, size=len(xdata))
noise_y = np.random.uniform(-1, 1, size=len(xdata))
xN = xdata + 0.1 * xdata * noise_x
yN = xdata**2 + 2 * xdata * 0.1 * xdata * noise_y
plt.figure()
plt.plot(xN, yN, 'go', fillstyle='none')
# Numerical derivative of the noisy data, plotted against the right-hand
# endpoints (same length bookkeeping as before).
dyNdx = np.diff(yN) / np.diff(xN)
xN1 = xN[1:]
plt.figure()
plt.plot(xN1, dyNdx, 'o', fillstyle='none')

# As you might have anticipated, taking the ratio of the differences between a
# pair of noisy datasets results in even more fluctuations.  Obtaining
# clean derivatives from discrete datasets requires precision measurements.