Linear Regression using Gradient Descent

For detailed explanation
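
The script below fits a line h(x) = m x + b to the data by gradient descent on the mean squared error. For reference, the quantities computed in gradient() correspond to

E(m, b) = \frac{1}{n} \sum_{i=1}^{n} \left( m x_i + b - y_i \right)^2

\frac{\partial E}{\partial m} = \frac{2}{n} \sum_{i=1}^{n} x_i \left( m x_i + b - y_i \right)
\qquad
\frac{\partial E}{\partial b} = \frac{2}{n} \sum_{i=1}^{n} \left( m x_i + b - y_i \right)

and each iteration updates m and b by subtracting learningRate times the corresponding partial derivative.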

import numpy as np

# training data: y = x**2, so a straight line can only approximate it
x = np.array([1, 2, 3, 4, 5])
y = np.array([1, 4, 9, 16, 25])

# step size and number of gradient descent updates
learningRate = 0.0001
iterations = 200000

def gradient(m, b):
  m_gradient = 0
  b_gradient = 0

  # accumulate the partial derivatives of the squared error over all points
  for i in range(x.size):
    h = (m * x[i] + b)              # prediction for x[i]
    m_gradient += x[i] * (h - y[i])
    b_gradient += (h - y[i])

  # scale to obtain the gradient of the mean squared error
  m_gradient *= (2.0/x.size)
  b_gradient *= (2.0/x.size)

  # take one gradient descent step
  m_new = m - (learningRate * m_gradient)
  b_new = b - (learningRate * b_gradient)

  return [m_new, b_new]

def gradient_iterations(m, b):
  # repeatedly apply the gradient descent update for a fixed number of iterations
  for i in range(iterations):
    [m, b] = gradient(m, b)

  return [m, b]

def computeError(m, b):
  # sum of squared errors of the line y = m*x + b over the data
  error = 0.0
  for i in range(x.size):
    error += (y[i] - (m * x[i] + b)) ** 2

  return error

m = 0
b = 0
[m, b] = gradient_iterations(m, b)
error = computeError(m, b)
print("slope m:", m)
print("intercept b:", b)
print("error:", error)

Link to gist
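
As a quick sanity check (not part of the original gist), the result can be compared against NumPy's closed-form least-squares fit: np.polyfit(x, y, 1) returns the best-fit slope and intercept for the same data, which the gradient descent estimates should approach.

import numpy as np

x = np.array([1, 2, 3, 4, 5])
y = np.array([1, 4, 9, 16, 25])

# closed-form least-squares line, for comparison with the gradient descent result
slope, intercept = np.polyfit(x, y, 1)
print("closed-form slope:", slope)          # approximately 6.0
print("closed-form intercept:", intercept)  # approximately -7.0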

Palindrome in Python

Solution to Project Euler problem 4: find the largest palindrome made from the product of two 3-digit numbers.

def isPalindrome(s):
    # two-pointer check: compare characters from both ends moving inward
    i = 0
    j = len(s) - 1

    while i < j:
        if s[i] != s[j]:
            return False
        i = i + 1
        j = j - 1
    return True

# check every product of two 3-digit numbers and keep the largest palindrome
maxVal = 0
for i in range(100, 1000):
    for j in range(100, 1000):
        prod = i * j
        prodStr = str(prod)
        if isPalindrome(prodStr) and prod > maxVal:
            maxVal = prod

print(maxVal)
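
An equivalent, more compact palindrome check (a sketch, not part of the original solution; isPalindromeSlice is just an illustrative name) uses string slicing, and the inner loop can start at i so each pair of factors is only checked once:

def isPalindromeSlice(s):
    # a string is a palindrome if it equals its own reverse
    return s == s[::-1]

maxVal = 0
for i in range(100, 1000):
    for j in range(i, 1000):      # start at i to avoid checking each pair twice
        prod = i * j
        if isPalindromeSlice(str(prod)) and prod > maxVal:
            maxVal = prod

print(maxVal)  # 906609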