
All the arguments required by a user-defined function are given, yet it says an argument is missing [closed]

Thread starter: Soham Ghodake (Guest)
Thanks for stopping by. I am an amateur at building neural networks, and I ran into a problem with the number of arguments a function takes (the function is self.net.loss.forward). It requires two arguments, and these are supplied as x_batch and y_batch, yet Python says one argument is missing. The error message and the code are attached below.

I went through all the parent classes to check whether I could have mistakenly passed an extra argument somewhere, but I found no such mistake. The error is TypeError: forward() missing 1 required positional argument: 'target'. A minimal sketch of one standard way Python produces an error of this shape is given below; the full code follows it.
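A minimal self-contained sketch (ExampleLoss and the other names in it are hypothetical, not taken from the code below): in Python, when a method is called through the class itself rather than through an instance, the first explicit argument binds to self, so a two-argument method reports its last argument as missing.

Code:
# Minimal sketch: how "forward() missing 1 required positional argument: 'target'"
# can arise in general. All names here are hypothetical.
class ExampleLoss:
    def forward(self, preds, target):
        return preds - target

print(ExampleLoss().forward(1.0, 2.0))  # OK: self is bound by the instance

try:
    ExampleLoss.forward(1.0, 2.0)  # called on the class: 1.0 binds to self,
except TypeError as err:           # 2.0 binds to preds, and target is missing
    print(err)  # forward() missing 1 required positional argument: 'target'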

Code:
# %%
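# From-scratch fully connected neural network, trained on the Boston housing
# data fetched via sklearn.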
import numpy as np 
from numpy import ndarray

from typing import List
import copy

# %%
from sklearn.datasets import fetch_openml 
boston = fetch_openml(name='boston', version=1) 
data = boston['data']
target = boston['target']
features = boston['feature_names']

# %%
from sklearn.preprocessing import StandardScaler
s = StandardScaler()
data = s.fit_transform(data)

# %%
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(data,target,test_size = 0.30,random_state=8071)
y_train = np.reshape(y_train,(-1,1))
y_test = np.reshape(y_test,(-1,1))

# %%
def permute_data(X:ndarray,y:ndarray):
    p = np.random.permutation(X.shape[0])
    return X[p],y[p]

# %%
def check_shape(a1:ndarray,a2:ndarray):
    assert a1.ndim == a2.ndim, \
    "The dimensions of both arrays should be the same; instead the first array has dimension {0} and the second has {1}".format(a1.ndim,a2.ndim)
    
    assert a1.shape == a2.shape, \
    "The shapes of the arrays do not match; the first array has shape {0} whereas the second has shape {1}".format(a1.shape,a2.shape)
    
    return None

# %%
class Operation(object):
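    # Base building block: forward() stores the input and computes the output via
    # _output(); backward() returns the gradient w.r.t. the input via _input_grad().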
    def __init__(self):
        pass
    def forward(self,input_:ndarray):
        self.input_ = input_
        self.output = self._output()
        
        return self.output
    
    def backward(self,output_grad:ndarray):       # Returns Gradient with respect to input
        self.input_grad = self._input_grad(output_grad)

        return self.input_grad
    
    def _output(self):
        raise NotImplementedError
    
    def _input_grad(self,output_grad:ndarray):
        raise NotImplementedError


# %%
class ParamOperation(Operation):
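    # An Operation that carries a learnable parameter; backward() additionally
    # computes and stores the gradient w.r.t. that parameter.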
    def __init__(self,params:ndarray):
        super().__init__()
        self.param = params
    
    def backward(self,output_grad:ndarray):
        self.input_grad = self._input_grad(output_grad)
        self.param_grad = self._param_grad(output_grad)

        return self.input_grad
    
    def _param_grad(self,output_grad:ndarray):
        raise NotImplementedError

# %%
class WeightsMultiply(ParamOperation):
    def __init__(self,W:ndarray):  # W refers to weights
        super().__init__(W)
    
    def _output(self):
        return np.dot(self.input_,self.param)
    
    def _input_grad(self, output_grad: ndarray):
        return np.transpose(self.param) * output_grad
    
    def _param_grad(self,output_grad:ndarray):
        return np.transpose(self.input_) * output_grad

# %%
class BiasAdd(ParamOperation):
    def __init__(self,Bias:ndarray):
        assert Bias.shape[0] == 1
        super().__init__(Bias)

    def _output(self):
        return self.input_ + self.param
    
    def _input_grad(self,output_grad:ndarray):
        return np.transpose(self.param) * output_grad
    
    def _param_grad(self, output_grad: ndarray):
        return np.transpose(self.input_) * output_grad

# %%
class Sigmoid(Operation):
    def __init__(self):
        super().__init__()
    
    def _output(self):
        return 1/(1+np.exp(-1 * self.input_))
    
    def _input_grad(self,output_grad:ndarray):
       back = self.output * (1 - self.output)  # sigmoid derivative: s * (1 - s)
       input_grad = back * output_grad
       return input_grad

# %%
class Linear(Operation):
    def __init__(self):
        super().__init__()

    def _output(self):
        return self.input_
    
    def _input_grad(self,output_grad:ndarray):
        return output_grad

# %%
class Layer(object):
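    # A sequence of Operations: forward() lazily sets the layer up on the first
    # call and then chains the operations; backward() walks them in reverse and
    # collects the parameter gradients.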
    def __init__(self,neurons:int):
        self.neurons = neurons
        self.operations: List[Operation] = []
        self.params : List[ndarray]=[]
        self.param_grads : List[ndarray]= []
        self.first = True
    
    def forward(self,input_:ndarray):
        if self.first:
            self._setup_layer(input_)
            self.first = False

        self.input_ = input_ # try removing this and see

        for operation in self.operations:
            input_ = operation.forward(input_)

        self.output_ =input_
        
        return self.output_
    
    def backward(self,output_grad:ndarray):
        for operation in reversed(self.operations):
            output_grad = operation.backward(output_grad)
        
        input_grad = output_grad

        self._param_grads()
    
        return input_grad
    
    def _setup_layer(self,input_:ndarray):
        raise NotImplementedError   
    
    def _param_grads(self):
        self.param_grads = [] # try removing this and see

        for operation in reversed(self.operations):
            if issubclass(operation.__class__,ParamOperation):
                self.param_grads.append(operation.param_grad)

    def _params(self):
        self.params =[]

        for operation in self.operations:
            if issubclass(operation.__class__,ParamOperation):
                self.params.append(operation.param)

# %%
class Dense(Layer):
    def __init__(self,neurons:int,activation:Operation):

        super().__init__(neurons)
        self.activation = activation

    def _setup_layer(self,input_:ndarray):

        if self.seed:
            np.random.seed(self.seed)

        self.params =[]

        self.params.append(np.random.randn(input_.shape[1],self.neurons))
        self.params.append(np.random.randn(1,self.neurons))

        self.operations = [WeightsMultiply(self.params[0]),BiasAdd(self.params[1]),self.activation]

        return None
    

# %%
class Loss(object):
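    # Base loss: forward() stores predictions and targets and computes the loss
    # via _output(); backward() returns the loss gradient via _input_grad().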
    def __init__(self):
        pass

    def forward(self,preds:ndarray,target:ndarray):

        self.prediction = preds
        
        self.target = target

        loss_value = self._output()

        return loss_value
    
    def backward(self):

        self.input_grad = self._input_grad()

        return self.input_grad
    
    def _output(self):

        raise NotImplementedError
    
    def _input_grad(self):

        raise NotImplementedError

# %%
class MSE(Loss):
    def __init__(self):
        super().__init__()
    
    def _output(self):

        loss = np.sum(np.power(self.prediction - self.target,2))/self.prediction.shape[0]

        return loss
    
    def _input_grad(self):
        return (-1 * 2 *(self.prediction - self.target))/self.prediction.shape[0]

# %%
class NeuralNetwork(object):
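    # Chains the layers: forward() pushes a batch through every layer, backward()
    # propagates the loss gradient back through them, train_batch() combines both.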
    def __init__(self,layers:List[Layer],loss:Loss,seed:int =1):
        self.layers = layers
        self.loss = loss
        self.seed = seed

        if self.seed:
            for layer in self.layers:
                setattr(layer,'seed',self.seed)

    def forward(self,x_batch:ndarray):

        x_out= x_batch

        for layer in self.layers:
            x_out = layer.forward(x_out)

        output_ = x_out

        return output_
    
    def backward(self,loss_grad:ndarray): # Only calculates and stores the value

        grad = loss_grad

        for layer in reversed(self.layers):
            grad = layer.backward(grad)

        return None
    
    def train_batch(self,x_batch:ndarray,y_batch:ndarray): # Returns loss and stores gradients

        preds = self.forward(x_batch)

        loss = self.loss.forward(preds,y_batch)
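        # ^ the reported TypeError ("forward() missing 1 required positional
        #   argument: 'target'") is raised by this call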

        self.backward(self.loss.backward())

        return loss
    
    def params(self):
        for layer in self.layers:
            yield from layer.params

    def param_grads(self):
        for layer in self.layers:
            yield from layer.param_grads

# %%
class Optimizer(object):
    def __init__(self,learning_rate):
        self.lr = learning_rate

    def _step(self):
        raise NotImplementedError # Try pass here

# %%
class SGD(Optimizer):
    def __init__(self,learning_rate):
        super().__init__(learning_rate)
    
    def _step(self):
        for param, param_grad in zip(self.net.params(), self.net.param_grads()):
            param -= self.lr * param_grad

# %%
class Train(object):
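    # Training loop: pairs a NeuralNetwork with an Optimizer, generates
    # mini-batches, and runs train_batch() plus an optimizer step per batch.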
    def __init__(self,net:NeuralNetwork,optim:Optimizer,):
        self.net = net
        self.optim = optim
        setattr(self.optim,'net',self.net)
    
    def generate_batches(self,X:ndarray,y:ndarray,size:int = 39):
        N = X.shape[0]

        for ii in range(0, N, size):  # step through the data in batch-sized chunks
            X_batch,y_batch =  X[ii:ii+ size],y[ii:ii+size]
            yield X_batch,y_batch # try using return instead and see

    def fit(self,X_train:ndarray,y_train:ndarray,epochs:int = 100,batch_size:int = 32,seed:int = 1,restart:bool = True):
        np.random.seed(seed)

        if restart:
            for layer in self.net.layers:
                layer.first = True
        
        self.max_loss = 1e9

        for e in range(epochs):
            X_train, y_train = permute_data(X_train,y_train)
            batch_generator= self.generate_batches(X_train,y_train,batch_size)

            for ii,(X_batch,y_batch) in enumerate(batch_generator):
                self.net.train_batch(X_batch,y_batch)
                self.optim._step()

# %%
def mae(y_true:ndarray,y_pred:ndarray):
    return np.mean(np.abs(y_true-y_pred))

# %%
def eval_model(model:NeuralNetwork,X_test:ndarray,y_test:ndarray):
    preds = model.forward(X_test)
    preds = preds.reshape(-1,1)
    print(mae(y_test,preds))

# %%
nn = NeuralNetwork([Dense(13,Sigmoid()),Dense(1,Linear())],MSE)
trainer = Train(nn,SGD(0.001))
trainer.fit(X_train,y_train)
eval_model(nn,X_test,y_test)

