मैं गहन विश्वास अवधारणा का उपयोग करके डेटासेट का प्रशिक्षण और परीक्षण कर रहा हूं। इसके साथ काम करते समय एक numpy.float64 त्रुटि का सामना करना पड़ रहा है:

from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.neural_network import MLPClassifier
from sklearn.neural_network import BernoulliRBM
  


def Deep_belief_network(X,*args):
  x,y,regularization_nn,learning_rate_RBM,learning_rate_nn,n_iter_RBM,batch_size_RBM,batch_size_nn,n_iter_nn=args
  n_components_RBM,n_components_nn_=X
  #n_components_RBM=50
  #n_components_nn_=20
  rbm_model_1=BernoulliRBM(n_components=n_components_RBM,n_iter=n_iter_RBM,learning_rate=learning_rate_RBM,batch_size=batch_size_RBM,verbose=0)
  X_train, X_test, Y_train, Y_test = train_test_split(x,y, test_size=0.1, random_state=0)
  min_max_scaler = MinMaxScaler()
  X_train = min_max_scaler.fit_transform(X_train)
  rbm_train=rbm_model_1.fit_transform(X_train )
  nn_model=MLPClassifier(activation='relu', solver='sgd', alpha=regularization_nn,
      batch_size=batch_size_nn, hidden_layer_sizes=(n_components_nn_), learning_rate='constant',
      learning_rate_init=learning_rate_nn, max_iter=n_iter_nn,shuffle=True, validation_fraction=0.1, verbose=False,random_state=1)
  nn_model.fit(rbm_train,Y_train)


X= [[0.00000000e+00 1.76100000e+03 3.02434414e-02 ... 1.00505479e+01
  6.90634415e+00 1.13641060e+01]
 [1.00000000e+00 5.54300000e+03 3.32540176e-01 ... 9.66597699e+01
  7.81453418e+01 2.94180046e+02]
 [2.00000000e+00 3.44800000e+03 1.35616750e-01 ... 2.77757100e+01
  2.95552513e+01 6.49203725e+01]
 ...
 [5.85600000e+03 3.37100000e+03 1.99433359e-01 ... 1.82494245e+01
  2.96294395e+01 4.89311229e+01]
 [5.85700000e+03 4.59700000e+03 3.33133906e-01 ... 4.38591971e+01
  6.63558086e+01 5.51086311e+01]
 [5.85800000e+03 4.82800000e+03 3.91628974e-01 ... 6.50556871e+01
  1.02210562e+02 5.93112050e+01]]
Y= [0. 0. 0. ... 1. 1. 1.]
X_train =[[-0.78210062  0.14111439 -0.3656736  ... -0.06997126 -0.14909763
  -0.54753418]
 [-0.51177349 -0.77928848 -0.47345634 ... -0.24668018 -0.17406007
  -0.67165109]
 [ 0.07797737 -0.54963283 -0.45941915 ... -0.26573143 -0.21174211
  -0.61647054]
 ...
 [-0.75134129 -1.29734889 -0.59546654 ... -0.328803   -0.25774714
  -0.69402226]
 [-0.18702603 -0.3460879  -0.58283771 ... -0.24237006 -0.26603587
  -0.59759254]
 [-0.11308535 -0.82498224 -0.50012261 ... -0.3001251  -0.22795859
  -0.64925682]]

मुझे नहीं पता कि यह त्रुटि क्यों बढ़ रही है। क्या यह एक आयामी समस्या है ?? मैं अजगर के लिए नया हूँ कृपया इसमें मेरी मदद करें।

कोड चलाते समय मुझे प्राप्त हुई यह ट्रेसबैक त्रुटियां हैं:

     Traceback (most recent call last)
<ipython-input-44-a2940e738482> in <module>()
      1 if __name__ == '__main__':
----> 2     main(X,Y,X_train,Y_train)

5 frames
<ipython-input-43-5182266968ae> in main(X, Y, X_train, Y_train)
     26       best_alpha=swarm.g
     27       '''
---> 28   xopt,fopt=pso(Deep_belief_network,[6,15],[12,60],args=args,maxiter=3,debug=True,swarmsize=60,minstep=0)
     29   print(xopt,fopt)
     30   acc=Deep_belief_network(x=0.00010482,*args)

/usr/local/lib/python3.6/dist-packages/pyswarm/pso.py in pso(func, lb, ub, ieqcons, f_ieqcons, args, kwargs, swarmsize, omega, phip, phig, maxiter, minstep, minfunc, debug)
    109 
    110         # Calculate the objective's value at the current particle's
--> 111         fp[i] = obj(p[i, :])
    112 
    113         # At the start, there may not be any feasible starting point, so just

/usr/local/lib/python3.6/dist-packages/pyswarm/pso.py in <lambda>(x)
     72 
     73     # Check for constraint function(s) #########################################
---> 74     obj = lambda x: func(x, *args, **kwargs)
     75     if f_ieqcons is None:
     76         if not len(ieqcons):

<ipython-input-42-2d0aad95eafd> in Deep_belief_network(X, *args)
     12   min_max_scaler = MinMaxScaler()
     13   X_train = min_max_scaler.fit_transform(X_train)
---> 14   rbm_train=rbm_model_1.fit_transform(X_train )
     15   nn_model=MLPClassifier(activation='relu', solver='sgd', alpha=regularization_nn,
     16       batch_size=batch_size_nn, hidden_layer_sizes=(n_components_nn_), learning_rate='constant',

/usr/local/lib/python3.6/dist-packages/sklearn/base.py in fit_transform(self, X, y, **fit_params)
    569         if y is None:
    570             # fit method of arity 1 (unsupervised transformation)
--> 571             return self.fit(X, **fit_params).transform(X)
    572         else:
    573             # fit method of arity 2 (supervised transformation)

/usr/local/lib/python3.6/dist-packages/sklearn/neural_network/_rbm.py in fit(self, X, y)
    342 
    343         self.components_ = np.asarray(
--> 344             rng.normal(0, 0.01, (self.n_components, X.shape[1])),
    345             order='F')
    346         self.intercept_hidden_ = np.zeros(self.n_components, )

mtrand.pyx in numpy.random.mtrand.RandomState.normal()

_common.pyx in numpy.random._common.cont()

TypeError: 'numpy.float64' object cannot be interpreted as an integer
0
Java Programmer 29 अक्टूबर 2020, 07:54

1 उत्तर

सबसे बढ़िया उत्तर

n_components_RBM (और शायद n_components_nn_) शायद एक पूर्णांक नहीं बल्कि एक अस्थायी बिंदु है। BernoulliRBM को पास करने से पहले आप इसे पूर्णांक में बदल सकते हैं:

n_components_RBM, n_components_nn_ = X
n_components_RBM = int(n_components_RBM)
n_components_nn_ = int(n_components_nn_)

या आप अपने फ़ंक्शन के तर्क के रूप में एक पूर्णांक सूची X पास कर सकते हैं।

1
Tirth Patel 29 अक्टूबर 2020, 07:07