Everything works well when I run BayesianOptimization with XGBoost on a small dataframe, but when I use a large dataframe I get a variety of errors. The most recent time I ran it, I received:
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
~\anaconda3\envs\tf-gpu\lib\site-packages\bayes_opt\target_space.py in probe(self, params)
190 try:
--> 191 target = self._cache[_hashable(x)]
192 except KeyError:
KeyError: (0.5414533613708468, 0.4244312334264955, 2.505215712179838, 3.29866527650986, 6937.176867016293, 0.833081120152225)
During handling of the above exception, another exception occurred:
XGBoostError Traceback (most recent call last)
<ipython-input-2-ef2efb90fddb> in <module>
53 # Use the expected improvement acquisition function to handle negative numbers
54 # Optimally needs quite a few more initiation points and number of iterations
---> 55 xgb_bo.maximize(init_points=10, n_iter=100, acq='ei')
56 print(xgb_bo.max)
~\anaconda3\envs\tf-gpu\lib\site-packages\bayes_opt\bayesian_optimization.py in maximize(self, init_points, n_iter, acq, kappa, xi, **gp_params)
172 iteration += 1
173
--> 174 self.probe(x_probe, lazy=False)
175
176 self.dispatch(Events.OPTIMIZATION_END)
~\anaconda3\envs\tf-gpu\lib\site-packages\bayes_opt\bayesian_optimization.py in probe(self, params, lazy)
110 self._queue.add(params)
111 else:
--> 112 self._space.probe(params)
113 self.dispatch(Events.OPTIMIZATION_STEP)
114
~\anaconda3\envs\tf-gpu\lib\site-packages\bayes_opt\target_space.py in probe(self, params)
192 except KeyError:
193 params = dict(zip(self._keys, x))
--> 194 target = self.target_func(**params)
195 self.register(x, target)
196 return target
<ipython-input-2-ef2efb90fddb> in xgb_evaluate(max_depth, gamma, colsample_bytree, subsample, eta, min_child_weight)
38 }
39 # Used around 1000 boosting rounds in the full model
---> 40 cv_result = xgb.cv(params, dtrain, num_boost_round=100, nfold=3)
41
42
~\anaconda3\envs\tf-gpu\lib\site-packages\xgboost\training.py in cv(params, dtrain, num_boost_round, nfold, stratified, folds, metrics, obj, feval, maximize, early_stopping_rounds, fpreproc, as_pandas, verbose_eval, show_stdv, seed, callbacks, shuffle)
496 evaluation_result_list=None))
497 for fold in cvfolds:
--> 498 fold.update(i, obj)
499 res = aggcv([f.eval(i, feval) for f in cvfolds])
500
~\anaconda3\envs\tf-gpu\lib\site-packages\xgboost\training.py in update(self, iteration, fobj)
224 def update(self, iteration, fobj):
225 """"Update the boosters for one iteration"""
--> 226 self.bst.update(self.dtrain, iteration, fobj)
227
228 def eval(self, iteration, feval):
~\anaconda3\envs\tf-gpu\lib\site-packages\xgboost\core.py in update(self, dtrain, iteration, fobj)
1367 _check_call(_LIB.XGBoosterUpdateOneIter(self.handle,
1368 ctypes.c_int(iteration),
-> 1369 dtrain.handle))
1370 else:
1371 pred = self.predict(dtrain, output_margin=True, training=True)
~\anaconda3\envs\tf-gpu\lib\site-packages\xgboost\core.py in _check_call(ret)
188 """
189 if ret != 0:
--> 190 raise XGBoostError(py_str(_LIB.XGBGetLastError()))
191
192
>XGBoostError: [07:47:25] C:/Users/Administrator/workspace/xgboost-win64_release_1.1.0/src/tree/updater_gpu_hist.cu:952: Exception in gpu_hist: bad allocation: temporary_buffer::allocate: get_temporary_buffer failed
Before that, I received:
>OSError: [WinError -529697949] Windows Error 0xe06d7363
Here's the code that I am running.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
number_of_rows = 10000000
print('reading data')
train_file = "D:\\reading_data/training_data.csv"
df = pd.read_csv(train_file, sep=',', nrows=number_of_rows)
df = df.replace([np.inf, -np.inf], np.nan).dropna(axis=0)
print('done reading data')
import xgboost as xgb
from bayes_opt import BayesianOptimization
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(df.drop(['Symbol', 'new_date', 'old_dep_var'], axis=1),
                                                    df['old_dep_var'], test_size=0.25)
# Drop the pandas objects as soon as they are no longer needed to save host RAM.
del df
dtrain = xgb.DMatrix(X_train, label=y_train)
del X_train
dtest = xgb.DMatrix(X_test)
del X_test
def xgb_evaluate(max_depth, gamma, colsample_bytree, subsample, eta, min_child_weight):
    # Use the hyperparameters suggested by the optimizer for this evaluation.
    params = {'objective': 'reg:squarederror',
              'eval_metric': 'rmse',
              'max_depth': int(max_depth),
              'subsample': subsample,
              'eta': eta,
              'gamma': gamma,
              'colsample_bytree': colsample_bytree,
              'tree_method': 'gpu_hist',  # builds trees on the GPU, so the training data is copied into GPU memory
              'min_child_weight': min_child_weight
              }
    # Used around 1000 boosting rounds in the full model
    cv_result = xgb.cv(params, dtrain, num_boost_round=100, nfold=3)
    # BayesianOptimization maximizes, so return the negative RMSE
    return -1.0 * cv_result['test-rmse-mean'].iloc[-1]
xgb_bo = BayesianOptimization(xgb_evaluate, {'max_depth': (2, 10),
                                             'gamma': (0, 10),
                                             'colsample_bytree': (0.1, 0.9),
                                             'subsample': (0.2, 0.95),
                                             'eta': (0.05, 0.5),
                                             'min_child_weight': (10, 10000)
                                             })
# Use the expected improvement acquisition function to handle negative numbers
# Ideally needs quite a few more init points and iterations
xgb_bo.maximize(init_points=10, n_iter=100, acq='ei')
print(xgb_bo.max)
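From what I can tell, the bad allocation inside gpu_hist means a GPU memory allocation failed. Here is a rough sanity check I can run right after building dtrain (a minimal sketch; the dense float32 estimate and the roughly 3x working set for nfold=3 are assumptions on my part, not exact numbers):

import subprocess

# Rough dense float32 estimate of the training matrix; XGBoost's internal
# layout differs (gpu_hist quantizes features into bins), so this is only an
# order-of-magnitude check, not the exact GPU footprint.
approx_gb = dtrain.num_row() * dtrain.num_col() * 4 / 1e9
print(f"dtrain ~ {approx_gb:.1f} GB (dense float32 estimate)")

# xgb.cv with nfold=3 keeps three train/test splits alive at once, so I assume
# the working set during cross-validation is roughly 3x that.
print(f"cv working set ~ {3 * approx_gb:.1f} GB")

# Compare against what nvidia-smi reports for the GPU.
print(subprocess.run(['nvidia-smi', '--query-gpu=memory.used,memory.total', '--format=csv'],
                     capture_output=True, text=True).stdout)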
I've tried: 1) reinstalling bayesian-optimization, 2) creating and using a new conda environment, and 3) installing and running Outbyte PC Repair to update all drivers, fix corrupt system files, and eliminate malware.
I have no idea what else to try. Any help would be appreciated.