Spark随机森林交叉验证错误

麦克

我正在尝试在Spark中的随机森林上运行交叉验证。

from pyspark.ml import Pipeline
from pyspark.ml.classification import RandomForestClassifier
from pyspark.ml.tuning import CrossValidator, ParamGridBuilder
from pyspark.ml.evaluation import MulticlassClassificationEvaluator

data = nds.sc.parallelize([
 LabeledPoint(0.0, [0,402,6,0]),
 LabeledPoint(0.0, [3,500,3,0]),
 LabeledPoint(1.0, [1,590,1,1]),
 LabeledPoint(1.0, [3,328,5,0]),
 LabeledPoint(1.0, [4,351,4,0]),
 LabeledPoint(0.0, [2,372,2,0]),
 LabeledPoint(0.0, [4,302,5,0]),
 LabeledPoint(1.0, [1,387,2,0]),
 LabeledPoint(1.0, [1,419,3,0]),
 LabeledPoint(0.0, [1,370,5,0]),
 LabeledPoint(0.0, [1,410,4,0]),
 LabeledPoint(0.0, [2,509,7,1]),
 LabeledPoint(0.0, [1,307,5,0]),
 LabeledPoint(0.0, [0,424,4,1]),
 LabeledPoint(0.0, [1,509,2,1]),
 LabeledPoint(1.0, [3,361,4,0]),
 ])


train=data.toDF(['label','features'])

numfolds =2

rf = RandomForestClassifier(labelCol="label", featuresCol="features")
evaluator = MulticlassClassificationEvaluator()  


paramGrid = ParamGridBuilder().addGrid(rf.maxDepth,      
[4,8,10]).addGrid(rf.impurity, ['entropy','gini']).addGrid(rf.featureSubsetStrategy, [6,8,10]).build()

pipeline = Pipeline(stages=[rf])

crossval = CrossValidator(
    estimator=pipeline,
    estimatorParamMaps=paramGrid,
    evaluator=evaluator,
    numFolds= numfolds)

model = crossval.fit(train)

我收到以下错误

Py4JJavaError                             Traceback (most recent call last)
<ipython-input-87-7ea70f89086a> in <module>()
 66     numFolds=num)
 67 
---> 68 model = crossval.fit(train)

/opt/spark/current/python/pyspark/ml/pipeline.py in fit(self, dataset, params)
 67                 return self.copy(params)._fit(dataset)
 68             else:
---> 69                 return self._fit(dataset)
 70         else:
 71             raise ValueError("Params must be either a param map or a list/tuple of param maps, "

/opt/spark/current/python/pyspark/ml/tuning.py in _fit(self, dataset)
237             train = df.filter(~condition)
238             for j in range(numModels):
--> 239                 model = est.fit(train, epm[j])
240                 # TODO: duplicate evaluator to take extra params from input
241                 metric = eva.evaluate(model.transform(validation, epm[j]))

/opt/spark/current/python/pyspark/ml/pipeline.py in fit(self, dataset, params)
 65         elif isinstance(params, dict):
 66             if params:
---> 67                 return self.copy(params)._fit(dataset)
 68             else:
 69                 return self._fit(dataset)

/opt/spark/current/python/pyspark/ml/pipeline.py in _fit(self, dataset)
211                     dataset = stage.transform(dataset)
212                 else:  # must be an Estimator
--> 213                     model = stage.fit(dataset)
214                     transformers.append(model)
215                     if i < indexOfLastEstimator:

/opt/spark/current/python/pyspark/ml/pipeline.py in fit(self, dataset, params)
 67                 return self.copy(params)._fit(dataset)
 68             else:
---> 69                 return self._fit(dataset)
 70         else:
 71             raise ValueError("Params must be either a param map or a list/tuple of param maps, "

/opt/spark/current/python/pyspark/ml/wrapper.py in _fit(self, dataset)
130 
131     def _fit(self, dataset):
--> 132         java_model = self._fit_java(dataset)
133         return self._create_model(java_model)
134 

/opt/spark/current/python/pyspark/ml/wrapper.py in _fit_java(self, dataset)
126         :return: fitted Java model
127         """
--> 128         self._transfer_params_to_java()
129         return self._java_obj.fit(dataset._jdf)
130 

/opt/spark/current/python/pyspark/ml/wrapper.py in _transfer_params_to_java(self)
 80         for param in self.params:
 81             if param in paramMap:
---> 82                 pair = self._make_java_param_pair(param, paramMap[param])
 83                 self._java_obj.set(pair)
 84 

/opt/spark/current/python/pyspark/ml/wrapper.py in _make_java_param_pair(self, param, value)
 71         java_param = self._java_obj.getParam(param.name)
 72         java_value = _py2java(sc, value)
---> 73         return java_param.w(java_value)
 74 
 75     def _transfer_params_to_java(self):

   /opt/spark/current/python/lib/py4j-0.9-src.zip/py4j/java_gateway.py in __call__(self, *args)
811         answer = self.gateway_client.send_command(command)
812         return_value = get_return_value(
   --> 813             answer, self.gateway_client, self.target_id, self.name)
814 
815         for temp_arg in temp_args:

/opt/spark/current/python/pyspark/sql/utils.py in deco(*a, **kw)
 43     def deco(*a, **kw):
 44         try:
    ---> 45             return f(*a, **kw)
 46         except py4j.protocol.Py4JJavaError as e:
 47             s = e.java_exception.toString()

/opt/spark/current/python/lib/py4j-0.9-src.zip/py4j/protocol.py in get_return_value(answer, gateway_client, target_id, name)
306                 raise Py4JJavaError(
307                     "An error occurred while calling {0}{1}{2}.\n".
--> 308                     format(target_id, ".", name), value)
309             else:
310                 raise Py4JError(

Py4JJavaError: An error occurred while calling o1434.w.
: java.lang.ClassCastException: java.lang.Integer cannot be cast to      java.lang.String
at org.apache.spark.ml.tree.RandomForestParams$$anonfun$5.apply(treeParams.scala:340)
at org.apache.spark.ml.param.Param.validate(params.scala:71)
at org.apache.spark.ml.param.ParamPair.<init>(params.scala:509)
at org.apache.spark.ml.param.Param.$minus$greater(params.scala:85)
at org.apache.spark.ml.param.Param.w(params.scala:82)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:497)
at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:231)
at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:381)
at py4j.Gateway.invoke(Gateway.java:259)
at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:133)
at py4j.commands.CallCommand.execute(CallCommand.java:79)
at py4j.GatewayConnection.run(GatewayConnection.java:209)
at java.lang.Thread.run(Thread.java:745)

看来paramGrid没有将我的输入作为列表读取。是否有其他格式或解决方法。任何帮助,将不胜感激。

零323

您将错误的值传递给rf.featureSubsetStrategy它应该是描述策略的字符串,并且支持以下值:自动,全部,三分之一,sqrt,log2。请参阅:RandomForestClassifier.featureSubsetStrategy.doc

也不要使用data.toDF(['label','features'])它不会保留正确的顺序。用:

data.toDF()

或者如果您想修改名称:

from operator import attrgetter

data.map(attrgetter("label", "features")).toDF(["some_name", "some_other_name"])

最后,必须对标签列进行索引,或者您必须提供所需的元数据。请参见如何在XML中将列声明为DataFrame中的分类特征

本文收集自互联网,转载请注明来源。

如有侵权,请联系[email protected] 删除。

编辑于
0

我来说两句

0条评论
登录后参与评论

相关文章

来自分类Dev

Spark随机森林交叉验证错误

来自分类Dev

Spark随机森林错误

来自分类Dev

尽管非常成功的交叉验证结果与随机森林过度拟合

来自分类Dev

交叉验证WEKA随机

来自分类Dev

在R中留下一个ID交叉验证的情况下计算随机森林

来自分类Dev

如何在Scikit学习中将固定的验证集(而非K折交叉验证)用于决策树分类器/随机森林分类器?

来自分类Dev

Spark 1.5.1,MLLIb随机森林概率

来自分类Dev

交叉验证错误

来自分类Dev

交叉验证错误

来自分类常见问题

什么是“随机森林”中的出库错误?

来自分类Dev

插入符号包中的“随机森林”错误

来自分类Dev

插入符号包中的“随机森林”错误

来自分类Dev

R内存错误中的随机森林

来自分类Dev

R中的随机森林-适用于测试/验证集

来自分类Dev

用于回归的 Spark ML 随机森林和梯度提升树

来自分类Dev

如何为随机森林修复“ eval()中的错误”?

来自分类Dev

如何为随机森林修复“ eval()中的错误”?

来自分类Dev

OpenCV抛出错误。尝试使用随机森林模型

来自分类Dev

Python - 使用 scikit 学习随机森林关于值格式的错误

来自分类Dev

我在随机森林分类器中遇到 Not Fitted 错误?

来自分类Dev

尝试使用交叉验证时出现错误

来自分类Dev

OnevsrestClassifier和随机森林

来自分类Dev

Python中的随机森林

来自分类Dev

接近矩阵-随机森林

来自分类Dev

剧情图例随机森林

来自分类Dev

并行化随机森林

来自分类Dev

随机森林过度拟合

来自分类Dev

随机森林的可能算法

来自分类Dev

随机森林预测值