rotation+forest

= Rotation Forest =
toc include component="wikiList" wikiListType="member" limit="10"
其实这个我一开始不太懂：既然是“旋转”（或者说翻转）的森林，直观上就有一个序列的问题——基分类器第一次和第二次产生时所用的特征会有所不同，组合分类器的差异性正是由此而来。关于 Rotation Forest 的介绍在论文《Rotation Forest: A New Classifier Ensemble Method》中可以看到，以下是文章的英文简介： We propose a method for generating classifier ensembles based on feature extraction. To create the training data for a base classifier, the feature set is randomly split into K subsets (K is a parameter of the algorithm) and Principal Component Analysis (PCA) is applied to each subset. All principal components are retained in order to preserve the variability information in the data. Thus, K axis rotations take place to form the new features for a base classifier. The idea of the rotation approach is to encourage simultaneously individual accuracy and diversity within the ensemble. Diversity is promoted through the feature extraction for each base classifier. Decision trees were chosen here because they are sensitive to rotation of the feature axes, hence the name "forest." Accuracy is sought by keeping all principal components and also using the whole data set to train each base classifier. Using WEKA, we examined the Rotation Forest ensemble on a random selection of 33 benchmark data sets from the UCI repository and compared it with Bagging, AdaBoost, and Random Forest. The results were favorable to Rotation Forest and prompted an investigation into diversity-accuracy landscape of the ensemble models. Diversity-error diagrams revealed that Rotation Forest ensembles construct individual classifiers which are more accurate than these in AdaBoost and Random Forest, and more diverse than these in Bagging, sometimes more accurate as well. 翻译一下： 这是一种基于特征提取的组合分类方法，它分别为每个基分类器创建训练数据集合。新数据集的创建需要把原始属性集合划分为 K 个子集，并在每个子集上使用主成分分析（PCA）。所有主成分都被保留，为的是保留数据中的变异性信息。于是每个基分类器的训练集合的新属性都经过了 K 个轴的旋转。这个方法的特点是同时促进单个分类器的正确性和差异性，以便于为组合分类器服务。差异性体现在各基分类器的特征提取上；基分类器使用决策树方法，因为它对于特征轴的旋转非常敏感，这是题目叫做“森林”的原因。正确性则是通过保留所有的主成分，还有使用整个数据集来训练每个基分类器来实现的。
在 weka 的实验平台上，我们使用 33 个来自 UCI 的标准数据集，与 Bagging、AdaBoost 还有 Random Forest 比较，结果对 Rotation Forest 更有利。差异-错误图（diversity-error diagram）显示其单个基分类器的正确性要比 AdaBoost 和 Random Forest 的基分类器好，而且差异性比 Bagging 好，有时候正确性也相对较好。THAT'S ALL.
= 下面是 weka 代码分析 =

code /**	 * builds the classifier. *	 * @param data *           the training data to be used for generating the classifier. * @throws Exception *            if the classifier could not be built successfully */	public void buildClassifier(Instances data) throws Exception {

// can classifier handle the data? getCapabilities.testWithFail(data);

data = new Instances(data); super.buildClassifier(data);

checkMinMax(data);

Random random; if (data.numInstances > 0) { **// This function fails if there are 0 instances** random = data.getRandomNumberGenerator(m_Seed); } else { random = new Random(m_Seed); }

m_RemoveUseless = new RemoveUseless; //FILTER m_RemoveUseless.setInputFormat(data); data = Filter.useFilter(data, m_RemoveUseless);

m_Normalize = new Normalize;                //规范化 m_Normalize.setInputFormat(data); data = Filter.useFilter(data, m_Normalize);

if (m_NumberOfGroups) { generateGroupsFromNumbers(data, random); } else { generateGroupsFromSizes(data, random); }

m_ProjectionFilters = new Filter[m_Groups.length][]; for (int i = 0; i < m_ProjectionFilters.length; i++) { m_ProjectionFilters[i] = Filter.makeCopies(m_ProjectionFilter,					m_Groups[i].length); }

int numClasses = data.numClasses;

// Split the instances according to their class Instances[] instancesOfClass = new Instances[numClasses + 1]; if (data.classAttribute.isNumeric) { instancesOfClass = new Instances[numClasses]; instancesOfClass[0] = data; } else { instancesOfClass = new Instances[numClasses + 1]; for (int i = 0; i < instancesOfClass.length; i++) { instancesOfClass[i] = new Instances(data, 0); }			Enumeration enu = data.enumerateInstances; while (enu.hasMoreElements) { Instance instance = (Instance) enu.nextElement; if (instance.classIsMissing) { instancesOfClass[numClasses].add(instance); } else { int c = (int) instance.classValue; instancesOfClass[c].add(instance); }			}			// If there are not instances with a missing class, we do not need // to			// consider them if (instancesOfClass[numClasses].numInstances == 0) { Instances[] tmp = instancesOfClass; instancesOfClass = new Instances[numClasses]; System.arraycopy(tmp, 0, instancesOfClass, 0, numClasses); }		}

// These arrays keep the information of the transformed data set m_Headers = new Instances[m_Classifiers.length]; m_ReducedHeaders = new Instances[m_Classifiers.length][];

// Construction of the base classifiers for (int i = 0; i < m_Classifiers.length; i++) { m_ReducedHeaders[i] = new Instances[m_Groups[i].length]; FastVector transformedAttributes = new FastVector(data					.numAttributes);

// Construction of the dataset for each group of attributes for (int j = 0; j < m_Groups[i].length; j++) { FastVector fv = new FastVector(m_Groups[i][j].length + 1); for (int k = 0; k < m_Groups[i][j].length; k++) { fv.addElement(data.attribute(m_Groups[i][j][k]).copy); }				fv.addElement(data.classAttribute.copy); Instances dataSubSet = new Instances("rotated-" + i + "-" + j						+ "-", fv, 0); dataSubSet.setClassIndex(dataSubSet.numAttributes - 1);

// Select instances for the dataset m_ReducedHeaders[i][j] = new Instances(dataSubSet, 0); boolean[] selectedClasses = selectClasses(						instancesOfClass.length, random); for (int c = 0; c < selectedClasses.length; c++) { if (!selectedClasses[c]) continue; Enumeration enu = instancesOfClass[c].enumerateInstances; while (enu.hasMoreElements) { Instance instance = (Instance) enu.nextElement; Instance newInstance = new Instance(dataSubSet								.numAttributes); newInstance.setDataset(dataSubSet); for (int k = 0; k < m_Groups[i][j].length; k++) { newInstance.setValue(k, instance									.value(m_Groups[i][j][k])); }						newInstance.setClassValue(instance.classValue); dataSubSet.add(newInstance); }				}

dataSubSet.randomize(random); // Remove a percentage of the instances Instances originalDataSubSet = dataSubSet; dataSubSet.randomize(random); RemovePercentage rp = new RemovePercentage; rp.setPercentage(m_RemovedPercentage); rp.setInputFormat(dataSubSet); dataSubSet = Filter.useFilter(dataSubSet, rp); if (dataSubSet.numInstances < 2) { dataSubSet = originalDataSubSet; }

// Project de data m_ProjectionFilters[i][j].setInputFormat(dataSubSet); Instances projectedData = null; do { try { projectedData = Filter.useFilter(dataSubSet,								m_ProjectionFilters[i][j]); } catch (Exception e) { // The data could not be projected, we add some random // instances addRandomInstances(dataSubSet, 10, random); }				} while (projectedData == null);

// Include the projected attributes in the attributes of the // transformed dataset for (int a = 0; a < projectedData.numAttributes - 1; a++) { transformedAttributes.addElement(projectedData.attribute(a)							.copy); }			}

transformedAttributes.addElement(data.classAttribute.copy); Instances transformedData = new Instances("rotated-" + i + "-",					transformedAttributes, 0); transformedData.setClassIndex(transformedData.numAttributes - 1); m_Headers[i] = new Instances(transformedData, 0);

// Project all the training data Enumeration enu = data.enumerateInstances; while (enu.hasMoreElements) { Instance instance = (Instance) enu.nextElement; Instance newInstance = convertInstance(instance, i); transformedData.add(newInstance); }

// Build the base classifier if (m_Classifier instanceof Randomizable) { ((Randomizable) m_Classifiers[i]).setSeed(random.nextInt); }			m_Classifiers[i].buildClassifier(transformedData); }

if (m_Debug) { printGroups; }	}

code
上面的代码为每个基分类器构造新的训练数据集合，并分别训练（基分类器默认使用决策树）。方法介绍完毕。