The Altair Community is migrating to a new platform to provide a better experience for you. In preparation for the migration, the Altair Community is on read-only mode from October 28 - November 6, 2024. Technical support via cases will continue to work as is. For any urgent requests from Students/Faculty members, please submit the form linked here
Is this normal for RapidMiner to take this long?
kashif_khan
Member Posts: 19 Contributor II
I am dealing with text classification in RapidMiner. I have two separate test/train sets. My test set consists of two categories with 30 documents each, whereas my training set consists of two categories with 70 documents each.
I applied feature selection via information gain to select the top 200 attributes, but it has been two hours since I started my process and it is still selecting those 200 attributes.
Is it supposed to take this long for such a small corpus?
Is there any way to optimize the performance of RapidMiner?
Software Version: RapidMiner 5.3.013 (64 bit)
Java : 1.7
I applied feature selection via information gain to select the top 200 attributes, but it has been two hours since I started my process and it is still selecting those 200 attributes.
Is it supposed to take this long for such a small corpus?
Is there any way to optimize the performance of RapidMiner?
OS: Windows 7
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- RapidMiner 5.3 process: vectorize a train and a test text corpus (TF-IDF),
     weight attributes by information gain on the training set, filter both
     sets by those weights, then train/apply a 1-NN classifier and evaluate
     classification performance on the test set. -->
<process version="5.3.013">
<context>
<input/>
<output/>
<macros/>
</context>
<operator activated="true" class="process" compatibility="5.3.013" expanded="true" name="Process">
<parameter key="logverbosity" value="init"/>
<parameter key="random_seed" value="2001"/>
<parameter key="send_mail" value="never"/>
<parameter key="notification_email" value=""/>
<parameter key="process_duration_for_mail" value="30"/>
<parameter key="encoding" value="SYSTEM"/>
<process expanded="true">
<!-- Vectorize the TRAINING corpus (two directories = two class labels).
     NOTE(review): prune_method="none" disables all pruning, so every token
     AND every 2-gram becomes an attribute; this produces a very wide example
     set and is the most likely cause of the multi-hour information-gain
     weighting runtime reported above. Enabling percentual/absolute pruning
     here should shrink the attribute space drastically. -->
<operator activated="true" class="text:process_document_from_file" compatibility="5.2.004" expanded="true" height="76" name="Process Documents from Files" width="90" x="45" y="30">
<list key="text_directories">
<parameter key="comp.windows.x" value="D:\Machine Learning\News_Reuters\small_dataset0\Training\comp.windows.x"/>
<parameter key="misc.forsale" value="D:\Machine Learning\News_Reuters\small_dataset0\Training\misc.forsale"/>
</list>
<parameter key="file_pattern" value="*"/>
<parameter key="extract_text_only" value="true"/>
<parameter key="use_file_extension_as_type" value="true"/>
<parameter key="content_type" value="txt"/>
<parameter key="encoding" value="SYSTEM"/>
<parameter key="create_word_vector" value="true"/>
<parameter key="vector_creation" value="TF-IDF"/>
<parameter key="add_meta_information" value="true"/>
<parameter key="keep_text" value="false"/>
<!-- Pruning is OFF; the prune_* values below are therefore inactive.
     ("prunde_below_percent" is the key's actual legacy spelling in the
     5.x Text Processing extension — do not "fix" it.) -->
<parameter key="prune_method" value="none"/>
<parameter key="prunde_below_percent" value="3.0"/>
<parameter key="prune_above_percent" value="30.0"/>
<parameter key="prune_below_absolute" value="3"/>
<parameter key="prune_above_absolute" value="200"/>
<parameter key="prune_below_rank" value="0.05"/>
<parameter key="prune_above_rank" value="0.05"/>
<parameter key="datamanagement" value="double_sparse_array"/>
<!-- Per-document preprocessing chain: tokenize on non-letter characters,
     remove English stopwords, Snowball-stem, then append 2-grams. -->
<process expanded="true">
<operator activated="true" class="text:tokenize" compatibility="5.2.004" expanded="true" height="60" name="Tokenize" width="90" x="45" y="30">
<parameter key="mode" value="non letters"/>
<parameter key="characters" value=".:"/>
<parameter key="language" value="English"/>
<parameter key="max_token_length" value="3"/>
</operator>
<operator activated="true" class="text:filter_stopwords_english" compatibility="5.2.004" expanded="true" height="60" name="Filter Stopwords (English)" width="90" x="179" y="30"/>
<operator activated="true" class="text:stem_snowball" compatibility="5.2.004" expanded="true" height="60" name="Stem (Snowball)" width="90" x="313" y="30">
<parameter key="language" value="English"/>
</operator>
<operator activated="true" class="text:generate_n_grams_terms" compatibility="5.2.004" expanded="true" height="60" name="Generate n-Grams (Terms)" width="90" x="447" y="30">
<parameter key="max_length" value="2"/>
</operator>
<connect from_port="document" to_op="Tokenize" to_port="document"/>
<connect from_op="Tokenize" from_port="document" to_op="Filter Stopwords (English)" to_port="document"/>
<connect from_op="Filter Stopwords (English)" from_port="document" to_op="Stem (Snowball)" to_port="document"/>
<connect from_op="Stem (Snowball)" from_port="document" to_op="Generate n-Grams (Terms)" to_port="document"/>
<connect from_op="Generate n-Grams (Terms)" from_port="document" to_port="document 1"/>
<portSpacing port="source_document" spacing="0"/>
<portSpacing port="sink_document 1" spacing="0"/>
<portSpacing port="sink_document 2" spacing="0"/>
</process>
</operator>
<!-- Vectorize the TEST corpus. It receives the training word list (wired
     below) so both sets share the same attribute space. Preprocessing chain
     mirrors the training operator; pruning likewise disabled. -->
<operator activated="true" class="text:process_document_from_file" compatibility="5.2.004" expanded="true" height="76" name="Process Documents from Files (2)" width="90" x="179" y="255">
<list key="text_directories">
<parameter key="comp.windows.x" value="D:\Machine Learning\News_Reuters\small_dataset0\Testing\comp.windows.x"/>
<parameter key="misc.forsale" value="D:\Machine Learning\News_Reuters\small_dataset0\Testing\misc.forsale"/>
</list>
<parameter key="file_pattern" value="*"/>
<parameter key="extract_text_only" value="true"/>
<parameter key="use_file_extension_as_type" value="true"/>
<parameter key="content_type" value="txt"/>
<parameter key="encoding" value="SYSTEM"/>
<parameter key="create_word_vector" value="true"/>
<parameter key="vector_creation" value="TF-IDF"/>
<parameter key="add_meta_information" value="true"/>
<parameter key="keep_text" value="false"/>
<parameter key="prune_method" value="none"/>
<parameter key="prunde_below_percent" value="3.0"/>
<parameter key="prune_above_percent" value="30.0"/>
<parameter key="prune_below_absolute" value="3"/>
<parameter key="prune_above_absolute" value="200"/>
<parameter key="prune_below_rank" value="0.05"/>
<parameter key="prune_above_rank" value="0.05"/>
<parameter key="datamanagement" value="double_sparse_array"/>
<process expanded="true">
<operator activated="true" class="text:tokenize" compatibility="5.2.004" expanded="true" name="Tokenize (2)">
<parameter key="mode" value="non letters"/>
<parameter key="characters" value=".:"/>
<parameter key="language" value="English"/>
<parameter key="max_token_length" value="3"/>
</operator>
<operator activated="true" class="text:filter_stopwords_english" compatibility="5.2.004" expanded="true" name="Filter Stopwords (2)"/>
<operator activated="true" class="text:stem_snowball" compatibility="5.2.004" expanded="true" name="Stem (2)">
<parameter key="language" value="English"/>
</operator>
<operator activated="true" class="text:generate_n_grams_terms" compatibility="5.2.004" expanded="true" name="Generate n-Grams (2)">
<parameter key="max_length" value="2"/>
</operator>
<connect from_port="document" to_op="Tokenize (2)" to_port="document"/>
<connect from_op="Tokenize (2)" from_port="document" to_op="Filter Stopwords (2)" to_port="document"/>
<connect from_op="Filter Stopwords (2)" from_port="document" to_op="Stem (2)" to_port="document"/>
<connect from_op="Stem (2)" from_port="document" to_op="Generate n-Grams (2)" to_port="document"/>
<connect from_op="Generate n-Grams (2)" from_port="document" to_port="document 1"/>
<portSpacing port="source_document" spacing="0"/>
<portSpacing port="sink_document 1" spacing="0"/>
<portSpacing port="sink_document 2" spacing="0"/>
</process>
</operator>
<!-- Compute an information-gain weight for every attribute of the training
     example set. With pruning disabled above, this runs over the full
     token+2-gram attribute space. -->
<operator activated="true" class="weight_by_information_gain" compatibility="5.3.013" expanded="true" height="76" name="Weight by Information Gain" width="90" x="246" y="30">
<parameter key="normalize_weights" value="true"/>
<parameter key="sort_weights" value="true"/>
<parameter key="sort_direction" value="ascending"/>
</operator>
<!-- Duplicate the weight vector so both the train and the test selection
     operators can consume it. -->
<operator activated="true" class="multiply" compatibility="5.3.013" expanded="true" height="94" name="Multiply" width="90" x="313" y="165"/>
<!-- NOTE(review): weight_relation="greater" with weight=0.0 keeps ALL
     attributes whose (absolute) weight is > 0; the "k" parameter (1000) is
     only honored when weight_relation is "top k". As configured, this does
     NOT select a fixed top-200/top-k subset. -->
<operator activated="true" class="select_by_weights" compatibility="5.3.013" expanded="true" height="94" name="Select by Weights (2)" width="90" x="447" y="255">
<parameter key="weight_relation" value="greater"/>
<parameter key="weight" value="0.0"/>
<parameter key="k" value="1000"/>
<parameter key="p" value="0.5"/>
<parameter key="deselect_unknown" value="true"/>
<parameter key="use_absolute_weights" value="true"/>
</operator>
<!-- Same selection applied to the training set (same caveat as above). -->
<operator activated="true" class="select_by_weights" compatibility="5.3.013" expanded="true" height="94" name="Select by Weights" width="90" x="447" y="30">
<parameter key="weight_relation" value="greater"/>
<parameter key="weight" value="0.0"/>
<parameter key="k" value="1000"/>
<parameter key="p" value="0.5"/>
<parameter key="deselect_unknown" value="true"/>
<parameter key="use_absolute_weights" value="true"/>
</operator>
<!-- 1-nearest-neighbour classifier (k=1, unweighted vote, Euclidean mixed
     measure) trained on the weight-filtered training vectors. The kernel_*
     parameters below belong to other measure types and are inactive here. -->
<operator activated="true" class="k_nn" compatibility="5.3.013" expanded="true" height="76" name="k-NN" width="90" x="648" y="30">
<parameter key="k" value="1"/>
<parameter key="weighted_vote" value="false"/>
<parameter key="measure_types" value="MixedMeasures"/>
<parameter key="mixed_measure" value="MixedEuclideanDistance"/>
<parameter key="nominal_measure" value="NominalDistance"/>
<parameter key="numerical_measure" value="EuclideanDistance"/>
<parameter key="divergence" value="GeneralizedIDivergence"/>
<parameter key="kernel_type" value="radial"/>
<parameter key="kernel_gamma" value="1.0"/>
<parameter key="kernel_sigma1" value="1.0"/>
<parameter key="kernel_sigma2" value="0.0"/>
<parameter key="kernel_sigma3" value="2.0"/>
<parameter key="kernel_degree" value="3.0"/>
<parameter key="kernel_shift" value="1.0"/>
<parameter key="kernel_a" value="1.0"/>
<parameter key="kernel_b" value="0.0"/>
</operator>
<!-- Apply the trained model to the weight-filtered test set. -->
<operator activated="true" class="apply_model" compatibility="5.3.013" expanded="true" height="76" name="Apply Model" width="90" x="581" y="255">
<list key="application_parameters"/>
<parameter key="create_view" value="false"/>
</operator>
<!-- Evaluate predictions; only accuracy is enabled. -->
<operator activated="true" class="performance_classification" compatibility="5.3.013" expanded="true" height="76" name="Performance" width="90" x="715" y="255">
<parameter key="main_criterion" value="first"/>
<parameter key="accuracy" value="true"/>
<parameter key="classification_error" value="false"/>
<parameter key="kappa" value="false"/>
<parameter key="weighted_mean_recall" value="false"/>
<parameter key="weighted_mean_precision" value="false"/>
<parameter key="spearman_rho" value="false"/>
<parameter key="kendall_tau" value="false"/>
<parameter key="absolute_error" value="false"/>
<parameter key="relative_error" value="false"/>
<parameter key="relative_error_lenient" value="false"/>
<parameter key="relative_error_strict" value="false"/>
<parameter key="normalized_absolute_error" value="false"/>
<parameter key="root_mean_squared_error" value="false"/>
<parameter key="root_relative_squared_error" value="false"/>
<parameter key="squared_error" value="false"/>
<parameter key="correlation" value="false"/>
<parameter key="squared_correlation" value="false"/>
<parameter key="cross-entropy" value="false"/>
<parameter key="margin" value="false"/>
<parameter key="soft_margin_loss" value="false"/>
<parameter key="logistic_loss" value="false"/>
<parameter key="skip_undefined_labels" value="true"/>
<parameter key="use_example_weights" value="true"/>
<list key="class_weights"/>
</operator>
<!-- Wiring: train vectors -> IG weighting -> (weights duplicated) ->
     select-by-weights on both sets -> k-NN trained on train subset,
     applied to test subset -> performance. The training word list feeds
     the test vectorizer so attribute spaces match. -->
<connect from_port="input 1" to_op="Process Documents from Files" to_port="word list"/>
<connect from_op="Process Documents from Files" from_port="example set" to_op="Weight by Information Gain" to_port="example set"/>
<connect from_op="Process Documents from Files" from_port="word list" to_op="Process Documents from Files (2)" to_port="word list"/>
<connect from_op="Process Documents from Files (2)" from_port="example set" to_op="Select by Weights (2)" to_port="example set input"/>
<connect from_op="Weight by Information Gain" from_port="weights" to_op="Multiply" to_port="input"/>
<connect from_op="Weight by Information Gain" from_port="example set" to_op="Select by Weights" to_port="example set input"/>
<connect from_op="Multiply" from_port="output 1" to_op="Select by Weights" to_port="weights"/>
<connect from_op="Multiply" from_port="output 2" to_op="Select by Weights (2)" to_port="weights"/>
<connect from_op="Select by Weights (2)" from_port="example set output" to_op="Apply Model" to_port="unlabelled data"/>
<connect from_op="Select by Weights" from_port="example set output" to_op="k-NN" to_port="training set"/>
<connect from_op="k-NN" from_port="model" to_op="Apply Model" to_port="model"/>
<connect from_op="k-NN" from_port="exampleSet" to_port="result 1"/>
<connect from_op="Apply Model" from_port="labelled data" to_op="Performance" to_port="labelled data"/>
<connect from_op="Performance" from_port="performance" to_port="result 2"/>
<portSpacing port="source_input 1" spacing="0"/>
<portSpacing port="source_input 2" spacing="0"/>
<portSpacing port="sink_result 1" spacing="0"/>
<portSpacing port="sink_result 2" spacing="0"/>
<portSpacing port="sink_result 3" spacing="0"/>
</process>
</operator>
</process>
3.
Software Version: RapidMiner 5.3.013 (64 bit)
Java : 1.7
0
Answers
From my point of view, using attribute weighting together with k-NN is not the ideal combination for solving text mining problems.
Please instead try some of the pruning methods inside the Process Documents operator, and choose a learning scheme such as Naive Bayes or SVM.