/* TweetTracker. Copyright (c) Arizona Board of Regents on behalf of Arizona State University
 * @author shamanth
 */
package Chapter5.text;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Set;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import utils.Tags;
import utils.TextUtils;

public class ExtractTopKeywords
{

    static final String DEF_INFILENAME = "ows.json";
    static final int DEF_K = 60;
    
    /**
     * Extracts the most frequently occurring keywords from the tweets by processing them sequentially. Stopwords are ignored.
     * @param inFilename File containing a list of tweets as JSON objects
     * @param K Count of the top keywords to return
     * @param ignoreHashtags If true, hashtags are not considered while counting the most frequent keywords
     * @param ignoreUsernames If true, usernames are not considered while counting the most frequent keywords
     * @param tu TextUtils object which handles the stopwords
     * @return a JSONArray of JSONObjects. Each object contains two elements, "text" and "size", referring to the word and its frequency
     */
    public JSONArray GetTopKeywords(String inFilename, int K, boolean ignoreHashtags, boolean ignoreUsernames, TextUtils tu)
    {
        HashMap<String, Integer> words = new HashMap<String,Integer>();
        BufferedReader br = null;
        try{
            br = new BufferedReader(new InputStreamReader(new FileInputStream(inFilename),"UTF-8"));            
            String temp = "";
            while((temp = br.readLine())!=null)
            {
                try{
                    JSONObject tweetobj = new JSONObject(temp);
                    if(!tweetobj.isNull("text"))
                    {
                        String text = tweetobj.getString("text");
                        //System.out.println(text);
                        text = text.toLowerCase().replaceAll("\\s+", " ");
                        /* Step 1: Tokenize the tweet into individual words and count their frequency in the corpus.
                         * Remove stop words and special characters. Ignore usernames and hashtags if the caller chooses to.
                         */
                        HashMap<String,Integer> tokens = tu.TokenizeText(text,ignoreHashtags,ignoreUsernames);
                        Set<String> keys = tokens.keySet();
                        for(String key:keys)
                        {
                            if(words.containsKey(key))
                            {
                                words.put(key, words.get(key)+tokens.get(key));
                            }
                            else
                            {
                                words.put(key, tokens.get(key));
                            }
                        }
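                        // Note: on Java 8+ the merge loop above could be written more compactly,
                        // e.g. (a sketch, behaviorally equivalent):
                        //   tokens.forEach((word, count) -> words.merge(word, count, Integer::sum));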
                    }
                }catch(JSONException ex)
                {
                    ex.printStackTrace();
                }
            }
        }catch(IOException ex)
        {
            ex.printStackTrace();
        }finally{
            // Close the reader only if it was successfully opened; otherwise br is still null.
            if(br!=null)
            {
                try {
                    br.close();
                } catch (IOException ex) {
                    Logger.getLogger(ExtractTopKeywords.class.getName()).log(Level.SEVERE, null, ex);
                }
            }
        }
        Set<String> keys = words.keySet();
        ArrayList<Tags> tags = new ArrayList<Tags>();
        for(String key:keys)
        {
            Tags tag = new Tags();
            tag.setKey(key);
            tag.setValue(words.get(key));
            tags.add(tag);
        }
        // Step 2: Sort the words in descending order of frequency
        Collections.sort(tags, Collections.reverseOrder());
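        // Note: this assumes utils.Tags implements Comparable, ordering tags by their
        // frequency value; Collections.reverseOrder() then yields descending order.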
        JSONArray cloudwords = new JSONArray();
        int numwords = K;
        if(tags.size()<numwords)
        {
            numwords = tags.size();
        }        
        for(int i=0;i<numwords;i++)
        {
            JSONObject wordfreq = new JSONObject();
            Tags tag = tags.get(i);
            try{
                wordfreq.put("text", tag.getKey());
                wordfreq.put("size",tag.getValue());
                cloudwords.put(wordfreq);
            }catch(JSONException ex)
            {
                ex.printStackTrace();
            }
        }
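        // The returned array has the following shape (values are illustrative):
        //   [{"text":"occupy","size":1024},{"text":"park","size":847}, ...]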
        return cloudwords;
    }

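    /**
     * Entry point. Reads an optional input filename and keyword count from the
     * command line, falling back to the defaults above. Example invocation
     * (arguments are illustrative):
     *   java Chapter5.text.ExtractTopKeywords ows.json 60
     */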
    public static void main(String[] args)
    {
        ExtractTopKeywords etk = new ExtractTopKeywords();

        //Initialize the TextUtils class which handles all the processing of text.
        TextUtils tu = new TextUtils();
        tu.LoadStopWords("C:/tweettracker/stopwords.txt");        
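        // Note: the stopword file path above is machine-specific; point it at a local
        // copy of the stopword list before running.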
        String infilename = DEF_INFILENAME;
        int K = DEF_K;
        if(args!=null)
        {
            if(args.length>=1&&!args[0].isEmpty())
            {
                File fl = new File(args[0]);
                if(fl.exists())
                {
                    infilename = args[0];
                }
            }
            if(args.length>=2&&!args[1].isEmpty())
            {
                try{
                    K = Integer.parseInt(args[1]);
                }catch(NumberFormatException ex)
                {
                    ex.printStackTrace();
                }
            }
        }
        // Keep hashtags (ignoreHashtags = false) but ignore usernames (ignoreUsernames = true).
        System.out.println(etk.GetTopKeywords(infilename, K, false, true, tu));
    }

}