summaryrefslogtreecommitdiff
path: root/src/main/Analyzor.java
blob: 0c3ede3310a88ab98918953835af076f1eb11f28 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
package main;

import analysis.BrandChecker;
import database.NamedPreparedStatement;
import database.QueryUtils;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.PrintWriter;
import java.io.UnsupportedEncodingException;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.List;
import java.util.Map.Entry;
import java.util.Scanner;

/**
 * The sentiment analysis class that rates tweets based on a unigram and bigram
 * set of weights.
 */
public class Analyzor {

    /**
     * The map that matches single words (unigrams) to their sentiment weights.
     */
    private final HashMap<String, Double> unimap = new HashMap<>();

    /**
     * The map that matches word pairs (bigrams) to their sentiment weights.
     */
    private final HashMap<String, Double> bimap = new HashMap<>();

    /**
     * The results of the most recent query; set by query() and consumed by
     * the analysis methods. Null until query() has been called.
     */
    private ResultSet data;

    /**
     * The persistent connection to the database.
     */
    private final Connection connection;

    /**
     * Creates an analyzer that runs its queries over the given connection.
     *
     * @param connection An open connection to the database.
     */
    public Analyzor(Connection connection) {
        this.connection = connection;
    }

    /**
     * Read the unigram and bigram lexica into the unimap and bimap fields.
     * Does nothing if the lexica were already loaded.
     *
     * @throws FileNotFoundException When a lexicon file is missing.
     */
    public void readLexicon() throws FileNotFoundException {
        if (!unimap.isEmpty()) {
            // Data is already read.
            return;
        }
        System.err.println("Trying to read lexicons...");
        // A unigram line is in the format (WS = whitespace):
        //   word <WS> rating <WS> ??? <WS> ??
        // A bigram line has two WS-separated words instead of one.
        // Only the word(s) and the rating are used; the rest of each line
        // is skipped via nextLine().
        try (Scanner uniScanner = new Scanner(new File("unigrams-pmilexicon.txt"));
                Scanner biScanner = new Scanner(new File("bigrams-pmilexicon.txt"))) {
            // Fill the map of unigrams; keys are stored lowercased.
            while (uniScanner.hasNext()) {
                String word = uniScanner.next();
                Double rating = Double.valueOf(uniScanner.next());
                unimap.put(word.toLowerCase(), rating);
                if (uniScanner.hasNextLine()) {
                    uniScanner.nextLine();
                }
            }

            // Fill the map of bigrams; keys are stored lowercased.
            while (biScanner.hasNext()) {
                String words = biScanner.next() + " " + biScanner.next();
                bimap.put(words.toLowerCase(), Double.valueOf(biScanner.next()));
                if (biScanner.hasNextLine()) {
                    biScanner.nextLine();
                }
            }
        }
        System.err.println("Lexicons are read.");
    }

    /**
     * Executes a query that the analyzer can analyze; the result is stored
     * in the data field.
     *
     * @param query The query string to execute.
     * @throws SQLException When database connection isn't available.
     */
    public void query(String query) throws SQLException {
        // Close the statement of any previous result set so repeated calls
        // do not leak cursors (closing a Statement also closes its ResultSet).
        if (data != null) {
            try {
                data.getStatement().close();
            } catch (SQLException e) {
                // Best effort: a close failure must not block the new query.
                System.err.println("Could not close previous statement: " + e.getMessage());
            }
        }
        PreparedStatement statement = connection.prepareStatement(query);
        data = statement.executeQuery();
    }

    /**
     * Run a sentiment analysis and fill the database with the output.
     *
     * @param query The sql text for the query; must select at least the
     *              columns text, tweetid and brand.
     * @throws SQLException When the database is unavailable.
     * @throws IOException When a lexicon file cannot be read.
     */
    public void sentimentAnalysis(String query) throws SQLException, IOException {
        query(query);

        // Read the lexicons (no-op if they were already loaded).
        readLexicon();

        // Go to the start of the dataset.
        if (data == null) {
            System.err.println("data is empty, try querying first");
            return;
        }

        // Prepare the insert statement once and reuse it for every tuple,
        // matching the pattern used in getBrands().
        NamedPreparedStatement insertRating =
                new NamedPreparedStatement(connection, QueryUtils.insertRating);

        // For all tuples...
        while (data.next()) {
            // Get the tweet text and split punctuation off into words.
            String text = splitPunctToWords(data.getString("text"));
            String[] words = text.split("\\s+");
            double positiverate = 0; // accumulated sentiment rating

            // Rate the text with unigrams.
            // NOTE(review): lexicon keys are lowercased in readLexicon() but
            // the words here are not lowercased, so capitalized words never
            // match — confirm whether that is intended.
            for (String word : words) {
                Double value = unimap.get(word);
                if (value != null) {
                    positiverate += value;
                }
            }
            // Rate the text with bigrams (each adjacent word pair).
            for (int i = 0; i < words.length - 1; i++) {
                String pair = words[i] + " " + words[i + 1];
                Double value = bimap.get(pair);
                if (value != null) {
                    positiverate += value;
                }
            }
            // Insert the rating into the database, scaled to an int.
            QueryUtils.setInsertParams(insertRating, data.getLong("tweetid"),
                    data.getString("brand"), (int) (positiverate * 10));
            insertRating.executeUpdate();
        }
    }

    /**
     * Make a wordcloud of the results of some query: counts per brand how
     * often each word occurs in the tweet texts and writes the result to
     * wordcloud.csv.
     *
     * @param query The sql text for a query; must select the columns brand
     *              and text.
     * @throws SQLException When the database is unavailable.
     * @throws FileNotFoundException When the output file cannot be created.
     * @throws UnsupportedEncodingException When UTF-8 is unsupported.
     */
    public void makeWordCloud(String query) throws SQLException, FileNotFoundException, UnsupportedEncodingException {

        query(query);
        //go to the start of the ResultSet data
        if (data == null) {
            System.err.println("data is empty, try querying first");
            return;
        }

        // Maps brand -> (word -> occurrence count).
        HashMap<String, HashMap<String, Integer>> wordcloud = new HashMap<>();

        while (data.next()) {
            String brand = data.getString("brand");
            // Look the per-brand counter map up once per row instead of on
            // every word.
            HashMap<String, Integer> counts = wordcloud.get(brand);
            if (counts == null) {
                counts = new HashMap<>();
                wordcloud.put(brand, counts);
            }
            // Remove punctuation, convert to lowercase and split on words.
            String text = removePunct(data.getString("text")).toLowerCase();
            String[] words = text.split("\\s+");
            for (String word : words) {
                // If it is empty, a space or a stripe, skip it.
                if (word.equals("") || word.equals(" ") || word.equals("-")) {
                    continue;
                }
                // Increment the word's count, starting at 1 when absent.
                Integer count = counts.get(word);
                counts.put(word, count == null ? 1 : count + 1);
            }
        }
        //print the words and their frequency in a csv file
        mapToCSV(wordcloud, "wordcloud.csv", "brand,word,count");
    }

    /**
     * Generate a csv file named output.csv for disco from the query.
     * Null values become an empty field ("0" in the last column), and
     * commas/newlines inside values are replaced so the csv stays valid.
     *
     * @param query The sql text for the query.
     * @throws SQLException When the database is unavailable.
     * @throws FileNotFoundException When the output file cannot be created.
     * @throws UnsupportedEncodingException When UTF-8 is unsupported.
     */
    public void disco(String query) throws SQLException, FileNotFoundException, UnsupportedEncodingException {
        //do the query
        query(query);
        // Hoist the metadata lookups out of the loops.
        ResultSetMetaData meta = data.getMetaData();
        int columnCount = meta.getColumnCount();
        // try-with-resources so the writer is closed even on an exception.
        try (PrintWriter writer = new PrintWriter("output.csv", "UTF-8")) {
            //print the first row
            for (int i = 1; i < columnCount; i++) {
                writer.print(meta.getColumnLabel(i) + ", ");
            }
            writer.println(meta.getColumnLabel(columnCount));
            //print the values
            while (data.next()) {
                for (int i = 1; i < columnCount; i++) {
                    if (data.getObject(i) == null) {
                        writer.print(", ");
                    } else {
                        writer.print(data.getObject(i).toString().replaceAll("[,\n]", " ") + ", ");
                    }
                }
                if (data.getObject(columnCount) == null) {
                    writer.println("0");
                } else {
                    writer.println(data.getObject(columnCount).toString().replace(",", " "));
                }
            }
        }
    }

    /**
     * (Re)builds the mentionsbrand table: clears it, then inserts for every
     * tweet the brands found in its text by the BrandChecker, or "no" when
     * none were found.
     *
     * @throws SQLException When the database is unavailable.
     */
    public void getBrands() throws SQLException {
        // Clear the old brand mentions; close the statement when done.
        try (PreparedStatement statement = connection.prepareStatement("delete from mentionsbrand")) {
            statement.executeUpdate();
        }
        BrandChecker checker = new BrandChecker("brandonlyrules.txt");
        query("select * from tweet");
        NamedPreparedStatement m_insertBrand = new NamedPreparedStatement(connection, QueryUtils.insertBrand);
        while (data.next()) {
            List<String> brands = checker.getBrands(data.getString("text"));
            if (brands.isEmpty()) {
                // No brand found: record an explicit "no" so the tweet is
                // still represented in the table.
                QueryUtils.setInsertBrandParams(m_insertBrand, data.getLong("tweetid"), "no");
                m_insertBrand.executeUpdate();
            } else {
                for (String brand : brands) {
                    QueryUtils.setInsertBrandParams(m_insertBrand, data.getLong("tweetid"), brand);
                    m_insertBrand.executeUpdate();
                }
            }
        }
    }

    /**
     * Gets the amount of users that tweet about a brand in a timezone and
     * writes a csv file with the columns timezone, brand, amount.
     *
     * @param query The sql text for the query; must select the columns
     *              timezone and brand.
     * @throws SQLException When the database is unavailable.
     * @throws FileNotFoundException When the output file cannot be created.
     * @throws UnsupportedEncodingException When UTF-8 is unsupported.
     */
    public void timezone(String query) throws SQLException, FileNotFoundException, UnsupportedEncodingException{
        query(query);
        // Maps timezone -> (brand -> amount).
        HashMap<String, HashMap<String, Integer>> timeMap = new HashMap<>();

        while (data.next()) {
            String timezone = data.getString("timezone");
            String brand = data.getString("brand");
            // Look the per-timezone map up once per row instead of on
            // every branch.
            HashMap<String, Integer> brandCounts = timeMap.get(timezone);
            if (brandCounts == null) {
                brandCounts = new HashMap<>();
                timeMap.put(timezone, brandCounts);
            }
            // Increment the brand's amount, starting at 1 when absent.
            Integer amount = brandCounts.get(brand);
            brandCounts.put(brand, amount == null ? 1 : amount + 1);
        }
        //make the CSV out of the map
        mapToCSV(timeMap, "timezone.csv", "timezone,brand,count");
    }
    
    //inserts spaces around punctuation so a later split on whitespace
    //separates it from the words; also removes urls.
    //Each replacement keeps the matched punctuation itself ($0):
    // - a space is inserted before !?):;"' anywhere in the text;
    // - a space is inserted before .,- only when followed by whitespace or
    //   the end of the string (so e.g. decimals like 1.5 stay intact);
    // - a space is inserted after ("' when preceded by whitespace.
    private String splitPunctToWords(String text) {
        text = text.replaceAll("https?://\\S*", "");
        text = text.replaceAll("[!?):;\"']", " $0");
        text = text.replaceAll("[.,-](\\s|$)", " $0");
        text = text.replaceAll("\\s[(\"']", "$0 ");
        return text;
    }

    //replaces every character other than letters, digits, '#', '_' and '-'
    //with a space; also removes urls and @-mentions
    private String removePunct(String text) {
        //chain the replacements instead of reassigning the parameter
        return text.replaceAll("https?://\\S*", " ")
                   .replaceAll("@\\S*", " ")
                   .replaceAll("[^a-zA-Z0-9#_-]", " ");
    }
    
    /**
     * Prints a nested hashmap into a csv file for a html application:
     * a HashMap of key1 to (HashMap of key2 to value) becomes lines of
     * "key1,key2,value". Only for String, String, Integer.
     *
     * @param map The nested map to write.
     * @param fileName The name of the csv file to create.
     * @param firstLine The header line written first.
     * @throws FileNotFoundException When the file cannot be created.
     * @throws UnsupportedEncodingException When UTF-8 is unsupported.
     */
    void mapToCSV(HashMap<String, HashMap<String, Integer>> map, String fileName, String firstLine)
                throws FileNotFoundException, UnsupportedEncodingException{

        // try-with-resources so the writer is closed even on an exception.
        try (PrintWriter writer = new PrintWriter(fileName, "UTF-8")) {
            writer.println(firstLine);
            //loop over the outer keys (e.g. brands)
            for (Entry<String, HashMap<String, Integer>> outer : map.entrySet()) {
                //loop over the inner keys (e.g. words), using the typed
                //entry values directly instead of re-looking the keys up
                for (Entry<String, Integer> inner : outer.getValue().entrySet()) {
                    writer.println(outer.getKey() + "," + inner.getKey() + "," + inner.getValue());
                }
            }
        }
        System.out.println("csv file made, please put it next to html file and run this");
    }
}