package main;

import analysis.BrandChecker;
import database.NamedPreparedStatement;
import database.QueryUtils;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.PrintWriter;
import java.io.UnsupportedEncodingException;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.util.List;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map.Entry;
import java.util.Scanner;

/**
 * The sentiment analysis class that rates tweets using sets of unigram and
 * bigram weights.
 */
public class Analyzor {

    /**
     * The map that matches single words to their weights.
     */
    private final HashMap<String, Double> unimap = new HashMap<>();

    /**
     * The map that matches word pairs to their weights.
     */
    private final HashMap<String, Double> bimap = new HashMap<>();

    /**
     * The result set of the most recent query, as set by query().
     */
    private ResultSet data;

    /**
     * The persistent connection to the database.
     */
    private final Connection connection;

    /**
     * @param connection An open connection to the database.
     */
    public Analyzor(Connection connection) {
        this.connection = connection;
    }
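
    /*
     * Typical usage (an illustrative sketch; "connection" is an open JDBC
     * connection and "someQuery" a hypothetical query that selects at least
     * the tweetid, brand and text columns, since those are read per tuple):
     *
     *   Analyzor analyzor = new Analyzor(connection);
     *   analyzor.readLexicon();
     *   analyzor.sentimentAnalysis(someQuery);
     */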

    /**
     * Read the unigram and bigram lexica.
     *
     * @throws FileNotFoundException If a lexicon file cannot be found.
     */
    public void readLexicon() throws FileNotFoundException {
        if (!unimap.isEmpty()) {
            // data is already read.
            return;
        }
        System.err.println("Trying to read lexicons...");
        // A unigram is in the format (WS = whitespace):
        // word <WS> rating <WS> ??? <WS> ??
        // A bigram has two WS-separated words instead of one.
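        // For example, a unigram line of the form "good 1.9 x y" yields the
        // entry ("good" -> 1.9); only the first two fields (word and rating)
        // are used, the rest of the line is skipped via nextLine().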
        try (Scanner uniScanner = new Scanner(new File("unigrams-pmilexicon.txt"));
                Scanner biScanner = new Scanner(new File("bigrams-pmilexicon.txt"))) {
            //Fill the map of unigrams
            while (uniScanner.hasNext()) {
                String words = uniScanner.next();
                Double d = Double.valueOf(uniScanner.next());
                unimap.put(words.toLowerCase(), d);
                if (uniScanner.hasNextLine()) {
                    uniScanner.nextLine();
                }
            }

            //fill the map of bigrams
            while (biScanner.hasNext()) {
                String words = biScanner.next() + " " + biScanner.next();
                bimap.put(words.toLowerCase(), Double.valueOf(biScanner.next()));
                if (biScanner.hasNextLine()) {
                    biScanner.nextLine();
                }
            }
        }
        System.err.println("Lexicons are read.");
    }

    /**
     * Executes a query that the analyzer can analyze.
     *
     * @param query The query string to execute.
     * @throws SQLException If the query fails or the database connection is unavailable.
     */
    public void query(String query) throws SQLException {
        //prepare and execute the query on the existing connection
        PreparedStatement statement = connection.prepareStatement(query);
        data = statement.executeQuery();
    }

    /**
     * Run a sentiment analysis and fill the database with the output.
     *
     * @param query The sql text for the query.
     * @throws SQLException
     * @throws IOException
     */
    public void sentimentAnalysis(String query) throws SQLException, IOException {
        query(query);

        //read the lexicons
        readLexicon();

        //check that the query returned data
        if (data == null) {
            System.err.println("data is empty, try querying first");
            return;
        }

        Double value;
        String text;

        //prepare the insert statement once and reuse it for every tuple
        NamedPreparedStatement insertRating = new NamedPreparedStatement(connection, QueryUtils.insertRating);

        //for all tuples
        while (data.next()) {
            //get the text
            text = data.getString("text");
            text = splitPunctToWords(text);
            //text now holds the cleaned tweet text to analyze
            String[] words = text.split("\\s+"); //text split into separate words
            double positiverate = 0; //positive rating

            // Rate the text with unigrams.
            // The lexicon keys were lowercased on load, so lowercase the word before lookup.
            for (String word : words) {
                value = unimap.get(word.toLowerCase());
                if (value != null) {
                    positiverate += value;
                }
            }
            // Rate the text with bigrams
            for (int i = 0; i < words.length - 1; i++) {
                String pair = (words[i] + " " + words[i + 1]).toLowerCase();
                value = bimap.get(pair);
                if (value != null) {
                    positiverate += value;
                }
            }
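            //Illustrative arithmetic with hypothetical weights: for the text
            //"not good", unimap {"not": -0.6, "good": 1.9} and bimap
            //{"not good": -3.2} give positiverate = -0.6 + 1.9 - 3.2 = -1.9,
            //which is stored below as (int) (-1.9 * 10) = -19.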
            //insert the rating into the database
            QueryUtils.setInsertParams(insertRating, data.getLong("tweetid"), data.getString("brand"), (int) (positiverate * 10));
            insertRating.executeUpdate();
        }
    }

    /**
     * Make a wordcloud of the results of some query.
     *
     * @param query The sql text for a query.
     * @throws SQLException
     * @throws FileNotFoundException
     * @throws UnsupportedEncodingException
     */
    public void makeWordCloud(String query) throws SQLException, FileNotFoundException, UnsupportedEncodingException {

        query(query);
        //check that the query returned data
        if (data == null) {
            System.err.println("data is empty, try querying first");
            return;
        }

        String text;
        String brand;
        String[] words;
        HashMap<String, HashMap<String, Integer>> wordcloud = new HashMap<>();

        while (data.next()) {
            //get brand
            brand = data.getString("brand");
            //get or create the word-count map for this brand
            if (!wordcloud.containsKey(brand)) {
                wordcloud.put(brand, new HashMap<String, Integer>());
            }
            HashMap<String, Integer> counts = wordcloud.get(brand);
            //get the text
            text = data.getString("text");
            //remove punctuation, convert to lowercase and split on words
            text = removePunct(text);
            text = text.toLowerCase();
            words = text.split("\\s+");
            //for all words
            for (String word : words) {
                //skip empty strings and lone dashes left over after cleaning
                if (word.isEmpty() || word.equals("-")) {
                    continue;
                }
                //increment the count, starting at 1 for a new word
                if (counts.containsKey(word)) {
                    counts.put(word, counts.get(word) + 1);
                } else {
                    counts.put(word, 1);
                }
            }
        }
        //print the words and their frequency in a csv file
        ssiMapToCSV(wordcloud, "wordcloud.csv", "brand,word,count");
    }

    //generates a csv for disco from the query results
    public void disco(String query) throws SQLException, FileNotFoundException, UnsupportedEncodingException {
        //do the query
        query(query);
        ResultSetMetaData meta = data.getMetaData();
        int columnCount = meta.getColumnCount();
        PrintWriter writer = new PrintWriter("output.csv", "UTF-8");
        //print the header row
        for (int i = 1; i < columnCount; i++) {
            writer.print(meta.getColumnLabel(i) + ", ");
        }
        writer.println(meta.getColumnLabel(columnCount));
        //print the values, stripping commas and newlines so the csv stays valid
        while (data.next()) {
            for (int i = 1; i < columnCount; i++) {
                if (data.getObject(i) == null) {
                    writer.print(", ");
                } else {
                    writer.print(data.getObject(i).toString().replaceAll("[,\n]", " ") + ", ");
                }
            }
            //the last column prints 0 when null so the row always ends in a value
            if (data.getObject(columnCount) == null) {
                writer.println("0");
            } else {
                writer.println(data.getObject(columnCount).toString().replaceAll("[,\n]", " "));
            }
        }
        writer.close();
    }

    /**
     * Obtain the brands of select tweet texts.
     *
     * @param queryText The rows to select.
     * @param reset Whether to reset mentionsbrand.
     * @throws SQLException If the query is unsuccessful.
     */
    public void getBrands(String queryText, boolean reset) throws SQLException {
        BrandChecker checker = new BrandChecker("brandonlyrules.txt");

        //optionally clear the old entries of mentionsbrand first
        if (reset) {
            System.out.println("Cleaning old entries of mentionsbrand.");
            PreparedStatement statement = connection.prepareStatement("delete from mentionsbrand");
            statement.executeUpdate();
        }

        System.out.println("Obtaining all selected entries in tweet.");
        if (queryText.isEmpty()) {
            query("select * from tweet");
        } else {
            query(queryText);
        }
        System.out.println("Query finished.");

        NamedPreparedStatement insertBrand = new NamedPreparedStatement(connection, QueryUtils.insertBrand);

        int brandCount = 0;
        int count = 0;
        long timestamp = System.currentTimeMillis();

        while (data.next()) {
            List<String> brands = checker.getBrands(data.getString("text"));
            if (brands.isEmpty()) {
                brandCount++;
                QueryUtils.setInsertBrandParams(insertBrand, data.getLong("tweetid"), "no");
                insertBrand.executeUpdate();
            } else {
                brandCount += brands.size();
                for (String brand : brands) {
                    QueryUtils.setInsertBrandParams(insertBrand, data.getLong("tweetid"), brand);
                    insertBrand.executeUpdate();
                }
            }

            count++;
            if (count % 10000 == 0) {
                System.out.println("Processed " + count + " tweets in " + (System.currentTimeMillis() - timestamp) + " ms");
            }
        }

        System.out.println("Processed " + count + " tweets in " + (System.currentTimeMillis() - timestamp) + " ms");
        System.out.println("Finished getBrands, processed " + count + " number of tweets, added " + brandCount + " brands or no.");
    }

    //counts the tweets about each brand per timezone
    //makes a csv file timezone, brand, amount
    public void timezone(String query) throws SQLException, FileNotFoundException, UnsupportedEncodingException {
        query(query);

        //read the timezone mapping; each useful line is "rawname,bucket"
        HashMap<String, String> toTimezone = new HashMap<>();
        try (Scanner readFile = new Scanner(new FileInputStream("timezone.txt"))) {
            while (readFile.hasNextLine()) {
                String[] parts = readFile.nextLine().split(",");
                if (parts.length > 1) {
                    toTimezone.put(parts[0], parts[1]);
                }
            }
        }

        //hashmap timezone, brand, amount
        HashMap<String, HashMap<String, Integer>> timeMap = new HashMap<>();
        String timezone;
        String brand;

        while (data.next()) {
            timezone = data.getString("timezone");
            if (toTimezone.containsKey(timezone)) {
                timezone = toTimezone.get(timezone);
            } else {
                timezone = "other";
            }
            brand = data.getString("brand");

            //if the timezone is already in the map
            if (timeMap.containsKey(timezone)) {
                //if the brand for that timezone is already in the map
                if (timeMap.get(timezone).containsKey(brand)) {
                    //increment the amount
                    timeMap.get(timezone).put(brand, timeMap.get(timezone).get(brand) + 1);
                } //if the brand for that timezone is not yet in the map
                else {
                    //make a new entry for that brand with amount = 1
                    timeMap.get(timezone).put(brand, 1);
                }
            } //if the timezone is not yet in the map
            else {
                //make a new hashmap for this map and fill it with the brand and the amount
                timeMap.put(timezone, new HashMap<String, Integer>());
                timeMap.get(timezone).put(brand, 1);
            }
        }

        //make the CSV out of the map
        ssiMapToCSV(timeMap, "timezone.csv", "timezone,brand,count");
    }

    //gets the positivity of the tweets about a brand
    //makes a csv file for posnegVisualizer
    void posNeg(String query) throws SQLException, FileNotFoundException, UnsupportedEncodingException {
        query(query);

        String brand;
        int rating;
        int ratingInterval;

        int intervalSize = 10;
        //brand, ratingInterval, amount
        HashMap<String, HashMap<Integer, Integer>> posnegMap = new HashMap<>();
        /*
         the rating interval is given by an integer, which is the result of the
         tweets sentiment value divided by interval size rounded down.
         This puts all data in boxes for the histogram.
         */
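        /*
         Worked example with intervalSize = 10: rating 17 gives
         (17 + 5) / 10 * 10 = 22 / 10 * 10 = 20, so every rating from 15
         through 24 lands in interval 20. Note that Java integer division
         truncates toward zero, so the ratings -14 through 4 all land in
         interval 0.
         */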

        while (data.next()) {

            brand = data.getString("brand");
            rating = data.getInt("rating");
            //ratingInterval is an integer multiple of intervalSize;
            //a rating within ratingInterval +- 0.5 * intervalSize belongs to that interval
            ratingInterval = (rating + (int) (0.5 * intervalSize)) / intervalSize * intervalSize;

            //if the brand is already in the map
            if (posnegMap.containsKey(brand)) {
                //if the interval for that brand is already in the map
                if (posnegMap.get(brand).containsKey(ratingInterval)) {
                    //increment the amount
                    posnegMap.get(brand).put(ratingInterval, posnegMap.get(brand).get(ratingInterval) + 1);
                } //if the interval for that brand is not yet in the map
                else {
                    //make a new entry for that brand with amount = 1
                    posnegMap.get(brand).put(ratingInterval, 1);
                }
            } //if the brand is not yet in the map
            else {
                //make a new hashmap for this map and fill it with the brand and the amount
                posnegMap.put(brand, new HashMap<Integer, Integer>());
                posnegMap.get(brand).put(ratingInterval, 1);
            }
        }
        siiMapToCSV(posnegMap, "posneg.csv", "brand,ratingInterval,count");
    }

    /*
     makes a csv for disco of a process of news spreading

     the query should be as follows:
     - it should be a union of the following query twice, once with TYPE = retweet, once with TYPE = reply
     - pick two tables of tweet (t1 and t2) and one of TYPEof
     - t1.tweetid = TYPEof.TYPEonid and t2.tweetid = TYPEof.TYPEid
     - t1.tweetid should be named thetweetid (the name this method reads)
     - t2.tweetid should be named TYPEid
     - t1.timestamp should be named maintime
     - t2.timestamp should be named othertime
     - t1.userid should be named mainuserid
     - t2.userid should be named otheruser (the name this method reads)

     so the resulting table should be:
     thetweetid, maintime, mainuserid, replyid, retweetid, othertime, otheruser

     note that exactly one of replyid and retweetid has to be null and the other a long for each row
     how to do this: http://stackoverflow.com/questions/2309943/unioning-two-tables-with-different-number-of-columns

     the csv will contain: tweetID of the tweet replied/retweeted on, reply/retweet, timestamp, tweetid of the reply/retweet, userid
     which corresponds to: caseID, activity, timestamp, resource, resource
     */
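    /*
     An illustrative query of this shape (the table and column names here
     are hypothetical; the actual schema may differ):

     select t1.tweetid as thetweetid, t1.timestamp as maintime,
            t1.userid as mainuserid, t2.tweetid as replyid,
            null as retweetid, t2.timestamp as othertime,
            t2.userid as otheruser
     from tweet t1, tweet t2, replyof
     where t1.tweetid = replyof.replyonid and t2.tweetid = replyof.replyid
     union
     select t1.tweetid, t1.timestamp, t1.userid, null, t2.tweetid,
            t2.timestamp, t2.userid
     from tweet t1, tweet t2, retweetof
     where t1.tweetid = retweetof.retweetonid and t2.tweetid = retweetof.retweetid
     */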
    void newsSpread(String query) throws SQLException, FileNotFoundException, UnsupportedEncodingException {
        query(query);

        long maintweetID;
        long replyID;
        long retweetID;

        //tweetID, set of replyID's
        HashMap<Long, HashSet<Long>> hasReplies = new HashMap<>();
        //tweetID, set of retweetID's
        HashMap<Long, HashSet<Long>> hasRetweets = new HashMap<>();
        //tweetID, its timestamp
        HashMap<Long, Timestamp> timestamp = new HashMap<>();
        //tweetID, its userID
        HashMap<Long, Long> user = new HashMap<>();

        while (data.next()) {

            maintweetID = data.getLong("thetweetid");
            replyID = data.getLong("replyid");
            retweetID = data.getLong("retweetid");

            //put these in the corresponding maps
            //note that exactly one of the two if statements below will hold
            //if the replyID is not null
            if (replyID != 0) {
                //if this tweetID has no set yet, make one
                if (hasReplies.get(maintweetID) == null) {
                    hasReplies.put(maintweetID, new HashSet<Long>());
                }
                //add the replyID to the tweetID
                hasReplies.get(maintweetID).add(replyID);
                //store the time of the tweet
                timestamp.put(replyID, data.getTimestamp("othertime"));
                //store the user of the tweet
                user.put(replyID, data.getLong("otheruser"));
            }
            //if the retweetID is not null
            if (retweetID != 0) {
                //if this tweetID has no set yet, make one
                if (hasRetweets.get(maintweetID) == null) {
                    hasRetweets.put(maintweetID, new HashSet<Long>());
                }
                //add the retweetID to the tweetID
                hasRetweets.get(maintweetID).add(retweetID);
                //store the time of the tweet
                timestamp.put(retweetID, data.getTimestamp("othertime"));
                //store the user of the tweet
                user.put(retweetID, data.getLong("otheruser"));
            }
        }

        //now use this data to make a csv for disco
        PrintWriter writer = new PrintWriter("newsSpread.csv", "UTF-8");
        //print the first line
        writer.println("caseID,activity,timestamp,tweet,user");

        //print all replies
        for (Long tweetid : hasReplies.keySet()) {
            for (Long replyid : hasReplies.get(tweetid)) {
                writer.println(tweetid + ", reply, " + timestamp.get(replyid) + ", " + replyid + ", " + user.get(replyid));
            }
        }
        //print all retweets
        for (Long tweetid : hasRetweets.keySet()) {
            for (Long retweetid : hasRetweets.get(tweetid)) {
                writer.println(tweetid + ", retweet, " + timestamp.get(retweetid) + ", " + retweetid + ", " + user.get(retweetid));
            }
        }
        writer.close();
    }

    //pads punctuation with spaces so it ends up as separate tokens after a split
    //also removes urls
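    //Illustrative example: "I love this! (really) http://t.co/x yes."
    //becomes roughly "I love this ! ( really ) yes ." so each punctuation
    //mark is its own token after splitting on whitespace (extra spaces are
    //harmless, since the caller splits on \s+).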
    private String splitPunctToWords(String text) {
        text = text.replaceAll("https?://\\S*", "");
        text = text.replaceAll("[!?):;\"']", " $0");
        text = text.replaceAll("[.,-](\\s|$)", " $0");
        text = text.replaceAll("\\s[(\"']", "$0 ");
        return text;
    }

    //removes punctuation, urls and @mentions;
    //keeps letters, digits, '#', '_' and '-'
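    //Illustrative example: "@user I LOVE #tea-time! http://x.co :)" reduces
    //to the tokens "I", "LOVE", "#tea-time" once the caller splits on
    //whitespace; '#', '_' and '-' are the only punctuation kept.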
    private String removePunct(String text) {
        text = text.replaceAll("https?://\\S*", " ");
        text = text.replaceAll("@\\S*", " ");
        text = text.replaceAll("[^a-zA-Z0-9#_-]", " ");
        return text;
    }

    //writes a hashmap to a csv file for an html application
    //HashMap<key1, HashMap<key2, value>> becomes key1, key2, value
    //this variant is for String, String, Integer
    void ssiMapToCSV(HashMap<String, HashMap<String, Integer>> map, String fileName, String firstLine)
            throws FileNotFoundException, UnsupportedEncodingException {

        PrintWriter writer = new PrintWriter(fileName, "UTF-8");

        writer.println(firstLine);

        //loop over the outer keys (e.g. brands)
        for (Entry<String, HashMap<String, Integer>> en : map.entrySet()) {
            //loop over the inner keys (e.g. words)
            for (Entry<String, Integer> e : en.getValue().entrySet()) {
                writer.println(en.getKey() + "," + e.getKey() + "," + e.getValue());
            }
        }

        writer.close();
        System.out.println("csv file made, please put it next to html file and run this");
    }

    //same as ssiMapToCSV, but for String, Integer, Integer maps
    void siiMapToCSV(HashMap<String, HashMap<Integer, Integer>> map, String fileName, String firstLine)
            throws FileNotFoundException, UnsupportedEncodingException {

        PrintWriter writer = new PrintWriter(fileName, "UTF-8");

        writer.println(firstLine);

        //loop over the outer keys (e.g. brands)
        for (Entry<String, HashMap<Integer, Integer>> en : map.entrySet()) {
            //loop over the inner keys (e.g. rating intervals)
            for (Entry<Integer, Integer> e : en.getValue().entrySet()) {
                writer.println(en.getKey() + "," + e.getKey() + "," + e.getValue());
            }
        }

        writer.close();
        System.out.println("csv file made, please put it next to html file and run this");
    }
}