1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
|
package main;
import database.NamedPreparedStatement;
import database.QueryUtils;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.Locale;
import java.util.Scanner;
/**
* The sentiment analysis class that rates tweets based on a unigram and bigram
* set of weights.
*/
/**
 * The sentiment analysis class that rates tweets based on a unigram and bigram
 * set of weights.
 */
public class Analyzor {

    /** Maps single (lowercased) words to their sentiment weights. */
    private final HashMap<String, Double> unimap = new HashMap<>();

    /** Maps word pairs ("word1 word2", lowercased) to their sentiment weights. */
    private final HashMap<String, Double> bimap = new HashMap<>();

    /** Result of the most recent {@link #query(String)} call; null until then. */
    private ResultSet data;

    private final Connection connection;

    Analyzor(Connection connection) {
        this.connection = connection;
    }

    /**
     * Reads the unigram and bigram lexicon files into memory. A no-op if the
     * lexicons were already loaded.
     *
     * @throws FileNotFoundException when a lexicon file is missing.
     */
    void readLexicon() throws FileNotFoundException {
        if (!unimap.isEmpty()) {
            // Lexicons are already loaded.
            return;
        }
        // A unigram line is in the format (WS = whitespace):
        //   word <WS> rating <WS> ??? <WS> ??
        // A bigram line has two WS-separated words instead of one.
        try (Scanner uniScanner = new Scanner(new File("unigrams-pmilexicon.txt"));
                Scanner biScanner = new Scanner(new File("bigrams-pmilexicon.txt"))) {
            // The lexicon files use '.' as the decimal separator; force a fixed
            // locale so nextDouble() does not fail on comma-decimal locales.
            uniScanner.useLocale(Locale.ROOT);
            biScanner.useLocale(Locale.ROOT);
            // Fill the map of unigrams.
            while (uniScanner.hasNext()) {
                String word = uniScanner.next();
                unimap.put(word.toLowerCase(Locale.ROOT), uniScanner.nextDouble());
                if (uniScanner.hasNextLine()) {
                    uniScanner.nextLine(); // skip the remaining columns
                }
            }
            // Fill the map of bigrams.
            while (biScanner.hasNext()) {
                String words = biScanner.next() + " " + biScanner.next();
                bimap.put(words.toLowerCase(Locale.ROOT), biScanner.nextDouble());
                if (biScanner.hasNextLine()) {
                    biScanner.nextLine(); // skip the remaining columns
                }
            }
        }
    }

    /**
     * Executes a query that the analyzer can analyze; the ResultSet is kept in
     * {@link #data} for the analysis methods to iterate.
     *
     * @param query The query string to execute.
     * @throws SQLException When database connection isn't available.
     */
    public void query(String query) throws SQLException {
        // NOTE(review): the statement is left open on purpose - closing it
        // would also close the ResultSet that later methods iterate over.
        PreparedStatement statement = connection.prepareStatement(query);
        data = statement.executeQuery();
    }

    /**
     * Runs a sentiment analysis over the tweets returned by {@code query} and
     * inserts a rating per (tweetid, brand) into the database.
     *
     * @param query SQL query selecting at least the columns {@code tweetid},
     *              {@code brand} and {@code text}.
     * @throws SQLException when the database connection isn't available.
     * @throws IOException when a lexicon file cannot be read.
     */
    public void sentimentAnalysis(String query) throws SQLException, IOException {
        query(query);
        // Read the lexicons (no-op if already loaded).
        readLexicon();
        if (data == null) {
            System.err.println("data is empty, try querying first");
            return;
        }
        // For all tuples in the result set.
        while (data.next()) {
            String text = splitPunctToWords(data.getString("text"));
            String[] words = text.split("\\s+"); // text split into separate words
            double positiverate = 0; // accumulated sentiment rating

            // Rate the text with unigrams. Lexicon keys were lowercased on
            // load, so lower-case the tweet words too - otherwise capitalized
            // words would never match.
            for (String word : words) {
                Double value = unimap.get(word.toLowerCase(Locale.ROOT));
                if (value != null) {
                    positiverate += value;
                }
            }
            // Rate the text with bigrams (consecutive word pairs).
            for (int i = 0; i < words.length - 1; i++) {
                String pair = (words[i] + " " + words[i + 1]).toLowerCase(Locale.ROOT);
                Double value = bimap.get(pair);
                if (value != null) {
                    positiverate += value;
                }
            }

            // Insert the rating into the database.
            // NOTE(review): a new statement is prepared per tweet and never
            // closed - if NamedPreparedStatement is closeable, reuse one
            // statement outside the loop and close it when done.
            NamedPreparedStatement insertRating =
                    new NamedPreparedStatement(connection, QueryUtils.insertRating);
            QueryUtils.setInsertParams(insertRating, data.getLong("tweetid"),
                    data.getString("brand"), (int) (positiverate * 10));
            insertRating.executeUpdate();
        }
    }

    /**
     * Counts word frequencies of the tweets returned by {@code query}.
     * NOTE(review): the frequency map is built but currently neither returned
     * nor stored - confirm whether output wiring is still to be added.
     *
     * @param query SQL query selecting at least a {@code text} column.
     * @throws SQLException when the database connection isn't available.
     */
    void makeWordCloud(String query) throws SQLException {
        query(query);
        if (data == null) {
            System.err.println("data is empty, try querying first");
            return;
        }
        // Map from word to its frequency across all tweets.
        HashMap<String, Integer> wordcloud = new HashMap<>();
        while (data.next()) {
            // Remove punctuation, convert to lowercase and split into words.
            String text = removePunct(data.getString("text")).toLowerCase(Locale.ROOT);
            for (String word : text.split("\\s+")) {
                // Was put(word, value++), which re-stored the OLD value and
                // never incremented the count; merge() counts correctly.
                wordcloud.merge(word, 1, Integer::sum);
            }
        }
    }

    /**
     * Pads punctuation with whitespace so it splits into separate tokens, and
     * strips URLs.
     */
    private String splitPunctToWords(String text) {
        text = text.replaceAll("https?://\\S*", "");
        text = text.replaceAll("[!?):;\"']", " $0");
        text = text.replaceAll("[.,-](\\s|$)", " $0");
        text = text.replaceAll("\\s[(\"']", "$0 ");
        return text;
    }

    /**
     * Replaces punctuation with spaces and strips URLs.
     */
    private String removePunct(String text) {
        text = text.replaceAll("https?://\\S*", "");
        // '-' is placed last so it is a literal. The original class
        // [.,!?()-:;"'] contained the range ')'..':' (codepoints 41-58),
        // which also deleted digits 0-9 and '*', '+', '/'.
        text = text.replaceAll("[.,!?():;\"'-]", " ");
        return text;
    }
}
|