author    Peter Wu <peter@lekensteyn.nl>    2014-05-26 11:36:25 +0200
committer Peter Wu <peter@lekensteyn.nl>    2014-05-26 11:36:25 +0200
commit    e051abbfdbf7ff721bf1318bf0b5939741b1f792 (patch)
tree      e66107b3dd74a743ec73afd654e6541df0d731ba
parent    bab0c2f127c989635145855675b320ae5e06ae53 (diff)
download  Goldfarmer-e051abbfdbf7ff721bf1318bf0b5939741b1f792.tar.gz
FKING CRLF
-rw-r--r--  nbproject/configs/such_database.properties |   2
-rw-r--r--  nbproject/project.properties               | 162
-rw-r--r--  src/main/Analyzor.java                     | 690
3 files changed, 427 insertions(+), 427 deletions(-)
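
Every line of all three files is rewritten and the insertion count equals the deletion count: this commit only converts CRLF line endings to LF, so each removed line below reappears unchanged on the + side. A .gitattributes rule such as `* text=auto` would keep the endings normalized going forward; note that rule is a suggestion, not something present in this repository.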
diff --git a/nbproject/configs/such_database.properties b/nbproject/configs/such_database.properties
index bba41ec..9dffee6 100644
--- a/nbproject/configs/such_database.properties
+++ b/nbproject/configs/such_database.properties
@@ -1 +1 @@
-$label=such database
+$label=such database
diff --git a/nbproject/project.properties b/nbproject/project.properties
index b262ab6..ab8ae05 100644
--- a/nbproject/project.properties
+++ b/nbproject/project.properties
@@ -1,81 +1,81 @@
-annotation.processing.enabled=true
-annotation.processing.enabled.in.editor=false
-annotation.processing.processors.list=
-annotation.processing.run.all.processors=true
-annotation.processing.source.output=${build.generated.sources.dir}/ap-source-output
-application.title=Goldfarmer
-application.vendor=maurice
-build.classes.dir=${build.dir}/classes
-build.classes.excludes=**/*.java,**/*.form
-# This directory is removed when the project is cleaned:
-build.dir=build
-build.generated.dir=${build.dir}/generated
-build.generated.sources.dir=${build.dir}/generated-sources
-# Only compile against the classpath explicitly listed here:
-build.sysclasspath=ignore
-build.test.classes.dir=${build.dir}/test/classes
-build.test.results.dir=${build.dir}/test/results
-# Uncomment to specify the preferred debugger connection transport:
-#debug.transport=dt_socket
-debug.classpath=\
- ${run.classpath}
-debug.test.classpath=\
- ${run.test.classpath}
-# Files in build.classes.dir which should be excluded from distribution jar
-dist.archive.excludes=
-# This directory is removed when the project is cleaned:
-dist.dir=dist
-dist.jar=${dist.dir}/Goldfarmer.jar
-dist.javadoc.dir=${dist.dir}/javadoc
-endorsed.classpath=
-excludes=
-file.reference.joda-time-2.3.jar=lib/joda-time-2.3.jar
-file.reference.postgresql-9.3-1101.jdbc41.jar=lib/postgresql-9.3-1101.jdbc41.jar
-includes=**
-jar.compress=false
-javac.classpath=\
- ${file.reference.joda-time-2.3.jar}:\
- ${file.reference.postgresql-9.3-1101.jdbc41.jar}
-# Space-separated list of extra javac options
-javac.compilerargs=
-javac.deprecation=false
-javac.processorpath=\
- ${javac.classpath}
-javac.source=1.7
-javac.target=1.7
-javac.test.classpath=\
- ${javac.classpath}:\
- ${build.classes.dir}:\
- ${libs.junit_4.classpath}
-javac.test.processorpath=\
- ${javac.test.classpath}
-javadoc.additionalparam=
-javadoc.author=false
-javadoc.encoding=${source.encoding}
-javadoc.noindex=false
-javadoc.nonavbar=false
-javadoc.notree=false
-javadoc.private=false
-javadoc.splitindex=true
-javadoc.use=true
-javadoc.version=false
-javadoc.windowtitle=
-main.class=main.Main
-manifest.file=manifest.mf
-meta.inf.dir=${src.dir}/META-INF
-mkdist.disabled=false
-platform.active=default_platform
-project.licensePath=./nbproject/licenseheader.txt
-run.classpath=\
- ${javac.classpath}:\
- ${build.classes.dir}
-# Space-separated list of JVM arguments used when running the project.
-# You may also define separate properties like run-sys-prop.name=value instead of -Dname=value.
-# To set system properties for unit tests define test-sys-prop.name=value:
-run.jvmargs=
-run.test.classpath=\
- ${javac.test.classpath}:\
- ${build.test.classes.dir}
-source.encoding=UTF-8
-src.dir=src
-test.src.dir=test
+annotation.processing.enabled=true
+annotation.processing.enabled.in.editor=false
+annotation.processing.processors.list=
+annotation.processing.run.all.processors=true
+annotation.processing.source.output=${build.generated.sources.dir}/ap-source-output
+application.title=Goldfarmer
+application.vendor=maurice
+build.classes.dir=${build.dir}/classes
+build.classes.excludes=**/*.java,**/*.form
+# This directory is removed when the project is cleaned:
+build.dir=build
+build.generated.dir=${build.dir}/generated
+build.generated.sources.dir=${build.dir}/generated-sources
+# Only compile against the classpath explicitly listed here:
+build.sysclasspath=ignore
+build.test.classes.dir=${build.dir}/test/classes
+build.test.results.dir=${build.dir}/test/results
+# Uncomment to specify the preferred debugger connection transport:
+#debug.transport=dt_socket
+debug.classpath=\
+ ${run.classpath}
+debug.test.classpath=\
+ ${run.test.classpath}
+# Files in build.classes.dir which should be excluded from distribution jar
+dist.archive.excludes=
+# This directory is removed when the project is cleaned:
+dist.dir=dist
+dist.jar=${dist.dir}/Goldfarmer.jar
+dist.javadoc.dir=${dist.dir}/javadoc
+endorsed.classpath=
+excludes=
+file.reference.joda-time-2.3.jar=lib/joda-time-2.3.jar
+file.reference.postgresql-9.3-1101.jdbc41.jar=lib/postgresql-9.3-1101.jdbc41.jar
+includes=**
+jar.compress=false
+javac.classpath=\
+ ${file.reference.joda-time-2.3.jar}:\
+ ${file.reference.postgresql-9.3-1101.jdbc41.jar}
+# Space-separated list of extra javac options
+javac.compilerargs=
+javac.deprecation=false
+javac.processorpath=\
+ ${javac.classpath}
+javac.source=1.7
+javac.target=1.7
+javac.test.classpath=\
+ ${javac.classpath}:\
+ ${build.classes.dir}:\
+ ${libs.junit_4.classpath}
+javac.test.processorpath=\
+ ${javac.test.classpath}
+javadoc.additionalparam=
+javadoc.author=false
+javadoc.encoding=${source.encoding}
+javadoc.noindex=false
+javadoc.nonavbar=false
+javadoc.notree=false
+javadoc.private=false
+javadoc.splitindex=true
+javadoc.use=true
+javadoc.version=false
+javadoc.windowtitle=
+main.class=main.Main
+manifest.file=manifest.mf
+meta.inf.dir=${src.dir}/META-INF
+mkdist.disabled=false
+platform.active=default_platform
+project.licensePath=./nbproject/licenseheader.txt
+run.classpath=\
+ ${javac.classpath}:\
+ ${build.classes.dir}
+# Space-separated list of JVM arguments used when running the project.
+# You may also define separate properties like run-sys-prop.name=value instead of -Dname=value.
+# To set system properties for unit tests define test-sys-prop.name=value:
+run.jvmargs=
+run.test.classpath=\
+ ${javac.test.classpath}:\
+ ${build.test.classes.dir}
+source.encoding=UTF-8
+src.dir=src
+test.src.dir=test
diff --git a/src/main/Analyzor.java b/src/main/Analyzor.java
index 0c3ede3..9c98a9d 100644
--- a/src/main/Analyzor.java
+++ b/src/main/Analyzor.java
@@ -1,345 +1,345 @@
-package main;
-
-import analysis.BrandChecker;
-import database.NamedPreparedStatement;
-import database.QueryUtils;
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.io.UnsupportedEncodingException;
-import java.sql.Connection;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.util.List;
-import java.util.HashMap;
-import java.util.Map.Entry;
-import java.util.Scanner;
-
-/**
- * The sentiment analysis class that rates tweets based on a unigram and bigram
- * set of weights.
- */
-public class Analyzor {
-
- /**
- * The map that matches single words to their weights.
- */
- private final HashMap<String, Double> unimap = new HashMap<>();
-
- /**
- * The map that matches word pairs to their weights.
- */
- private final HashMap<String, Double> bimap = new HashMap<>();
-
- /**
- * The results of a query, as produced by query().
- */
- private ResultSet data;
-
- /**
- * The persistent connection to the database.
- */
- private final Connection connection;
-
- /**
- * @param connection An open connection to the database.
- */
- public Analyzor(Connection connection) {
- this.connection = connection;
- }
-
- /**
- * Read the unigram and bigram lexica.
- *
- * @throws FileNotFoundException
- */
- public void readLexicon() throws FileNotFoundException {
- if (!unimap.isEmpty()) {
- // data is already read.
- return;
- }
- System.err.println("Trying to read lexicons...");
- // A unigram is in the format (WS = whitespace):
- // word <WS> rating <WS> ??? <WS> ??
- // A bigram has two WS-separated words instead of one.
- try (Scanner uniScanner = new Scanner(new File("unigrams-pmilexicon.txt"));
- Scanner biScanner = new Scanner(new File("bigrams-pmilexicon.txt"))) {
- //Fill the map of unigrams
- int lineno = 1; // line counter (currently unused)
- while (uniScanner.hasNext()) {
-
- String words = uniScanner.next();
- Double d = Double.valueOf(uniScanner.next());
- unimap.put(words.toLowerCase(), d);
- if (uniScanner.hasNextLine()) {
- uniScanner.nextLine();
- }
- lineno++;
-
- }
-
- //fill the map of bigrams
- while (biScanner.hasNext()) {
- String words = biScanner.next() + " " + biScanner.next();
- bimap.put(words.toLowerCase(), Double.valueOf(biScanner.next()));
- if (biScanner.hasNextLine()) {
- biScanner.nextLine();
- }
- }
- }
- System.err.println("Lexicons are read.");
- }
-
- /**
- * Executes a query and stores its result set for analysis.
- *
- * @param query The query string to execute.
- * @throws SQLException When the database connection isn't available.
- */
- public void query(String query) throws SQLException {
- PreparedStatement statement;
- //prepare and execute the query on the open connection
- statement = connection.prepareStatement(query);
- data = statement.executeQuery();
- }
-
- /**
- * Run a sentiment analysis and fill the database with the output.
- *
- * @param query The sql text for the query.
- * @throws SQLException
- * @throws IOException
- */
- public void sentimentAnalysis(String query) throws SQLException, IOException {
- query(query);
-
- //read the lexicons
- readLexicon();
-
- //go to the start of the dataset
- if (data == null) {
- System.err.println("data is empty, try querying first");
- return;
- }
-
- Double value;
- String text;
-
- //for all tuples
- while (data.next()) {
- //get the text
- text = data.getString("text");
- text = splitPunctToWords(text);
- // text is the tweet text to analyze
- String[] words = text.split("\\s+"); // text split into separate words
- double positiverate = 0; // positive rating
-
- // Rate the text with unigrams
- for (String word : words) {
- value = unimap.get(word);
- if (value != null) {
- positiverate += value;
- }
- }
- // Rate the text with bigrams
- for (int i = 0; i < words.length - 1; i++) {
- String pair = words[i] + " " + words[i + 1];
- value = bimap.get(pair);
- if (value != null) {
- positiverate += value;
- }
- }
- //insert the rating into the database
- NamedPreparedStatement m_insertRating;
- m_insertRating = new NamedPreparedStatement(connection, QueryUtils.insertRating);
- QueryUtils.setInsertParams(m_insertRating, data.getLong("tweetid"), data.getString("brand"), (int) (positiverate * 10));
- m_insertRating.executeUpdate();
- //don't print the rate
- //System.out.println(text + ": " + (int) (positiverate * 10));
- }
- }
-
- /**
- * Make a wordcloud of the results of some query.
- *
- * @param query The sql text for a query.
- * @throws SQLException
- * @throws FileNotFoundException
- * @throws UnsupportedEncodingException
- */
- public void makeWordCloud(String query) throws SQLException, FileNotFoundException, UnsupportedEncodingException {
-
- query(query);
- //go to the start of the ResultSet data
- if (data == null) {
- System.err.println("data is empty, try querying first");
- return;
- }
-
- String text;
- String brand;
- String[] words;
- HashMap<String,HashMap<String, Integer>> wordcloud = new HashMap<>();
-
- while (data.next()) {
- //get brand
- brand=data.getString("brand");
- //make hashmap for each brand
- if(!wordcloud.containsKey(brand)){
- wordcloud.put(brand, new HashMap<String,Integer>());
- }
- //get the text
- text = data.getString("text");
- //remove punctuation, convert to lowercase and split on words
- text = removePunct(text);
- text = text.toLowerCase();
- words = text.split("\\s+");
- //for all words
- for (String word : words) {
- //if it is empty, a space or a dash, skip it
- if(word.equals("") || word.equals(" ") || word.equals("-")){
- continue;
- }
- //if the word is already in the map, increment the amount
- if(wordcloud.get(brand).containsKey(word)){
- wordcloud.get(brand).put(word, wordcloud.get(brand).get(word) + 1);
- }
- //if the word is not already in the map, make an entry with amount = 1
- else{
- wordcloud.get(brand).put(word, 1);
- }
- }
- }
- //print the words and their frequency in a csv file
- mapToCSV(wordcloud, "wordcloud.csv", "brand,word,count");
- }
-
- //generate a csv for Disco from the query
- public void disco(String query) throws SQLException, FileNotFoundException, UnsupportedEncodingException {
- //do the query
- query(query);
- PrintWriter writer = new PrintWriter("output.csv", "UTF-8");
- //print the first row
- for (int i = 1; i < data.getMetaData().getColumnCount(); i++) {
- writer.print(data.getMetaData().getColumnLabel(i) + ", ");
- }
- writer.println(data.getMetaData().getColumnLabel(data.getMetaData().getColumnCount()));
- //print the values
- while (data.next()) {
- for (int i = 1; i < data.getMetaData().getColumnCount(); i++) {
- if (data.getObject(i) == null) {
- writer.print(", ");
- } else {
- writer.print(data.getObject(i).toString().replaceAll("[,\n]", " ") + ", ");
- }
- }
- if (data.getObject(data.getMetaData().getColumnCount()) == null) {
- writer.println("0");
- } else {
- writer.println(data.getObject(data.getMetaData().getColumnCount()).toString().replace(",", " "));
- }
- }
- writer.close();
- }
-
- public void getBrands() throws SQLException {
- PreparedStatement statement;
- //clear all previously stored brand mentions
- statement = connection.prepareStatement("delete from mentionsbrand");
- statement.executeUpdate();
- BrandChecker checker = new BrandChecker("brandonlyrules.txt");
- query("select * from tweet");
- NamedPreparedStatement m_insertBrand = new NamedPreparedStatement(connection, QueryUtils.insertBrand);
- while (data.next()) {
- List<String> brands = checker.getBrands(data.getString("text"));
- if (brands.isEmpty()) {
- QueryUtils.setInsertBrandParams(m_insertBrand, data.getLong("tweetid"), "no");
- m_insertBrand.executeUpdate();
- } else {
- for (String brand : brands) {
- QueryUtils.setInsertBrandParams(m_insertBrand, data.getLong("tweetid"), brand);
- m_insertBrand.executeUpdate();
- }
- }
- }
- }
-
- //counts the users that tweet about a brand per timezone
- //writes a csv file with columns: timezone, brand, amount
- public void timezone(String query) throws SQLException, FileNotFoundException, UnsupportedEncodingException{
- query(query);
- //hashmap timezone, brand, amount
- HashMap<String, HashMap<String, Integer>> timeMap = new HashMap<>();
- String timezone;
- String brand;
-
- while(data.next()){
- timezone = data.getString("timezone");
- brand = data.getString("brand");
- //if the timezone is already in the map
- if(timeMap.containsKey(timezone)){
- //if the brand for that timezone is already in the map
- if(timeMap.get(timezone).containsKey(brand)){
- //increment the amount
- timeMap.get(timezone).put(brand, timeMap.get(timezone).get(brand) + 1);
- }
- //if the brand for that timezone is not yet in the map
- else{
- //make a new entry for that brand with amount = 1
- timeMap.get(timezone).put(brand, 1);
- }
- }
- //if the timezone is not yet in the map
- else{
- //make a new hashmap for this map and fill it with the brand and the amount
- timeMap.put(timezone, new HashMap<String, Integer>());
- timeMap.get(timezone).put(brand, 1);
- }
- }
- //make the CSV out of the map
- mapToCSV(timeMap, "timezone.csv", "timezone,brand,count");
- }
-
- //pads punctuation with spaces so the text will be split into separate words
- //also removes urls
- private String splitPunctToWords(String text) {
- text = text.replaceAll("https?://\\S*", "");
- text = text.replaceAll("[!?):;\"']", " $0");
- text = text.replaceAll("[.,-](\\s|$)", " $0");
- text = text.replaceAll("\\s[(\"']", "$0 ");
- return text;
- }
-
- //removes punctuation
- //also removes urls
- private String removePunct(String text) {
- text = text.replaceAll("https?://\\S*", " ");
- text = text.replaceAll("@\\S*", " ");
- text = text.replaceAll("[^a-zA-Z0-9#_-]", " ");
- return text;
- }
-
- //writes a hashmap to a csv file for an html application
- //Hashmap<key1, HashMap<key2, value>> becomes key1, key2, value
- //only for String, String, Integer
- void mapToCSV(HashMap<String, HashMap<String, Integer>> map, String fileName, String firstLine)
- throws FileNotFoundException, UnsupportedEncodingException{
-
- PrintWriter writer = new PrintWriter(fileName, "UTF-8");
-
- writer.println(firstLine);
-
- //loop over brands
- for (Entry<String, HashMap<String, Integer>> en : map.entrySet()) {
- //loop over words
- for (Entry<String, Integer> e : en.getValue().entrySet()) {
- writer.println(en.getKey() + "," + e.getKey() + "," + e.getValue());
- }
- }
-
- writer.close();
- System.out.println("csv file made, please put it next to html file and run this");
- }
-}
+package main;
+
+import analysis.BrandChecker;
+import database.NamedPreparedStatement;
+import database.QueryUtils;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.io.UnsupportedEncodingException;
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.List;
+import java.util.HashMap;
+import java.util.Map.Entry;
+import java.util.Scanner;
+
+/**
+ * The sentiment analysis class that rates tweets based on a unigram and bigram
+ * set of weights.
+ */
+public class Analyzor {
+
+ /**
+ * The map that matches single words to their weights.
+ */
+ private final HashMap<String, Double> unimap = new HashMap<>();
+
+ /**
+ * The map that matches word pairs to their weights.
+ */
+ private final HashMap<String, Double> bimap = new HashMap<>();
+
+ /**
+ * The results of a query, as produced by query().
+ */
+ private ResultSet data;
+
+ /**
+ * The persistent connection to the database.
+ */
+ private final Connection connection;
+
+ /**
+ * @param connection An open connection to the database.
+ */
+ public Analyzor(Connection connection) {
+ this.connection = connection;
+ }
+
+ /**
+ * Read the unigram and bigram lexica.
+ *
+ * @throws FileNotFoundException
+ */
+ public void readLexicon() throws FileNotFoundException {
+ if (!unimap.isEmpty()) {
+ // data is already read.
+ return;
+ }
+ System.err.println("Trying to read lexicons...");
+ // A unigram is in the format (WS = whitespace):
+ // word <WS> rating <WS> ??? <WS> ??
+ // A bigram has two WS-separated words instead of one.
+ try (Scanner uniScanner = new Scanner(new File("unigrams-pmilexicon.txt"));
+ Scanner biScanner = new Scanner(new File("bigrams-pmilexicon.txt"))) {
+ //Fill the map of unigrams
+ int lineno = 1; // line counter (currently unused)
+ while (uniScanner.hasNext()) {
+
+ String words = uniScanner.next();
+ Double d = Double.valueOf(uniScanner.next());
+ unimap.put(words.toLowerCase(), d);
+ if (uniScanner.hasNextLine()) {
+ uniScanner.nextLine();
+ }
+ lineno++;
+
+ }
+
+ //fill the map of bigrams
+ while (biScanner.hasNext()) {
+ String words = biScanner.next() + " " + biScanner.next();
+ bimap.put(words.toLowerCase(), Double.valueOf(biScanner.next()));
+ if (biScanner.hasNextLine()) {
+ biScanner.nextLine();
+ }
+ }
+ }
+ System.err.println("Lexicons are read.");
+ }
+
+ /**
+ * Executes a query and stores its result set for analysis.
+ *
+ * @param query The query string to execute.
+ * @throws SQLException When the database connection isn't available.
+ */
+ public void query(String query) throws SQLException {
+ PreparedStatement statement;
+ //prepare and execute the query on the open connection
+ statement = connection.prepareStatement(query);
+ data = statement.executeQuery();
+ }
+
+ /**
+ * Run a sentiment analysis and fill the database with the output.
+ *
+ * @param query The sql text for the query.
+ * @throws SQLException
+ * @throws IOException
+ */
+ public void sentimentAnalysis(String query) throws SQLException, IOException {
+ query(query);
+
+ //read the lexicons
+ readLexicon();
+
+ //go to the start of the dataset
+ if (data == null) {
+ System.err.println("data is empty, try querying first");
+ return;
+ }
+
+ Double value;
+ String text;
+
+ //for all tuples
+ while (data.next()) {
+ //get the text
+ text = data.getString("text");
+ text = splitPunctToWords(text);
+ // text is the tweet text to analyze
+ String[] words = text.split("\\s+"); // text split into separate words
+ double positiverate = 0; // positive rating
+
+ // Rate the text with unigrams
+ for (String word : words) {
+ value = unimap.get(word);
+ if (value != null) {
+ positiverate += value;
+ }
+ }
+ // Rate the text with bigrams
+ for (int i = 0; i < words.length - 1; i++) {
+ String pair = words[i] + " " + words[i + 1];
+ value = bimap.get(pair);
+ if (value != null) {
+ positiverate += value;
+ }
+ }
+ //insert the rating into the database
+ NamedPreparedStatement m_insertRating;
+ m_insertRating = new NamedPreparedStatement(connection, QueryUtils.insertRating);
+ QueryUtils.setInsertParams(m_insertRating, data.getLong("tweetid"), data.getString("brand"), (int) (positiverate * 10));
+ m_insertRating.executeUpdate();
+ //don't print the rate
+ //System.out.println(text + ": " + (int) (positiverate * 10));
+ }
+ }
+
+ /**
+ * Make a wordcloud of the results of some query.
+ *
+ * @param query The sql text for a query.
+ * @throws SQLException
+ * @throws FileNotFoundException
+ * @throws UnsupportedEncodingException
+ */
+ public void makeWordCloud(String query) throws SQLException, FileNotFoundException, UnsupportedEncodingException {
+
+ query(query);
+ //go to the start of the ResultSet data
+ if (data == null) {
+ System.err.println("data is empty, try querying first");
+ return;
+ }
+
+ String text;
+ String brand;
+ String[] words;
+ HashMap<String,HashMap<String, Integer>> wordcloud = new HashMap<>();
+
+ while (data.next()) {
+ //get brand
+ brand=data.getString("brand");
+ //make hashmap for each brand
+ if(!wordcloud.containsKey(brand)){
+ wordcloud.put(brand, new HashMap<String,Integer>());
+ }
+ //get the text
+ text = data.getString("text");
+ //remove punctuation, convert to lowercase and split on words
+ text = removePunct(text);
+ text = text.toLowerCase();
+ words = text.split("\\s+");
+ //for all words
+ for (String word : words) {
+ //if it is empty, a space or a dash, skip it
+ if(word.equals("") || word.equals(" ") || word.equals("-")){
+ continue;
+ }
+ //if the word is already in the map, increment the amount
+ if(wordcloud.get(brand).containsKey(word)){
+ wordcloud.get(brand).put(word, wordcloud.get(brand).get(word) + 1);
+ }
+ //if the word is not already in the map, make an entry with amount = 1
+ else{
+ wordcloud.get(brand).put(word, 1);
+ }
+ }
+ }
+ //print the words and their frequency in a csv file
+ mapToCSV(wordcloud, "wordcloud.csv", "brand,word,count");
+ }
+
+ //generate a csv for Disco from the query
+ public void disco(String query) throws SQLException, FileNotFoundException, UnsupportedEncodingException {
+ //do the query
+ query(query);
+ PrintWriter writer = new PrintWriter("output.csv", "UTF-8");
+ //print the first row
+ for (int i = 1; i < data.getMetaData().getColumnCount(); i++) {
+ writer.print(data.getMetaData().getColumnLabel(i) + ", ");
+ }
+ writer.println(data.getMetaData().getColumnLabel(data.getMetaData().getColumnCount()));
+ //print the values
+ while (data.next()) {
+ for (int i = 1; i < data.getMetaData().getColumnCount(); i++) {
+ if (data.getObject(i) == null) {
+ writer.print(", ");
+ } else {
+ writer.print(data.getObject(i).toString().replaceAll("[,\n]", " ") + ", ");
+ }
+ }
+ if (data.getObject(data.getMetaData().getColumnCount()) == null) {
+ writer.println("0");
+ } else {
+ writer.println(data.getObject(data.getMetaData().getColumnCount()).toString().replace(",", " "));
+ }
+ }
+ writer.close();
+ }
+
+ public void getBrands() throws SQLException {
+ PreparedStatement statement;
+ //clear all previously stored brand mentions
+ statement = connection.prepareStatement("delete from mentionsbrand");
+ statement.executeUpdate();
+ BrandChecker checker = new BrandChecker("brandonlyrules.txt");
+ query("select * from tweet");
+ NamedPreparedStatement m_insertBrand = new NamedPreparedStatement(connection, QueryUtils.insertBrand);
+ while (data.next()) {
+ List<String> brands = checker.getBrands(data.getString("text"));
+ if (brands.isEmpty()) {
+ QueryUtils.setInsertBrandParams(m_insertBrand, data.getLong("tweetid"), "no");
+ m_insertBrand.executeUpdate();
+ } else {
+ for (String brand : brands) {
+ QueryUtils.setInsertBrandParams(m_insertBrand, data.getLong("tweetid"), brand);
+ m_insertBrand.executeUpdate();
+ }
+ }
+ }
+ }
+
+ //counts the users that tweet about a brand per timezone
+ //writes a csv file with columns: timezone, brand, amount
+ public void timezone(String query) throws SQLException, FileNotFoundException, UnsupportedEncodingException{
+ query(query);
+ //hashmap timezone, brand, amount
+ HashMap<String, HashMap<String, Integer>> timeMap = new HashMap<>();
+ String timezone;
+ String brand;
+
+ while(data.next()){
+ timezone = data.getString("timezone");
+ brand = data.getString("brand");
+ //if the timezone is already in the map
+ if(timeMap.containsKey(timezone)){
+ //if the brand for that timezone is already in the map
+ if(timeMap.get(timezone).containsKey(brand)){
+ //increment the amount
+ timeMap.get(timezone).put(brand, timeMap.get(timezone).get(brand) + 1);
+ }
+ //if the brand for that timezone is not yet in the map
+ else{
+ //make a new entry for that brand with amount = 1
+ timeMap.get(timezone).put(brand, 1);
+ }
+ }
+ //if the timezone is not yet in the map
+ else{
+ //make a new hashmap for this map and fill it with the brand and the amount
+ timeMap.put(timezone, new HashMap<String, Integer>());
+ timeMap.get(timezone).put(brand, 1);
+ }
+ }
+ //make the CSV out of the map
+ mapToCSV(timeMap, "timezone.csv", "timezone,brand,count");
+ }
+
+ //pads punctuation with spaces so the text will be split into separate words
+ //also removes urls
+ private String splitPunctToWords(String text) {
+ text = text.replaceAll("https?://\\S*", "");
+ text = text.replaceAll("[!?):;\"']", " $0");
+ text = text.replaceAll("[.,-](\\s|$)", " $0");
+ text = text.replaceAll("\\s[(\"']", "$0 ");
+ return text;
+ }
+
+ //removes punctuation
+ //also removes urls
+ private String removePunct(String text) {
+ text = text.replaceAll("https?://\\S*", " ");
+ text = text.replaceAll("@\\S*", " ");
+ text = text.replaceAll("[^a-zA-Z0-9#_-]", " ");
+ return text;
+ }
+
+ //writes a hashmap to a csv file for an html application
+ //Hashmap<key1, HashMap<key2, value>> becomes key1, key2, value
+ //only for String, String, Integer
+ void mapToCSV(HashMap<String, HashMap<String, Integer>> map, String fileName, String firstLine)
+ throws FileNotFoundException, UnsupportedEncodingException{
+
+ PrintWriter writer = new PrintWriter(fileName, "UTF-8");
+
+ writer.println(firstLine);
+
+ //loop over brands
+ for (Entry<String, HashMap<String, Integer>> en : map.entrySet()) {
+ //loop over words
+ for (Entry<String, Integer> e : en.getValue().entrySet()) {
+ writer.println(en.getKey() + "," + e.getKey() + "," + e.getValue());
+ }
+ }
+
+ writer.close();
+ System.out.println("csv file made, please put it next to html file and run this");
+ }
+}
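
For context, here is a minimal driver sketch for the Analyzor class above. The JDBC URL, the credentials, and the exact join between tweet and mentionsbrand are assumptions for illustration; only the Analyzor constructor and methods are taken from this file.

package main;

import java.sql.Connection;
import java.sql.DriverManager;

/**
 * Minimal usage sketch for Analyzor (illustrative only).
 * The connection string, credentials and queries below are assumptions;
 * the Analyzor API itself is as defined in src/main/Analyzor.java.
 */
public class AnalyzorDemo {

    public static void main(String[] args) throws Exception {
        // Hypothetical PostgreSQL connection; matches the JDBC 4.1 driver in lib/.
        try (Connection conn = DriverManager.getConnection(
                "jdbc:postgresql://localhost:5432/such_database",
                "user", "password")) {
            Analyzor analyzor = new Analyzor(conn);

            // Re-tag every tweet with the brands it mentions (fills mentionsbrand).
            analyzor.getBrands();

            // Rate each tweet; sentimentAnalysis() reads the columns
            // tweetid, text and brand from the result set (assumed schema).
            analyzor.sentimentAnalysis(
                    "select t.tweetid, t.text, b.brand "
                    + "from tweet t join mentionsbrand b on t.tweetid = b.tweetid");

            // Write wordcloud.csv (brand,word,count) for the HTML front end.
            analyzor.makeWordCloud(
                    "select t.text, b.brand "
                    + "from tweet t join mentionsbrand b on t.tweetid = b.tweetid");
        }
    }
}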