This notebook answers specific types of queries requested by the analytics team about users' activity on a music streaming app.
# Import Python packages
import pandas as pd
import cassandra
import re
import os
import glob
import numpy as np
import json
import csv
# checking current working directory
print("Current working directory:", os.getcwd())
# Get current folder and subfolder event data
filepath = os.getcwd() + '/event_data'
# Creating a list of files and collecting each filepath
for root, dirs, files in os.walk(filepath):
    # join the file path and roots with the subdirectories using glob
    file_path_list = glob.glob(os.path.join(root, '*'))
    #print(file_path_list)
# initiating an empty list of rows that will be generated from each file
full_data_rows_list = []
# for every filepath in the file path list
for f in file_path_list:
    # reading the csv file
    with open(f, 'r', encoding='utf8', newline='') as csvfile:
        # creating a csv reader object
        csvreader = csv.reader(csvfile)
        next(csvreader)  # skip the header row
        # extracting each data row one by one and appending it
        for line in csvreader:
            #print(line)
            full_data_rows_list.append(line)
print("Total number of rows in files", len(full_data_rows_list))
# creating a smaller event data csv file called event_datafile_new.csv that will be used
# to insert data into the Apache Cassandra tables
csv.register_dialect('myDialect', quoting=csv.QUOTE_ALL, skipinitialspace=True)
with open('event_datafile_new.csv', 'w', encoding='utf8', newline='') as f:
    writer = csv.writer(f, dialect='myDialect')
    writer.writerow(['artist', 'firstName', 'gender', 'itemInSession', 'lastName', 'length',
                     'level', 'location', 'sessionId', 'song', 'userId'])
    for row in full_data_rows_list:
        # skip rows with an empty artist field
        if row[0] == '':
            continue
        writer.writerow((row[0], row[2], row[3], row[4], row[5], row[6], row[7],
                         row[8], row[12], row[13], row[16]))
# checking the number of rows in the csv file
with open('event_datafile_new.csv', 'r', encoding='utf8') as f:
    print("Number of rows in event_datafile_new.csv:", sum(1 for line in f))
The **event_datafile_new.csv** file contains the following columns: artist, firstName, gender, itemInSession, lastName, length, level, location, sessionId, song, and userId.
The image below is a screenshot of what the denormalized data looks like in **event_datafile_new.csv** after the code above is run:
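In case the screenshot does not render here, the denormalized file can also be previewed directly with pandas, which was imported above (a quick optional sketch; the variable name is just for illustration):
# Optional: preview the first few rows of the denormalized csv file
df_preview = pd.read_csv('event_datafile_new.csv')
print(df_preview.head())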
# Making a connection to Cassandra on the local machine (127.0.0.1)
from cassandra.cluster import Cluster
cluster = Cluster()
# Starting a session to establish connection and begin executing queries
session = cluster.connect()
try:
    session.execute("""
        CREATE KEYSPACE IF NOT EXISTS ath
        WITH REPLICATION =
        { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }"""
    )
    session.set_keyspace('ath')
except Exception as e:
    print(e)
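As an optional sanity check (a minimal sketch, assuming the standard system_schema tables available in Cassandra 3.x and later), the new keyspace can be confirmed through the cluster's schema metadata:
# Optional: confirm that the 'ath' keyspace now exists
keyspaces = session.execute("SELECT keyspace_name FROM system_schema.keyspaces")
print([k.keyspace_name for k in keyspaces])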
# Query 1: the artist, song title, and song length heard during sessionId = 338 and itemInSession = 4
# Creating a table modeled to handle this query: partition key session_id, clustering column item_in_session
create_table_query = """
CREATE TABLE IF NOT EXISTS songs_played_in_session (
session_id int,
item_in_session int,
song_title text,
artist text,
song_length float,
PRIMARY KEY (session_id, item_in_session)
);
"""
try:
    session.execute(create_table_query)
except Exception as e:
    print(e)
# Loading data into the table from the csv file
file = 'event_datafile_new.csv'
with open(file, encoding='utf8') as f:
    csvreader = csv.reader(f)
    next(csvreader)  # skip header
    query = "INSERT INTO songs_played_in_session (session_id, item_in_session, song_title, artist, song_length) "
    query = query + "VALUES (%s, %s, %s, %s, %s)"
    for line in csvreader:
        session.execute(query, (int(line[8]), int(line[3]), line[9], line[0], float(line[5])))
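Since the same INSERT statement runs once per csv row, a prepared statement can reduce per-row parsing overhead. The sketch below (using the driver's session.prepare with ? placeholders; the insert_stmt name is just for illustration) is equivalent to the loop above:
# Optional alternative: use a prepared statement for the bulk insert
insert_stmt = session.prepare(
    "INSERT INTO songs_played_in_session (session_id, item_in_session, song_title, artist, song_length) "
    "VALUES (?, ?, ?, ?, ?)"
)
with open('event_datafile_new.csv', encoding='utf8') as f:
    csvreader = csv.reader(f)
    next(csvreader)  # skip header
    for line in csvreader:
        session.execute(insert_stmt, (int(line[8]), int(line[3]), line[9], line[0], float(line[5])))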
# Executing a SELECT query to verify that the data have been inserted into the table
query = """
SELECT artist, song_title, song_length
FROM songs_played_in_session
WHERE session_id=%s AND item_in_session = %s
"""
try:
    rows = session.execute(query, (338, 4))
except Exception as e:
    print(e)

for row in rows:
    print("Artist:", row.artist, ", Song:", row.song_title, ", Song length:", row.song_length)
# Query 2: the artist, song (sorted by itemInSession), and user's first and last name for userId = 10 and sessionId = 182
# Creating a table modeled to handle this query: composite partition key (user_id, session_id), clustering column item_in_session
create_table_query = """
CREATE TABLE IF NOT EXISTS song_playlist_session (
user_id text,
session_id int,
item_in_session int,
artist text,
song_title text,
user_first_name text,
user_last_name text,
PRIMARY KEY ((user_id, session_id), item_in_session)
);
"""
try:
    session.execute(create_table_query)
except Exception as e:
    print(e)
# Loading data into the table from the csv file
file = 'event_datafile_new.csv'
with open(file, encoding='utf8') as f:
    csvreader = csv.reader(f)
    next(csvreader)  # skip header
    query = "INSERT INTO song_playlist_session (session_id, item_in_session, song_title, artist, user_first_name, user_last_name, user_id) "
    query = query + "VALUES (%s, %s, %s, %s, %s, %s, %s)"
    for line in csvreader:
        session.execute(query, (int(line[8]), int(line[3]), line[9], line[0], line[1], line[4], line[10]))
# Executing a SELECT query to verify that the data have been inserted into the table
query = """
SELECT artist, song_title, user_first_name, user_last_name
FROM song_playlist_session
WHERE user_id=%s AND session_id=%s
"""
try:
    rows = session.execute(query, ("10", 182))
except Exception as e:
    print(e)

for row in rows:
    print("Artist:", row.artist, ", Song:", row.song_title, ", User first name:", row.user_first_name, ", User last name:", row.user_last_name)
# Query 3: every user (first and last name) who listened to the song 'All Hands Against His Own'
# Creating a table modeled to handle this query: partition key song_title, clustering column user_id
create_table_query = """
CREATE TABLE IF NOT EXISTS song_listeners (
song_title text,
user_id text,
user_first_name text,
user_last_name text,
PRIMARY KEY (song_title, user_id)
);
"""
try:
    session.execute(create_table_query)
except Exception as e:
    print(e)
# Loading data into the table from the csv file
file = 'event_datafile_new.csv'
with open(file, encoding='utf8') as f:
    csvreader = csv.reader(f)
    next(csvreader)  # skip header
    query = "INSERT INTO song_listeners (song_title, user_id, user_first_name, user_last_name) "
    query = query + "VALUES (%s, %s, %s, %s)"
    for line in csvreader:
        session.execute(query, (line[9], line[10], line[1], line[4]))
# Executing a SELECT query to verify that the data have been inserted into the table
query = """
SELECT user_first_name, user_last_name
FROM song_listeners
WHERE song_title=%s
"""
try:
    rows = session.execute(query, ("All Hands Against His Own",))
except Exception as e:
    print(e)

for row in rows:
    print("User first name:", row.user_first_name, ", User last name:", row.user_last_name)
# Dropping the three tables before closing out the session
try:
    session.execute("DROP TABLE IF EXISTS songs_played_in_session")
    session.execute("DROP TABLE IF EXISTS song_playlist_session")
    session.execute("DROP TABLE IF EXISTS song_listeners")
except Exception as e:
    print(e)
# Closing the session and the cluster connection
session.shutdown()
cluster.shutdown()