Working imports for statistics and short terms.

2023-09-30 20:56:00 -04:00
parent e3918523ba
commit 1f900ecb4a
13 changed files with 289916 additions and 192910 deletions

@@ -1,18 +1,33 @@
import csv
import pandas as pd
############
# read files
############
# file locations
statistics_meta_archive_file = "input/raw/postgres/statistics_meta.csv"
statistics_meta_export_file = "input/raw/sqlite/statistics_meta-export.csv"
statistics_archive_file = "input/raw/postgres/statistics.csv"
statistics_export_file = "input/raw/sqlite/statistics-export.csv"
statistics_import_file = "output/statistics-import.csv"
statistics_short_term_archive_file = "input/raw/postgres/statistics_short_term.csv"
statistics_short_term_export_file = "input/raw/sqlite/statistics_short_term-export.csv"
statistics_short_term_import_file = "output/statistics_short_term-import.csv"
# read in the current export and the archive
meta_df = pd.read_csv(statistics_meta_export_file)
meta_archive_df = pd.read_csv(statistics_meta_archive_file)
statistics_df = pd.read_csv(statistics_export_file, index_col='id')
statistics_archive_df = pd.read_csv(statistics_archive_file, index_col='id')
statistics_short_term_df = pd.read_csv(statistics_short_term_export_file, index_col='id')
statistics_short_term_archive_df = pd.read_csv(statistics_short_term_archive_file, index_col='id')
#################
# statistics_meta
#################
statistics_meta_archive_file = "statistics_meta.csv"
statistics_meta_export_file = "statistics_meta-export.csv"
# read in the current export and the archive
meta_df = pd.read_csv(statistics_meta_export_file)
meta_archive_df = pd.read_csv(statistics_meta_archive_file)
# keep the ids and the unique statistic_ids from each
meta_df = meta_df[['id','statistic_id']]
meta_archive_df = meta_archive_df[['id','statistic_id']]
@@ -25,66 +40,73 @@ meta_lookup.set_index('id_x').to_csv("meta_merge.csv")
meta_lookup = meta_lookup[['id_y','id_x']]
meta_lookup = meta_lookup.T.to_dict('records')[0]
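# --- illustration, not part of this commit: a {old_id: new_id} mapping like
# --- the one used below can also be built with dict(zip(...)); note that
# --- .T.to_dict('records')[0] matches this only when the frame's index
# --- carries the key ids (set up in lines not shown in this hunk); the
# --- _demo_ frame is made up
_demo_lookup = pd.DataFrame({'id_y': [10, 20], 'id_x': [1, 2]})
assert dict(zip(_demo_lookup['id_y'], _demo_lookup['id_x'])) == {10: 1, 20: 2}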
############
# statistics
############
statistics_archive_file = "statistics.csv"
statistics_export_file = "statistics-export.csv"
statistics_import_file = "statistics-import.csv"
statistics_df = pd.read_csv(statistics_export_file, index_col='id')
statistics_archive_df = pd.read_csv(statistics_archive_file, index_col='id')
# make the indexes unique by shifting ids past the export's max id
statistics_max_id = statistics_df.last_valid_index()
statistics_df.reset_index(inplace=True)
statistics_df['id'] += statistics_max_id
statistics_df.set_index('id',drop=True,inplace=True)
statistics_archive_df.reset_index(inplace=True)
statistics_archive_df['id'] += statistics_max_id
statistics_archive_df.set_index('id',drop=True,inplace=True)
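# --- illustration, not part of this commit: offsetting one table's ids past
# --- the other's maximum keeps the combined index unique; this relies on
# --- last_valid_index() being the true max id, i.e. the export being sorted
# --- by id; the _demo_ series below are made up
_demo_export_ids = pd.Series([1, 2, 3])
_demo_archive_ids = pd.Series([1, 2]) + _demo_export_ids.max()  # becomes 4, 5
assert not set(_demo_archive_ids) & set(_demo_export_ids)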
# find any duplicates where the tuple (start_ts, metadata_id)
# exists in both export and archive, and drop the archive copy
print(statistics_df.info())
print(statistics_archive_df.info())
statistics_df['unique_tuple'] = statistics_df.apply(lambda row: (row['start_ts'],row['metadata_id']), axis=1)
statistics_archive_df['unique_tuple'] = statistics_archive_df.apply(lambda row: (row['start_ts'],row['metadata_id']), axis=1)
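# --- illustration, not part of this commit: zip() builds the same tuple
# --- column without a per-row apply, which is much cheaper on large frames;
# --- the _demo_ frame below is made up
_demo = pd.DataFrame({'start_ts': [1.0, 2.0], 'metadata_id': [5, 6]})
_demo['unique_tuple'] = list(zip(_demo['start_ts'], _demo['metadata_id']))
assert _demo['unique_tuple'].tolist() == [(1.0, 5), (2.0, 6)]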
statistics_df_copy = statistics_df.copy()
statistics_df_copy = statistics_df_copy[['start_ts','metadata_id','unique_tuple']]
statistics_archive_df = statistics_archive_df[['start_ts','metadata_id','unique_tuple']]
unique_lookup = statistics_df_copy.merge(statistics_archive_df, on=['unique_tuple'], how='left', indicator=True)
statistics_archive_df_copy = statistics_archive_df.copy()
statistics_archive_df_copy = statistics_archive_df_copy[['start_ts','metadata_id','unique_tuple']]
statistics_df = statistics_df[['start_ts','metadata_id','unique_tuple']]
unique_lookup = statistics_archive_df_copy.merge(statistics_df, on=['unique_tuple'], how='left', indicator=True)
unique_lookup = unique_lookup[unique_lookup['_merge']=="both"]
unique_lookup.to_csv("unique_merge.csv")
unique_tuples = unique_lookup['unique_tuple']
statistics_df = statistics_df[~statistics_df['unique_tuple'].isin(unique_tuples)]
statistics_df.drop(columns='unique_tuple',inplace=True)
print(statistics_df.info())
statistics_archive_df = statistics_archive_df[~statistics_archive_df['unique_tuple'].isin(unique_tuples)]
statistics_archive_df.drop(columns='unique_tuple',inplace=True)
print(statistics_archive_df.info())
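# --- illustration, not part of this commit: merge(..., indicator=True) tags
# --- every row 'left_only', 'right_only', or 'both', so the tuples present
# --- in both frames can be collected and dropped from one side; the _demo_
# --- frames below are made up
_demo_a = pd.DataFrame({'unique_tuple': [(1.0, 1), (2.0, 1)]})
_demo_b = pd.DataFrame({'unique_tuple': [(2.0, 1), (3.0, 1)]})
_demo_m = _demo_a.merge(_demo_b, on=['unique_tuple'], how='left', indicator=True)
assert _demo_m[_demo_m['_merge'] == 'both']['unique_tuple'].tolist() == [(2.0, 1)]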
# drop any statistics not in the existing system's metadata
statistics_df = statistics_df[statistics_df['metadata_id'].isin(meta_lookup.keys())]
statistics_archive_df = statistics_archive_df[statistics_archive_df['metadata_id'].isin(meta_lookup.keys())]
# remap metadata_id to the current system's metadata ids
statistics_df.replace({'metadata_id': meta_lookup}, inplace=True)
statistics_archive_df.replace({'metadata_id': meta_lookup}, inplace=True)
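# --- illustration, not part of this commit: replace({'col': mapping}) only
# --- rewrites values that appear as keys in the mapping, which is why rows
# --- outside meta_lookup.keys() are filtered out first; the _demo_ names
# --- below are made up
_demo = pd.DataFrame({'metadata_id': [10, 20, 99]})
_demo_map = {10: 1, 20: 2}
_demo = _demo[_demo['metadata_id'].isin(_demo_map.keys())]
_demo = _demo.replace({'metadata_id': _demo_map})
assert _demo['metadata_id'].tolist() == [1, 2]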
#######################
# statistics_short_term
#######################
statistics_short_term_archive_file = "statistics_short_term.csv"
statistics_short_term_export_file = "statistics_short_term-export.csv"
statistics_short_term_import_file = "statistics_short_term-import.csv"
# make the indexes unique by shifting archive ids past the export's max id
statistics_short_term_max_id = statistics_short_term_df.last_valid_index()
statistics_short_term_archive_df.reset_index(inplace=True)
statistics_short_term_archive_df['id'] += statistics_short_term_max_id
statistics_short_term_archive_df.set_index('id',drop=True,inplace=True)
statistics_short_term_df = pd.read_csv(statistics_short_term_export_file, index_col='id')
# the (start_ts, metadata_id) uniqueness constraint applies here too:
# find any duplicates where the tuple exists in both export and archive,
# and drop the archive copy
print(statistics_short_term_archive_df.info())
statistics_short_term_df['unique_tuple'] = statistics_short_term_df.apply(lambda row: (row['start_ts'],row['metadata_id']), axis=1)
statistics_short_term_archive_df['unique_tuple'] = statistics_short_term_archive_df.apply(lambda row: (row['start_ts'],row['metadata_id']), axis=1)
statistics_short_term_archive_df_copy = statistics_short_term_archive_df.copy()
statistics_short_term_archive_df_copy = statistics_short_term_archive_df_copy[['start_ts','metadata_id','unique_tuple']]
statistics_short_term_df = statistics_short_term_df[['start_ts','metadata_id','unique_tuple']]
unique_lookup = statistics_short_term_archive_df_copy.merge(statistics_short_term_df, on=['unique_tuple'], how='left', indicator=True)
#unique_lookup.to_csv(statistics_short_term_import_file)
unique_lookup = unique_lookup[unique_lookup['_merge']=="both"]
#unique_lookup.to_csv("unique_merge.csv")
unique_tuples = unique_lookup['unique_tuple']
statistics_short_term_archive_df = statistics_short_term_archive_df[~statistics_short_term_archive_df['unique_tuple'].isin(unique_tuples)]
statistics_short_term_archive_df.drop(columns='unique_tuple',inplace=True)
print(statistics_short_term_archive_df.info())
# drop any statistics not in the existing system's metadata
statistics_short_term_df = statistics_short_term_df[statistics_short_term_df['metadata_id'].isin(meta_lookup.keys())]
statistics_short_term_archive_df = statistics_short_term_archive_df[statistics_short_term_archive_df['metadata_id'].isin(meta_lookup.keys())]
# remap metadata_id to the current system's metadata ids
statistics_short_term_df.replace({'metadata_id': meta_lookup}, inplace=True)
statistics_short_term_archive_df.replace({'metadata_id': meta_lookup}, inplace=True)
@@ -92,7 +114,7 @@ statistics_short_term_df.replace({'metadata_id': meta_lookup}, inplace=True)
# write files for importing
###########################
statistics_df.to_csv(statistics_import_file)
statistics_short_term_df.to_csv(statistics_short_term_import_file)
statistics_archive_df.to_csv(statistics_import_file)
statistics_short_term_archive_df.to_csv(statistics_short_term_import_file)
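# --- illustration, not part of this commit: a cheap sanity check on the
# --- files written above, confirming that the (start_ts, metadata_id) pairs
# --- the statistics tables treat as unique really are unique after the merge
for _demo_file in (statistics_import_file, statistics_short_term_import_file):
    _demo_df = pd.read_csv(_demo_file)
    assert not _demo_df.duplicated(subset=['start_ts', 'metadata_id']).any()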