diff --git a/csharp/App/Backend/DataTypes/Installation.cs b/csharp/App/Backend/DataTypes/Installation.cs index bcbe2368c..1ca209c6e 100644 --- a/csharp/App/Backend/DataTypes/Installation.cs +++ b/csharp/App/Backend/DataTypes/Installation.cs @@ -4,10 +4,13 @@ namespace InnovEnergy.App.Backend.DataTypes; public class Installation : TreeNode { - public String Location { get; set; } = ""; - public String Region { get; set; } = ""; - public String Country { get; set; } = ""; - public String VpnIp { get; set; } = ""; + //Each installation has 2 roles: a read role and a write role. + //There are 2 keys per role: a public key and a secret. + //Product can be 0 or 1: 0 for Salimax, 1 for Salidomo. + public String Location { get; set; } = ""; + public String Region { get; set; } = ""; + public String Country { get; set; } = ""; + public String VpnIp { get; set; } = ""; public String InstallationName { get; set; } = ""; public String S3Region { get; set; } = "sos-ch-dk-2"; @@ -16,14 +19,15 @@ public class Installation : TreeNode public String S3Key { get; set; } = ""; public String S3WriteSecret { get; set; } = ""; public String S3Secret { get; set; } = ""; - public int S3BucketId { get; set; } = 0; + public int S3BucketId { get; set; } = 0; public String ReadRoleId { get; set; } = ""; public String WriteRoleId { get; set; } = ""; - public Boolean TestingMode { get; set; } = false; - - public int Product { get; set; } = 0; - public int Device { get; set; } = 0; + public Boolean TestingMode { get; set; } = false; + public int Status { get; set; } = -1; + public int Product { get; set; } = 0; + public int Device { get; set; } = 0; + [Ignore] - public String OrderNumbers { get; set; } - public String VrmLink { get; set; } = ""; + public String OrderNumbers { get; set; } + public String VrmLink { get; set; } = ""; } \ No newline at end of file diff --git a/csharp/App/Backend/DataTypes/Methods/Session.cs b/csharp/App/Backend/DataTypes/Methods/Session.cs index 70fdd5f0f..37e239d92 100644 --- a/csharp/App/Backend/DataTypes/Methods/Session.cs +++ b/csharp/App/Backend/DataTypes/Methods/Session.cs @@ -282,9 +282,9 @@ public static class SessionMethods && Db.Delete(installation) && await installation.RevokeReadKey() && await installation.RevokeWriteKey() - && await installation.DeleteBucket() && await installation.RemoveReadRole() - && await installation.RemoveWriteRole(); + && await installation.RemoveWriteRole() + && await installation.DeleteBucket(); } diff --git a/csharp/App/Backend/DataTypes/TreeNode.cs b/csharp/App/Backend/DataTypes/TreeNode.cs index 87b2e24e9..a0dd788df 100644 --- a/csharp/App/Backend/DataTypes/TreeNode.cs +++ b/csharp/App/Backend/DataTypes/TreeNode.cs @@ -3,7 +3,7 @@ namespace InnovEnergy.App.Backend.DataTypes; public abstract partial class TreeNode { - //This is the parent class of each relation. It has an autoincrement Id, name, information, parent Id and Type. + //This is the parent class of each relation. It has an autoincrement Id, name, information, parent Id and Type. //Ignore means: "Do not map this property to a database column."
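//For example, Installation.OrderNumbers above is marked [Ignore]: presumably it is assembled from the OrderNumber2Installation relation at request time instead of being stored as a column of its own.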
[PrimaryKey, AutoIncrement] public Int64 Id { get; set; } diff --git a/csharp/App/Backend/Database/Create.cs b/csharp/App/Backend/Database/Create.cs index f697a8931..9263b0793 100644 --- a/csharp/App/Backend/Database/Create.cs +++ b/csharp/App/Backend/Database/Create.cs @@ -7,28 +7,19 @@ namespace InnovEnergy.App.Backend.Database; public static partial class Db { - private static int _backupCounter = 0; + private static Boolean Insert(Object obj) { var success = Connection.Insert(obj) > 0; - if (success) - { - _backupCounter++; - if (_backupCounter > 100) - { - _backupCounter = 0; - BackupDatabase(); - } - } - + if (success) Backup(); return success; } public static Boolean Create(Installation installation) { - installation.S3BucketId = Installations.Where(inst => inst.Product == installation.Product).Max(inst => (int?)inst.S3BucketId)+1 ?? 0; - + // The bucket Id is calculated as follows: it is 1 + the maximum bucket id of all the existing installations of the same product // SQLite wrapper is smart and *modifies* t's Id to the one generated (autoincrement) by the insertion + installation.S3BucketId = Installations.Where(inst => inst.Product == installation.Product).Max(inst => (int?)inst.S3BucketId)+1 ?? 0; return Insert(installation); } @@ -103,21 +94,8 @@ public static partial class Db } } - public static void UpdateAction(UserAction updatedAction) - { - var existingAction = UserActions.FirstOrDefault(action => action.Id == updatedAction.Id); - - - if (existingAction != null) - { - //existingAction.Description = updatedAction.Description; - //existingAction.Timestamp = updatedAction.Timestamp; - //existingAction.TestingMode = updatedAction.TestingMode; - Update(updatedAction); - Console.WriteLine("---------------Updated the Action in the database-----------------"); - } - } - + //This function is called from the RabbitMQ manager when a new error has to be stored in the database. + //We keep only the last 100 errors for each installation. If we have already stored 100 errors, we delete the oldest one and insert the new one. public static void HandleError(Error newError,int installationId) { @@ -140,7 +118,7 @@ public static partial class Db } else { - Console.WriteLine("---------------Added the new Error to the database-----------------"); + Console.WriteLine("---------------Added the new Alarm to the database-----------------"); Create(newError); } } @@ -158,10 +136,10 @@ public static partial class Db .OrderBy(warning => warning.Date) .FirstOrDefault(); - //Remove the old error + //Remove the old warning Delete(oldestWarning); - //Add the new error + //Add the new warning Create(newWarning); } else diff --git a/csharp/App/Backend/Database/Db.cs b/csharp/App/Backend/Database/Db.cs index 97074f4af..39d11b9c5 100644 --- a/csharp/App/Backend/Database/Db.cs +++ b/csharp/App/Backend/Database/Db.cs @@ -9,6 +9,9 @@ using SQLiteConnection = SQLite.SQLiteConnection; namespace InnovEnergy.App.Backend.Database; +//The methods of the Db class are located in multiple files (Create.cs, Read.cs, Delete.cs, Update.cs). +//That's why the class definition is partial. + public static partial class Db { private static SQLiteConnection Connection { get; } = InitConnection(); @@ -30,7 +33,7 @@ public static partial class Db //Since this class is static, we call the Init method from Program.cs to initialize all the fields of the class //When a class is loaded, the fields are initialized before the constructor's code is executed.
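//A field of that kind would look roughly like this (a sketch assuming sqlite-net's TableQuery API, not part of this diff): private static TableQuery<Installation> Installations => Connection.Table<Installation>();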
//The TableQuery fields are lazy, meaning that they will be initialized when they get accessed - //The connection searches for the latest backup and it binds all the tables to it. + //The connection searches for the latest backup and binds all the tables to it. } //This is the constructor of the class @@ -90,7 +93,7 @@ public static partial class Db Connection.Backup("DbBackups/" + filename); } - //Delete all by 10 snapshots every 24 hours. + //Delete all except 10 snapshots every 24 hours. private static async Task DeleteSnapshots() { while (true) diff --git a/csharp/App/Backend/Database/Delete.cs b/csharp/App/Backend/Database/Delete.cs index e6f556071..6c0497ed8 100644 --- a/csharp/App/Backend/Database/Delete.cs +++ b/csharp/App/Backend/Database/Delete.cs @@ -1,16 +1,12 @@ using InnovEnergy.App.Backend.DataTypes; using InnovEnergy.App.Backend.DataTypes.Methods; using InnovEnergy.App.Backend.Relations; -using Microsoft.AspNetCore.Authentication.OAuth.Claims; - - namespace InnovEnergy.App.Backend.Database; - public static partial class Db { - - + //Since we do not want to stress the memory of the VM too much, we take a snapshot of the database every 100 transactions. + private static int _backupCounter = 0; private static void Backup() { _backupCounter++; @@ -65,6 +61,8 @@ public static partial class Db public static Boolean Delete(UserAction actionToDelete) { var deleteSuccess = RunTransaction(DeleteAction); + + if (deleteSuccess) Backup(); return deleteSuccess; @@ -73,6 +71,7 @@ public static partial class Db Boolean DeleteAction() { return UserActions.Delete(action => action.Id == actionToDelete.Id) >0; + } } @@ -86,7 +85,7 @@ public static partial class Db Boolean DeleteWarning() { - return Warnings.Delete(error => error.Id == warningToDelete.Id) >0; + return Warnings.Delete(warning => warning.Id == warningToDelete.Id) >0; } } @@ -100,15 +99,14 @@ public static partial class Db Boolean DeleteInstallationAndItsDependencies() { + InstallationAccess.Delete(i => i.InstallationId == installation.Id); if (installation.Product == 0) { - InstallationAccess.Delete(i => i.InstallationId == installation.Id); + //For Salimax, delete the OrderNumber2Installation entries associated with this installation id. OrderNumber2Installation.Delete(i => i.InstallationId == installation.Id); - } return Installations.Delete(i => i.Id == installation.Id) > 0; - } } @@ -123,7 +121,6 @@ public static partial class Db { FolderAccess .Delete(u => u.UserId == user.Id); InstallationAccess.Delete(u => u.UserId == user.Id); - return Users.Delete(u => u.Id == user.Id) > 0; } } diff --git a/csharp/App/Backend/Database/Update.cs b/csharp/App/Backend/Database/Update.cs index 1528530ff..88e748240 100644 --- a/csharp/App/Backend/Database/Update.cs +++ b/csharp/App/Backend/Database/Update.cs @@ -1,10 +1,13 @@ using InnovEnergy.App.Backend.DataTypes; - namespace InnovEnergy.App.Backend.Database; public static partial class Db { + //We could write the update manually for each table, but we prefer the generic Connection.Update method. + //We pass an object as an argument and the Connection maps it to the corresponding table. + //The update is performed based on the primary key of the object.
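//Illustrative usage of the generic update (hypothetical snippet, not part of this diff; someId is a placeholder):
//    var installation = Db.Installations.First(i => i.Id == someId);
//    installation.Status = -1;    // e.g. flag the installation as offline
//    Db.Update(installation);     // sqlite-net locates the row via the [PrimaryKey] Id and rewrites its columns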
+ private static Boolean Update(Object obj) { var success = Connection.Update(obj) > 0; @@ -22,7 +25,6 @@ public static partial class Db return Update(obj: error); } - public static Boolean Update(Warning warning) { return Update(obj: warning); @@ -44,4 +46,15 @@ public static partial class Db return Update(obj: user); } + + public static void UpdateAction(UserAction updatedAction) + { + var existingAction = UserActions.FirstOrDefault(action => action.Id == updatedAction.Id); + + if (existingAction != null) + { + Update(updatedAction); + } + } + } \ No newline at end of file diff --git a/csharp/App/Backend/Program.cs b/csharp/App/Backend/Program.cs index 036c41d12..797e61cfd 100644 --- a/csharp/App/Backend/Program.cs +++ b/csharp/App/Backend/Program.cs @@ -26,9 +26,9 @@ public static class Program var builder = WebApplication.CreateBuilder(args); RabbitMqManager.InitializeEnvironment(); - RabbitMqManager.StartRabbitMqConsumer(); - WebsocketManager.MonitorSalimaxInstallationTable(); - WebsocketManager.MonitorSalidomoInstallationTable(); + RabbitMqManager.StartRabbitMqConsumer().SupressAwaitWarning(); + WebsocketManager.MonitorSalimaxInstallationTable().SupressAwaitWarning(); + WebsocketManager.MonitorSalidomoInstallationTable().SupressAwaitWarning(); builder.Services.AddControllers(); diff --git a/csharp/App/Backend/Websockets/RabbitMQManager.cs b/csharp/App/Backend/Websockets/RabbitMQManager.cs index 4ac5d32a6..f31726c93 100644 --- a/csharp/App/Backend/Websockets/RabbitMQManager.cs +++ b/csharp/App/Backend/Websockets/RabbitMQManager.cs @@ -1,12 +1,10 @@ -using System.Net; -using System.Net.Sockets; using System.Text; using System.Text.Json; using InnovEnergy.App.Backend.Database; using InnovEnergy.App.Backend.DataTypes; +using InnovEnergy.Lib.Utils; using RabbitMQ.Client; using RabbitMQ.Client.Events; -using InnovEnergy.Lib.Mailer; namespace InnovEnergy.App.Backend.Websockets; @@ -59,23 +57,17 @@ public static class RabbitMqManager { Installation installation = Db.Installations.FirstOrDefault(f => f.Product == receivedStatusMessage.Product && f.S3BucketId == receivedStatusMessage.InstallationId); int installationId = (int)installation.Id; - - //if (installationId == 138) - //{ - // Console.WriteLine("Received a message from installation: " + installationId + " , product is: " + receivedStatusMessage.Product + " and status is: " + receivedStatusMessage.Status); - //} - - + //Console.WriteLine("received a message from rabbitmq\n"); //This is a heartbeat message, just update the timestamp for this installation. //There is no need to notify the corresponding front-ends.
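//For reference, a heartbeat payload as built by the battery driver (see dbus-fzsonick-48tl.py further down; the id 489 is only an example) looks like:
//    { "InstallationId": 489, "Product": 1, "Status": 0, "Type": 1, "Warnings": [], "Alarms": [] }
//Type 1 marks a heartbeat; when an alarm or warning changes, the driver sends the same shape with Type 0 and the Warnings/Alarms lists filled in.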
//Every 15 iterations (30 seconds), the installation sends a heartbeat message to the queue if (receivedStatusMessage.Type == MessageType.Heartbit) { - if (installation.Product == 1 && installation.Device == 2) - { - Console.WriteLine("This is a heartbit message from installation: " + installationId + " Name of the file is " + receivedStatusMessage.Timestamp); - } + // if (installation.Product == 1 && installation.Device == 2) + // { + // Console.WriteLine("This is a heartbit message from installation: " + installationId + " Name of the file is " + receivedStatusMessage.Timestamp); + // } } else { @@ -95,7 +87,7 @@ public static class RabbitMqManager Seen = false }; //Create a new warning and add it to the database - Console.WriteLine("Add a warning for installation "+installationId); + //Console.WriteLine("Add a warning for installation "+installationId); Db.HandleWarning(newWarning, installationId); } } @@ -176,7 +168,15 @@ public static class RabbitMqManager prevStatus = WebsocketManager.InstallationConnections[installationId].Status; WebsocketManager.InstallationConnections[installationId].Status = receivedStatusMessage.Status; WebsocketManager.InstallationConnections[installationId].Timestamp = DateTime.Now; + // if (installationId == 130) + // { + // Console.WriteLine("prevStatus " + prevStatus + " , new status is: " + receivedStatusMessage.Status + " and status is: " + receivedStatusMessage.Status); + // } + } + + installation.Status = receivedStatusMessage.Status; + installation.Apply(Db.Update); //Console.WriteLine("----------------------------------------------"); //If the status has changed, update all the connected front-ends regarding this installation diff --git a/csharp/App/Backend/Websockets/WebsockerManager.cs b/csharp/App/Backend/Websockets/WebsockerManager.cs index e60b90596..9a16d1c11 100644 --- a/csharp/App/Backend/Websockets/WebsockerManager.cs +++ b/csharp/App/Backend/Websockets/WebsockerManager.cs @@ -19,32 +19,53 @@ public static class WebsocketManager { while (true){ lock (InstallationConnections){ + Console.WriteLine("MONITOR SALIMAX INSTALLATIONS\n"); foreach (var installationConnection in InstallationConnections){ - if (installationConnection.Value.Product==0 && (DateTime.Now - installationConnection.Value.Timestamp) > TimeSpan.FromMinutes(1)){ + + if (installationConnection.Value.Product==0 && (DateTime.Now - installationConnection.Value.Timestamp) > TimeSpan.FromMinutes(2)){ + + Console.WriteLine("Installation ID is "+installationConnection.Key); + Console.WriteLine("installationConnection.Value.Timestamp is "+installationConnection.Value.Timestamp); + Console.WriteLine("diff is "+(DateTime.Now-installationConnection.Value.Timestamp)); + installationConnection.Value.Status = -1; - Installation installation = Db.Installations.FirstOrDefault(f => f.Product == 0 && f.S3BucketId == installationConnection.Key); + Installation installation = Db.Installations.FirstOrDefault(f => f.Product == 0 && f.Id == installationConnection.Key); installation.Status = -1; installation.Apply(Db.Update); if (installationConnection.Value.Connections.Count > 0){InformWebsocketsForInstallation(installationConnection.Key);} } } + Console.WriteLine("FINISHED MONITORING SALIMAX INSTALLATIONS\n"); } - await Task.Delay(TimeSpan.FromMinutes(2)); + + await Task.Delay(TimeSpan.FromMinutes(1)); } } public static async Task MonitorSalidomoInstallationTable() { while (true){ + Console.WriteLine("TRY TO LOCK FOR MONITOR SALIDOMO INSTALLATIONS\n"); lock (InstallationConnections){ +
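//Note: C# does not allow await inside a lock statement, so the table scan below holds the lock synchronously and the await Task.Delay at the end of each iteration runs after the lock is released.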
Console.WriteLine("MONITOR SALIDOMO INSTALLATIONS\n"); foreach (var installationConnection in InstallationConnections){ + Console.WriteLine("Installation ID is "+installationConnection.Key); + + + // if (installationConnection.Key == 104) + // { + // Console.WriteLine("installationConnection.Value.Timestamp is "+installationConnection.Value.Timestamp); + // Console.WriteLine("diff is "+(DateTime.Now-installationConnection.Value.Timestamp)); + // + // + // } if (installationConnection.Value.Product==1 && (DateTime.Now - installationConnection.Value.Timestamp) > TimeSpan.FromMinutes(30)) { // Console.WriteLine("Installation ID is "+installationConnection.Key); // Console.WriteLine("installationConnection.Value.Timestamp is "+installationConnection.Value.Timestamp); // Console.WriteLine("diff is "+(DateTime.Now-installationConnection.Value.Timestamp)); - Installation installation = Db.Installations.FirstOrDefault(f => f.Product == 1 && f.S3BucketId == installationConnection.Key); + Installation installation = Db.Installations.FirstOrDefault(f => f.Product == 1 && f.Id == installationConnection.Key); installation.Status = -1; installation.Apply(Db.Update); @@ -52,8 +73,9 @@ public static class WebsocketManager if (installationConnection.Value.Connections.Count > 0){InformWebsocketsForInstallation(installationConnection.Key);} } } + Console.WriteLine("FINISHED WITH UPDATING\n"); } - await Task.Delay(TimeSpan.FromMinutes(10)); + await Task.Delay(TimeSpan.FromMinutes(1)); } } @@ -124,7 +146,6 @@ public static class WebsocketManager //Console.WriteLine("Received a new message from websocket"); lock (InstallationConnections) { - List dataToSend = new List(); //Each front-end will send the list of the installations it wants to access @@ -139,7 +160,7 @@ public static class WebsocketManager //Console.WriteLine("Create new empty list for installation id " + installationId); InstallationConnections[installationId] = new InstallationInfo { - Status = -1, + Status = installation.Status, Product = installation.Product }; } @@ -154,16 +175,7 @@ public static class WebsocketManager }; dataToSend.Add(jsonObject); - - //var jsonString = JsonSerializer.Serialize(jsonObject); - //var dataToSend = Encoding.UTF8.GetBytes(jsonString); - - - // currentWebSocket.SendAsync(dataToSend, - // WebSocketMessageType.Text, - // true, // Indicates that this is the end of the message - // CancellationToken.None - // ); + } var jsonString = JsonSerializer.Serialize(dataToSend); var encodedDataToSend = Encoding.UTF8.GetBytes(jsonString); diff --git a/csharp/App/Backend/db-1733849565.sqlite b/csharp/App/Backend/db-1733849565.sqlite new file mode 100644 index 000000000..f3b3e33a3 Binary files /dev/null and b/csharp/App/Backend/db-1733849565.sqlite differ diff --git a/firmware/Cerbo_Release/CerboReleaseFiles/dbus-fzsonick-48tl/config.py b/firmware/Cerbo_Release/CerboReleaseFiles/dbus-fzsonick-48tl/config.py index 6e3d9f703..48eeee06a 100644 --- a/firmware/Cerbo_Release/CerboReleaseFiles/dbus-fzsonick-48tl/config.py +++ b/firmware/Cerbo_Release/CerboReleaseFiles/dbus-fzsonick-48tl/config.py @@ -13,9 +13,9 @@ DEVICE_INSTANCE = 1 SERVICE_NAME_PREFIX = 'com.victronenergy.battery.' 
#s3 configuration -S3BUCKET = "673-c0436b6a-d276-4cd8-9c44-1eae86cf5d0e" -S3KEY = "EXO270612dc3f57a61870220eea" -S3SECRET = "4fPVVN8JGnD9IY1k5RrrNUzo2L1IpR6gdSuGRB9pMWg" +S3BUCKET = "489-c0436b6a-d276-4cd8-9c44-1eae86cf5d0e" +S3KEY = "EXOd55ec2d5702c3b94f1e275a4" +S3SECRET = "0bOJW6COdJ1_vQ_SDbaYKtLs9E7bxANZb5d1X4zf97g" # driver configuration diff --git a/firmware/Cerbo_Release/CerboReleaseFiles/dbus-fzsonick-48tl/dbus-fzsonick-48tl.py b/firmware/Cerbo_Release/CerboReleaseFiles/dbus-fzsonick-48tl/dbus-fzsonick-48tl.py index edf960398..0b3054421 100755 --- a/firmware/Cerbo_Release/CerboReleaseFiles/dbus-fzsonick-48tl/dbus-fzsonick-48tl.py +++ b/firmware/Cerbo_Release/CerboReleaseFiles/dbus-fzsonick-48tl/dbus-fzsonick-48tl.py @@ -643,6 +643,10 @@ def read_battery_status(modbus, battery): modbus.connect() data = read_modbus_registers(modbus, battery.slave_address) return BatteryStatus(battery, data.registers) + except Exception as e: + logging.error(f"An error occurred: {e}") + create_batch_of_csv_files() # Call this only if there's an error + raise finally: modbus.close() # close in any case @@ -655,6 +659,7 @@ def publish_values(dbus, signals, statuses): previous_warnings = {} previous_alarms = {} +num_of_csv_files_saved=0 class MessageType: ALARM_OR_WARNING = "AlarmOrWarning" @@ -680,6 +685,7 @@ def SubscribeToQueue(): connection = pika.BlockingConnection(pika.ConnectionParameters(host="10.2.0.11", port=5672, virtual_host="/", + heartbeat=30, credentials=pika.PlainCredentials("producer", "b187ceaddb54d5485063ddc1d41af66f"))) channel = connection.channel() channel.queue_declare(queue="statusQueue", durable=True) @@ -903,7 +909,7 @@ def count_files_in_folder(folder_path): def create_batch_of_csv_files(): - global prev_status,INSTALLATION_ID, PRODUCT_ID + global prev_status,INSTALLATION_ID, PRODUCT_ID, num_of_csv_files_saved # list all files in the directory files = os.listdir(CSV_DIR) @@ -914,7 +920,8 @@ def create_batch_of_csv_files(): csv_files.sort(key=lambda x: os.path.getctime(os.path.join(CSV_DIR, x))) # keep the 600 MOST RECENT FILES - recent_csv_files = csv_files[-600:] if len(csv_files) > 600 else csv_files + recent_csv_files = csv_files[-num_of_csv_files_saved:] + print("num_of_csv_files_saved is " + str(num_of_csv_files_saved)) # get the name of the first csv file if not csv_files: @@ -949,6 +956,7 @@ def create_batch_of_csv_files(): # replace the original first csv file with the temporary file os.remove(first_csv_file) os.rename(temp_file_path, first_csv_file) + num_of_csv_files_saved = 0 # create a loggin directory that contains at max 20 batch files for logging info # logging_dir = os.path.join(CSV_DIR, 'logging_batch_files') @@ -1023,10 +1031,12 @@ def create_update_task(modbus, dbus, batteries, signals, csv_signals, main_loop) ALLOW = True alive = update(modbus, batteries, dbus, signals, csv_signals) elapsed_time = time.time() - start_time + print("11111111111111111111111111111111111111111111 elapsed time is ", elapsed_time) # keep at most 1900 files at CSV_DIR for logging and aggregation manage_csv_files(CSV_DIR, 1900) if elapsed_time >= 1200: + print("CREATE BATCH ======================================>") create_batch_of_csv_files() start_time = time.time() #alive = update_for_testing(modbus, batteries, dbus, signals, csv_signals) @@ -1075,7 +1085,7 @@ def insert_id(path, id_number): return "/".join(parts) def create_csv_files(signals, statuses, node_numbers, alarms_number_list, warnings_number_list): - global s3_config + global s3_config, num_of_csv_files_saved timestamp = 
int(time.time()) if timestamp % 2 != 0: timestamp -= 1 @@ -1084,6 +1094,8 @@ def create_csv_files(signals, statuses, node_numbers, alarms_number_list, warnin os.makedirs(CSV_DIR) csv_filename = f"{timestamp}.csv" csv_path = os.path.join(CSV_DIR, csv_filename) + num_of_csv_files_saved+=1 + # Append values to the CSV file if not os.path.exists(csv_path): with open(csv_path, 'a', newline='') as csvfile: diff --git a/firmware/Venus_Release/VenusReleaseFiles/dbus-fzsonick-48tl/config.py b/firmware/Venus_Release/VenusReleaseFiles/dbus-fzsonick-48tl/config.py index 595ed1bcb..a9666d22f 100755 --- a/firmware/Venus_Release/VenusReleaseFiles/dbus-fzsonick-48tl/config.py +++ b/firmware/Venus_Release/VenusReleaseFiles/dbus-fzsonick-48tl/config.py @@ -54,6 +54,6 @@ INNOVENERGY_PROTOCOL_VERSION = '48TL200V3' # S3 Credentials -S3BUCKET = "140-c0436b6a-d276-4cd8-9c44-1eae86cf5d0e" -S3KEY = "EXOa947c7fc5990a7a6f6c40860" -S3SECRET = "J1yOTLbYEO6cMxQ2wgIwe__ru9-_RH5BBtKzx_2JJHk" +S3BUCKET = "627-c0436b6a-d276-4cd8-9c44-1eae86cf5d0e" +S3KEY = "EXOb7bcf7d1e53f2d46923144de" +S3SECRET = "-uUmMuAfx40LpTKTZgdbXswTw09o_qmE4gzkmQS8PTk" diff --git a/firmware/Venus_Release/VenusReleaseFiles/dbus-fzsonick-48tl/dbus-fzsonick-48tl.py b/firmware/Venus_Release/VenusReleaseFiles/dbus-fzsonick-48tl/dbus-fzsonick-48tl.py index f34de575a..094e9e617 100755 --- a/firmware/Venus_Release/VenusReleaseFiles/dbus-fzsonick-48tl/dbus-fzsonick-48tl.py +++ b/firmware/Venus_Release/VenusReleaseFiles/dbus-fzsonick-48tl/dbus-fzsonick-48tl.py @@ -33,463 +33,470 @@ import io import json from convert import first import shutil + CSV_DIR = "/data/csv_files/" INSTALLATION_NAME_FILE = '/data/innovenergy/openvpn/installation-name' # trick the pycharm type-checker into thinking Callable is in scope, not used at runtime # noinspection PyUnreachableCode if False: - from typing import Callable, List, Iterable, NoReturn - + from typing import Callable, List, Iterable, NoReturn RESET_REGISTER = 0x2087 def compress_csv_data(csv_data, file_name="data.csv"): - memory_stream = io.BytesIO() + memory_stream = io.BytesIO() - # Create a zip archive in the memory buffer - with zipfile.ZipFile(memory_stream, 'w', zipfile.ZIP_DEFLATED) as archive: - # Add CSV data to the ZIP archive using writestr - archive.writestr(file_name, csv_data.encode('utf-8')) + # Create a zip archive in the memory buffer + with zipfile.ZipFile(memory_stream, 'w', zipfile.ZIP_DEFLATED) as archive: + # Add CSV data to the ZIP archive using writestr + archive.writestr(file_name, csv_data.encode('utf-8')) - # Get the compressed byte array from the memory buffer - compressed_bytes = memory_stream.getvalue() + # Get the compressed byte array from the memory buffer + compressed_bytes = memory_stream.getvalue() - # Encode the compressed byte array as a Base64 string - base64_string = base64.b64encode(compressed_bytes).decode('utf-8') + # Encode the compressed byte array as a Base64 string + base64_string = base64.b64encode(compressed_bytes).decode('utf-8') + + return base64_string - return base64_string class S3config: - def __init__(self): - self.bucket = cfg.S3BUCKET - self.region = "sos-ch-dk-2" - self.provider = "exo.io" - self.key = cfg.S3KEY - self.secret = cfg.S3SECRET - self.content_type = "application/base64; charset=utf-8" + def __init__(self): + self.bucket = cfg.S3BUCKET + self.region = "sos-ch-dk-2" + self.provider = "exo.io" + self.key = cfg.S3KEY + self.secret = cfg.S3SECRET + self.content_type = "application/base64; charset=utf-8" - @property - def host(self): - return 
"{}.{}.{}".format(self.bucket, self.region, self.provider) + @property + def host(self): + return "{}.{}.{}".format(self.bucket, self.region, self.provider) - @property - def url(self): - return "https://{}".format(self.host) + @property + def url(self): + return "https://{}".format(self.host) - def create_put_request(self, s3_path, data): - headers = self._create_request("PUT", s3_path) - url = "{}/{}".format(self.url, s3_path) - response = requests.put(url, headers=headers, data=data) - return response + def create_put_request(self, s3_path, data): + headers = self._create_request("PUT", s3_path) + url = "{}/{}".format(self.url, s3_path) + response = requests.put(url, headers=headers, data=data) + return response - def _create_request(self, method, s3_path): - date = datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT') - auth = self._create_authorization(method, self.bucket, s3_path, date, self.key, self.secret, self.content_type) - headers = { - "Host": self.host, - "Date": date, - "Authorization": auth, - "Content-Type": self.content_type - } - return headers + def _create_request(self, method, s3_path): + date = datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT') + auth = self._create_authorization(method, self.bucket, s3_path, date, self.key, self.secret, self.content_type) + headers = { + "Host": self.host, + "Date": date, + "Authorization": auth, + "Content-Type": self.content_type + } + return headers - @staticmethod - def _create_authorization(method, bucket, s3_path, date, s3_key, s3_secret, content_type="", md5_hash=""): - payload = "{}\n{}\n{}\n{}\n/{}/{}".format( - method, md5_hash, content_type, date, bucket.strip('/'), s3_path.strip('/') - ) - signature = base64.b64encode( - hmac.new(s3_secret.encode(), payload.encode(), hashlib.sha1).digest() - ).decode() - return "AWS {}:{}".format(s3_key, signature) + @staticmethod + def _create_authorization(method, bucket, s3_path, date, s3_key, s3_secret, content_type="", md5_hash=""): + payload = "{}\n{}\n{}\n{}\n/{}/{}".format( + method, md5_hash, content_type, date, bucket.strip('/'), s3_path.strip('/') + ) + signature = base64.b64encode( + hmac.new(s3_secret.encode(), payload.encode(), hashlib.sha1).digest() + ).decode() + return "AWS {}:{}".format(s3_key, signature) def SubscribeToQueue(): - try: - connection = pika.BlockingConnection(pika.ConnectionParameters(host="10.2.0.11", - port=5672, - virtual_host="/", - credentials=pika.PlainCredentials("producer", "b187ceaddb54d5485063ddc1d41af66f"))) - channel = connection.channel() - channel.queue_declare(queue="statusQueue", durable=True) - print("Subscribed to queue") - except Exception as ex: - print("An error occurred while connecting to the RabbitMQ queue:", ex) - return channel + try: + connection = pika.BlockingConnection(pika.ConnectionParameters(host="10.2.0.11", + port=5672, + virtual_host="/", + heartbeat=30, + credentials=pika.PlainCredentials("producer", + "b187ceaddb54d5485063ddc1d41af66f"))) + channel = connection.channel() + channel.queue_declare(queue="statusQueue", durable=True) + print("Subscribed to queue") + except Exception as ex: + print("An error occurred while connecting to the RabbitMQ queue:", ex) + return channel previous_warnings = {} previous_alarms = {} + class MessageType: - ALARM_OR_WARNING = "AlarmOrWarning" - HEARTBEAT = "Heartbeat" + ALARM_OR_WARNING = "AlarmOrWarning" + HEARTBEAT = "Heartbeat" + class AlarmOrWarning: - def __init__(self, description, created_by): - self.date = datetime.now().strftime('%Y-%m-%d') - self.time = 
datetime.now().strftime('%H:%M:%S') - self.description = description - self.created_by = created_by + def __init__(self, description, created_by): + self.date = datetime.now().strftime('%Y-%m-%d') + self.time = datetime.now().strftime('%H:%M:%S') + self.description = description + self.created_by = created_by + + def to_dict(self): + return { + "Date": self.date, + "Time": self.time, + "Description": self.description, + "CreatedBy": self.created_by + } - def to_dict(self): - return { - "Date": self.date, - "Time": self.time, - "Description": self.description, - "CreatedBy": self.created_by - } channel = SubscribeToQueue() # Create an S3config instance s3_config = S3config() -INSTALLATION_ID=int(s3_config.bucket.split('-')[0]) +INSTALLATION_ID = int(s3_config.bucket.split('-')[0]) PRODUCT_ID = 1 is_first_update = True prev_status = 0 +num_of_csv_files_saved = 0 + def update_state_from_dictionaries(current_warnings, current_alarms, node_numbers): - global previous_warnings, previous_alarms, INSTALLATION_ID, PRODUCT_ID, is_first_update, channel, prev_status + global previous_warnings, previous_alarms, INSTALLATION_ID, PRODUCT_ID, is_first_update, channel, prev_status - if is_first_update: - changed_warnings = current_warnings - changed_alarms = current_alarms - is_first_update = False - else: - changed_alarms = {} - changed_warnings = {} + if is_first_update: + changed_warnings = current_warnings + changed_alarms = current_alarms + is_first_update = False + else: + changed_alarms = {} + changed_warnings = {} - for key in current_alarms: - current_value = current_alarms[key] - prev_value = previous_alarms.get(key, False) # Use False if the key doesn't exist - if current_value != prev_value: - changed_alarms[key] = True - else: - changed_alarms[key] = False + for key in current_alarms: + current_value = current_alarms[key] + prev_value = previous_alarms.get(key, False) # Use False if the key doesn't exist + if current_value != prev_value: + changed_alarms[key] = True + else: + changed_alarms[key] = False - for key in current_warnings: - current_value = current_warnings[key] - prev_value = previous_warnings.get(key, False) - if current_value != prev_value: - changed_warnings[key] = True - else: - changed_warnings[key] = False - - status_message = { - "InstallationId": INSTALLATION_ID, - "Product": PRODUCT_ID, - "Status": 0, - "Type": 1, - "Warnings": [], - "Alarms": [] - } + for key in current_warnings: + current_value = current_warnings[key] + prev_value = previous_warnings.get(key, False) + if current_value != prev_value: + changed_warnings[key] = True + else: + changed_warnings[key] = False - alarms_number_list = [] - for node_number in node_numbers: - cnt = 0 - for i, alarm_value in enumerate(current_alarms.values()): - if int(list(current_alarms.keys())[i].split("/")[3]) == int(node_number): - if alarm_value: - cnt+=1 - alarms_number_list.append(cnt) + status_message = { + "InstallationId": INSTALLATION_ID, + "Product": PRODUCT_ID, + "Status": 0, + "Type": 1, + "Warnings": [], + "Alarms": [] + } + alarms_number_list = [] + for node_number in node_numbers: + cnt = 0 + for i, alarm_value in enumerate(current_alarms.values()): + if int(list(current_alarms.keys())[i].split("/")[3]) == int(node_number): + if alarm_value: + cnt += 1 + alarms_number_list.append(cnt) - warnings_number_list = [] - for node_number in node_numbers: - cnt = 0 - for i, warning_value in enumerate(current_warnings.values()): - if int(list(current_warnings.keys())[i].split("/")[3]) == int(node_number): - if warning_value: 
- cnt+=1 - warnings_number_list.append(cnt) + warnings_number_list = [] + for node_number in node_numbers: + cnt = 0 + for i, warning_value in enumerate(current_warnings.values()): + if int(list(current_warnings.keys())[i].split("/")[3]) == int(node_number): + if warning_value: + cnt += 1 + warnings_number_list.append(cnt) - # Evaluate alarms - if any(changed_alarms.values()): - for i, changed_alarm in enumerate(changed_alarms.values()): - if changed_alarm and list(current_alarms.values())[i]: - description = list(current_alarms.keys())[i].split("/")[-1] - device_created = "Battery node " + list(current_alarms.keys())[i].split("/")[3] - status_message["Alarms"].append(AlarmOrWarning(description, device_created).to_dict()) + # Evaluate alarms + if any(changed_alarms.values()): + for i, changed_alarm in enumerate(changed_alarms.values()): + if changed_alarm and list(current_alarms.values())[i]: + description = list(current_alarms.keys())[i].split("/")[-1] + device_created = "Battery node " + list(current_alarms.keys())[i].split("/")[3] + status_message["Alarms"].append(AlarmOrWarning(description, device_created).to_dict()) - if any(changed_warnings.values()): - for i, changed_warning in enumerate(changed_warnings.values()): - if changed_warning and list(current_warnings.values())[i]: - description = list(current_warnings.keys())[i].split("/")[-1] - device_created = "Battery node " + list(current_warnings.keys())[i].split("/")[3] - status_message["Warnings"].append(AlarmOrWarning(description, device_created).to_dict()) + if any(changed_warnings.values()): + for i, changed_warning in enumerate(changed_warnings.values()): + if changed_warning and list(current_warnings.values())[i]: + description = list(current_warnings.keys())[i].split("/")[-1] + device_created = "Battery node " + list(current_warnings.keys())[i].split("/")[3] + status_message["Warnings"].append(AlarmOrWarning(description, device_created).to_dict()) - if any(current_alarms.values()): - status_message["Status"]=2 + if any(current_alarms.values()): + status_message["Status"] = 2 - if not any(current_alarms.values()) and any(current_warnings.values()): - status_message["Status"]=1 + if not any(current_alarms.values()) and any(current_warnings.values()): + status_message["Status"] = 1 - if not any(current_alarms.values()) and not any(current_warnings.values()): - status_message["Status"]=0 + if not any(current_alarms.values()) and not any(current_warnings.values()): + status_message["Status"] = 0 - if status_message["Status"]!=prev_status or len(status_message["Warnings"])>0 or len(status_message["Alarms"])>0: - prev_status=status_message["Status"] - status_message["Type"]=0 - status_message = json.dumps(status_message) - channel.basic_publish(exchange="", routing_key="statusQueue", body=status_message) - print(status_message) - print("Message sent successfully") + if status_message["Status"] != prev_status or len(status_message["Warnings"]) > 0 or len( + status_message["Alarms"]) > 0: + prev_status = status_message["Status"] + status_message["Type"] = 0 + status_message = json.dumps(status_message) + channel.basic_publish(exchange="", routing_key="statusQueue", body=status_message) + print(status_message) + print("Message sent successfully") - previous_warnings = current_warnings.copy() - previous_alarms = current_alarms.copy() + previous_warnings = current_warnings.copy() + previous_alarms = current_alarms.copy() + + return status_message, alarms_number_list, warnings_number_list - return status_message, alarms_number_list, 
warnings_number_list def read_csv_as_string(file_path): - """ + """ Reads a CSV file from the given path and returns its content as a single string. """ - try: - # Note: 'encoding' is not available in open() in Python 2.7, so we'll use 'codecs' module. - import codecs - with codecs.open(file_path, 'r', encoding='utf-8') as file: - return file.read() - except IOError as e: - if e.errno == 2: # errno 2 corresponds to "No such file or directory" - print("Error: The file {} does not exist.".format(file_path)) - else: - print("IO error occurred: {}".format(str(e))) - return None - + try: + # Note: 'encoding' is not available in open() in Python 2.7, so we'll use 'codecs' module. + import codecs + with codecs.open(file_path, 'r', encoding='utf-8') as file: + return file.read() + except IOError as e: + if e.errno == 2: # errno 2 corresponds to "No such file or directory" + print("Error: The file {} does not exist.".format(file_path)) + else: + print("IO error occurred: {}".format(str(e))) + return None def init_modbus(tty): - # type: (str) -> Modbus + # type: (str) -> Modbus - logging.debug('initializing Modbus') + logging.debug('initializing Modbus') - return Modbus( - port='/dev/' + tty, - method=cfg.MODE, - baudrate=cfg.BAUD_RATE, - stopbits=cfg.STOP_BITS, - bytesize=cfg.BYTE_SIZE, - timeout=cfg.TIMEOUT, - parity=cfg.PARITY) + return Modbus( + port='/dev/' + tty, + method=cfg.MODE, + baudrate=cfg.BAUD_RATE, + stopbits=cfg.STOP_BITS, + bytesize=cfg.BYTE_SIZE, + timeout=cfg.TIMEOUT, + parity=cfg.PARITY) def init_udp_socket(): - # type: () -> socket + # type: () -> socket - s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - s.setblocking(False) + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + s.setblocking(False) - return s + return s def report_slave_id(modbus, slave_address): - # type: (Modbus, int) -> str + # type: (Modbus, int) -> str - slave = str(slave_address) + slave = str(slave_address) - logging.debug('requesting slave id from node ' + slave) + logging.debug('requesting slave id from node ' + slave) - with modbus: + with modbus: + request = ReportSlaveIdRequest(unit=slave_address) + response = modbus.execute(request) - request = ReportSlaveIdRequest(unit=slave_address) - response = modbus.execute(request) + if response is ExceptionResponse or issubclass(type(response), ModbusException): + raise Exception('failed to get slave id from ' + slave + ' : ' + str(response)) - if response is ExceptionResponse or issubclass(type(response), ModbusException): - raise Exception('failed to get slave id from ' + slave + ' : ' + str(response)) - - return response.identifier + return response.identifier def identify_battery(modbus, slave_address): - # type: (Modbus, int) -> Battery + # type: (Modbus, int) -> Battery - logging.info('identifying battery...') + logging.info('identifying battery...') - hardware_version, bms_version, ampere_hours = parse_slave_id(modbus, slave_address) - firmware_version = read_firmware_version(modbus, slave_address) + hardware_version, bms_version, ampere_hours = parse_slave_id(modbus, slave_address) + firmware_version = read_firmware_version(modbus, slave_address) - specs = Battery( - slave_address=slave_address, - hardware_version=hardware_version, - firmware_version=firmware_version, - bms_version=bms_version, - ampere_hours=ampere_hours) + specs = Battery( + slave_address=slave_address, + hardware_version=hardware_version, + firmware_version=firmware_version, + bms_version=bms_version, + ampere_hours=ampere_hours) - logging.info('battery 
identified:\n{0}'.format(str(specs))) + logging.info('battery identified:\n{0}'.format(str(specs))) - return specs + return specs def identify_batteries(modbus): - # type: (Modbus) -> List[Battery] + # type: (Modbus) -> List[Battery] - def _identify_batteries(): - slave_address = 0 - n_missing = -7 + def _identify_batteries(): + slave_address = 0 + n_missing = -7 - while n_missing < 3: - slave_address += 1 - try: - yield identify_battery(modbus, slave_address) - n_missing = 0 - except Exception as e: - logging.info('failed to identify battery at {0} : {1}'.format(str(slave_address), str(e))) - n_missing += 1 + while n_missing < 3: + slave_address += 1 + try: + yield identify_battery(modbus, slave_address) + n_missing = 0 + except Exception as e: + logging.info('failed to identify battery at {0} : {1}'.format(str(slave_address), str(e))) + n_missing += 1 - logging.info('giving up searching for further batteries') + logging.info('giving up searching for further batteries') - batteries = list(_identify_batteries()) # dont be lazy! + batteries = list(_identify_batteries()) # dont be lazy! - n = len(batteries) - logging.info('found ' + str(n) + (' battery' if n == 1 else ' batteries')) + n = len(batteries) + logging.info('found ' + str(n) + (' battery' if n == 1 else ' batteries')) - return batteries + return batteries def parse_slave_id(modbus, slave_address): - # type: (Modbus, int) -> (str, str, int) + # type: (Modbus, int) -> (str, str, int) - slave_id = report_slave_id(modbus, slave_address) + slave_id = report_slave_id(modbus, slave_address) - sid = re.sub(r'[^\x20-\x7E]', '', slave_id) # remove weird special chars + sid = re.sub(r'[^\x20-\x7E]', '', slave_id) # remove weird special chars - match = re.match('(?P48TL(?P[0-9]+)) *(?P.*)', sid) + match = re.match('(?P48TL(?P[0-9]+)) *(?P.*)', sid) - if match is None: - raise Exception('no known battery found') + if match is None: + raise Exception('no known battery found') - return match.group('hw').strip(), match.group('bms').strip(), int(match.group('ah').strip()) + return match.group('hw').strip(), match.group('bms').strip(), int(match.group('ah').strip()) def read_firmware_version(modbus, slave_address): - # type: (Modbus, int) -> str + # type: (Modbus, int) -> str - logging.debug('reading firmware version') + logging.debug('reading firmware version') - with modbus: + with modbus: + response = read_modbus_registers(modbus, slave_address, base_address=1054, count=1) + register = response.registers[0] - response = read_modbus_registers(modbus, slave_address, base_address=1054, count=1) - register = response.registers[0] - - return '{0:0>4X}'.format(register) + return '{0:0>4X}'.format(register) def read_modbus_registers(modbus, slave_address, base_address=cfg.BASE_ADDRESS, count=cfg.NO_OF_REGISTERS): - # type: (Modbus, int, int, int) -> ReadInputRegistersResponse + # type: (Modbus, int, int, int) -> ReadInputRegistersResponse - logging.debug('requesting modbus registers {0}-{1}'.format(base_address, base_address + count)) + logging.debug('requesting modbus registers {0}-{1}'.format(base_address, base_address + count)) - return modbus.read_input_registers( - address=base_address, - count=count, - unit=slave_address) + return modbus.read_input_registers( + address=base_address, + count=count, + unit=slave_address) def read_battery_status(modbus, battery): - # type: (Modbus, Battery) -> BatteryStatus - """ + # type: (Modbus, Battery) -> BatteryStatus + """ Read the modbus registers containing the battery's status info. 
""" - logging.debug('reading battery status') + logging.debug('reading battery status') - with modbus: - data = read_modbus_registers(modbus, battery.slave_address) - return BatteryStatus(battery, data.registers) + with modbus: + data = read_modbus_registers(modbus, battery.slave_address) + return BatteryStatus(battery, data.registers) def publish_values_on_dbus(service, battery_signals, battery_statuses): - # type: (DBusService, Iterable[BatterySignal], Iterable[BatteryStatus]) -> () + # type: (DBusService, Iterable[BatterySignal], Iterable[BatteryStatus]) -> () - publish_individuals(service, battery_signals, battery_statuses) - publish_aggregates(service, battery_signals, battery_statuses) + publish_individuals(service, battery_signals, battery_statuses) + publish_aggregates(service, battery_signals, battery_statuses) def publish_aggregates(service, signals, battery_statuses): - # type: (DBusService, Iterable[BatterySignal], Iterable[BatteryStatus]) -> () + # type: (DBusService, Iterable[BatterySignal], Iterable[BatteryStatus]) -> () - for s in signals: - if s.aggregate is None: - continue - values = [s.get_value(battery_status) for battery_status in battery_statuses] - value = s.aggregate(values) - service.own_properties.set(s.dbus_path, value, s.unit) + for s in signals: + if s.aggregate is None: + continue + values = [s.get_value(battery_status) for battery_status in battery_statuses] + value = s.aggregate(values) + service.own_properties.set(s.dbus_path, value, s.unit) def publish_individuals(service, signals, battery_statuses): - # type: (DBusService, Iterable[BatterySignal], Iterable[BatteryStatus]) -> () + # type: (DBusService, Iterable[BatterySignal], Iterable[BatteryStatus]) -> () - for signal in signals: - for battery_status in battery_statuses: - address = battery_status.battery.slave_address - dbus_path = '/_Battery/' + str(address) + signal.dbus_path - value = signal.get_value(battery_status) - service.own_properties.set(dbus_path, value, signal.unit) + for signal in signals: + for battery_status in battery_statuses: + address = battery_status.battery.slave_address + dbus_path = '/_Battery/' + str(address) + signal.dbus_path + value = signal.get_value(battery_status) + service.own_properties.set(dbus_path, value, signal.unit) def publish_service_signals(service, signals): - # type: (DBusService, Iterable[ServiceSignal]) -> NoReturn + # type: (DBusService, Iterable[ServiceSignal]) -> NoReturn - for signal in signals: - service.own_properties.set(signal.dbus_path, signal.value, signal.unit) + for signal in signals: + service.own_properties.set(signal.dbus_path, signal.value, signal.unit) def upload_status_to_innovenergy(sock, statuses): - # type: (socket, Iterable[BatteryStatus]) -> bool + # type: (socket, Iterable[BatteryStatus]) -> bool - logging.debug('upload status') + logging.debug('upload status') - try: - for s in statuses: - sock.sendto(s.serialize(), (cfg.INNOVENERGY_SERVER_IP, cfg.INNOVENERGY_SERVER_PORT)) - except: - logging.debug('FAILED') - return False - else: - return True + try: + for s in statuses: + sock.sendto(s.serialize(), (cfg.INNOVENERGY_SERVER_IP, cfg.INNOVENERGY_SERVER_PORT)) + except: + logging.debug('FAILED') + return False + else: + return True def print_usage(): - print ('Usage: ' + __file__ + ' ') - print ('Example: ' + __file__ + ' ttyUSB0') + print('Usage: ' + __file__ + ' ') + print('Example: ' + __file__ + ' ttyUSB0') def parse_cmdline_args(argv): - # type: (List[str]) -> str + # type: (List[str]) -> str - if len(argv) == 0: - 
logging.info('missing command line argument for tty device') - print_usage() - sys.exit(1) + if len(argv) == 0: + logging.info('missing command line argument for tty device') + print_usage() + sys.exit(1) - return argv[0] + return argv[0] def reset_batteries(modbus, batteries): - # type: (Modbus, Iterable[Battery]) -> NoReturn + # type: (Modbus, Iterable[Battery]) -> NoReturn - logging.info('Resetting batteries...') + logging.info('Resetting batteries...') - for battery in batteries: + for battery in batteries: + result = modbus.write_registers(RESET_REGISTER, [1], unit=battery.slave_address) - result = modbus.write_registers(RESET_REGISTER, [1], unit=battery.slave_address) + # expecting a ModbusIOException (timeout) + # BMS can no longer reply because it is already reset + success = isinstance(result, ModbusIOException) - # expecting a ModbusIOException (timeout) - # BMS can no longer reply because it is already reset - success = isinstance(result, ModbusIOException) + outcome = 'successfully' if success else 'FAILED to' + logging.info('Battery {0} {1} reset'.format(str(battery.slave_address), outcome)) - outcome = 'successfully' if success else 'FAILED to' - logging.info('Battery {0} {1} reset'.format(str(battery.slave_address), outcome)) - - logging.info('Shutting down fz-sonick driver') - exit(0) + logging.info('Shutting down fz-sonick driver') + exit(0) -alive = True # global alive flag, watchdog_task clears it, update_task sets it +alive = True # global alive flag, watchdog_task clears it, update_task sets it start_time = time.time() + + def count_files_in_folder(folder_path): try: # List all files in the folder @@ -504,254 +511,275 @@ def count_files_in_folder(folder_path): def create_update_task(modbus, service, batteries): - # type: (Modbus, DBusService, Iterable[Battery]) -> Callable[[],bool] - """ + # type: (Modbus, DBusService, Iterable[Battery]) -> Callable[[],bool] + """ Creates an update task which runs the main update function and resets the alive flag """ - global start_time - _socket = init_udp_socket() - _signals = signals.init_battery_signals() + global start_time + _socket = init_udp_socket() + _signals = signals.init_battery_signals() - csv_signals = signals.create_csv_signals() - node_numbers = [battery.slave_address for battery in batteries] - warnings_signals, alarm_signals = signals.read_warning_and_alarm_flags() - current_warnings = {} - current_alarms = {} + csv_signals = signals.create_csv_signals() + node_numbers = [battery.slave_address for battery in batteries] + warnings_signals, alarm_signals = signals.read_warning_and_alarm_flags() + current_warnings = {} + current_alarms = {} - def update_task(): - # type: () -> bool - global alive, start_time + def update_task(): + # type: () -> bool + try: + global alive, start_time, channel + logging.debug('starting update cycle') - logging.debug('starting update cycle') + if service.own_properties.get('/ResetBatteries').value == 1: + reset_batteries(modbus, batteries) - if service.own_properties.get('/ResetBatteries').value == 1: - reset_batteries(modbus, batteries) + statuses = [read_battery_status(modbus, battery) for battery in batteries] - statuses = [read_battery_status(modbus, battery) for battery in batteries] + # Iterate over each node and signal to create rows in the new format + for i, node in enumerate(node_numbers): + for s in warnings_signals: + signal_name = insert_id(s.name, node) + value = s.get_value(statuses[i]) + current_warnings[signal_name] = value + for s in alarm_signals: + signal_name = 
insert_id(s.name, node) + value = s.get_value(statuses[i]) + current_alarms[signal_name] = value - # Iterate over each node and signal to create rows in the new format - for i, node in enumerate(node_numbers): - for s in warnings_signals: - signal_name = insert_id(s.name, node) - value = s.get_value(statuses[i]) - current_warnings[signal_name] = value - for s in alarm_signals: - signal_name = insert_id(s.name, node) - value = s.get_value(statuses[i]) - current_alarms[signal_name] = value + status_message, alarms_number_list, warnings_number_list = update_state_from_dictionaries(current_warnings, + current_alarms, + node_numbers) - status_message, alarms_number_list, warnings_number_list = update_state_from_dictionaries(current_warnings, - current_alarms, - node_numbers) + publish_values_on_dbus(service, _signals, statuses) - publish_values_on_dbus(service, _signals, statuses) + elapsed_time = time.time() - start_time + create_csv_files(csv_signals, statuses, node_numbers, alarms_number_list, warnings_number_list) + print("11111111111111111111111111111111111111111111 elapsed time is ", elapsed_time) - elapsed_time = time.time() - start_time - create_csv_files(csv_signals, statuses, node_numbers, alarms_number_list, warnings_number_list) - print("11111111111111111111111111111111111111111111 elapsed time is ",elapsed_time) - - # keep at most 1900 files at CSV_DIR for logging and aggregation - manage_csv_files(CSV_DIR, 1900) + # keep at most 1900 files at CSV_DIR for logging and aggregation + manage_csv_files(CSV_DIR, 1900) - num_files_in_csv_dir = count_files_in_folder(CSV_DIR) - if elapsed_time >= 1200: - print("CREATE BATCH ======================================>") - create_batch_of_csv_files() - start_time = time.time() + num_files_in_csv_dir = count_files_in_folder(CSV_DIR) + if elapsed_time >= 1200: + print("CREATE BATCH ======================================>") + create_batch_of_csv_files() + start_time = time.time() - upload_status_to_innovenergy(_socket, statuses) + upload_status_to_innovenergy(_socket, statuses) -# logging.debug('finished update cycleeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee\n') + # logging.debug('finished update cycleeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee\n') - alive = True + alive = True + except pika.exceptions.AMQPConnectionError: + logging.error("AMQPConnectionError encountered. 
Subscribing to queue.") + create_batch_of_csv_files() - return True + except Exception as e: + create_batch_of_csv_files() + logging.error("Unexpected error") + raise + + finally: + return True + + return update_task - return update_task def manage_csv_files(directory_path, max_files=20): - csv_files = [f for f in os.listdir(directory_path) if os.path.isfile(os.path.join(directory_path, f))] - csv_files.sort(key=lambda x: os.path.getctime(os.path.join(directory_path, x))) + csv_files = [f for f in os.listdir(directory_path) if os.path.isfile(os.path.join(directory_path, f))] + csv_files.sort(key=lambda x: os.path.getctime(os.path.join(directory_path, x))) + + print("len of csv files is " + str(len(csv_files))) + # Remove oldest files if exceeds maximum + while len(csv_files) > max_files: + file_to_delete = os.path.join(directory_path, csv_files.pop(0)) + os.remove(file_to_delete) + - print("len of csv files is "+str(len(csv_files))) - # Remove oldest files if exceeds maximum - while len(csv_files) > max_files: - file_to_delete = os.path.join(directory_path, csv_files.pop(0)) - os.remove(file_to_delete) def insert_id(path, id_number): - parts = path.split("/") - insert_position = parts.index("Devices") + 1 - parts.insert(insert_position, str(id_number)) - return "/".join(parts) + parts = path.split("/") + insert_position = parts.index("Devices") + 1 + parts.insert(insert_position, str(id_number)) + return "/".join(parts) + def create_batch_of_csv_files(): - global prev_status,channel,INSTALLATION_ID, PRODUCT_ID - # list all files in the directory - files = os.listdir(CSV_DIR) + global prev_status, channel, INSTALLATION_ID, PRODUCT_ID, num_of_csv_files_saved + # list all files in the directory + files = os.listdir(CSV_DIR) - # filter out only csv files - csv_files = [file for file in files if file.endswith('.csv')] + # filter out only csv files + csv_files = [file for file in files if file.endswith('.csv')] - # sort csv files by creation time - csv_files.sort(key=lambda x: os.path.getctime(os.path.join(CSV_DIR, x))) + # sort csv files by creation time + csv_files.sort(key=lambda x: os.path.getctime(os.path.join(CSV_DIR, x))) - # keep the 600 MOST RECENT FILES - recent_csv_files = csv_files[-600:] if len(csv_files) > 600 else csv_files + # keep the num_of_csv_files_saved MOST RECENT FILES + recent_csv_files = csv_files[-num_of_csv_files_saved:] + print("num_of_csv_files_saved is " + str(num_of_csv_files_saved)) - # get the name of the first csv file - if not csv_files: - print("No csv files found in the directory.") - exit(0) + # get the name of the first csv file + if not csv_files: + print("No csv files found in the directory.") + exit(0) - first_csv_file = os.path.join(CSV_DIR, recent_csv_files.pop(0)) - first_csv_filename = os.path.basename(first_csv_file) + first_csv_file = os.path.join(CSV_DIR, recent_csv_files.pop(0)) + first_csv_filename = os.path.basename(first_csv_file) - temp_file_path = os.path.join(CSV_DIR, 'temp_batch_file.csv') + temp_file_path = os.path.join(CSV_DIR, 'temp_batch_file.csv') - # create a temporary file and write the timestamp and the original content of the first file - with open(temp_file_path, 'wb') as temp_file: - # Write the timestamp (filename) at the beginning - temp_file.write('Timestamp;{}\n'.format(first_csv_filename.split('.')[0])) - # write the original content of the first csv file - with open(first_csv_file, 'rb') as f: - temp_file.write(f.read()) - for csv_file in recent_csv_files: - file_path = os.path.join(CSV_DIR, csv_file) - # write an empty 
 def create_batch_of_csv_files():
-    global prev_status,channel,INSTALLATION_ID, PRODUCT_ID
-    # list all files in the directory
-    files = os.listdir(CSV_DIR)
+    global prev_status, channel, INSTALLATION_ID, PRODUCT_ID, num_of_csv_files_saved
+    # list all files in the directory
+    files = os.listdir(CSV_DIR)

-    # filter out only csv files
-    csv_files = [file for file in files if file.endswith('.csv')]
+    # filter out only csv files
+    csv_files = [file for file in files if file.endswith('.csv')]

-    # sort csv files by creation time
-    csv_files.sort(key=lambda x: os.path.getctime(os.path.join(CSV_DIR, x)))
+    # sort csv files by creation time
+    csv_files.sort(key=lambda x: os.path.getctime(os.path.join(CSV_DIR, x)))

-    # keep the 600 MOST RECENT FILES
-    recent_csv_files = csv_files[-600:] if len(csv_files) > 600 else csv_files
+    # keep the num_of_csv_files_saved MOST RECENT FILES
+    recent_csv_files = csv_files[-num_of_csv_files_saved:]
+    print("num_of_csv_files_saved is " + str(num_of_csv_files_saved))

-    # get the name of the first csv file
-    if not csv_files:
-        print("No csv files found in the directory.")
-        exit(0)
+    # get the name of the first csv file
+    if not csv_files:
+        print("No csv files found in the directory.")
+        exit(0)

-    first_csv_file = os.path.join(CSV_DIR, recent_csv_files.pop(0))
-    first_csv_filename = os.path.basename(first_csv_file)
+    first_csv_file = os.path.join(CSV_DIR, recent_csv_files.pop(0))
+    first_csv_filename = os.path.basename(first_csv_file)

-    temp_file_path = os.path.join(CSV_DIR, 'temp_batch_file.csv')
+    temp_file_path = os.path.join(CSV_DIR, 'temp_batch_file.csv')

-    # create a temporary file and write the timestamp and the original content of the first file
-    with open(temp_file_path, 'wb') as temp_file:
-        # Write the timestamp (filename) at the beginning
-        temp_file.write('Timestamp;{}\n'.format(first_csv_filename.split('.')[0]))
-        # write the original content of the first csv file
-        with open(first_csv_file, 'rb') as f:
-            temp_file.write(f.read())
-        for csv_file in recent_csv_files:
-            file_path = os.path.join(CSV_DIR, csv_file)
-            # write an empty line
-            temp_file.write('\n')
-            # write the timestamp (filename)
-            temp_file.write('Timestamp;{}\n'.format(csv_file.split('.')[0]))
-            # write the content of the file
-            with open(file_path, 'rb') as f:
-                temp_file.write(f.read())
+    # create a temporary file and write the timestamp and the original content of the first file
+    with open(temp_file_path, 'wb') as temp_file:
+        # Write the timestamp (filename) at the beginning
+        temp_file.write('Timestamp;{}\n'.format(first_csv_filename.split('.')[0]))
+        # write the original content of the first csv file
+        with open(first_csv_file, 'rb') as f:
+            temp_file.write(f.read())
+        for csv_file in recent_csv_files:
+            file_path = os.path.join(CSV_DIR, csv_file)
+            # write an empty line
+            temp_file.write('\n')
+            # write the timestamp (filename)
+            temp_file.write('Timestamp;{}\n'.format(csv_file.split('.')[0]))
+            # write the content of the file
+            with open(file_path, 'rb') as f:
+                temp_file.write(f.read())

-    # replace the original first csv file with the temporary file
-    os.remove(first_csv_file)
-    os.rename(temp_file_path, first_csv_file)
+    # replace the original first csv file with the temporary file
+    os.remove(first_csv_file)
+    os.rename(temp_file_path, first_csv_file)
+    num_of_csv_files_saved = 0

-    # create a loggin directory that contains at max 20 batch files for logging info
-    # logging_dir = os.path.join(CSV_DIR, 'logging_batch_files')
-    # if not os.path.exists(logging_dir):
-    #     os.makedirs(logging_dir)
-    #
-    # shutil.copy(first_csv_file, logging_dir)
-    # manage_csv_files(logging_dir)
+    # create a logging directory that contains at max 20 batch files for logging info
+    # logging_dir = os.path.join(CSV_DIR, 'logging_batch_files')
+    # if not os.path.exists(logging_dir):
+    #     os.makedirs(logging_dir)
+    #
+    # shutil.copy(first_csv_file, logging_dir)
+    # manage_csv_files(logging_dir)

-    # print("The batch csv file is: {}".format(recent_csv_files[-1]))
+    # print("The batch csv file is: {}".format(recent_csv_files[-1]))

-    # prepare for compression
-    csv_data = read_csv_as_string(first_csv_file)
+    # prepare for compression
+    csv_data = read_csv_as_string(first_csv_file)

-    if csv_data is None:
-        print("error while reading csv as string")
-        return
+    if csv_data is None:
+        print("error while reading csv as string")
+        return

+    # zip-comp additions
+    compressed_csv = compress_csv_data(csv_data)
+    # Use the name of the last (most recent) CSV file in sorted csv_files as the name for the compressed file
+    last_csv_file_name = os.path.basename(recent_csv_files[-1]) if recent_csv_files else first_csv_filename
-    # zip-comp additions
-    compressed_csv = compress_csv_data(csv_data)
-    # Use the name of the last (most recent) CSV file in sorted csv_files as the name for the compressed file
-    last_csv_file_name = os.path.basename(recent_csv_files[-1]) if recent_csv_files else first_csv_filename

+    # the batch is named after the newest file's timestamp with the last two digits dropped
+    numeric_part = int(last_csv_file_name.split('.')[0][:-2])
-    # we send the csv files every 30 seconds and the timestamp is adjusted to be a multiple of 30
-    numeric_part = int(last_csv_file_name.split('.')[0][:-2])

+    # compressed_filename = "{}.csv".format(new_numeric_part)
+    compressed_filename = "{}.csv".format(numeric_part)
-    # compressed_filename = "{}.csv".format(new_numeric_part)
-    compressed_filename = "{}.csv".format(numeric_part)

+    print("FILE NAME =========================================================> ", compressed_filename)
-    print("FILE NAME =========================================================> ", compressed_filename)

+    response = s3_config.create_put_request(compressed_filename, compressed_csv)
+    # response = s3_config.create_put_request(first_csv_filename, csv_data)
-    response = s3_config.create_put_request(compressed_filename, compressed_csv)
-    # response = s3_config.create_put_request(first_csv_filename, csv_data)

+    print(response)
+    if response.status_code == 200:
+        os.remove(first_csv_file)
+        print("Successfully uploaded the compressed batch of files to S3")
+        status_message = {
+            "InstallationId": INSTALLATION_ID,
+            "Product": PRODUCT_ID,
+            "Status": prev_status,
+            "Type": 1,
+            "Warnings": [],
+            "Alarms": [],
+        }
-    print(response)
-    if response.status_code == 200:
-        os.remove(first_csv_file)
-        print("Successfully uploaded the compresseed batch of files in s3")
-        status_message = {
-            "InstallationId": INSTALLATION_ID,
-            "Product": PRODUCT_ID,
-            "Status": prev_status,
-            "Type": 1,
-            "Warnings": [],
-            "Alarms": [],
-        }

+        print(status_message)
-        print(status_message)

+        status_message = json.dumps(status_message)
+        try:
+            channel.basic_publish(exchange="", routing_key="statusQueue", body=status_message)
+        except Exception:
+            # the channel may have gone stale; re-subscribe once and retry
+            channel = SubscribeToQueue()
+            channel.basic_publish(exchange="", routing_key="statusQueue", body=status_message)
-        status_message = json.dumps(status_message)
-        try:
-            channel.basic_publish(exchange="", routing_key="statusQueue", body=status_message)
-        except:
-            channel = SubscribeToQueue()
-            channel.basic_publish(exchange="", routing_key="statusQueue", body=status_message)

+        print("Successfully sent the heartbeat with timestamp")
+    else:
+        # we save data that were not successfully uploaded to S3 in a failed directory inside the CSV_DIR for logging
+        failed_dir = os.path.join(CSV_DIR, "failed")
+        if not os.path.exists(failed_dir):
+            os.makedirs(failed_dir)
+        failed_path = os.path.join(failed_dir, first_csv_filename)
+        os.rename(first_csv_file, failed_path)
+        print("Uploading failed")
+        manage_csv_files(failed_dir, 100)
-        print("Successfully sent the heartbit with timestamp")
-    else:
-        # we save data that were not successfully uploaded in s3 in a failed directory inside the CSV_DIR for logging
-        failed_dir = os.path.join(CSV_DIR, "failed")
-        if not os.path.exists(failed_dir):
-            os.makedirs(failed_dir)
-        failed_path = os.path.join(failed_dir, first_csv_filename)
-        os.rename(first_csv_file, failed_path)
-        print("Uploading failed")
-        manage_csv_files(failed_dir, 100)
-
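The batch assembled above is therefore a plain concatenation of per-cycle CSV files, each record introduced by a `Timestamp;<epoch>` header and separated by a blank line, then compressed by compress_csv_data before upload (the codec itself is not shown in this hunk). A minimal reader for the *uncompressed* record format, purely illustrative:

```python
# Illustrative parser for the "Timestamp;<epoch>" batch format written above.
def iter_batch_records(path):
    """Yield (epoch, rows) pairs, one per concatenated CSV record."""
    record_ts, rows = None, []
    with open(path) as f:
        for line in f:
            line = line.rstrip('\n')
            if line.startswith('Timestamp;'):
                if record_ts is not None:
                    yield record_ts, rows          # flush the previous record
                record_ts, rows = int(line.split(';', 1)[1]), []
            elif line:                             # skip the blank separators
                rows.append(line.split(';'))
    if record_ts is not None:
        yield record_ts, rows

# Example usage (the filename is hypothetical):
# for ts, rows in iter_batch_records('1718000000.csv'):
#     print(ts, len(rows), 'rows')
```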

 def create_csv_files(signals, statuses, node_numbers, alarms_number_list, warnings_number_list):
-    timestamp = int(time.time())
-    if timestamp % 2 != 0:
-        timestamp -= 1
-    if not os.path.exists(CSV_DIR):
-        os.makedirs(CSV_DIR)
-    csv_filename = "{}.csv".format(timestamp)
-    csv_path = os.path.join(CSV_DIR, csv_filename)
+    global num_of_csv_files_saved
+    timestamp = int(time.time())
+    if timestamp % 2 != 0:
+        timestamp -= 1
+    if not os.path.exists(CSV_DIR):
+        os.makedirs(CSV_DIR)
+    csv_filename = "{}.csv".format(timestamp)
+    csv_path = os.path.join(CSV_DIR, csv_filename)
+    num_of_csv_files_saved += 1
+
+    if not os.path.exists(csv_path):
+        with open(csv_path, 'ab') as csvfile:
+            csv_writer = csv.writer(csvfile, delimiter=';')
+            nodes_config_path = "/Config/Devices/BatteryNodes"
+            nodes_list = ",".join(str(node) for node in node_numbers)
+            config_row = [nodes_config_path, nodes_list, ""]
+            csv_writer.writerow(config_row)
+            for i, node in enumerate(node_numbers):
+                csv_writer.writerow(["/Battery/Devices/{}/Alarms".format(str(i + 1)), alarms_number_list[i], ""])
+                csv_writer.writerow(["/Battery/Devices/{}/Warnings".format(str(i + 1)), warnings_number_list[i], ""])
+                for s in signals:
+                    signal_name = insert_id(s.name, i + 1)
+                    value = s.get_value(statuses[i])
+                    row_values = [signal_name, value, s.get_text]
+                    csv_writer.writerow(row_values)
-    if not os.path.exists(csv_path):
-        with open(csv_path, 'ab') as csvfile:
-            csv_writer = csv.writer(csvfile, delimiter=';')
-            nodes_config_path = "/Config/Devices/BatteryNodes"
-            nodes_list = ",".join(str(node) for node in node_numbers)
-            config_row = [nodes_config_path, nodes_list, ""]
-            csv_writer.writerow(config_row)
-            for i, node in enumerate(node_numbers):
-                csv_writer.writerow(["/Battery/Devices/{}/Alarms".format(str(i+1)), alarms_number_list[i], ""])
-                csv_writer.writerow(["/Battery/Devices/{}/Warnings".format(str(i+1)), warnings_number_list[i], ""])
-                for s in signals:
-                    signal_name = insert_id(s.name, i+1)
-                    value = s.get_value(statuses[i])
-                    row_values = [signal_name, value, s.get_text]
-                    csv_writer.writerow(row_values)
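Putting create_csv_files together: each 2-second file is a ';'-separated table of `<signal path>;<value>;<unit/text>` rows, preceded by one config row listing the battery nodes. An invented example of one such file (signal names and units are assumptions for illustration; the real set comes from signals.init_service_signals):

```python
# What a single per-cycle CSV file might look like for two nodes.
# "Soc" and "%" are placeholders, not guaranteed real signal names.
sample = """\
/Config/Devices/BatteryNodes;1,2;
/Battery/Devices/1/Alarms;0;
/Battery/Devices/1/Warnings;0;
/Battery/Devices/1/Soc;87.5;%
/Battery/Devices/2/Alarms;0;
/Battery/Devices/2/Warnings;0;
/Battery/Devices/2/Soc;86.9;%
"""
```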

 def create_watchdog_task(main_loop):
-    # type: (DBusGMainLoop) -> Callable[[],bool]
-    """
+    # type: (DBusGMainLoop) -> Callable[[],bool]
+    """
     Creates a Watchdog task that monitors the alive flag.
     The watchdog kills the main loop if the alive flag is not periodically reset by the update task.
     Who watches the watchdog?
     """
-    def watchdog_task():
-        # type: () -> bool
-        global alive
+    def watchdog_task():
+        # type: () -> bool
-        if alive:
-            logging.debug('watchdog_task: update_task is alive')
-            alive = False
-            return True
-        else:
-            logging.info('watchdog_task: killing main loop because update_task is no longer alive')
-            main_loop.quit()
-            return False
+        global alive
+
+        if alive:
+            logging.debug('watchdog_task: update_task is alive')
+            alive = False
+            return True
+        else:
+            logging.info('watchdog_task: killing main loop because update_task is no longer alive')
+            main_loop.quit()
+            return False
+
+    return watchdog_task
-    return watchdog_task

 BATTERY_COUNTS_FILE = '/data/battery_count.csv'
+
+
 def load_battery_counts():
     if os.path.exists(BATTERY_COUNTS_FILE):
         with open(BATTERY_COUNTS_FILE, 'r') as f:
             reader = csv.reader(f)
-            return [int(row[0]) for row in reader]
+            return [int(row[0]) for row in reader]
     return []

@@ -760,80 +788,81 @@ def save_battery_counts(battery_counts):
         writer = csv.writer(f)
         for count in battery_counts:
             writer.writerow([count])
+
+
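load_battery_counts/save_battery_counts keep a one-number-per-row history in /data/battery_count.csv, and main() below uses max() of that history as a high-water mark for how many batteries should be present. A quick illustrative round-trip (assumes /data is writable, as on the GX device):

```python
# Illustrative use of the battery-count bookkeeping defined above.
counts = load_battery_counts()        # [] on a first boot
counts.append(3)                      # e.g. three batteries detected
save_battery_counts(counts)
assert load_battery_counts()[-1] == 3

# max(counts) is the "most batteries ever seen" watermark that main()
# compares each new detection against before accepting a lower count.
```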
 def main(argv):
-    # type: (List[str]) -> ()
-    print("INSIDE DBUS SONICK")
-    logging.basicConfig(level=cfg.LOG_LEVEL)
-    logging.info('starting ' + __file__)
+    # type: (List[str]) -> ()
+    print("INSIDE DBUS SONICK")
+    logging.basicConfig(level=cfg.LOG_LEVEL)
+    logging.info('starting ' + __file__)

-    # tty = parse_cmdline_args(argv)
-    # modbus = init_modbus(tty)
+    # tty = parse_cmdline_args(argv)
+    # modbus = init_modbus(tty)

-    # batteries = identify_batteries(modbus)
+    # batteries = identify_batteries(modbus)

-    # if len(batteries) <= 0:
-    #     sys.exit(2)
+    # if len(batteries) <= 0:
+    #     sys.exit(2)

-    tty = parse_cmdline_args(argv)
-    battery_counts = load_battery_counts()
-    max_retry_attempts = 3  # Stop retrying in case it's a real battery loss case
-    retry_attempts = 0
+    tty = parse_cmdline_args(argv)
+    battery_counts = load_battery_counts()
+    max_retry_attempts = 3  # Stop retrying in case it's a real battery loss case
+    retry_attempts = 0

-    while True:
-        modbus = init_modbus(tty)
-        batteries = identify_batteries(modbus)
-        n = len(batteries)
-        logging.info('found %d %s', n, "battery" if n == 1 else "batteries")
+    while True:
-        if n <= 0:
-            sys.exit(2)  # Exit if no batteries are found
+        modbus = init_modbus(tty)
+        batteries = identify_batteries(modbus)
+        n = len(batteries)
+        logging.info('found %d %s', n, "battery" if n == 1 else "batteries")
+        if n <= 0:
+            sys.exit(2)  # Exit if no batteries are found

-        if not battery_counts or n > max(battery_counts):
-            # If it's the first detection or detect more batteries than ever before
-            logging.info("It's new or more batteries detected")
-            battery_counts.append(n)
-            retry_attempts = 0
-            save_battery_counts(battery_counts)
+        if not battery_counts or n > max(battery_counts):
+            # First detection, or more batteries than ever seen before
+            logging.info("New or additional batteries detected")
+            battery_counts.append(n)
+            retry_attempts = 0
+            save_battery_counts(battery_counts)

-        elif n < max(battery_counts):
-            retry_attempts += 1
-            logging.warning('Attempt %d/%d: Detected fewer batteries than previously detected.',
-                            retry_attempts, max_retry_attempts)
-
-            # If max retry attempts are exceeded, continue with fewer batteries
-            if retry_attempts >= max_retry_attempts:
-                logging.warning('Max retry attempts reached. Continuing with fewer batteries.')
-                save_battery_counts(battery_counts)
-                break
+        elif n < max(battery_counts):
+            retry_attempts += 1
+            logging.warning('Attempt %d/%d: detected fewer batteries than before.',
+                            retry_attempts, max_retry_attempts)

-            continue
-
-        elif n == max(battery_counts):
-            logging.info('Detected the same number of batteries as before. No need to re-detect.')
-            break
+            # If max retry attempts are exceeded, continue with fewer batteries
+            if retry_attempts >= max_retry_attempts:
+                logging.warning('Max retry attempts reached. Continuing with fewer batteries.')
+                save_battery_counts(battery_counts)
+                break
+            continue

-        service = DBusService(service_name=cfg.SERVICE_NAME_PREFIX + tty)
+        elif n == max(battery_counts):
+            logging.info('Detected the same number of batteries as before. 
No need to re-detect.') + break - service.own_properties.set('/ResetBatteries', value=False, writable=True) # initial value = False + service = DBusService(service_name=cfg.SERVICE_NAME_PREFIX + tty) - main_loop = gobject.MainLoop() + service.own_properties.set('/ResetBatteries', value=False, writable=True) # initial value = False - service_signals = signals.init_service_signals(batteries) - publish_service_signals(service, service_signals) + main_loop = gobject.MainLoop() - update_task = create_update_task(modbus, service, batteries) - update_task() # run it right away, so that all props are initialized before anyone can ask - watchdog_task = create_watchdog_task(main_loop) + service_signals = signals.init_service_signals(batteries) + publish_service_signals(service, service_signals) - gobject.timeout_add(cfg.UPDATE_INTERVAL * 2, watchdog_task, priority = gobject.PRIORITY_LOW) # add watchdog first - gobject.timeout_add(cfg.UPDATE_INTERVAL, update_task, priority = gobject.PRIORITY_LOW) # call update once every update_interval + update_task = create_update_task(modbus, service, batteries) + update_task() # run it right away, so that all props are initialized before anyone can ask + watchdog_task = create_watchdog_task(main_loop) - logging.info('starting gobject.MainLoop') - main_loop.run() - logging.info('gobject.MainLoop was shut down') + gobject.timeout_add(cfg.UPDATE_INTERVAL * 2, watchdog_task, priority=gobject.PRIORITY_LOW) # add watchdog first + gobject.timeout_add(cfg.UPDATE_INTERVAL, update_task, + priority=gobject.PRIORITY_LOW) # call update once every update_interval - sys.exit(0xFF) # reaches this only on error + logging.info('starting gobject.MainLoop') + main_loop.run() + logging.info('gobject.MainLoop was shut down') + + sys.exit(0xFF) # reaches this only on error -main(sys.argv[1:]) - +main(sys.argv[1:]) \ No newline at end of file diff --git a/firmware/Venus_Release/update_Venus.py b/firmware/Venus_Release/update_Venus.py index 9a61637fd..ba38a0a01 100644 --- a/firmware/Venus_Release/update_Venus.py +++ b/firmware/Venus_Release/update_Venus.py @@ -141,39 +141,39 @@ async def main(remote_host): ##### 2. check whether it's Venus ###### gx_type = await check_GX_type(remote_host) - if gx_type == "beaglebone\n": +# if gx_type == "beaglebone\n": ##### 3. upload VPN and battery files ###### - print("Upload pika and battery files!") - if(await upload_files(remote_host)!="All files uploaded successfully."): - sys.exit("Failed to upload files!") - else: - print(await upload_files(remote_host)) - #### 4. import pika #### - print("Import pika!") - print(await import_pika(remote_host)) - #### 5. resize /dev/root ##### - print("Resize /dev/root now!") - print(await resize(remote_host)) - #### 6. stop battery service ###### - print("Stop battery service!") - print(await stop_battery_service(remote_host)) - #### 7. stop controller service ###### - print("Stop controller service!") - print(await stop_controller(remote_host)) - ##### 8. run rc.local ###### - print("Run rc.local!") - print(await run_rclocal(remote_host)) - ##### 9. start battery service ###### - print("Start battery service!") - print(await start_battery_service(remote_host)) - ##### 10. start controller service ###### - print("Start controller service!") - print(await start_controller(remote_host)) - ##### 11. 
restart gui ###### - print("Restart gui!") - print(await restart_gui(remote_host)) + print("Upload pika and battery files!") + if(await upload_files(remote_host)!="All files uploaded successfully."): + sys.exit("Failed to upload files!") else: - sys.exit("It's not Venus GX!") + print(await upload_files(remote_host)) + #### 4. import pika #### + print("Import pika!") + print(await import_pika(remote_host)) + #### 5. resize /dev/root ##### + print("Resize /dev/root now!") + print(await resize(remote_host)) + #### 6. stop battery service ###### + print("Stop battery service!") + print(await stop_battery_service(remote_host)) + #### 7. stop controller service ###### + print("Stop controller service!") + print(await stop_controller(remote_host)) + ##### 8. run rc.local ###### + print("Run rc.local!") + print(await run_rclocal(remote_host)) + ##### 9. start battery service ###### + print("Start battery service!") + print(await start_battery_service(remote_host)) + ##### 10. start controller service ###### + print("Start controller service!") + print(await start_controller(remote_host)) + ##### 11. restart gui ###### + print("Restart gui!") + print(await restart_gui(remote_host)) +# else: +# sys.exit("It's not Venus GX!") if __name__ == "__main__": diff --git a/typescript/frontend-marios2/package-lock.json b/typescript/frontend-marios2/package-lock.json index 55246c23d..b8a7c6960 100644 --- a/typescript/frontend-marios2/package-lock.json +++ b/typescript/frontend-marios2/package-lock.json @@ -1,11 +1,11 @@ { - "name": "Inesco Energy", + "name": "InnovEnergy", "version": "2.0.0", "lockfileVersion": 2, "requires": true, "packages": { "": { - "name": "Inesco Energy", + "name": "InnovEnergy", "version": "2.0.0", "dependencies": { "@emotion/react": "11.9.0", diff --git a/typescript/frontend-marios2/package.json b/typescript/frontend-marios2/package.json index fe0944d81..fb96cf903 100644 --- a/typescript/frontend-marios2/package.json +++ b/typescript/frontend-marios2/package.json @@ -1,7 +1,7 @@ { - "name": "c", + "name": "InnovEnergy", "version": "2.0.0", - "title": "Inesco Energy", + "title": "InnovEnergy", "private": false, "dependencies": { "@emotion/react": "11.9.0", diff --git a/typescript/frontend-marios2/public/Logo.png b/typescript/frontend-marios2/public/Logo.png deleted file mode 100644 index 38b67c0f4..000000000 Binary files a/typescript/frontend-marios2/public/Logo.png and /dev/null differ diff --git a/typescript/frontend-marios2/public/index.html b/typescript/frontend-marios2/public/index.html index 44d58e831..ee76d7b15 100644 --- a/typescript/frontend-marios2/public/index.html +++ b/typescript/frontend-marios2/public/index.html @@ -13,7 +13,7 @@ href="https://fonts.googleapis.com/css2?family=Inter:ital,wght@0,400&display=swap" rel="stylesheet" /> - Inesco Energy + InnovEnergy diff --git a/typescript/frontend-marios2/src/Resources/images/Logo.png b/typescript/frontend-marios2/src/Resources/images/Logo.png deleted file mode 100644 index 38b67c0f4..000000000 Binary files a/typescript/frontend-marios2/src/Resources/images/Logo.png and /dev/null differ diff --git a/typescript/frontend-marios2/src/Resources/images/Logo.svg b/typescript/frontend-marios2/src/Resources/images/Logo.svg deleted file mode 100644 index 08220ee5f..000000000 --- a/typescript/frontend-marios2/src/Resources/images/Logo.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/typescript/frontend-marios2/src/Resources/images/Logo_for_dark_bg.png 
b/typescript/frontend-marios2/src/Resources/images/Logo_for_dark_bg.png
deleted file mode 100644
index acfffcd03..000000000
Binary files a/typescript/frontend-marios2/src/Resources/images/Logo_for_dark_bg.png and /dev/null differ
diff --git a/typescript/frontend-marios2/src/Resources/images/Logo_for_dark_bg.svg b/typescript/frontend-marios2/src/Resources/images/Logo_for_dark_bg.svg
deleted file mode 100644
index c58778608..000000000
--- a/typescript/frontend-marios2/src/Resources/images/Logo_for_dark_bg.svg
+++ /dev/null
@@ -1 +0,0 @@
-[SVG markup stripped during extraction]
\ No newline at end of file
diff --git a/typescript/frontend-marios2/src/Resources/images/inesco_logo.png b/typescript/frontend-marios2/src/Resources/images/inesco_logo.png
new file mode 100644
index 000000000..f72e174f4
Binary files /dev/null and b/typescript/frontend-marios2/src/Resources/images/inesco_logo.png differ
diff --git a/typescript/frontend-marios2/src/Resources/images/innovenergy-Logo_Speichern-mit-Salz_R_color.svg b/typescript/frontend-marios2/src/Resources/images/innovenergy-Logo_Speichern-mit-Salz_R_color.svg
new file mode 100644
index 000000000..6b5ebc1a2
--- /dev/null
+++ b/typescript/frontend-marios2/src/Resources/images/innovenergy-Logo_Speichern-mit-Salz_R_color.svg
@@ -0,0 +1,88 @@
+[88 lines of SVG markup stripped during extraction: the InnovEnergy "Speichern mit Salz" logo]
diff --git a/typescript/frontend-marios2/src/components/Footer/index.tsx b/typescript/frontend-marios2/src/components/Footer/index.tsx
index c6567c78d..239c9a31e 100644
--- a/typescript/frontend-marios2/src/components/Footer/index.tsx
+++ b/typescript/frontend-marios2/src/components/Footer/index.tsx
@@ -18,7 +18,7 @@ function Footer() {
 [surrounding JSX tags stripped during extraction]
-          © 2024 - Inesco Energy Solutions AG
+          © 2024 - InnovEnergy AG
-          Inesco Energy Solutions AG
+          InnovEnergy AG
diff --git a/typescript/frontend-marios2/src/components/ForgotPassword.tsx b/typescript/frontend-marios2/src/components/ForgotPassword.tsx
index 4e61b6fa1..c44a4325c 100644
--- a/typescript/frontend-marios2/src/components/ForgotPassword.tsx
+++ b/typescript/frontend-marios2/src/components/ForgotPassword.tsx
@@ -3,11 +3,14 @@ import {
   Box,
   Button,
   CircularProgress,
+  Container,
+  Grid,
   Modal,
   TextField,
   Typography,
   useTheme
 } from '@mui/material';
+import innovenergyLogo from 'src/Resources/innoveng_logo_on_orange.png';
 import { UserContext } from 'src/contexts/userContext';
 import { TokenContext } from 'src/contexts/tokenContext';
 import Avatar from '@mui/material/Avatar';
@@ -15,7 +18,6 @@ import LockOutlinedIcon from '@mui/icons-material/LockOutlined';
 import axiosConfig from 'src/Resources/axiosConfig';
 import { useNavigate } from 'react-router-dom';
 import routes from 'src/Resources/routes.json';
-import inescologo from '../Resources/images/Logo.svg';

 interface ForgotPasswordPromps {
   resetPassword: () => void;
@@ -71,6 +73,16 @@ function ForgotPassword() {
   return (
     <>
+      [JSX markup stripped during extraction: a Container/Grid block wrapping an img with src={innovenergyLogo} and alt="innovenergy logo"]
-      [JSX markup stripped during extraction: the previous img block using the removed inescologo import]
@@ -127,15 +134,15 @@
           }}
         />
-        {loading && [stripped element, likely the imported CircularProgress]}
+        {loading && [stripped element, likely the imported CircularProgress]}