
Commit

linting, documentation improvement
ddceruti committed Jul 19, 2024
1 parent 98fce6e commit 17ca5c0
Showing 6 changed files with 70 additions and 37 deletions.
1 change: 1 addition & 0 deletions .github/workflows/python-package.yml
@@ -45,4 +45,5 @@ jobs:
flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
- name: Test with pytest
run: |
pytest tests/run_hydraulics_test.py
pytest tests/run_sts_test.py --solver cbc
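For local runs outside this CI workflow, the same two test entry points can be invoked through pytest's Python API. A minimal sketch (the --solver cbc flag is a project-specific pytest option assumed to be registered in the test suite's conftest, and the CBC solver must be installed locally):

    import pytest

    # Mirrors the CI step above: hydraulic precalculation tests first,
    # then the single-time-step (STS) optimization test with the CBC solver.
    pytest.main(["tests/run_hydraulics_test.py"])
    pytest.main(["tests/run_sts_test.py", "--solver", "cbc"])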
14 changes: 7 additions & 7 deletions topotherm/__init__.py
@@ -1,7 +1,7 @@
from . import model
from . import plotting
from . import utils
from . import fileio
from . import precalculation_hydraulic
from . import postprocessing
from . import settings
# from . import model
# from . import plotting
# from . import utils
# from . import fileio
# from . import precalculation_hydraulic
# from . import postprocessing
# from . import settings
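With the package-level re-exports commented out above, downstream code has to import the submodules explicitly. A minimal sketch, assuming the module names listed in this diff (the input path is hypothetical):

    from topotherm import fileio, settings

    data = fileio.load("path/to/input_parquets")  # hypothetical input folder
    opts = settings.Optimization()                # default optimization settings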
36 changes: 30 additions & 6 deletions topotherm/fileio.py
@@ -6,9 +6,28 @@


def load(path):
"""Input: file_name and file_path
Returns: Matrices A_i A_p and A_c, Heat Demand, Length of edges, and positions"""
"""Read the input data from the given path and return the matrices A_i,
A_p, A_c, Heat Demand, Length of edges, and positions.
Args:
path (str or os.path): Path to the input data.
Returns:
r (dict): Matrices stored in keys 'a_i', 'a_p', 'a_c', 'q_c', 'l_i',
and 'position'.
"""

def duplicate_columns(data, minoccur=2):
"""Find duplicate columns in a numpy array.
Args:
data (np.array): Data to check for duplicates.
minoccur (int): Minimum number of occurrences to be considered a
duplicate.
Returns:
result (list): List of indices of duplicate columns.
"""
ind = np.lexsort(data)
diff = np.any(data.T[ind[1:]] != data.T[ind[:-1]], axis=1)
edges = np.where(diff)[0] + 1
@@ -19,9 +38,14 @@ def duplicate_columns(data, minoccur=2):
a_i = pd.read_parquet(os.path.join(path, 'A_i.parquet')).to_numpy()
a_p = pd.read_parquet(os.path.join(path, 'A_p.parquet')).to_numpy()
a_c = pd.read_parquet(os.path.join(path, 'A_c.parquet')).to_numpy()
length = pd.read_parquet(os.path.join(path, 'L_i.parquet')).to_numpy().astype(float)
q_c = (pd.read_parquet(os.path.join(path, 'Q_c.parquet')).to_numpy().astype(float)) / 1000 #Example data is in W, optimization in kW
position = pd.read_parquet(os.path.join(path, 'rel_positions.parquet')).loc[:, 'x_rel':'y_rel'].to_numpy().astype(float)
length = pd.read_parquet(
os.path.join(path, 'L_i.parquet')).to_numpy().astype(float)
q_c = (pd.read_parquet(
os.path.join(path, 'Q_c.parquet')
).to_numpy().astype(float)) / 1000 # Data is in W, optimization in kW
position = pd.read_parquet(
os.path.join(path, 'rel_positions.parquet')
).loc[:, 'x_rel':'y_rel'].to_numpy().astype(float)

if (a_i.sum(axis=0).sum() != 0) | (np.abs(a_i).sum(axis=0).sum()/2 != np.shape(a_i)[1]):
print("Warning: The structure of A_i is not correct!")
@@ -38,7 +62,7 @@ def duplicate_columns(data, minoccur=2):
elif np.shape(position)[0] != np.shape(a_i)[0]:
print("Warning: Position doesn't match with the number of nodes!")
elif len(duplicate_columns(a_i)) != 0:
print("Warning: There are duplicate columns in A_i, but we took care of it!")
print("Warning: There are duplicate columns in A_i, we took care of it!")
delete_col = duplicate_columns(a_i)
if length[delete_col[0][0]] > length[delete_col[0][1]]:
np.delete(length, delete_col[0][0], axis=0)
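The lexsort-based duplicate-column check above can be illustrated with a small standalone sketch. The tail of the helper is collapsed in this hunk, so the grouping step below is an assumption reconstructed from how the result is indexed (delete_col[0][0], delete_col[0][1]):

    import numpy as np

    def duplicate_columns(data, minoccur=2):
        # Sort columns lexicographically, compare neighbours, and group
        # identical columns together (sketch of the helper shown above).
        ind = np.lexsort(data)
        diff = np.any(data.T[ind[1:]] != data.T[ind[:-1]], axis=1)
        edges = np.where(diff)[0] + 1
        groups = np.split(ind, edges)
        return [g for g in groups if len(g) >= minoccur]

    a_i = np.array([[1, 0, 1],
                    [0, 1, 0]])
    print(duplicate_columns(a_i))  # [array([0, 2])]: columns 0 and 2 are identical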
20 changes: 8 additions & 12 deletions topotherm/model.py
@@ -19,7 +19,6 @@
import pyomo.environ as pyo



def annuity(c_i, n):
"""Calculate the annuity factor.
@@ -84,11 +83,14 @@ def sts(matrices, sets, regression_caps, regression_losses,
step operation.
Args:
matrices (dict): Dictionary with the matrices of the district heating network with keys
a_i, a_p, a_c, l_i, position, q_c
sets (dict): Dictionary with the sets for the optimization, obtained from matrices
regression_caps (dict): Dictionary with the regression coefficients for the thermal capacity
regression_losses (dict): Dictionary with the regression coefficients for the heat losses
matrices (dict): Dictionary with the matrices of the district heating
network with keys a_i, a_p, a_c, l_i, position, q_c
sets (dict): Dictionary with the sets for the optimization, obtained
from matrices
regression_caps (dict): Dictionary with the regression coefficients
for the thermal capacity
regression_losses (dict): Dictionary with the regression coefficients
for the heat losses
Returns:
model (pyomo.environ.ConcreteModel): pyomo model
@@ -145,7 +147,6 @@ def heat_source_cap(m, j, t):
doc='Investment costs for the heat source')
# @TODO: Check if nodal power balance is the same for forced and eco (it should be the case, but testing is needed)


def nodal_power_balance(m, j, t):
term1 = sum(m.P_11[k, t] - m.P_22[k, t] for k in sets['a_i_out'][j]) # Sum of outgoing flows from pipes
term2 = sum(m.P_21[k, t] - m.P_12[k, t] for k in sets['a_i_in'][j]) # Sum of incoming flows from pipes
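The sets a_i_out[j] and a_i_in[j] hold the pipes leaving and entering node j. A toy sketch of how such sets could be derived from the incidence matrix A_i, assuming the usual node-pipe incidence convention of one +1 and one -1 per column; which sign counts as outgoing, and topotherm's own set construction, are assumptions not shown in this commit:

    import numpy as np

    # 3 nodes x 2 pipes; assume +1 = pipe starts at the node, -1 = pipe ends there.
    a_i = np.array([[ 1,  0],
                    [-1,  1],
                    [ 0, -1]])
    a_i_out = {j: np.where(a_i[j, :] == 1)[0].tolist() for j in range(a_i.shape[0])}
    a_i_in = {j: np.where(a_i[j, :] == -1)[0].tolist() for j in range(a_i.shape[0])}
    print(a_i_out)  # {0: [0], 1: [1], 2: []}
    print(a_i_in)   # {0: [], 1: [0], 2: [1]}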
@@ -272,7 +273,6 @@ def objective_function(m):
# @TODO: discuss with jerry simplification strategies, since both models share a lot of equations.
# @TODO: change the flh_scaling somehow
# @TODO: implement existing pipes and sources

def mts_easy(matrices, sets, regression_caps, regression_losses,
economics: Economics, opt_mode: str, flh_scaling: float):
"""Create the optimization model for the thermo-hydraulic coupled with multiple time
@@ -345,14 +345,12 @@ def mts_easy(matrices, sets, regression_caps, regression_losses,
model.P_source_cap = pyo.Var(model.set_n_p,
doc='Thermal capacity of the heat source', **source_power)


def heat_source_cap(m, j, t):
return m.P_source[j, t] <= m.P_source_cap[j]
model.cons_heat_source_cap = pyo.Constraint(
model.set_n_p, model.set_t,
rule=heat_source_cap, doc='Investment costs for the heat source')


def nodal_power_balance(m, j, t):
term1 = sum(m.P_11[k, t] - m.P_22[k, t] for k in sets['a_i_out'][j]) # Sum of outgoing flows from pipes
term2 = sum(m.P_21[k, t] - m.P_12[k, t] for k in sets['a_i_in'][j]) # Sum of incoming flows from pipes
@@ -481,7 +479,6 @@ def built_usage_mapping_help2(m, j, t):
rule=built_usage_mapping_help2)

def objective_function(m):

term1 = sum(
sum(m.P_source[k, t] * economics.source_price * model.flh for k in m.set_n_p)
for t in model.set_t
@@ -594,4 +591,3 @@ def power_balance_pipe_21(m, j, t):
doc='Complex Power balance pipe j->i')

return model

35 changes: 24 additions & 11 deletions topotherm/settings.py
@@ -4,6 +4,7 @@
from dataclasses import dataclass, field
from typing import Tuple


@dataclass
class Water:
"""Water properties for the linearization regression."""
@@ -12,38 +13,49 @@ class Water:
density: float = 977.76
heat_capacity_cp: float = 4.187e3


@dataclass
class Ground:
"""Ground properties."""
thermal_conductivity: float = 2.4


@dataclass
class Temperatures:
"""Temperatures for the linearization regression."""
ambient: float = -20
supply: float = 70
return_: float = 55


@dataclass
class Piping:
"""Piping properties for thermal losses and investment cost linearization regression."""
"""Piping properties for thermal losses and investment cost linearization
regression."""
# list of all available diameters
# list of floats of all inner diameters of the available discrete pipe sizes
diameter: Tuple[float, ...] = field(default_factory=lambda: (
0.0216, 0.0285, 0.0372, 0.0431, 0.0545, 0.0703, 0.0825, 0.1071, 0.1325, 0.1603, 0.2101,
0.263, 0.3127, 0.3444, 0.3938
))
0.0216, 0.0285, 0.0372, 0.0431,
0.0545, 0.0703, 0.0825, 0.1071,
0.1325, 0.1603, 0.2101, 0.263,
0.3127, 0.3444, 0.3938
))
outer_diameter: Tuple[float, ...] = field(default_factory=lambda: (
0.09, 0.09, 0.11, 0.11, 0.125, 0.14, 0.16, 0.2, 0.225, 0.25, 0.315, 0.4, 0.45, 0.5, 0.56
))
0.09, 0.09, 0.11, 0.11,
0.125, 0.14, 0.16, 0.2,
0.225, 0.25, 0.315, 0.4,
0.45, 0.5, 0.56
))
cost: Tuple[float, ...] = field(default_factory=lambda: (
390, 400, 430, 464, 498, 537, 602, 670, 754, 886, 1171, 1184, 1197, 1401, 1755
))
390, 400, 430, 464,
498, 537, 602, 670,
754, 886, 1171, 1184,
1197, 1401, 1755
))
number_diameter: int = 15 # number of discrete diameters
max_pr_loss: int = 250 # assumed pressure loss in Pa per meter
roughness: float = 0.05e-3 # pipe roughtness factor
roughness: float = 0.05e-3 # pipe roughness factor
thermal_conductivity: float = 0.024 # pipe thermal conductivity in W/mK



@dataclass
@@ -70,10 +82,11 @@ class Optimization:
economics: Economics = field(default_factory=Economics)
temperatures: Temperatures = field(default_factory=Temperatures)


@dataclass
class Regression:
"""Settings for the linearization of the piping."""
ground: Ground = field(default_factory=Ground)
water: Water = field(default_factory=Water)
temperatures: Temperatures = field(default_factory=Temperatures)
piping: Piping = field(default_factory=Piping)
piping: Piping = field(default_factory=Piping)
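A small usage sketch of how these settings dataclasses compose, using the field names from this diff (the values passed below are arbitrary examples, not recommended settings):

    from topotherm.settings import Regression, Temperatures

    reg = Regression(temperatures=Temperatures(ambient=-12, supply=80, return_=60))
    print(reg.piping.diameter[:3])     # (0.0216, 0.0285, 0.0372) - inner diameters in m
    print(reg.water.heat_capacity_cp)  # 4187.0 J/(kg K)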
1 change: 0 additions & 1 deletion topotherm/utils.py
@@ -81,5 +81,4 @@ def model_to_df(model):
solution[labels[obj]] = pyo.value(obj)

df = pd.Series(solution)

return df
