This commit is contained in:
2026-02-02 12:27:27 +05:30
parent 2ef8a9aff9
commit 5b14adc7c3
9 changed files with 4252 additions and 377 deletions

View File

@@ -1,136 +1,308 @@
# import matplotlib
# matplotlib.use("Agg")
# from flask import Blueprint, render_template, session, redirect, url_for
# import matplotlib.pyplot as plt
# import io
# import base64
# from app.utils.plot_utils import plot_to_base64
# from app.services.dashboard_service import DashboardService
# dashboard_bp = Blueprint("dashboard", __name__, url_prefix="/dashboard")
# # dashboard_bp = Blueprint("dashboard", __name__)
# # charts
# # def plot_to_base64():
# # img = io.BytesIO()
# # plt.savefig(img, format="png", bbox_inches="tight")
# # plt.close()
# # img.seek(0)
# # return base64.b64encode(img.getvalue()).decode()
# # bar chart
# def bar_chart():
# categories = ["Trench", "Manhole", "Pipe Laying", "Restoration"]
# values = [120, 80, 150, 60]
# plt.figure()
# plt.bar(categories, values)
# plt.title("Work Category Report")
# plt.xlabel("test Category")
# plt.ylabel("test Quantity")
# return plot_to_base64(plt)
# # Pie chart
# def pie_chart():
# labels = ["Completed", "In Progress", "Pending"]
# sizes = [55, 20, 25]
# plt.figure()
# plt.pie(sizes, labels=labels, autopct="%1.1f%%", startangle=140)
# plt.title("Project Status")
# return plot_to_base64(plt)
# # Histogram chart
# def histogram_chart():
# daily_work = [5, 10, 15, 20, 20, 25, 30, 35, 40, 45, 50]
# plt.figure()
# plt.hist(daily_work, bins=5)
# plt.title("Daily Work Distribution")
# plt.xlabel("Work Units")
# plt.ylabel("Frequency")
# return plot_to_base64(plt)
# # Dashboard page
# @dashboard_bp.route("/")
# def dashboard():
# if not session.get("user_id"):
# return redirect(url_for("auth.login"))
# return render_template(
# "dashboard.html",
# title="Dashboard",
# bar_chart=bar_chart(),
# pie_chart=pie_chart(),
# histogram=histogram_chart()
# )
# # subcontractor dashboard
# @dashboard_bp.route("/subcontractor_dashboard", methods=["GET", "POST"])
# def subcontractor_dashboard():
# if not session.get("user_id"):
# return redirect(url_for("auth.login"))
# tr_dash = DashboardService().bar_chart_of_tr_ex
# return render_template(
# "subcontractor_dashboard.html",
# title="Dashboard",
# bar_chart=tr_dash
# )
from flask import Blueprint, render_template, session, redirect, url_for, jsonify
from sqlalchemy import func
import logging
from flask import Blueprint, render_template, session, redirect, url_for, jsonify, request
from sqlalchemy import func, union_all
from app import db
from app.models.trench_excavation_model import TrenchExcavation
from app.models.tr_ex_client_model import TrenchExcavationClient
from app.models.manhole_excavation_model import ManholeExcavation
from app.models.mh_ex_client_model import ManholeExcavationClient
from app.models.laying_model import Laying
from app.models.laying_client_model import LayingClient
from app.models.subcontractor_model import Subcontractor
dashboard_bp = Blueprint("dashboard", __name__, url_prefix="/dashboard")
# Module-level logger shared by all dashboard routes.
logger = logging.getLogger(__name__)


@dashboard_bp.route("/api/live-stats")
def live_stats():
    """Live summary stats: per-table record counts, location spread, daily timeline.

    Returns JSON with keys ``summary``, ``locations`` and ``timeline``.
    """
    # 1. Overall volume per work type.
    t_count = TrenchExcavation.query.count()
    m_count = ManholeExcavation.query.count()
    l_count = Laying.query.count()
    # 2. Location distribution (business reach).
    loc_results = db.session.query(
        TrenchExcavation.Location,
        func.count(TrenchExcavation.id)
    ).group_by(TrenchExcavation.Location).all()
    # 3. Work timeline (productivity trend).
    # NOTE(review): assumes TrenchExcavation has a `created_at` column — confirm.
    timeline_results = db.session.query(
        func.date(TrenchExcavation.created_at),
        func.count(TrenchExcavation.id)
    ).group_by(func.date(TrenchExcavation.created_at))\
     .order_by(func.date(TrenchExcavation.created_at)).all()
    return jsonify({
        "summary": {
            "trench": t_count,
            "manhole": m_count,
            "laying": l_count,
            "total": t_count + m_count + l_count
        },
        "locations": {row[0]: row[1] for row in loc_results if row[0]},
        "timeline": {str(row[0]): row[1] for row in timeline_results}
    })


# API to get dynamic filter options from the database.
@dashboard_bp.route("/api/filters")
def get_filters():
    """Return filter options: subcontractor names plus distinct RA bill numbers.

    RA bills are collected from BOTH the subcontractor and the client trench
    tables so the dropdown covers every bill seen on either side.
    """
    try:
        logger.info("get_filters: fetching subcontractors and RA bills")
        # Subcontractors linked to at least one trench entry.
        subcontractors = db.session.query(Subcontractor.subcontractor_name)\
            .join(TrenchExcavation, Subcontractor.id == TrenchExcavation.subcontractor_id)\
            .distinct().all()
        logger.info("Subcontractors found: %d", len(subcontractors))
        # Diagnostics only — these extra queries run only when DEBUG logging is on,
        # so production requests don't pay for three additional COUNT queries.
        if logger.isEnabledFor(logging.DEBUG):
            total_records = db.session.query(TrenchExcavation).count()
            with_ra = db.session.query(TrenchExcavation)\
                .filter(TrenchExcavation.RA_Bill_No != None).count()
            with_ra_value = db.session.query(TrenchExcavation)\
                .filter(TrenchExcavation.RA_Bill_No != None,
                        TrenchExcavation.RA_Bill_No != "").count()
            logger.debug("Trench rows: total=%d, RA not null=%d, RA non-empty=%d",
                         total_records, with_ra, with_ra_value)
        # Union of distinct, non-empty RA bill numbers from both tables.
        ra_bills_union = db.session.query(TrenchExcavation.RA_Bill_No)\
            .filter(TrenchExcavation.RA_Bill_No != None)\
            .filter(TrenchExcavation.RA_Bill_No != "")\
            .union(
                db.session.query(TrenchExcavationClient.RA_Bill_No)
                .filter(TrenchExcavationClient.RA_Bill_No != None)
                .filter(TrenchExcavationClient.RA_Bill_No != "")
            ).order_by(TrenchExcavation.RA_Bill_No).all()
        ra_bills_list = [r[0] for r in ra_bills_union if r[0]]
        logger.info("Distinct RA bills found: %d", len(ra_bills_list))
        return jsonify({
            "subcontractors": [s[0] for s in subcontractors if s[0]],
            "ra_bills": ra_bills_list
        })
    except Exception as e:
        # Log the full traceback; surface a 500 with the message for the UI.
        logger.exception("ERROR in get_filters()")
        return jsonify({"error": str(e)}), 500
# API for the live abstract data — handles trench / manhole / laying table types.
# (soil type, depth band, model column) triples shared by all three tables.
_BASE_EXCAVATION_COLUMNS = [
    ("Soft Murum", "0-1.5m", "Soft_Murum_0_to_1_5"),
    ("Soft Murum", "1.5-3.0m", "Soft_Murum_1_5_to_3_0"),
    ("Hard Murum", "0-1.5m", "Hard_Murum_0_to_1_5"),
    ("Hard Murum", "1.5-3.0m", "Hard_Murum_1_5_to_3_0"),
    ("Soft Rock", "0-1.5m", "Soft_Rock_0_to_1_5"),
    ("Soft Rock", "1.5-3.0m", "Soft_Rock_1_5_to_3_0"),
    ("Hard Rock", "0-1.5m", "Hard_Rock_0_to_1_5"),
    ("Hard Rock", "1.5-3.0m", "Hard_Rock_1_5_to_3_0"),
]
# Trench excavation additionally tracks deeper bands.
_TRENCH_EXCAVATION_COLUMNS = _BASE_EXCAVATION_COLUMNS + [
    ("Soft Murum", "3.0-4.5m", "Soft_Murum_3_0_to_4_5"),
    ("Hard Rock", "3.0-4.5m", "Hard_Rock_3_0_to_4_5"),
    ("Hard Rock", "4.5-6.0m", "Hard_Rock_4_5_to_6_0"),
    ("Hard Rock", "6.0-7.5m", "Hard_Rock_6_0_to_7_5"),
]
# table_type -> (subcontractor model, client model, label, excavation columns)
_TABLE_CONFIG = {
    "trench": (TrenchExcavation, TrenchExcavationClient,
               "Trench Excavation", _TRENCH_EXCAVATION_COLUMNS),
    "manhole": (ManholeExcavation, ManholeExcavationClient,
                "Manhole Excavation", _BASE_EXCAVATION_COLUMNS),
    "laying": (Laying, LayingClient, "Laying", _BASE_EXCAVATION_COLUMNS),
}


@dashboard_bp.route("/api/excavation-abstract")
def excavation_abstract():
    """Client-vs-subcontractor quantity abstract, matched on Location + MH_NO.

    Query params:
        table_type: "trench" | "manhole" | "laying" (default "trench")
        subcontractor: subcontractor name, or "All" for no filter
        ra_bill: RA bill number, or "Cumulative" for no filter

    Returns a JSON list of rows with client/subcontractor quantities and diffs,
    or ``{"error": ...}`` with status 400/500 on bad input / failure.
    """
    try:
        table_type = request.args.get('table_type', 'trench')
        subcon_name = request.args.get('subcontractor', 'All')
        ra_bill = request.args.get('ra_bill', 'Cumulative')
        logger.info("excavation-abstract: table=%s subcontractor=%s ra_bill=%s",
                    table_type, subcon_name, ra_bill)
        config = _TABLE_CONFIG.get(table_type)
        if config is None:
            return jsonify({"error": f"Invalid table_type: {table_type}"}), 400
        SubconModel, ClientModel, table_label, excavation_columns = config
        # The three tables share these attribute names.
        location_key = 'Location'
        mh_key = 'MH_NO'
        logger.info("Using table: %s", table_label)
        # ===== FETCH SUBCONTRACTOR DATA =====
        subcon_query = db.session.query(SubconModel)
        if hasattr(SubconModel, 'subcontractor_id'):
            subcon_query = subcon_query.join(
                Subcontractor, Subcontractor.id == SubconModel.subcontractor_id
            )
            # Name filter only makes sense once the join is in place.
            if subcon_name != 'All':
                subcon_query = subcon_query.filter(
                    Subcontractor.subcontractor_name == subcon_name
                )
        subcon_results = subcon_query.all()
        logger.info("Found %d subcontractor records", len(subcon_results))
        # ===== FETCH CLIENT DATA =====
        client_query = db.session.query(ClientModel)
        if ra_bill != 'Cumulative' and hasattr(ClientModel, 'RA_Bill_No'):
            client_query = client_query.filter(ClientModel.RA_Bill_No == ra_bill)
        client_results = client_query.all()
        logger.info("Found %d client records", len(client_results))
        # ===== MATCH RECORDS BY MH_NO AND LOCATION =====
        # Map "Location|MH_NO" -> client record (later duplicates overwrite earlier).
        client_map = {}
        for client_record in client_results:
            key = f"{getattr(client_record, location_key)}|{getattr(client_record, mh_key)}"
            client_map[key] = client_record
        matched_data = {}
        match_count = 0
        for subcon_record in subcon_results:
            mh_no = getattr(subcon_record, mh_key)
            location = getattr(subcon_record, location_key)
            key = f"{location}|{mh_no}"
            if key not in client_map:
                continue  # inner join: only matched pairs are reported
            match_count += 1
            client_record = client_map[key]
            # Aggregate every excavation column of this matched pair.
            for soil, depth, col_name in excavation_columns:
                subcon_val = getattr(subcon_record, col_name, 0) or 0
                client_val = getattr(client_record, col_name, 0) or 0
                if subcon_val <= 0 and client_val <= 0:
                    continue  # skip cells where neither side has data
                record_key = f"{soil}|{depth}|{location}|{mh_no}"
                entry = matched_data.setdefault(record_key, {
                    "soil_type": soil,
                    "depth": depth,
                    "location": location,
                    "mh_no": mh_no,
                    "client_qty": 0,
                    "subcon_qty": 0
                })
                entry["client_qty"] += client_val
                entry["subcon_qty"] += subcon_val
        logger.info("Matched %d records; %d excavation items with data",
                    match_count, len(matched_data))
        # Format response rows and compute differences.
        data = []
        for item in matched_data.values():
            difference = item["subcon_qty"] - item["client_qty"]
            data.append({
                "label": f"{item['soil_type']} {item['depth']}",
                "soil_type": item["soil_type"],
                "depth": item["depth"],
                "location": item["location"],
                "mh_no": item["mh_no"],
                "client_qty": round(item["client_qty"], 2),
                "subcon_qty": round(item["subcon_qty"], 2),
                "difference": round(difference, 2)
            })
        # Stable ordering for the UI.
        data.sort(key=lambda x: (x["location"], x["mh_no"], x["soil_type"], x["depth"]))
        logger.info("Response prepared with %d matched records", len(data))
        return jsonify(data)
    except Exception as e:
        logger.exception("ERROR in excavation_abstract()")
        return jsonify({"error": str(e)}), 500
@dashboard_bp.route("/")
def dashboard():
    """Render the main dashboard page; redirect to login when no session exists."""
    if not session.get("user_id"):
        return redirect(url_for("auth.login"))
    return render_template("dashboard.html", title="Live Excavation Dashboard")

View File

@@ -1,5 +1,6 @@
import pandas as pd
import io
import logging
from flask import Blueprint, render_template, request, send_file, flash
from app.utils.helpers import login_required
@@ -18,6 +19,9 @@ from app.models.laying_client_model import LayingClient
# --- BLUEPRINT DEFINITION ---
file_report_bp = Blueprint("file_report", __name__, url_prefix="/file")
# Configure logging for debugging
logger = logging.getLogger(__name__)
# --- Client class ---
class ClientBill:
@@ -28,20 +32,57 @@ class ClientBill:
self.df_laying = pd.DataFrame()
def Fetch(self, RA_Bill_No):
    """Populate the four client DataFrames for one RA bill number.

    Queries the four client tables filtered by ``RA_Bill_No``, serializes the
    rows into DataFrames, then strips bookkeeping columns. Re-raises any
    database error after logging the traceback.
    """
    logger.info("ClientBill.Fetch() for RA_Bill_No=%r", RA_Bill_No)
    try:
        trench = TrenchExcavationClient.query.filter_by(RA_Bill_No=RA_Bill_No).all()
        mh = ManholeExcavationClient.query.filter_by(RA_Bill_No=RA_Bill_No).all()
        dc = ManholeDomesticChamberClient.query.filter_by(RA_Bill_No=RA_Bill_No).all()
        lay = LayingClient.query.filter_by(RA_Bill_No=RA_Bill_No).all()
        logger.info("records: trench=%d manhole=%d dc=%d laying=%d",
                    len(trench), len(mh), len(dc), len(lay))
        self.df_tr = pd.DataFrame([c.serialize() for c in trench])
        self.df_mh = pd.DataFrame([c.serialize() for c in mh])
        self.df_dc = pd.DataFrame([c.serialize() for c in dc])
        self.df_laying = pd.DataFrame([c.serialize() for c in lay])
        # Remove bookkeeping columns; errors="ignore" tolerates absent columns.
        drop_cols = ["id", "created_at", "_sa_instance_state"]
        for df in (self.df_tr, self.df_mh, self.df_dc, self.df_laying):
            if not df.empty:
                df.drop(columns=drop_cols, errors="ignore", inplace=True)
        logger.info("ClientBill.Fetch() completed successfully")
    except Exception:
        logger.exception("ERROR in ClientBill.Fetch()")
        raise
# --- Subcontractor class ---
class SubcontractorBill:
@@ -52,26 +93,68 @@ class SubcontractorBill:
self.df_laying = pd.DataFrame()
def Fetch(self, RA_Bill_No=None, subcontractor_id=None):
    """Populate the four subcontractor DataFrames, optionally filtered.

    Args:
        RA_Bill_No: restrict rows to one RA bill when given.
        subcontractor_id: restrict rows to one subcontractor when given.

    Both filters are optional; omitting both fetches everything. Re-raises
    any database error after logging the traceback.
    """
    logger.info("SubcontractorBill.Fetch(RA_Bill_No=%r, subcontractor_id=%r)",
                RA_Bill_No, subcontractor_id)
    try:
        # Build filter_by kwargs only for the arguments that were supplied.
        filters = {}
        if subcontractor_id:
            filters["subcontractor_id"] = subcontractor_id
        if RA_Bill_No:
            filters["RA_Bill_No"] = RA_Bill_No
        logger.info("Applied filters: %s", filters)
        trench = TrenchExcavation.query.filter_by(**filters).all()
        mh = ManholeExcavation.query.filter_by(**filters).all()
        dc = ManholeDomesticChamber.query.filter_by(**filters).all()
        lay = Laying.query.filter_by(**filters).all()
        logger.info("records: trench=%d manhole=%d dc=%d laying=%d",
                    len(trench), len(mh), len(dc), len(lay))
        self.df_tr = pd.DataFrame([c.serialize() for c in trench])
        self.df_mh = pd.DataFrame([c.serialize() for c in mh])
        self.df_dc = pd.DataFrame([c.serialize() for c in dc])
        self.df_laying = pd.DataFrame([c.serialize() for c in lay])
        # Remove bookkeeping columns; errors="ignore" tolerates absent columns.
        drop_cols = ["id", "created_at", "_sa_instance_state"]
        for df in (self.df_tr, self.df_mh, self.df_dc, self.df_laying):
            if not df.empty:
                df.drop(columns=drop_cols, errors="ignore", inplace=True)
        logger.info("SubcontractorBill.Fetch() completed successfully")
    except Exception:
        logger.exception("ERROR in SubcontractorBill.Fetch()")
        raise
# --- subcontractor report only ---

View File

@@ -1,45 +1,51 @@
from flask import Blueprint, render_template, request, send_file, flash
from collections import defaultdict
import pandas as pd
import io
import re
from collections import defaultdict
from app.models.subcontractor_model import Subcontractor
from app.models.trench_excavation_model import TrenchExcavation
from app.models.manhole_excavation_model import ManholeExcavation
from app.models.manhole_domestic_chamber_model import ManholeDomesticChamber
from app.models.laying_model import Laying
from app.models.tr_ex_client_model import TrenchExcavationClient
from app.models.mh_ex_client_model import ManholeExcavationClient
from app.models.mh_dc_client_model import ManholeDomesticChamberClient
from app.models.laying_client_model import LayingClient
from app.utils.helpers import login_required
import re
generate_report_bp = Blueprint("generate_report", __name__, url_prefix="/report")
# --- REGEX PATTERNS FOR TOTALING ---
# Matches pipe-laying quantity fields, e.g. "pipe_150_mm".
PIPE_MM_PATTERN = re.compile(r"^pipe_\d+_mm$")
# Matches MH/DC depth-range fields, e.g. "d_0_to_0_75" or "d_1_to_2".
D_RANGE_PATTERN = re.compile(r"^d_\d+(?:_\d+)?_to_\d+(?:_\d+)?$")
# NORMALIZER
def normalize_key(value):
    """Normalize a matching key: None stays None, else stripped upper-case str.

    Returning None (rather than "") keeps missing keys falsy so callers can
    skip rows with ``if not key``.
    """
    if value is None:
        return None
    return str(value).strip().upper()
# HEADER FORMATTER
def format_header(header):
    """Convert a snake_case column name to a display header.

    An optional "Prefix-" (e.g. "Client-") is title-cased and kept in front.
    Adjacent digit parts are joined with a dot ("0_75" -> "0.75"); other
    parts are title-cased and space-joined.
    """
    if "-" in header:
        prefix, rest = header.split("-", 1)
        prefix = prefix.title()
    else:
        prefix, rest = None, header
    parts = rest.split("_")
    result = []
    i = 0
    while i < len(parts):
        if i + 1 < len(parts) and parts[i].isdigit() and parts[i + 1].isdigit():
            # Two consecutive numeric parts are one decimal number.
            result.append(f"{parts[i]}.{parts[i + 1]}")
            i += 2  # consume both halves of the decimal
        else:
            result.append(parts[i].title())
            i += 1
    final_text = " ".join(result)
    return f"{prefix}-{final_text}" if prefix else final_text
# LOOKUP CREATOR
def make_lookup(rows, key_field):
    """Creates a mapping of (Location, Key) to a list of records.

    Rows missing either the Location or the key value are skipped. Keys are
    normalized via normalize_key so matching is case/whitespace-insensitive.
    """
    lookup = {}
    for r in rows:
        location = normalize_key(r.get("Location"))
        key_val = normalize_key(r.get(key_field))
        if location and key_val:
            lookup.setdefault((location, key_val), []).append(r)
    return lookup
def calculate_row_total(row_dict):
    """Calculates total based on _total suffix or regex patterns."""
    total = 0
    for field, value in row_dict.items():
        is_quantity = (
            field.endswith("_total")
            or D_RANGE_PATTERN.match(field)
            or PIPE_MM_PATTERN.match(field)
        )
        if is_quantity:
            total += float(value or 0)
    return total
# --- CORE COMPARISON LOGIC ---
# COMPARISON BUILDER
def build_comparison(client_rows, contractor_rows, key_field):
    """Inner-join client rows to subcontractor rows on (Location, key_field).

    Each client row consumes the NEXT unused subcontractor row for the same
    (Location, key) pair, so duplicate keys pair up one-to-one in order
    instead of every client row matching the same subcontractor row.
    Returns a DataFrame with Client-*/Subcontractor-* columns, totals and Diff
    (empty DataFrame when nothing matched).
    """
    contractor_lookup = make_lookup(contractor_rows, key_field)
    output = []
    # Tracks how many subcontractor rows per key were already consumed.
    used_index = defaultdict(int)
    for c in client_rows:
        client_location = normalize_key(c.get("Location"))
        client_key = normalize_key(c.get(key_field))
        if not client_location or not client_key:
            continue
        subs = contractor_lookup.get((client_location, client_key))
        if not subs:
            continue
        idx = used_index[(client_location, client_key)]
        if idx >= len(subs):
            continue  # subcontractor rows for this key are exhausted
        s = subs[idx]  # take the next unused subcontractor row
        used_index[(client_location, client_key)] += 1
        # ---- totals: *_total columns plus regex-matched quantity fields ----
        client_total = sum(
            float(v or 0) for k, v in c.items()
            if k.endswith("_total")
            or D_RANGE_PATTERN.match(k)
            or PIPE_MM_PATTERN.match(k)
        )
        sub_total = sum(
            float(v or 0) for k, v in s.items()
            if k.endswith("_total")
            or D_RANGE_PATTERN.match(k)
            or PIPE_MM_PATTERN.match(k)
        )
        row = {
            "Location": client_location,
            key_field.replace("_", " "): client_key
        }
        for k, v in c.items():
            if k not in ["id", "created_at"]:
                row[f"Client-{k}"] = v
        row["Client-Total"] = round(client_total, 2)
        row[" "] = ""  # visual spacer column between the two sides
        for k, v in s.items():
            if k not in ["id", "created_at", "subcontractor_id"]:
                row[f"Subcontractor-{k}"] = v
        row["Subcontractor-Total"] = round(sub_total, 2)
        row["Diff"] = round(client_total - sub_total, 2)
        output.append(row)
    if not output:
        # Empty frame keeps the Excel writer path safe (write_sheet skips it).
        return pd.DataFrame()
    df = pd.DataFrame(output)
    df.columns = [format_header(col) for col in df.columns]
    return df
# EXCEL SHEET WRITER
def write_sheet(writer, df, sheet_name, subcontractor_name):
    """Write one comparison DataFrame to an xlsxwriter sheet with styled headers.

    Skips empty frames. Data starts at row 3; rows 0-1 carry merged title
    banners, and row 3's auto-written header is re-written with per-column
    formats (client blue, subcontractor orange, totals yellow, diff green).
    """
    if df.empty:
        return
    workbook = writer.book
    df.to_excel(writer, sheet_name=sheet_name, index=False, startrow=3)
    ws = writer.sheets[sheet_name]
    # Formats
    title_fmt = workbook.add_format({"bold": True, "font_size": 14})
    client_fmt = workbook.add_format({"bold": True, "border": 1, "bg_color": "#B6DAED"})
    sub_fmt = workbook.add_format({"bold": True, "border": 1, "bg_color": "#F3A081"})
    total_fmt = workbook.add_format({"bold": True, "border": 1, "bg_color": "#F7D261"})
    diff_fmt = workbook.add_format({"bold": True, "border": 1, "bg_color": "#82DD49"})
    default_header_fmt = workbook.add_format({
        "bold": True, "border": 1, "bg_color": "#E7E6E6",
        "align": "center", "valign": "vcenter"
    })
    # Title banners across the full width.
    ws.merge_range(
        0, 0, 0, len(df.columns) - 1,
        "CLIENT vs SUBCONTRACTOR",
        title_fmt
    )
    ws.merge_range(
        1, 0, 1, len(df.columns) - 1,
        f"Subcontractor Name - {subcontractor_name}",
        title_fmt
    )
    # Re-write the header row with a format chosen per column prefix.
    for col_num, col_name in enumerate(df.columns):
        if col_name.startswith("Client-"):
            ws.write(3, col_num, col_name, client_fmt)
        elif col_name.startswith("Subcontractor-"):
            ws.write(3, col_num, col_name, sub_fmt)
        elif col_name.endswith("_total"):
            ws.write(3, col_num, col_name, total_fmt)
        elif col_name == "Diff":
            ws.write(3, col_num, col_name, diff_fmt)
        else:
            ws.write(3, col_num, col_name, default_header_fmt)
        ws.set_column(col_num, col_num, 20)
# REPORT ROUTE
@generate_report_bp.route("/comparison_report", methods=["GET", "POST"])
@login_required
def comparison_report():
    """Build and download the client-vs-subcontractor comparison workbook.

    GET renders the selection form; POST streams an .xlsx with one sheet per
    work type (trench, manhole, MH & DC, laying) for the chosen subcontractor.
    """
    subcontractors = Subcontractor.query.all()
    if request.method == "POST":
        subcontractor_id = request.form.get("subcontractor_id")
        if not subcontractor_id:
            flash("Please select a subcontractor", "danger")
            return render_template("generate_comparison_report.html",
                                   subcontractors=subcontractors)
        subcontractor = Subcontractor.query.get_or_404(subcontractor_id)
        # One (client model, subcontractor model, sheet name) triple per section
        # — a single loop replaces four near-identical fetch/compare stanzas.
        sections = [
            (TrenchExcavationClient, TrenchExcavation, "Tr.Ex"),
            (ManholeExcavationClient, ManholeExcavation, "Mh.Ex"),
            (ManholeDomesticChamberClient, ManholeDomesticChamber, "MH & DC"),
            (LayingClient, Laying, "Laying"),
        ]
        # -------- EXCEL --------
        output = io.BytesIO()
        filename = f"{subcontractor.subcontractor_name}_Comparison_Report.xlsx"
        with pd.ExcelWriter(output, engine="xlsxwriter") as writer:
            for client_model, sub_model, sheet_name in sections:
                client_rows = [r.serialize() for r in client_model.query.all()]
                sub_rows = [r.serialize() for r in sub_model.query.filter_by(
                    subcontractor_id=subcontractor_id).all()]
                df = build_comparison(client_rows, sub_rows, "MH_NO")
                write_sheet(writer, df, sheet_name, subcontractor.subcontractor_name)
        output.seek(0)
        return send_file(
            output,
            as_attachment=True,  # NOTE(review): reconstructed from a cut diff hunk — confirm
            download_name=filename,
            mimetype="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
        )
    return render_template("generate_comparison_report.html",
                           subcontractors=subcontractors)
# def build_comparison_mh_dc(client_rows, contractor_rows, key_field):
# contractor_lookup = make_lookup(contractor_rows, key_field)
# mh_dc_fields = ManholeDomesticChamberClient.sum_mh_dc_fields()
# output = []
# for c in client_rows:
# loc = normalize_key(c.get("Location"))
# key = normalize_key(c.get(key_field))
# if not loc or not key:
# continue
# s = contractor_lookup.get((loc, key))
# if not s:
# continue
# client_total = sum(float(c.get(f, 0) or 0) for f in mh_dc_fields)
# sub_total = sum(float(s.get(f, 0) or 0) for f in mh_dc_fields)
# row = {
# "Location": loc,
# key_field.replace("_", " "): key
# }
# # CLIENT ALL FIELDS
# for k, v in c.items():
# if k in ["id", "created_at"]:
# continue
# row[f"Client-{k}"] = v
# row["Client-Total"] = round(client_total, 2)
# row[" "] = ""
# # SUBCONTRACTOR ALL FIELDS
# for k, v in s.items():
# if k in ["id", "created_at", "subcontractor_id"]:
# continue
# row[f"Subcontractor-{k}"] = v
# row["Subcontractor-Total"] = round(sub_total, 2)
# row["Diff"] = round(client_total - sub_total, 2)
# output.append(row)
# df = pd.DataFrame(output)
# df.columns = [format_header(col) for col in df.columns]
# return df
# def build_comparison_laying(client_rows, contractor_rows, key_field):
# contractor_lookup = make_lookup(contractor_rows, key_field)
# laying_fields = Laying.sum_laying_fields()
# output = []
# for c in client_rows:
# loc = normalize_key(c.get("Location"))
# key = normalize_key(c.get(key_field))
# if not loc or not key:
# continue
# s = contractor_lookup.get((loc, key))
# if not s:
# continue
# client_total = sum(float(c.get(f, 0) or 0) for f in laying_fields)
# sub_total = sum(float(s.get(f, 0) or 0) for f in laying_fields)
# print("--------------",key,"----------")
# print("sum -client_total ",client_total)
# print("sum -sub_total ",sub_total)
# print("Diff ---- ",client_total - sub_total)
# print("------------------------")
# row = {
# "Location": loc,
# key_field.replace("_", " "): key
# }
# # CLIENT ALL FIELDS
# for k, v in c.items():
# if k in ["id", "created_at"]:
# continue
# row[f"Client-{k}"] = v
# row["Client-Total"] = round(client_total, 2)
# row[" "] = ""
# # SUBCONTRACTOR ALL FIELDS
# for k, v in s.items():
# if k in ["id", "created_at", "subcontractor_id"]:
# continue
# row[f"Subcontractor-{k}"] = v
# row["Subcontractor-Total"] = round(sub_total, 2)
# row["Diff"] = round(client_total - sub_total, 2)
# output.append(row)
# df = pd.DataFrame(output)
# df.columns = [format_header(col) for col in df.columns]
# return df