Upload file changes as per sheet; also add an RA_Bill_No field to the tr_ex and mh_ex models.

2025-12-18 18:29:25 +05:30
parent fe89d5e2eb
commit 51088975d3
11 changed files with 308 additions and 80 deletions

.gitignore

@@ -1,8 +1,11 @@
-# Ignore folders
-instance/
+# Ingnor upload files
 app/static/uploads/
 # Ignore files
 .env
-# Ignore by type
-*.log
+venv
+# Ignore Log files
+logs
+# Ignore db folders
+instance


@@ -49,13 +49,14 @@ class ManholeExcavation(db.Model):
     Soft_Rock_1_5_and_above_total = db.Column(db.Float)
     Hard_Rock_0_to_1_5_total = db.Column(db.Float)
-    Hard_Rock_1_5_and_above_total = db.Column(db.Float)
+    Hard_Rock_1_5_to_3_0_total = db.Column(db.Float)
     Hard_Rock_3_0_to_4_5_total = db.Column(db.Float)
     Hard_Rock_4_5_to_6_0_total = db.Column(db.Float)
     Hard_Rock_6_0_to_7_5_total = db.Column(db.Float)
-    Remarks = db.Column(db.String(500))
     Total = db.Column(db.Float)
+    Remarks = db.Column(db.String(500))
+    RA_Bill_No=db.Column(db.String(500))
     created_at = db.Column(db.DateTime, default=datetime.today)


@@ -75,8 +75,8 @@ class TrenchExcavationClient(db.Model):
     Hard_Rock_4_5_to_6_0_total = db.Column(db.Float)
     Hard_Rock_6_0_to_7_5_total = db.Column(db.Float)
-    Remarks = db.Column(db.String(500))
     Total = db.Column(db.Float)
+    Remarks = db.Column(db.String(500))
     created_at = db.Column(db.DateTime, default=datetime.today)


@@ -60,15 +60,17 @@ class TrenchExcavation(db.Model):
     Soft_Rock_1_5_and_above_total = db.Column(db.Float)
     Hard_Rock_0_to_1_5_total = db.Column(db.Float)
-    Hard_Rock_1_5_and_above_total = db.Column(db.Float)
+    Hard_Rock_1_5_to_3_0_total = db.Column(db.Float)
     Hard_Rock_3_0_to_4_5_total = db.Column(db.Float)
     Hard_Rock_4_5_to_6_0_total = db.Column(db.Float)
     Hard_Rock_6_0_to_7_5_total = db.Column(db.Float)
-    Remarks = db.Column(db.String(500))
     Total = db.Column(db.Float)
+    Remarks = db.Column(db.String(500))
+    RA_Bill_No=db.Column(db.String(500))
     created_at = db.Column(db.DateTime, default=datetime.today)

     def __repr__(self):
         return f"<TrenchExcavation {self.Location}>"

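Adding RA_Bill_No to these two models (and renaming Hard_Rock_1_5_and_above_total to Hard_Rock_1_5_to_3_0_total) also needs a matching schema change. Below is a minimal Flask-Migrate/Alembic sketch of that change, not the migration from this commit; the table names are assumed from the model class names, and in practice the file would be generated with flask db migrate against the updated models.

# Hypothetical Alembic migration sketch; table names are assumptions.
from alembic import op
import sqlalchemy as sa

TABLES = ("trench_excavation", "manhole_excavation")

def upgrade():
    for table in TABLES:
        # batch_alter_table keeps the operations compatible with SQLite.
        with op.batch_alter_table(table) as batch:
            batch.add_column(sa.Column("RA_Bill_No", sa.String(500)))
            batch.alter_column(
                "Hard_Rock_1_5_and_above_total",
                new_column_name="Hard_Rock_1_5_to_3_0_total",
            )

def downgrade():
    for table in TABLES:
        with op.batch_alter_table(table) as batch:
            batch.alter_column(
                "Hard_Rock_1_5_to_3_0_total",
                new_column_name="Hard_Rock_1_5_and_above_total",
            )
            batch.drop_column("RA_Bill_No")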

@@ -23,6 +23,8 @@ class FileService:
     # def handle_file_upload(self, file, subcontractor_id, file_type):
     def handle_file_upload(self, file, subcontractor_id):
+        RA_Bill_No = 1  # this RA bill define temp for upload (changes also)
         if not subcontractor_id:
             return False, "Please select subcontractor."
         # if not file_type:
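The new RA_Bill_No = 1 is, per its own comment, a temporary placeholder. One possible follow-up, sketched below only as an illustration, would be to pass the bill number in from the caller instead of hardcoding it; the parameter name and default value are assumptions, not part of this commit.

# Trimmed-down sketch of the signature change; the real method also validates
# the file and parses the Excel sheets before calling the processors.
class FileService:
    def handle_file_upload(self, file, subcontractor_id, ra_bill_no="1"):
        # ra_bill_no would replace the hardcoded RA_Bill_No = 1 and be
        # forwarded to process_trench_excavation / process_manhole_excavation.
        if not subcontractor_id:
            return False, "Please select subcontractor."
        if file is None or file.filename == "":
            return False, "No file selected."
        return True, f"File accepted for RA bill {ra_bill_no}."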
@@ -41,26 +43,26 @@ class FileService:
         filepath = os.path.join(folder, filename)
         file.save(filepath)
+        print("name::::",filename)
         try:
             # df = pd.read_csv(filepath) if filename.endswith(".csv") else pd.read_excel(filepath)
-            df = pd.read_excel(filepath, sheet_name ="Tr.Ex.", header=0)
-            df1 = pd.read_excel(filepath, sheet_name="MH Ex.", header=0)
-            df2 = pd.read_excel(filepath, sheet_name="MH & DC", header=0)
-            print("\n=== Uploaded File Preview ===")
-            print(" file name h:",df)
-            print("=============================\n")
-            print(" file name h1:",df1)
-            # print(df.head())
-            print("=============================\n")
-            print(" file name h1:",df2)
-            self.process_trench_excavation(df,subcontractor_id)
-            self.process_manhole_excavation(df1,subcontractor_id)
-            self.process_manhole_domestic_chamber(df2, subcontractor_id)
+            df_tr_ex = pd.read_excel(filepath, sheet_name ="Tr.Ex.", header=12)
+            df_mh_ex = pd.read_excel(filepath, sheet_name="MH Ex.", header=12)
+            # df2 = pd.read_excel(filepath, sheet_name="MH & DC", header=0)
+            print("\n=== Uploaded File tr ex ===")
+            print(df_tr_ex.head())
+            print("=============================\n")
+            print("=== Uploaded File mh ex ===")
+            print(df_mh_ex.head())
+            print("========================================")
+            self.process_trench_excavation(df_tr_ex, subcontractor_id, RA_Bill_No)
+            self.process_manhole_excavation(df_mh_ex, subcontractor_id,RA_Bill_No)
             # df = pd.read_csv(filepath) if filename.endswith(".csv") else pd.read_excel(filepath)
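The sheet reads switch from header=0 to header=12, i.e. pandas now takes the thirteenth spreadsheet row (0-indexed) as the column header row and skips the title and banner rows above it. A standalone sketch of the same call; the workbook path is a placeholder, only the sheet names come from the commit.

import pandas as pd

# header=12 is 0-based: row 13 of each sheet supplies the column names, and
# rows above it (sheet titles, merged banners) are ignored.
workbook = "sample_measurement_sheet.xlsx"  # placeholder path
df_tr_ex = pd.read_excel(workbook, sheet_name="Tr.Ex.", header=12)
df_mh_ex = pd.read_excel(workbook, sheet_name="MH Ex.", header=12)
print(df_tr_ex.columns.tolist())
print(df_mh_ex.head())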
@@ -94,96 +96,316 @@ class FileService:
             return False, f"Processing failed: {e}"
 
-    # ---------------------- Sub contractor --------------------------
-    # Trench Excavation save method (TrenchExcavation model)
-    def process_trench_excavation(self, df, subcontractor_id):
-        df.columns = [str(c).strip() for c in df.columns]
-        # If the sheet has merged cells -> forward fill Location
+    # new new
+    def process_trench_excavation(self, df, subcontractor_id,RA_Bill_No):
+        print("RA_Bill_No of Tr Ex:",RA_Bill_No)
+        print("=== RAW HEADERS ===")
+        print(df.columns.tolist())
+        print("===================")
+        # Clean column names
+        df.columns = (
+            df.columns.astype(str)
+            .str.strip()
+            .str.replace(r"[^\w]", "_", regex=True)
+            .str.replace("__+", "_", regex=True)
+            .str.strip("_")
+        )
+        # Remove completely empty rows
+        df = df.dropna(how="all")
+        # Forward fill merged Location
         if "Location" in df.columns:
             df["Location"] = df["Location"].ffill()
-        df = df.dropna(how="all")  # REMOVE empty rows
-        # Identify missing location rows before insert
-        missing_loc = df[df["Location"].isna() | (df["Location"].astype(str).str.strip() == "")]
-        if not missing_loc.empty:
-            return False, f"Error: Some rows have empty Location. Rows: {missing_loc.index.tolist()}"
         saved_count = 0
+        skipped_count = 0
         try:
             for index, row in df.iterrows():
                 record_data = {}
-                # Insert only fields that exist in model
+                location = row.get("Location")
+                mh_no = row.get("MH_NO")
+                if (pd.isna(location) or str(location).strip() == "" or pd.isna(mh_no) or str(mh_no).strip() == ""):
+                    skipped_count += 1
+                    continue
+                # Map only model columns
                 for col in df.columns:
                     if hasattr(TrenchExcavation, col):
                         value = row[col]
                         # Normalize empty values
-                        if pd.isna(value) or str(value).strip() in ["", "-", "—", "nan", "NaN"]:
+                        if pd.isna(value) or str(value).strip() in ["", "-", "—", "nan"]:
                             value = None
                         record_data[col] = value
+                # If all mapped fields are None → skip
+                if all(v is None for v in record_data.values()):
+                    skipped_count += 1
+                    continue
                 record = TrenchExcavation(
-                    subcontractor_id=subcontractor_id,
+                    subcontractor_id=subcontractor_id, RA_Bill_No=RA_Bill_No,
                     **record_data
                 )
+                print("Saving Row → Location:", record.Location, " MH_NO:", record.MH_NO)
                 db.session.add(record)
                 saved_count += 1
             db.session.commit()
-            return True, f"Trench Excavation data saved successfully. Total rows: {saved_count}"
+            return True, (
+                f"Trench Excavation saved successfully. "
+                f"Inserted: {saved_count}, Skipped: {skipped_count}"
+            )
         except Exception as e:
             db.session.rollback()
-            return False, f"Trench Excavation Save Failed: {e}"
+            return False, f"Trench Excavation save failed: {e}"
 
-    # Manhole Excavation save method (ManholeExcavation model)
-    def process_manhole_excavation(self, df, subcontractor_id):
-        # Clean column names (strip whitespace)
-        df.columns = [str(c).strip() for c in df.columns]
-        # If the sheet has merged cells -> forward fill Location
+    # new new
+    def process_manhole_excavation(self, df, subcontractor_id, RA_Bill_No):
+        print("RA_Bill_No of MH EX:",RA_Bill_No)
+        print("=== RAW HEADERS ===")
+        print(df.columns.tolist())
+        print("===================")
+        # Clean column names
+        df.columns = (
+            df.columns.astype(str)
+            .str.strip()
+            .str.replace(r"[^\w]", "_", regex=True)
+            .str.replace("__+", "_", regex=True)
+            .str.strip("_")
+        )
+        # Remove completely empty rows
+        df = df.dropna(how="all")
+        # Forward fill merged Location
         if "Location" in df.columns:
             df["Location"] = df["Location"].ffill()
-        # REMOVE empty rows
-        df = df.dropna(how="all")
-        # Identify missing location rows before insert
-        missing_loc = df[df["Location"].isna() | (df["Location"].astype(str).str.strip() == "")]
-        if not missing_loc.empty:
-            return False, f"Error: Some rows have empty Location. Rows: {missing_loc.index.tolist()}"
         saved_count = 0
+        skipped_count = 0
         try:
             for index, row in df.iterrows():
                 record_data = {}
-                # Insert only fields that exist in model
+                location = row.get("Location")
+                mh_no = row.get("MH_NO")
+                if (pd.isna(location) or str(location).strip() == "" or pd.isna(mh_no) or str(mh_no).strip() == ""):
+                    skipped_count += 1
+                    continue
+                # Map only model columns
                 for col in df.columns:
                     if hasattr(ManholeExcavation, col):
                         value = row[col]
                         # Normalize empty values
-                        if pd.isna(value) or str(value).strip() in ["", "-", "—", "nan", "NaN"]:
+                        if pd.isna(value) or str(value).strip() in ["", "-", "—", "nan"]:
                             value = None
                         record_data[col] = value
+                # If all mapped fields are None → skip
+                if all(v is None for v in record_data.values()):
+                    skipped_count += 1
+                    continue
                 record = ManholeExcavation(
-                    subcontractor_id=subcontractor_id,
+                    subcontractor_id=subcontractor_id, RA_Bill_No=RA_Bill_No,
                     **record_data
                 )
+                print("Saving Row → Location:", record.Location, " MH_NO:", record.MH_NO)
                 db.session.add(record)
                 saved_count += 1
             db.session.commit()
-            return True, f"Manhole Excavation data saved successfully. Total rows: {saved_count}"
+            return True, (
+                f"Manhole Excavation saved successfully. "
+                f"Inserted: {saved_count}, Skipped: {skipped_count}"
+            )
         except Exception as e:
             db.session.rollback()
-            return False, f"Manhole Excavation Save Failed: {e}"
+    # olds
+    # def handle_file_upload(self, file, subcontractor_id, file_type):
+    # # def handle_file_upload(self, file, subcontractor_id):
+    # if not subcontractor_id:
+    # return False, "Please select subcontractor."
+    # # if not file_type:
+    # # return False, "Please select file type."
+    # if not file or file.filename == "":
+    # return False, "No file selected."
+    # if not self.allowed_file(file.filename):
+    # return False, "Invalid file type! Allowed: CSV, XLSX, XLS"
+    # ensure_upload_folder()
+    # folder = os.path.join(Config.UPLOAD_FOLDER, f"sub_{subcontractor_id}")
+    # os.makedirs(folder, exist_ok=True)
+    # filename = secure_filename(file.filename)
+    # filepath = os.path.join(folder, filename)
+    # file.save(filepath)
+    # try:
+    # # df = pd.read_csv(filepath) if filename.endswith(".csv") else pd.read_excel(filepath)
+    # df = pd.read_excel(filepath, sheet_name ="Tr.Ex.", header=0)
+    # df1 = pd.read_excel(filepath, sheet_name="MH Ex.", header=0)
+    # print("\n=== Uploaded File Preview ===")
+    # print(" file name h:",df)
+    # print("=============================\n")
+    # print(" file name h1:",df1)
+    # # print(df.head())
+    # print("=============================\n")
+    # # Trench Excavation save (subcontractor)
+    # if file_type == "trench_excavation":
+    # return self.process_trench_excavation(df, subcontractor_id)
+    # # Manhole Excavation save (subcontractor)
+    # if file_type == "manhole_excavation":
+    # return self.process_manhole_excavation(df, subcontractor_id)
+    # # Manhole and Domestic Chamber Construction save (subcontractor)
+    # if file_type == "manhole_domestic_chamber":
+    # return self.process_manhole_domestic_chamber(df, subcontractor_id)
+    # # Tr Ex save (client)
+    # if file_type =="tr_ex_client":
+    # return self.client_trench_excavation(df, subcontractor_id)
+    # # Mh Ex save (client)
+    # if file_type =="mh_ex_client":
+    # return self.client_manhole_excavation(df, subcontractor_id)
+    # # Mh and Dc save (client)
+    # if file_type == "mh_dc_client":
+    # return self.client_manhole_domestic_chamber(df, subcontractor_id)
+    # return True, "File uploaded successfully."
+    # except Exception as e:
+    # return False, f"Processing failed: {e}"
+    # ---------------------- Sub contractor --------------------------
+    # Trench Excavation save method (TrenchExcavation model)
+    # def process_trench_excavation(self, df, subcontractor_id):
+    # df.columns = [str(c).strip() for c in df.columns]
+    # # If the sheet has merged cells -> forward fill Location
+    # if "Location" in df.columns:
+    # df["Location"] = df["Location"].ffill()
+    # df = df.dropna(how="all") # REMOVE empty rows
+    # # Identify missing location rows before insert
+    # missing_loc = df[df["Location"].isna() | (df["Location"].astype(str).str.strip() == "")]
+    # if not missing_loc.empty:
+    # return False, f"Error: Some rows have empty Location. Rows: {missing_loc.index.tolist()}"
+    # saved_count = 0
+    # try:
+    # for index, row in df.iterrows():
+    # record_data = {}
+    # # Insert only fields that exist in model
+    # for col in df.columns:
+    # if hasattr(TrenchExcavation, col):
+    # value = row[col]
+    # # Normalize empty values
+    # if pd.isna(value) or str(value).strip() in ["", "-", "—", "nan", "NaN"]:
+    # value = None
+    # record_data[col] = value
+    # record = TrenchExcavation(
+    # subcontractor_id=subcontractor_id,
+    # **record_data
+    # )
+    # db.session.add(record)
+    # saved_count += 1
+    # db.session.commit()
+    # return True, f"Trench Excavation data saved successfully. Total rows: {saved_count}"
+    # except Exception as e:
+    # db.session.rollback()
+    # return False, f"Trench Excavation Save Failed: {e}"
+    # Manhole Excavation save method (ManholeExcavation model)
+    # def process_manhole_excavation(self, df, subcontractor_id):
+    # # Clean column names (strip whitespace)
+    # df.columns = [str(c).strip() for c in df.columns]
+    # # If the sheet has merged cells -> forward fill Location
+    # if "Location" in df.columns:
+    # df["Location"] = df["Location"].ffill()
+    # # REMOVE empty rows
+    # df = df.dropna(how="all")
+    # # Identify missing location rows before insert
+    # missing_loc = df[df["Location"].isna() | (df["Location"].astype(str).str.strip() == "")]
+    # if not missing_loc.empty:
+    # return False, f"Error: Some rows have empty Location. Rows: {missing_loc.index.tolist()}"
+    # saved_count = 0
+    # try:
+    # for index, row in df.iterrows():
+    # record_data = {}
+    # # Insert only fields that exist in model
+    # for col in df.columns:
+    # if hasattr(ManholeExcavation, col):
+    # value = row[col]
+    # # Normalize empty values
+    # if pd.isna(value) or str(value).strip() in ["", "-", "—", "nan", "NaN"]:
+    # value = None
+    # record_data[col] = value
+    # record = ManholeExcavation(
+    # subcontractor_id=subcontractor_id,
+    # **record_data
+    # )
+    # db.session.add(record)
+    # saved_count += 1
+    # db.session.commit()
+    # return True, f"Manhole Excavation data saved successfully. Total rows: {saved_count}"
+    # except Exception as e:
+    # db.session.rollback()
+    # return False, f"Manhole Excavation Save Failed: {e}"
     # Manhole and Domestic Chamber Construction save method (ManholeDomesticChamber model)
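The new column-cleaning chain in both process methods is what lets raw spreadsheet headers line up with the model attribute names before the hasattr() mapping. A small standalone demonstration of the same chain; the raw header strings here are made up for illustration, not taken from the actual sheet.

import pandas as pd

# Hypothetical raw headers as they might come out of the Excel sheet.
raw = pd.Index(["MH NO", " Location ", "Hard Rock (1.5 to 3.0) total", "Remarks"])

cleaned = (
    raw.astype(str)
    .str.strip()
    .str.replace(r"[^\w]", "_", regex=True)  # non-word characters -> underscore
    .str.replace("__+", "_", regex=True)     # collapse runs of underscores
    .str.strip("_")                          # drop leading/trailing underscores
)
print(list(cleaned))
# ['MH_NO', 'Location', 'Hard_Rock_1_5_to_3_0_total', 'Remarks']

The cleaned names then match model columns such as Hard_Rock_1_5_to_3_0_total, so hasattr(TrenchExcavation, col) picks them up when building record_data.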