from __future__ import annotations
|
|
|
|
import os
|
|
import sys
|
|
import json
|
|
import re
|
|
import ast
|
|
import time
|
|
import unicodedata
|
|
import datetime as dt
|
|
from decimal import Decimal, ROUND_HALF_UP
|
|
|
|
# ============================================================
# Rocketbot portable boot (py_libs) CONTRATO CONTADO
# ============================================================
base_dir = os.path.dirname(sys.executable)  # ...\Rocketbot install directory
libs_dir = os.path.join(base_dir, "py_libs", "py310")  # folder holding the bundled libraries and dependencies
sys.path.insert(0, libs_dir)

# Rocketbot sometimes already has an old pyparsing loaded in memory, which can
# conflict with the version googleapiclient needs; dropping it from sys.modules
# forces the py_libs copy (which does not conflict) to be re-imported instead.
for k in list(sys.modules.keys()):
    if k == "pyparsing" or k.startswith("pyparsing."):
        del sys.modules[k]

# Imported after the sys.path tweak so they resolve from py_libs.
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from google.oauth2 import service_account
|
|
|
|
# ============================================================
|
|
# Rocketbot vars helpers
|
|
# ============================================================
|
|
def _missing(v) -> bool: #Función auxiliar para verificar si una variable está vacía o no
|
|
if v is None:
|
|
return True
|
|
if isinstance(v, str):
|
|
s = v.strip()
|
|
return s == "" or s == "ERROR_NOT_VAR"
|
|
return False
|
|
|
|
|
|
def _gv(name, default=None): #Función auxiliar para obtener el valor de una variable
|
|
try:
|
|
v = GetVar(name)
|
|
except Exception:
|
|
return default
|
|
return default if _missing(v) else v
|
|
|
|
|
|
def _gvs(name, default="") -> str:
    """Fetch Rocketbot variable *name* as a trimmed string.

    Blank values and the "ERROR_NOT_VAR" sentinel collapse to *default*.
    """
    raw = _gv(name, default)
    if raw is None:
        return default
    text = str(raw).strip()
    if text in ("", "ERROR_NOT_VAR"):
        return default
    return text
|
|
|
|
|
|
def _sv(name, value): #Función auxiliar para establecer el valor de una variable
|
|
try:
|
|
SetVar(name, value)
|
|
except Exception:
|
|
pass
|
|
|
|
|
|
# ============================================================
|
|
# Formatting helpers
|
|
# ============================================================
|
|
def _to_float(val): #Función auxiliar para convertir un valor a float
|
|
if val is None:
|
|
return None
|
|
if isinstance(val, bool):
|
|
return None
|
|
if isinstance(val, (int, float)):
|
|
return float(val)
|
|
if isinstance(val, Decimal):
|
|
return float(val)
|
|
|
|
if isinstance(val, dict):
|
|
for k in ("amount", "value", "text", "name", "label", "display_name"):
|
|
if k in val and val.get(k) not in (None, ""):
|
|
out = _to_float(val.get(k))
|
|
if out is not None:
|
|
return out
|
|
return None
|
|
|
|
if isinstance(val, (list, tuple)):
|
|
for it in val:
|
|
out = _to_float(it)
|
|
if out is not None:
|
|
return out
|
|
return None
|
|
|
|
s = str(val).strip()
|
|
if s == "" or s.lower() in ("none", "false"):
|
|
return None
|
|
|
|
cleaned = []
|
|
for ch in s:
|
|
if ch.isdigit() or ch in ".,-":
|
|
cleaned.append(ch)
|
|
s2 = "".join(cleaned)
|
|
if s2 == "" or s2 in ("-", ",", "."):
|
|
return None
|
|
|
|
# 40.000,00 -> 40000.00
|
|
# 40,000.00 -> 40000.00
|
|
# 40000,00 -> 40000.00
|
|
if "," in s2 and "." in s2:
|
|
if s2.rfind(",") > s2.rfind("."):
|
|
s2 = s2.replace(".", "").replace(",", ".")
|
|
else:
|
|
s2 = s2.replace(",", "")
|
|
elif "," in s2:
|
|
s2 = s2.replace(",", ".")
|
|
|
|
try:
|
|
return float(s2)
|
|
except Exception:
|
|
return None
|
|
|
|
|
|
def _to_decimal(val):
    """Convert *val* to Decimal via _to_float; None when not convertible."""
    number = _to_float(val)
    if number is None:
        return None
    try:
        return Decimal(str(number))
    except Exception:
        return None
|
|
|
|
|
|
def _to_decimal_zero(val):
    """Like _to_decimal, but empty/unparseable values become Decimal('0')."""
    parsed = _to_decimal(val)
    return Decimal("0") if parsed is None else parsed
|
|
|
|
|
|
def format_money(val):
    """Format *val* as '$' plus '.'-separated thousands, no decimals.

    Empty/None values render as "$0". Rounding is half-up to the unit.
    """
    amount = _to_decimal_zero(val).quantize(Decimal("1"), rounding=ROUND_HALF_UP)
    grouped = format(amount, ",.0f").replace(",", ".")
    return "$" + grouped
|
|
|
|
def _format_var_2dec(val):
    """Format *val* with 2 decimals, es-style: '.' thousands, ',' decimals."""
    amount = _to_decimal_zero(val).quantize(Decimal("0.01"), rounding=ROUND_HALF_UP)
    text = format(amount, ",.2f")
    # Swap ',' and '.' via a placeholder to turn 1,234.56 into 1.234,56.
    return text.replace(",", "X").replace(".", ",").replace("X", ".")
|
|
|
|
|
|
def _resolve_payment_columns(date_val, paid_if_today_or_past=False, preserve_unparsed_in_fecha_pago=False):
    """Decide which column a payment date belongs to.

    Returns (fecha_pago, venc). When *paid_if_today_or_past* is set and
    the date is today or earlier, the date goes into the FECHA DE PAGO
    column; otherwise into VENC. Text that cannot be interpreted as a
    date goes to FECHA DE PAGO only when
    *preserve_unparsed_in_fecha_pago* is requested, else to VENC.
    """
    if date_val is None:
        return "", ""

    raw_text = str(date_val).strip()
    if not raw_text:
        return "", ""

    # Prefer the normalized dd/mm/yyyy text; fall back to the raw input.
    parsed_ok, pretty = parse_date_ddmmyyyy(date_val)
    display = pretty if parsed_ok else raw_text

    as_date = _to_date_any(date_val)
    if as_date is not None:
        if paid_if_today_or_past and as_date <= dt.date.today():
            return display, ""   # already paid -> FECHA DE PAGO column
        return "", display       # upcoming -> VENC column

    # Date text we could not interpret at all.
    if preserve_unparsed_in_fecha_pago:
        return display, ""
    return "", display
|
|
|
|
|
|
def parse_date_ddmmyyyy(val):
    """Normalize *val* to 'dd/mm/yyyy' text.

    Returns (True, normalized) on success, or (False, stripped original)
    when the value cannot be interpreted. Accepts values already in
    dd/mm/yyyy (time suffix ignored), ISO datetimes (optional T/Z) and
    compact yyyymmdd digit strings.
    """
    if val is None:
        return (False, "")
    text = str(val).strip()
    if not text:
        return (False, "")

    # Already dd/mm/yyyy (possibly with extra suffix): keep the date part.
    if len(text) >= 10 and text[2] == "/" and text[5] == "/":
        return (True, text[:10])

    # ISO datetime, tolerating a 'T' separator and trailing 'Z'.
    iso_candidate = text.replace("T", " ").replace("Z", "")
    try:
        parsed = dt.datetime.fromisoformat(iso_candidate)
    except Exception:
        parsed = None
    if parsed is not None:
        return (True, parsed.strftime("%d/%m/%Y"))

    # Compact yyyymmdd prefix.
    if len(text) >= 8 and text[:8].isdigit():
        year, month, day = text[0:4], text[4:6], text[6:8]
        return (True, f"{day}/{month}/{year}")

    return (False, text)
|
|
|
|
|
|
def _to_int(val, default=0): #Función auxiliar para convertir un valor a entero
|
|
if val is None:
|
|
return default # siempre devuelve 0 si el valor es None
|
|
if isinstance(val, bool):
|
|
return default # siempre devuelve 0 si el valor es booleano
|
|
if isinstance(val, int):
|
|
return val # si el valor es entero, devuelve el valor
|
|
if isinstance(val, float):
|
|
try:
|
|
return int(val) # si el valor es float, lo convierte a entero
|
|
except Exception:
|
|
return default # si el valor no se puede convertir a entero, devuelve 0
|
|
|
|
s = str(val).strip()
|
|
if s == "" or s == "ERROR_NOT_VAR":
|
|
return default # siempre devuelve 0 si el valor es None
|
|
|
|
m = re.search(r"-?\d+(?:[.,]\d+)?", s)
|
|
if not m:
|
|
return default # siempre devuelve 0 si el valor no se puede convertir a entero
|
|
|
|
num = m.group(0).replace(",", ".")
|
|
try:
|
|
return int(float(num)) # si el valor es float, lo convierte a entero
|
|
except Exception:
|
|
return default # si el valor no se puede convertir a entero, devuelve 0
|
|
|
|
|
|
# ============================================================
|
|
# Parser Rocketbot (listas/dicts desde Expander)
|
|
# ============================================================
|
|
def _parse_any(v, default=None): #Función auxiliar para parsear cualquier valor
|
|
if v is None:
|
|
return default # siempre devuelve None si el valor es None
|
|
if isinstance(v, (list, dict)):
|
|
return v # si el valor es una lista o un diccionario, devuelve el valor
|
|
|
|
s = str(v).strip()
|
|
if s == "" or s == "ERROR_NOT_VAR":
|
|
return default # siempre devuelve None si el valor es None
|
|
|
|
s = s.replace("\u200b", "").replace("\ufeff", "") # elimina caracteres invisibles
|
|
|
|
if len(s) >= 2 and (s[0] == s[-1]) and s[0] in ("'", '"'):
|
|
inner = s[1:-1].strip() # elimina los espacios en blanco al inicio y al final
|
|
if inner.startswith("[") or inner.startswith("{"): # si el valor es una lista o un diccionario, devuelve el valor
|
|
s = inner # si el valor es una lista o un diccionario, devuelve el valor
|
|
|
|
try:
|
|
return json.loads(s) # intenta convertir el valor a json
|
|
except Exception:
|
|
pass
|
|
|
|
fixed = s
|
|
fixed = re.sub(r"\bnull\b", "None", fixed, flags=re.I) # reemplaza null por None
|
|
fixed = re.sub(r"\btrue\b", "True", fixed, flags=re.I) # reemplaza true por True
|
|
fixed = re.sub(r"\bfalse\b", "False", fixed, flags=re.I) # reemplaza false por False
|
|
|
|
try:
|
|
return ast.literal_eval(fixed) # intenta convertir el valor a literal
|
|
except Exception:
|
|
return default # siempre devuelve None si el valor no se puede convertir a literal
|
|
|
|
|
|
def _to_date_any(val): #Función auxiliar para convertir un valor a fecha
|
|
if val is None:
|
|
return None # siempre devuelve None si el valor es None
|
|
if isinstance(val, dt.datetime):
|
|
return val.date() # si el valor es un datetime, devuelve la fecha
|
|
if isinstance(val, dt.date):
|
|
return val # si el valor es una fecha, devuelve la fecha
|
|
|
|
s = str(val).strip()
|
|
if s == "":
|
|
return None # siempre devuelve None si el valor es None
|
|
|
|
m = re.match(r"^(\d{1,2})[/-](\d{1,2})[/-](\d{4})", s) # si la fecha está en formato dd/mm/yyyy
|
|
if m:
|
|
try:
|
|
d = int(m.group(1)); mo = int(m.group(2)); y = int(m.group(3)) # convierte la fecha a formato date
|
|
return dt.date(y, mo, d)
|
|
except Exception:
|
|
pass
|
|
|
|
s2 = s.replace("T", " ").replace("Z", "") # reemplaza T y Z por espacios en blanco
|
|
try:
|
|
return dt.datetime.fromisoformat(s2).date() # intenta convertir la fecha a formato iso
|
|
except Exception:
|
|
pass
|
|
try:
|
|
return dt.date.fromisoformat(s[:10]) # intenta convertir la fecha a formato iso
|
|
except Exception:
|
|
pass
|
|
|
|
m2 = re.match(r"^(\d{4})(\d{2})(\d{2})", s) # si la fecha está en formato yyyymmdd
|
|
if m2:
|
|
try:
|
|
y = int(m2.group(1)); mo = int(m2.group(2)); d = int(m2.group(3)) # convierte la fecha a formato date
|
|
return dt.date(y, mo, d)
|
|
except Exception:
|
|
pass
|
|
|
|
return None # siempre devuelve None si el valor no se puede convertir a fecha
|
|
|
|
|
|
def _extract_date_text_from_any(val):
    """Pull a displayable date string out of *val* (scalar/dict/sequence).

    Dicts are probed at the usual keys (date/text/value/name); sequences
    yield the first non-empty result. A scalar falls back to its
    stripped string form when date parsing fails. Returns "" when
    nothing usable is found.
    """
    if val is None:
        return ""
    if isinstance(val, dict):
        for key in ("date", "text", "value", "name"):
            if val.get(key) in (None, ""):
                continue
            found = _extract_date_text_from_any(val.get(key))
            if found:
                return found
        return ""
    if isinstance(val, (list, tuple)):
        for item in val:
            found = _extract_date_text_from_any(item)
            if found:
                return found
        return ""
    parsed_ok, pretty = parse_date_ddmmyyyy(val)
    return pretty if parsed_ok else str(val).strip()
|
|
|
|
|
|
# ============================================================
|
|
# Google Auth
|
|
# ============================================================
|
|
# NOTE(review): {scopes_api_google} looks like a Rocketbot placeholder that is
# substituted with the actual scope list before this script executes; as
# written it is a set literal built from that injected value — confirm.
SCOPES = {scopes_api_google} # Google API scopes requested for the delegated credentials
|
|
|
|
|
|
def _load_json(path): #Función auxiliar para cargar un archivo JSON
|
|
with open(path, "r", encoding="utf-8") as f:
|
|
return json.load(f)
|
|
|
|
|
|
def get_services(credentials_json_path, impersonated_user):
    """Build delegated Google Docs and Drive clients.

    Validates that *credentials_json_path* points to a service-account
    JSON and that *impersonated_user* is provided, then creates
    domain-wide-delegation credentials acting as that user.

    Returns (docs_client, drive_client, auth_mode_label).
    Raises RuntimeError on invalid configuration.
    """
    info = _load_json(credentials_json_path)
    if not isinstance(info, dict) or info.get("type") != "service_account":
        raise RuntimeError("gdoc_sa_json2 debe apuntar a un JSON de cuenta de servicio.")

    subject = (impersonated_user or "").strip()
    if not subject:
        raise RuntimeError("Falta la variable gdoc_impersonated_user.")

    # Delegated credentials: the service account acts on behalf of *subject*.
    creds = service_account.Credentials.from_service_account_file(
        credentials_json_path,
        scopes=SCOPES,
        subject=subject,
    )
    docs_client = build("docs", "v1", credentials=creds, cache_discovery=False)
    drive_client = build("drive", "v3", credentials=creds, cache_discovery=False)
    return docs_client, drive_client, "service_account_impersonated"
|
|
|
|
|
|
# ============================================================
|
|
# Drive helpers
|
|
# ============================================================
|
|
def extract_doc_id_from_url(url):
    """Return the Docs file id embedded in *url*, or "" when absent."""
    match = re.search(r"/document/d/([a-zA-Z0-9_-]+)", url or "")
    return "" if match is None else match.group(1)
|
|
|
|
|
|
def ensure_docs_api_compatible(drive_service, file_id: str):
    """Resolve *file_id* to a file the Docs API can edit.

    Follows Drive shortcuts to their target, returns native Google Docs
    unchanged, and otherwise copies the file converting it into a Google
    Doc. A small cache kept in Rocketbot vars (gdoc_cache_from_id /
    gdoc_cache_converted_id) lets repeated runs reuse the converted copy.

    Returns (doc_id, metadata_dict, was_converted).
    Raises RuntimeError when a shortcut carries no targetId.
    """
    meta = drive_service.files().get(
        fileId=file_id,
        fields="id,name,mimeType,shortcutDetails",
        supportsAllDrives=True,
    ).execute()

    # Shortcut: swap in the target file and re-read its metadata.
    if meta.get("mimeType") == "application/vnd.google-apps.shortcut":
        target = (meta.get("shortcutDetails") or {}).get("targetId", "")
        if not target:
            raise RuntimeError("Es shortcut pero no trae targetId.")
        file_id = target
        meta = drive_service.files().get(
            fileId=file_id,
            fields="id,name,mimeType",
            supportsAllDrives=True,
        ).execute()

    # Already a native Google Doc: nothing to convert.
    if meta.get("mimeType") == "application/vnd.google-apps.document":
        return file_id, meta, False

    # Conversion cache: reuse a previously converted copy when it is still a Doc.
    cache_from = _gvs("gdoc_cache_from_id", "")
    cache_to = _gvs("gdoc_cache_converted_id", "")
    if cache_from == file_id and cache_to:
        try:
            meta2 = drive_service.files().get(
                fileId=cache_to,
                fields="id,name,mimeType",
                supportsAllDrives=True,
            ).execute()
            if meta2.get("mimeType") == "application/vnd.google-apps.document":
                _sv("gdoc_converted", "1")
                return cache_to, meta2, True
        except Exception:
            # Stale/deleted cache entry: fall through and convert again.
            pass

    # Copy the file as a Google Doc (Drive performs the format conversion).
    new_name = (meta.get("name") or "Documento") + " (Google Docs)"
    converted = drive_service.files().copy(
        fileId=file_id,
        body={"name": new_name, "mimeType": "application/vnd.google-apps.document"},
        fields="id,name,mimeType",
        supportsAllDrives=True,
    ).execute()

    # Remember the conversion so the next run can skip it.
    _sv("gdoc_cache_from_id", file_id)
    _sv("gdoc_cache_converted_id", converted["id"])
    return converted["id"], converted, True
|
|
|
|
|
|
# ============================================================
|
|
# Docs GET (tabs)
|
|
# ============================================================
|
|
def docs_get(docs_service, doc_id: str):
    """Fetch the document, requesting tab content when supported.

    Newer googleapiclient builds accept the includeTabsContent kwarg;
    older ones raise TypeError on the unknown argument, in which case
    we retry with a plain get. A Rocketbot debug flag records which
    path was taken.
    """
    try:
        _sv("dbg_includeTabsContent", "1")
        return docs_service.documents().get(documentId=doc_id, includeTabsContent=True).execute()
    except TypeError:
        _sv("dbg_includeTabsContent", "0")
        return docs_service.documents().get(documentId=doc_id).execute()
|
|
|
|
|
|
def _walk_tabs(tabs):
|
|
if not tabs:
|
|
return
|
|
for t in tabs:
|
|
yield t
|
|
for x in _walk_tabs(t.get("childTabs") or []):
|
|
yield x
|
|
|
|
|
|
def _list_scopes(doc):
    """Enumerate ("main", None) plus ("tab", tabId) for every tab of *doc*."""
    found = [("main", None)]
    for tab in _walk_tabs(doc.get("tabs") or []):
        tab_id = (tab.get("tabProperties") or {}).get("tabId")
        if tab_id is not None:
            found.append(("tab", str(tab_id)))
    return found
|
|
|
|
|
|
def _get_body_content_for_scope(doc, scope_kind, scope_id):
|
|
if scope_kind == "main":
|
|
return (doc.get("body") or {}).get("content", []) or []
|
|
|
|
tabs = doc.get("tabs") or []
|
|
for t in _walk_tabs(tabs):
|
|
tid = (t.get("tabProperties") or {}).get("tabId")
|
|
if tid is None:
|
|
continue
|
|
if str(tid) == str(scope_id):
|
|
dtab = t.get("documentTab") or {}
|
|
return (dtab.get("body") or {}).get("content", []) or []
|
|
return []
|
|
|
|
|
|
# ============================================================
|
|
# Marker search (robusto, char-level)
|
|
# ============================================================
|
|
def _norm_alnum(s: str) -> str:
|
|
if s is None:
|
|
return ""
|
|
s = unicodedata.normalize("NFKD", str(s)).lower()
|
|
out = []
|
|
for ch in s:
|
|
if unicodedata.category(ch) == "Mn":
|
|
continue
|
|
if ch.isalnum():
|
|
out.append(ch)
|
|
return "".join(out)
|
|
|
|
|
|
def _iter_text_chars(content_list):
|
|
def walk(lst):
|
|
for el in lst or []:
|
|
if not isinstance(el, dict):
|
|
continue
|
|
|
|
p = el.get("paragraph")
|
|
if p:
|
|
base = el.get("startIndex")
|
|
if base is None:
|
|
base = 0
|
|
cursor = int(base)
|
|
|
|
for pe in (p.get("elements") or []):
|
|
tr = pe.get("textRun")
|
|
if not tr:
|
|
en = pe.get("endIndex")
|
|
if en is not None:
|
|
cursor = int(en)
|
|
continue
|
|
|
|
txt = tr.get("content", "")
|
|
if txt is None:
|
|
continue
|
|
|
|
st = pe.get("startIndex")
|
|
if st is None:
|
|
st = cursor
|
|
st = int(st)
|
|
|
|
for i, ch in enumerate(txt):
|
|
if ch in ("\u200b", "\ufeff"):
|
|
continue
|
|
yield (st + i, ch)
|
|
|
|
en = pe.get("endIndex")
|
|
if en is not None:
|
|
cursor = int(en)
|
|
else:
|
|
cursor = st + len(txt)
|
|
continue
|
|
|
|
t = el.get("table")
|
|
if t:
|
|
for row in (t.get("tableRows") or []):
|
|
for cell in (row.get("tableCells") or []):
|
|
yield from walk(cell.get("content", []))
|
|
continue
|
|
|
|
toc = el.get("tableOfContents")
|
|
if toc:
|
|
yield from walk(toc.get("content", []))
|
|
continue
|
|
|
|
yield from walk(content_list)
|
|
|
|
|
|
def find_marker(doc, marker_raw: str):
    """Locate the placeholder marker text anywhere in the document.

    Candidate strings are the given marker, the marker stripped of
    surrounding underscores, and the known TABLA_PROFORMA spellings.
    Per scope (main body, then each tab) three passes of increasing
    tolerance are tried: exact substring, case-insensitive substring,
    and a fuzzy pass that compares only alphanumeric characters.

    Returns {"hit": dict-or-None, "candidates": [...]}; a hit carries
    scope kind, tabId, start/end document indices (end exclusive),
    the match mode, and which candidate matched.
    """
    mr = (marker_raw or "").strip()
    mt = mr.strip("_").strip()
    candidates = []
    for m in [mr, mt, "TABLA_PROFORMA", "__TABLA_PROFORMA__", "TABLA PROFORMA"]:
        if m and m not in candidates:
            candidates.append(m)

    for scope_kind, scope_id in _list_scopes(doc):
        content = _get_body_content_for_scope(doc, scope_kind, scope_id)

        # Flatten this scope's text into one string plus a parallel map
        # from string position back to the Docs API document index.
        raw_chars = []
        raw_map = []
        for doc_i, ch in _iter_text_chars(content):
            raw_chars.append(ch)
            raw_map.append(int(doc_i))
        raw_text = "".join(raw_chars)

        for m in candidates:
            # Pass 1: exact substring.
            pos = raw_text.find(m)
            if pos != -1:
                sdoc = raw_map[pos]
                edoc = raw_map[pos + len(m) - 1] + 1  # exclusive end
                return {"hit": {"scope_kind": scope_kind, "tabId": scope_id, "start": sdoc, "end": edoc, "mode": "exact", "used": m}, "candidates": candidates}

            # Pass 2: case-insensitive substring.
            pos2 = raw_text.lower().find(m.lower())
            if pos2 != -1:
                sdoc = raw_map[pos2]
                edoc = raw_map[pos2 + len(m) - 1] + 1
                return {"hit": {"scope_kind": scope_kind, "tabId": scope_id, "start": sdoc, "end": edoc, "mode": "case_insensitive", "used": m}, "candidates": candidates}

        # Pass 3: compare alphanumeric-only lowercased text so spaces,
        # underscores and punctuation in the doc no longer matter.
        alnum = []
        alnum_map = []
        for i, ch in enumerate(raw_chars):
            if ch.isalnum():
                alnum.append(ch.lower())
                alnum_map.append(raw_map[i])
        alnum_text = "".join(alnum)

        for m in candidates:
            target = _norm_alnum(m)
            if not target:
                continue
            pos = alnum_text.find(target)
            if pos != -1:
                sdoc = alnum_map[pos]
                edoc = alnum_map[pos + len(target) - 1] + 1
                return {"hit": {"scope_kind": scope_kind, "tabId": scope_id, "start": sdoc, "end": edoc, "mode": "fuzzy_alnum", "used": m}, "candidates": candidates}

    return {"hit": None, "candidates": candidates}
|
|
|
|
|
|
# ============================================================
|
|
# Detectar si ya existe la tabla proforma (idempotencia)
|
|
# ============================================================
|
|
def _cell_text_alnum(cell):
    """Concatenate all text runs of a table cell, normalized via _norm_alnum."""
    pieces = []
    for element in (cell.get("content") or []):
        paragraph = element.get("paragraph")
        if not paragraph:
            continue
        for piece in (paragraph.get("elements") or []):
            run = piece.get("textRun")
            if not run:
                continue
            text = run.get("content") or ""
            if text:
                pieces.append(text)
    return _norm_alnum(" ".join(pieces))
|
|
|
|
|
|
def _table_looks_like_proforma(table):
    """Heuristic: does *table* contain the proforma header keywords?

    Looks for any row whose normalized text contains "costototal",
    "monto" and one of the due-date column labels. Defensive: any
    error during inspection just means False.
    """
    try:
        for row in (table.get("tableRows") or []):
            normalized = " ".join(_cell_text_alnum(cell) for cell in (row.get("tableCells") or []))
            has_due = ("venc" in normalized) or ("fechadepago" in normalized) or ("fechapago" in normalized)
            if "costototal" in normalized and "monto" in normalized and has_due:
                return True
    except Exception:
        pass
    return False
|
|
|
|
|
|
def proforma_table_exists(doc):
    """True when any scope of *doc* already holds a proforma-like table.

    Used for idempotency: the table is only inserted once.
    """
    for scope_kind, scope_id in _list_scopes(doc):
        for element in _get_body_content_for_scope(doc, scope_kind, scope_id) or []:
            table = element.get("table")
            if table and _table_looks_like_proforma(table):
                return True
    return False
|
|
|
|
|
|
# ============================================================
|
|
# Docs batchUpdate helpers (tab-aware)
|
|
# ============================================================
|
|
def _loc(index, tab_id):
|
|
d = {"index": int(index)}
|
|
if tab_id is not None:
|
|
d["tabId"] = str(tab_id)
|
|
return d
|
|
|
|
|
|
def _range(start, end, tab_id):
|
|
d = {"startIndex": int(start), "endIndex": int(end)}
|
|
if tab_id is not None:
|
|
d["tabId"] = str(tab_id)
|
|
return d
|
|
|
|
|
|
def _rgb(r, g, b):
|
|
return {"color": {"rgbColor": {"red": r, "green": g, "blue": b}}}
|
|
|
|
|
|
def _merge_req(table_start, tab_id, row, col, row_span, col_span):
    """Build a mergeTableCells request covering the given cell rectangle."""
    table_range = {
        "tableCellLocation": {
            "tableStartLocation": _loc(table_start, tab_id),
            "rowIndex": row,
            "columnIndex": col,
        },
        "rowSpan": row_span,
        "columnSpan": col_span,
    }
    return {"mergeTableCells": {"tableRange": table_range}}
|
|
|
|
|
|
def _cell_bg_req(table_start, tab_id, row, col, row_span, col_span, rgb):
    """Build an updateTableCellStyle request setting only the background color."""
    table_range = {
        "tableCellLocation": {
            "tableStartLocation": _loc(table_start, tab_id),
            "rowIndex": row,
            "columnIndex": col,
        },
        "rowSpan": row_span,
        "columnSpan": col_span,
    }
    return {"updateTableCellStyle": {"tableRange": table_range, "tableCellStyle": {"backgroundColor": rgb}, "fields": "backgroundColor"}}
|
|
|
|
|
|
def _batch_update(docs_service, doc_id, reqs, chunk=500):
    """Send *reqs* via batchUpdate, at most *chunk* requests per API call."""
    offset = 0
    while offset < len(reqs):
        batch = reqs[offset:offset + chunk]
        docs_service.documents().batchUpdate(documentId=doc_id, body={"requests": batch}).execute()
        offset += chunk
|
|
|
|
|
|
def get_first_table_after_index(doc, scope_kind, tab_id, index_hint):
    """Find the table with the smallest startIndex >= *index_hint*.

    Returns (table, start_index), or (None, None) when no table in the
    scope starts at or after the hint.
    """
    winner_table = None
    winner_start = None
    for element in _get_body_content_for_scope(doc, scope_kind, tab_id):
        table = element.get("table")
        if not table:
            continue
        start = element.get("startIndex", 0)
        if start < index_hint:
            continue
        if winner_start is None or start < winner_start:
            winner_table, winner_start = table, start
    return (winner_table, winner_start)
|
|
|
|
|
|
def get_cell_insert_index(table, r, c):
    """Index at which text can be inserted inside cell (r, c).

    Prefers the end of the first paragraph (endIndex - 1); falls back
    to any element exposing an endIndex. Raises RuntimeError when the
    cell has no usable indices at all.
    """
    content = table["tableRows"][r]["tableCells"][c].get("content", [])

    # First pass restricted to paragraphs, second pass accepts anything.
    for require_paragraph in (True, False):
        for element in content:
            if require_paragraph and element.get("paragraph") is None:
                continue
            end = element.get("endIndex")
            if end is not None:
                return int(end) - 1

    raise RuntimeError(f"No pude calcular indice de insercion para celda ({r},{c}).")
|
|
|
|
|
|
def get_cell_text_range(table, r, c):
    """Return (min_start, max_end) indices covering cell (r, c) content.

    When the cell exposes no indices at all, fall back to the insertion
    point and a one-character range after it.
    """
    lo = None
    hi = None
    for element in (table["tableRows"][r]["tableCells"][c].get("content") or []):
        start = element.get("startIndex")
        end = element.get("endIndex")
        if start is not None:
            start = int(start)
            lo = start if lo is None else min(lo, start)
        if end is not None:
            end = int(end)
            hi = end if hi is None else max(hi, end)
    if lo is None or hi is None:
        anchor = get_cell_insert_index(table, r, c)
        return anchor, anchor + 1
    return lo, hi
|
|
|
|
|
|
def _cell_borders_req(table_start, tab_id, row, col, row_span, col_span, *, top=None, bottom=None, left=None, right=None):
    """Build an updateTableCellStyle request for the requested borders.

    Only the borders actually passed (non-None) enter the style and the
    fields mask; returns None when no border was requested at all.
    """
    table_range = {
        "tableCellLocation": {
            "tableStartLocation": _loc(table_start, tab_id),
            "rowIndex": row,
            "columnIndex": col,
        },
        "rowSpan": row_span,
        "columnSpan": col_span,
    }

    style = {}
    for field_name, border in (
        ("borderTop", top),
        ("borderBottom", bottom),
        ("borderLeft", left),
        ("borderRight", right),
    ):
        if border is not None:
            style[field_name] = border

    if not style:
        return None

    # dict preserves insertion order, so the mask matches the style keys.
    return {"updateTableCellStyle": {"tableRange": table_range, "tableCellStyle": style, "fields": ",".join(style.keys())}}
|
|
|
|
|
|
def _border_none():
|
|
return {"width": {"magnitude": 0, "unit": "PT"}, "dashStyle": "SOLID", "color": {"color": {"rgbColor": {"red": 1, "green": 1, "blue": 1}}}}
|
|
|
|
|
|
# ============================================================
|
|
# Build rows (CALENDARIOS del expander + fallback simple)
|
|
# Columnas: [CONCEPTO, MONTO, RECIBO, FACTURA, FECHA PAGO, VENC]
|
|
# ============================================================
|
|
def build_rows(data):
|
|
rows = []
|
|
info_rows = []
|
|
|
|
def add_info(label, value):
|
|
idx = len(rows)
|
|
rows.append([label, str(value) if value is not None else "", "", ""])
|
|
info_rows.append(idx)
|
|
|
|
# Se eliminan CLIENTE, C.I., DIRECCION, TELEFONO y EMAIL
|
|
# Se conservan EDIFICIO y DPTO con formato verde
|
|
add_info("EDIFICIO:", data.get("proyecto", ""))
|
|
add_info("DPTO.:", data.get("producto_odoo", ""))
|
|
|
|
header_row_index = len(rows)
|
|
rows.append(["COSTO TOTAL", "MONTO", "FECHA DE PAGO", "VENC"])
|
|
rows.append(["COSTO TOTAL", format_money(data.get("valor_total_compra")), "", ""])
|
|
|
|
pago_senia = _to_decimal_zero(data.get("pago_senia"))
|
|
fecha_sena = _extract_date_text_from_any(data.get("fecha_pago_sena"))
|
|
if pago_senia > 0:
|
|
# La fecha de la seña debe ir siempre en FECHA DE PAGO, sin depender de la fecha de hoy.
|
|
rows.append(["SEÑA", format_money(pago_senia), fecha_sena, ""])
|
|
|
|
pago_inicial = _to_decimal_zero(data.get("pago_inicial"))
|
|
fecha_ini = _extract_date_text_from_any(data.get("fecha_pago_inicial"))
|
|
if pago_inicial > 0:
|
|
# La fecha de la entrega/pago inicial debe ir siempre en FECHA DE PAGO, sin depender de la fecha de hoy.
|
|
rows.append(["ENTREGA INICIAL", format_money(pago_inicial), fecha_ini, ""])
|
|
|
|
# Se eliminan SEÑA y ENTREGA INICIAL
|
|
# pago_senia = _to_decimal_zero(data.get("pago_senia"))
|
|
# ok_sena, fecha_sena = parse_date_ddmmyyyy(data.get("fecha_pago_sena"))
|
|
# rows.append(["SEÑA", format_money(pago_senia), (fecha_sena if ok_sena else ""), ""])
|
|
|
|
# pago_inicial = _to_decimal_zero(data.get("pago_inicial"))
|
|
# ok_ini, fecha_ini = parse_date_ddmmyyyy(data.get("fecha_pago_inicial"))
|
|
# rows.append(["ENTREGA INICIAL", format_money(pago_inicial), (fecha_ini if ok_ini else ""), ""])
|
|
|
|
pre_sections = []
|
|
post_sections = []
|
|
merge_fecha_venc_rows = []
|
|
refuerzo_rows = []
|
|
|
|
def _tipo_infos(calendarios_obj):
|
|
if not calendarios_obj:
|
|
return []
|
|
calendarios = calendarios_obj if not isinstance(calendarios_obj, dict) else [calendarios_obj]
|
|
tipos = []
|
|
for cal in calendarios:
|
|
if isinstance(cal, list) and cal:
|
|
dates = []
|
|
for r in cal:
|
|
d = _to_date_any((r or {}).get("date"))
|
|
if d:
|
|
dates.append(d)
|
|
if not dates:
|
|
continue
|
|
tipos.append({
|
|
"min": min(dates),
|
|
"max": max(dates),
|
|
"quota_dates": sorted(set(dates)),
|
|
"cal": cal,
|
|
})
|
|
tipos.sort(key=lambda t: t.get("min") or dt.date.max)
|
|
for i, t in enumerate(tipos, start=1):
|
|
t["idx"] = i
|
|
return tipos
|
|
|
|
ref_items = []
|
|
ref_list = data.get("refuerzos") or []
|
|
if isinstance(ref_list, dict):
|
|
ref_list = [ref_list]
|
|
if isinstance(ref_list, list):
|
|
for j, rr in enumerate(ref_list):
|
|
if not isinstance(rr, dict):
|
|
continue
|
|
raw_date = (rr or {}).get("date")
|
|
raw_amt = (rr or {}).get("amount")
|
|
amt = _to_float(raw_amt)
|
|
if amt is None or amt <= 0:
|
|
continue
|
|
ok_v, venc_txt = parse_date_ddmmyyyy(raw_date)
|
|
if not ok_v:
|
|
venc_txt = str(raw_date or "")
|
|
d_obj = _to_date_any(raw_date) or dt.date.max
|
|
ref_items.append({"d": d_obj, "amt": float(amt), "venc": venc_txt, "orig": j})
|
|
|
|
ref_items.sort(key=lambda x: (x["d"], x["orig"]))
|
|
for k, it in enumerate(ref_items, start=1):
|
|
it["no"] = k
|
|
|
|
# Comentado para no agregar cuotas desde cuotas_pre_calendarios / cuotas_post_calendarios.
|
|
# pre_infos = _tipo_infos(data.get("pre_calendarios"))
|
|
# post_infos = _tipo_infos(data.get("post_calendarios"))
|
|
|
|
# Desde esta version se usa solo cuotas_pre, que ya llega convertido a pre_calendarios
|
|
# en el MAIN mediante _gen_calendarios_from_blocks(_gv("cuotas_pre", None)).
|
|
pre_infos = _tipo_infos(data.get("pre_calendarios"))
|
|
post_infos = []
|
|
|
|
ref_map_pre = {t["idx"]: [] for t in pre_infos} if pre_infos else {}
|
|
ref_map_post = {t["idx"]: [] for t in post_infos} if post_infos else {}
|
|
|
|
candidates = []
|
|
for t in pre_infos:
|
|
candidates.append(("pre", t))
|
|
for t in post_infos:
|
|
candidates.append(("post", t))
|
|
|
|
for it in ref_items:
|
|
d = it["d"]
|
|
best = None
|
|
for group, t in candidates:
|
|
mind = t["min"]
|
|
maxd = t["max"]
|
|
if d < mind:
|
|
dist_range = (mind - d).days
|
|
elif d > maxd:
|
|
dist_range = (d - maxd).days
|
|
else:
|
|
dist_range = 0
|
|
|
|
if dist_range == 0 and t.get("quota_dates"):
|
|
nearest_quota = min(abs((qd - d).days) for qd in t["quota_dates"])
|
|
else:
|
|
nearest_quota = dist_range
|
|
|
|
grp_prio = 0 if group == "pre" else 1
|
|
score = (dist_range, nearest_quota, grp_prio, mind)
|
|
if best is None or score < best[0]:
|
|
best = (score, group, t["idx"])
|
|
|
|
if best is None:
|
|
continue
|
|
_, group, idx_tipo = best
|
|
payload = (it["d"], it["amt"], it["venc"], it["no"])
|
|
if group == "pre":
|
|
ref_map_pre.setdefault(idx_tipo, []).append(payload)
|
|
else:
|
|
ref_map_post.setdefault(idx_tipo, []).append(payload)
|
|
|
|
for m in (ref_map_pre, ref_map_post):
|
|
for k in list(m.keys()):
|
|
m[k].sort(key=lambda x: (x[0], x[3]))
|
|
|
|
def _add_sections(title_base, calendarios_obj, ref_by_tipo=None, paid_if_today_or_past=False):
|
|
sections_meta = []
|
|
if not calendarios_obj:
|
|
return sections_meta
|
|
|
|
calendarios = calendarios_obj if not isinstance(calendarios_obj, dict) else [calendarios_obj]
|
|
tipos = []
|
|
for cal in calendarios:
|
|
if isinstance(cal, list) and cal:
|
|
min_d = None
|
|
for r in cal:
|
|
d = _to_date_any((r or {}).get("date"))
|
|
if d and (min_d is None or d < min_d):
|
|
min_d = d
|
|
tipos.append((min_d or dt.date.max, cal))
|
|
|
|
if not tipos:
|
|
return sections_meta
|
|
|
|
tipos.sort(key=lambda x: x[0])
|
|
multi = len(tipos) > 1
|
|
|
|
for idx_tipo, (_mind, cal) in enumerate(tipos, start=1):
|
|
title_row = len(rows)
|
|
rows.append([f"{title_base} (TIPO {idx_tipo})" if multi else title_base, "", "", ""])
|
|
cuota_rows = []
|
|
entries = []
|
|
|
|
for rr in cal:
|
|
d_obj = _to_date_any((rr or {}).get("date")) or dt.date.max
|
|
entries.append((d_obj, 0, rr))
|
|
|
|
local_refs = []
|
|
if ref_by_tipo:
|
|
if isinstance(ref_by_tipo, dict):
|
|
local_refs = ref_by_tipo.get(idx_tipo, [])
|
|
else:
|
|
local_refs = ref_by_tipo
|
|
|
|
for (d_obj, amt, venc_txt, ref_no) in local_refs or []:
|
|
entries.append((d_obj, 1, ("ref", ref_no, amt, venc_txt)))
|
|
|
|
entries.sort(key=lambda x: (x[0], x[1]))
|
|
|
|
for _d, _prio, payload in entries:
|
|
if isinstance(payload, tuple) and payload and payload[0] == "ref":
|
|
_, ref_no, amt, venc_txt = payload
|
|
refuerzo_rows.append(len(rows))
|
|
rows.append([f"{ref_no}° REFUERZO", format_money(amt), "", venc_txt])
|
|
else:
|
|
rr = payload
|
|
n = _to_int((rr or {}).get("qty"), 0)
|
|
amt = _to_float((rr or {}).get("amount"))
|
|
fecha_pago, venc = _resolve_payment_columns((rr or {}).get("date"), paid_if_today_or_past=paid_if_today_or_past)
|
|
cuota_rows.append(len(rows))
|
|
rows.append([
|
|
f"CUOTA N° {n}" if n > 0 else "CUOTA",
|
|
format_money(amt),
|
|
fecha_pago,
|
|
venc,
|
|
])
|
|
|
|
sections_meta.append({"title_row": title_row, "cuota_rows": cuota_rows})
|
|
return sections_meta
|
|
|
|
pre_ref_payload = ref_map_pre if len(pre_infos) > 1 else ref_map_pre.get(1, []) if ref_map_pre else None
|
|
post_ref_payload = ref_map_post if len(post_infos) > 1 else ref_map_post.get(1, []) if ref_map_post else None
|
|
|
|
# Comentado para no agregar cuotas desde cuotas_pre_calendarios / cuotas_post_calendarios.
|
|
# pre_sections = _add_sections("CUOTAS PRE ENTREGA", data.get("pre_calendarios"), pre_ref_payload, paid_if_today_or_past=True)
|
|
# post_sections = _add_sections("CUOTAS POST ENTREGA", data.get("post_calendarios"), post_ref_payload, paid_if_today_or_past=False)
|
|
|
|
# Solo usar cuotas_pre (ya transformado a pre_calendarios en el MAIN).
|
|
pre_sections = _add_sections("CUOTAS PRE ENTREGA", data.get("pre_calendarios"), pre_ref_payload, paid_if_today_or_past=True)
|
|
post_sections = []
|
|
|
|
def _add_simple(title, qty, amount, date_val, paid_if_today_or_past=False):
    """Append a flat installment section: one title row plus `qty` identical
    CUOTA rows, all sharing the same amount and date.

    Rows whose date text could not be parsed (but is non-empty) are recorded
    in merge_fecha_venc_rows so the fecha/vencimiento cells get merged later.
    No-op when qty does not resolve to a positive integer.
    """
    count = _to_int(qty, 0)
    if count <= 0:
        return
    rows.append([title, "", "", ""])
    for n in range(1, count + 1):
        # Resolved per row to preserve the original call pattern exactly.
        paid_col, due_col = _resolve_payment_columns(
            date_val,
            paid_if_today_or_past=paid_if_today_or_past,
            preserve_unparsed_in_fecha_pago=True,
        )
        rows.append([f"CUOTA N° {n}", format_money(amount), paid_col, due_col])
        # Unparsable-but-present date landed in the paid column: flag the
        # freshly appended row for a later cell merge.
        if paid_col and not due_col and _to_date_any(date_val) is None and str(date_val or "").strip():
            merge_fecha_venc_rows.append(len(rows) - 1)
|
|
|
# Fallback: if no calendar-based PRE sections were produced, build a simple
# one from the flat pre_qty / pre_amount / pre_date variables.
if not pre_sections:
    _add_simple("CUOTAS PRE ENTREGA", data.get("pre_qty"), data.get("pre_amount"), data.get("pre_date"), paid_if_today_or_past=True)

# Commented out so cuotas_post is not used and no CUOTAS POST ENTREGA
# section is added.
# if not post_sections:
#     _add_simple("CUOTAS POST ENTREGA", data.get("post_qty"), data.get("post_amount"), data.get("post_date"), paid_if_today_or_past=False)

# Totals block: remember each row index before appending so the styling
# pass can target them later.
total_pagado_row_idx = len(rows)
rows.append(["TOTAL PAGADO", format_money(data.get("total_pagado")), "", ""])
saldo_row_idx = len(rows)
rows.append(["SALDO A PAGAR", format_money(data.get("saldo_a_pagar")), "", ""])

# Signature block: spacer row, signature line, and signer label.
blank_row_idx = len(rows)
rows.append(["", "", "", ""])
line_row_idx = len(rows)
rows.append(["_______________________________________________", "", "", ""])
buyer_row_idx = len(rows)
rows.append(["COMPRADOR", "", "", ""])

# Metadata consumed by insert_table_at_marker's styling pass: row indices
# for merges, backgrounds, borders, and alignment.
meta = {
    "column_count": 4,
    "info_rows": info_rows,
    "green_rows": info_rows[:],  # copy: green highlight applies to the same rows
    "header_row_index": header_row_index,
    "pre_sections": pre_sections,
    "post_sections": post_sections,
    "merge_fecha_venc_rows": merge_fecha_venc_rows,
    "refuerzo_rows": refuerzo_rows,
    "total_pagado_row": total_pagado_row_idx,
    "saldo_row": saldo_row_idx,
    "blank_row": blank_row_idx,
    "line_row": line_row_idx,
    "signer_row": buyer_row_idx,
}

return rows, meta
|
|
|
|
|
|
# ============================================================
|
|
# Insert table at marker (IDEMPOTENTE + GUARD)
|
|
# ============================================================
|
|
def _wait_doc_ready(docs_service, doc_id, tries=8):
    """Poll the document until its main body (or any tab) shows content.

    Retries up to `tries` times with exponential backoff (0.5s, x1.6,
    capped at 3s), publishing debug counters on every attempt. If the
    document never looks ready, returns one final fresh fetch anyway.
    """
    wait_s = 0.5
    for _attempt in range(tries):
        doc = docs_get(docs_service, doc_id)
        body_content = (doc.get("body") or {}).get("content", []) or []
        content_count = len(body_content)
        has_tabs = 1 if (doc.get("tabs") or []) else 0
        _sv("dbg_doc_main_len", str(content_count))
        _sv("dbg_doc_tabs_present", str(has_tabs))
        if content_count > 0 or has_tabs == 1:
            return doc
        time.sleep(wait_s)
        wait_s = min(wait_s * 1.6, 3.0)
    # Last resort: hand back whatever the API returns now.
    return docs_get(docs_service, doc_id)
|
|
|
|
|
|
def insert_table_at_marker(docs_service, doc_id, marker, rows, meta):
    """Replace the `marker` text in the document with a styled proforma table.

    Idempotent: if a proforma table already exists (proforma_table_exists),
    only status vars are set and nothing is modified. Otherwise the marker
    range is deleted, a table is inserted in its place, cells are filled
    (in reverse index order so earlier inserts don't shift later targets),
    and styling (merges, backgrounds, borders, signature formatting) is
    applied in separate batchUpdate passes.

    Raises RuntimeError when the marker cannot be found or the inserted
    table cannot be located on re-read.
    """
    doc = _wait_doc_ready(docs_service, doc_id)

    # Idempotency guard: table already present -> report and bail out.
    if proforma_table_exists(doc):
        _sv("gdoc_status", "OK_ALREADY_DONE")
        _sv("gdoc_error", "")
        _sv("gdoc_marker_found", "0")
        _sv("gdoc_marker_missing_but_table_found", "1")
        return

    found = find_marker(doc, marker)
    hit = (found or {}).get("hit")

    if not hit:
        # Publish what we looked at before failing, to ease debugging.
        _sv("gdoc_marker_found", "0")
        _sv("gdoc_marker", str(marker))
        _sv("dbg_marker_candidates", "|".join((found or {}).get("candidates") or [])[:800])
        raise RuntimeError("No encontre el marcador en el BODY/TABS del documento: " + str(marker))

    scope_kind = hit.get("scope_kind", "main")
    tab_id = hit.get("tabId")
    start = int(hit["start"])
    end = int(hit["end"])
    # Column count from meta; fall back to the first row's width, then 1.
    num_cols = int(meta.get("column_count") or (len(rows[0]) if rows else 1))

    _sv("gdoc_marker_found", "1")
    _sv("gdoc_marker_used", str(hit.get("used", marker)))
    _sv("gdoc_marker_mode", str(hit.get("mode", "")))
    _sv("gdoc_marker_scope", str(scope_kind))
    _sv("gdoc_marker_tabId", "" if tab_id is None else str(tab_id))
    _sv("gdoc_marker_start", str(start))
    _sv("gdoc_marker_end", str(end))

    # Pass 1: remove the marker text and insert an empty table at its start.
    reqs = [
        {"deleteContentRange": {"range": _range(start, end, tab_id)}},
        {"insertTable": {"rows": len(rows), "columns": num_cols, "location": _loc(start, tab_id)}},
    ]
    _batch_update(docs_service, doc_id, reqs)

    # Re-read to learn the real indices of the table we just inserted.
    doc2 = docs_get(docs_service, doc_id)
    table, table_start = get_first_table_after_index(doc2, scope_kind, tab_id, start)
    if not table:
        raise RuntimeError("Inserte la tabla pero no pude ubicarla al releer el doc.")

    # Pass 2: fill the cells. Collect (index, request) pairs, then apply in
    # descending index order so each insertText doesn't shift the indices of
    # the ones still pending.
    fill_items = []
    for r in range(len(rows)):
        for c in range(num_cols):
            txt = ""
            if c < len(rows[r]) and rows[r][c] is not None:
                txt = str(rows[r][c])
            if txt.strip() == "":
                continue  # leave empty cells untouched
            idx = get_cell_insert_index(table, r, c)
            fill_items.append((idx, {"insertText": {"location": _loc(idx, tab_id), "text": txt}}))

    if fill_items:
        fill_items.sort(key=lambda x: x[0], reverse=True)
        _batch_update(docs_service, doc_id, [req for _, req in fill_items])

    # Pass 3: backgrounds and cell merges driven by meta's row indices.
    GREEN = _rgb(0.82, 0.91, 0.78)
    YELLOW = _rgb(1.0, 0.95, 0.45)
    GRAY = _rgb(0.90, 0.90, 0.90)

    style = []

    # Info rows: merge everything after the first column into one cell.
    for rr in meta.get("info_rows", []):
        if num_cols > 1:
            style.append(_merge_req(table_start, tab_id, rr, 1, 1, num_cols - 1))

    for rr in meta.get("green_rows", []):
        style.append(_cell_bg_req(table_start, tab_id, rr, 0, 1, num_cols, GREEN))

    # Header row gray, the row right below it yellow.
    hdr = meta["header_row_index"]
    style.append(_cell_bg_req(table_start, tab_id, hdr, 0, 1, num_cols, GRAY))
    style.append(_cell_bg_req(table_start, tab_id, hdr + 1, 0, 1, num_cols, YELLOW))

    # Section title rows: yellow background + merged title cell.
    for sec in meta.get("pre_sections", []):
        tr = sec.get("title_row")
        if tr is not None:
            style.append(_cell_bg_req(table_start, tab_id, tr, 0, 1, num_cols, YELLOW))
            if num_cols > 1:
                style.append(_merge_req(table_start, tab_id, tr, 1, 1, num_cols - 1))

    for sec in meta.get("post_sections", []):
        tr = sec.get("title_row")
        if tr is not None:
            style.append(_cell_bg_req(table_start, tab_id, tr, 0, 1, num_cols, YELLOW))
            if num_cols > 1:
                style.append(_merge_req(table_start, tab_id, tr, 1, 1, num_cols - 1))

    for rr in meta.get("refuerzo_rows", []):
        style.append(_cell_bg_req(table_start, tab_id, rr, 0, 1, num_cols, YELLOW))

    # Totals: yellow rows, with trailing cells merged from column 2 on.
    total_pagado_row = int(meta.get("total_pagado_row"))
    saldo_row = int(meta.get("saldo_row"))
    style.append(_cell_bg_req(table_start, tab_id, total_pagado_row, 0, 1, num_cols, YELLOW))
    style.append(_cell_bg_req(table_start, tab_id, saldo_row, 0, 1, num_cols, YELLOW))

    if num_cols > 2:
        style.append(_merge_req(table_start, tab_id, total_pagado_row, 2, 1, num_cols - 2))
        style.append(_merge_req(table_start, tab_id, saldo_row, 2, 1, num_cols - 2))

    # Rows flagged by build_rows: merge the fecha/vencimiento columns (2..3).
    for rr in meta.get("merge_fecha_venc_rows", []):
        if num_cols >= 4:
            style.append(_merge_req(table_start, tab_id, rr, 2, 1, 2))

    if style:
        _batch_update(docs_service, doc_id, style)

    # Pass 4 (best-effort): signature area styling. Failures here are only
    # recorded in a debug var; they never fail the whole insertion.
    doc3 = docs_get(docs_service, doc_id)
    table3, _ = get_first_table_after_index(doc3, scope_kind, tab_id, start)
    if table3:
        try:
            blank_row = int(meta.get("blank_row"))
            line_row = int(meta.get("line_row"))
            sg_row = int(meta.get("signer_row"))

            style_extra = []

            # Solid black bottom border to close off the totals block.
            black = {"width": {"magnitude": 1.0, "unit": "PT"},
                     "dashStyle": "SOLID",
                     "color": {"color": {"rgbColor": {"red": 0, "green": 0, "blue": 0}}}}
            br = _cell_borders_req(table_start, tab_id, saldo_row, 0, 1, num_cols, bottom=black)
            if br:
                style_extra.append(br)

            # Merge the three signature rows into single full-width cells.
            style_extra.append(_merge_req(table_start, tab_id, blank_row, 0, 1, num_cols))
            style_extra.append(_merge_req(table_start, tab_id, line_row, 0, 1, num_cols))
            style_extra.append(_merge_req(table_start, tab_id, sg_row, 0, 1, num_cols))

            # Hide all borders on the signature rows except the top edge of
            # the blank row (which doubles as the table's closing line).
            nb = _border_none()
            black_top = {"width": {"magnitude": 1.0, "unit": "PT"},
                         "dashStyle": "SOLID",
                         "color": {"color": {"rgbColor": {"red": 0, "green": 0, "blue": 0}}}}
            br1 = _cell_borders_req(table_start, tab_id, blank_row, 0, 1, num_cols, top=black_top, bottom=nb, left=nb, right=nb)
            br2 = _cell_borders_req(table_start, tab_id, line_row, 0, 1, num_cols, top=nb, bottom=nb, left=nb, right=nb)
            br3 = _cell_borders_req(table_start, tab_id, sg_row, 0, 1, num_cols, top=nb, bottom=nb, left=nb, right=nb)
            for x in (br1, br2, br3):
                if x:
                    style_extra.append(x)

            # Center the signature line.
            l_st, l_en = get_cell_text_range(table3, line_row, 0)
            style_extra.append({
                "updateParagraphStyle": {
                    "range": _range(l_st, l_en, tab_id),
                    "paragraphStyle": {"alignment": "CENTER"},
                    "fields": "alignment",
                }
            })

            # Center and bold the signer label ("COMPRADOR").
            s_st, s_en = get_cell_text_range(table3, sg_row, 0)
            style_extra.append({
                "updateParagraphStyle": {
                    "range": _range(s_st, s_en, tab_id),
                    "paragraphStyle": {"alignment": "CENTER"},
                    "fields": "alignment",
                }
            })
            style_extra.append({
                "updateTextStyle": {
                    "range": _range(s_st, s_en, tab_id),
                    "textStyle": {"bold": True},
                    "fields": "bold",
                }
            })

            _batch_update(docs_service, doc_id, style_extra)
        except Exception as _e:
            _sv("dbg_signer_style_error", str(_e))

    _sv("gdoc_status", "OK")
    _sv("gdoc_error", "")
|
|
|
|
|
# Rocketbot quirk FIX
# NOTE(review): presumably Rocketbot executes this script in a scope where
# names defined here are locals rather than module globals, so functions
# defined above could fail to resolve their helpers at call time; this
# promotes everything to globals. Confirm against the Rocketbot runtime.
globals().update(locals())
|
|
|
|
|
|
# ============================================================
|
|
# MAIN
|
|
# ============================================================
|
|
try:
    # Marker text to be replaced with the table; defaults to TABLA_PROFORMA.
    marker = _gvs("gdoc_marker", "TABLA_PROFORMA")
    _sv("dbg_marker_var", marker)

    # Service-account credentials: required, resolved relative to the
    # Rocketbot base dir when not absolute, and must exist on disk.
    cred_path = _gvs("gdoc_sa_json2", "")
    if cred_path == "":
        raise RuntimeError("Falta gdoc_sa_json2 (ruta al JSON de cuenta de servicio)")
    if not os.path.isabs(cred_path):
        cred_path = os.path.join(base_dir, cred_path)
    if not os.path.exists(cred_path):
        raise RuntimeError("No existe JSON de cuenta de servicio: " + cred_path)

    # Optional domain-wide-delegation subject.
    impersonated_user = _gvs("gdoc_impersonated_user", "")

    _sv("gdoc_credentials_used", cred_path)
    _sv("gdoc_impersonated_user_used", impersonated_user)

    docs_service, drive_service, auth_mode = get_services(cred_path, impersonated_user)
    _sv("gdoc_auth_mode", auth_mode)

    # Target document comes from the current browser URL.
    url = _gvs("current_url", "")
    _sv("gdoc_url_used", url)

    doc_id = extract_doc_id_from_url(url)
    if doc_id == "":
        raise RuntimeError("No pude extraer documentId de la URL: " + url)

    # Convert (e.g. .docx on Drive) to a native Google Doc if needed.
    doc_id_final, meta_file, converted = ensure_docs_api_compatible(drive_service, doc_id)
    _sv("gdoc_original_id", doc_id)
    _sv("gdoc_id", doc_id_final)
    _sv("gdoc_file_name", meta_file.get("name", ""))
    _sv("gdoc_file_mimeType", meta_file.get("mimeType", ""))
    _sv("gdoc_converted", "1" if converted else "0")
    _sv("gdoc_url_final", f"https://docs.google.com/document/d/{doc_id_final}/edit")

    # Idempotency guard: skip everything if this exact doc was already
    # processed. SystemExit is swallowed by the handler at the bottom.
    done = _gvs("gdoc_proforma_done", "0") == "1"
    done_doc = _gvs("gdoc_proforma_done_docid", "")
    if done and done_doc == doc_id_final:
        _sv("gdoc_status", "OK_ALREADY_DONE")
        _sv("gdoc_error", "")
        raise SystemExit
|
|
|
|
# --------------------------------------------------------
# If pre-built calendars did not arrive, try to generate them.
#
# The use of cuotas_pre_calendarios / cuotas_post_calendarios is commented
# out, as is the use of cuotas_post: from this version on, the script only
# uses cuotas_pre to generate the table's installments.
# --------------------------------------------------------
# Frequency label (already accent-stripped/lowercased) -> months between
# installments.
FREQ_TO_MONTHS_LOCAL = {
    "mensual": 1,
    "bimestral": 2,
    "bimensual": 2,
    "trimestral": 3,
    "cuatrimestral": 4,
    "semestral": 6,
    "anual": 12,
}
# Translation table that strips Spanish acute accents from vowels.
_TRANS_LOCAL = str.maketrans("áéíóúÁÉÍÓÚ", "aeiouAEIOU")
|
|
|
|
def _norm_freq_local(freq):
    """Canonicalize a frequency label for FREQ_TO_MONTHS_LOCAL lookup:
    strip accents, lowercase, and drop hyphens and spaces.

    Returns "" for None.
    """
    if freq is None:
        return ""
    text = str(freq).strip()
    text = text.translate(_TRANS_LOCAL).lower()
    return text.replace("-", "").replace(" ", "")
|
|
|
|
def _add_months(d: dt.date, months: int) -> dt.date:
|
|
y = d.year + (d.month - 1 + months) // 12
|
|
m = (d.month - 1 + months) % 12 + 1
|
|
last_day = [31, 29 if (y % 4 == 0 and (y % 100 != 0 or y % 400 == 0)) else 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31][m - 1]
|
|
day = min(d.day, last_day)
|
|
return dt.date(y, m, day)
|
|
|
|
def _gen_calendarios_from_blocks(raw_blocks):
    """Expand installment block specs into per-installment calendars.

    Each valid block ({"qty", "date", "frequency", "amount"}) yields one
    calendar: a list of {"qty": n, "date": ISO date, "amount": str} dicts,
    spaced by the block's frequency (default: monthly). Invalid or
    incomplete blocks are skipped; non-list input yields [].
    """
    parsed = _parse_any(raw_blocks, [])
    if isinstance(parsed, dict):
        parsed = [parsed]
    calendars = []
    if not isinstance(parsed, list):
        return calendars
    for spec in parsed:
        if not isinstance(spec, dict):
            continue
        total = _to_int(spec.get("qty"), 0)
        first_due = _to_date_any(spec.get("date"))
        step = FREQ_TO_MONTHS_LOCAL.get(_norm_freq_local(spec.get("frequency")), 1)
        amt = _to_decimal(spec.get("amount"))
        # Need a positive count, a parseable start date, and a valid amount.
        if total <= 0 or first_due is None or amt is None:
            continue
        calendars.append([
            {
                "qty": k + 1,
                "date": _add_months(first_due, k * step).isoformat(),
                "amount": str(amt),
            }
            for k in range(total)
        ])
    return calendars
|
|
|
|
# Commented out so installments are not taken from cuotas_pre_calendarios
# nor from cuotas_post_calendarios.
# pre_calendarios = _parse_any(_gv("cuotas_pre_calendarios", None), None)
# post_calendarios = _parse_any(_gv("cuotas_post_calendarios", None), None)
# if not pre_calendarios:
#     pre_calendarios = _gen_calendarios_from_blocks(_gv("cuotas_pre", None))
# if not post_calendarios:
#     post_calendarios = _gen_calendarios_from_blocks(_gv("cuotas_post", None))

# Only use cuotas_pre.
# NOTE: the previous version forced
# _gen_calendarios_from_blocks(_gv("cuotas_pre", None)) directly, which was
# too restrictive because it only accepts {qty,date,frequency,amount}
# blocks. The "only cuotas_pre" requirement is kept, but these shapes are
# also accepted:
# - an already-expanded installment list [{qty,date,amount}, ...]
# - a list of installment lists [[{qty,date,amount}, ...], ...]
raw_cuotas_pre = _gv("cuotas_pre", None)
|
|
|
|
def _normalize_only_cuotas_pre(raw):
    """Normalize the cuotas_pre variable into a list of calendars.

    Accepted input shapes:
      1. list of lists of installment dicts -> sanitized as-is
      2. flat list of dicts: block specs (have "frequency") are expanded
         via _gen_calendarios_from_blocks; otherwise treated as one
         already-expanded calendar
      3. single dict: one block spec or a single installment
    Anything else falls back to classic block expansion.
    """
    parsed = _parse_any(raw, None)

    # Case 1: already a list of installment lists.
    if isinstance(parsed, list) and parsed and all(isinstance(x, list) for x in parsed):
        valid = []
        for cal in parsed:
            cur = []
            for row in cal:
                if not isinstance(row, dict):
                    continue
                amt = _to_decimal(row.get("amount"))
                if amt is None:
                    continue  # rows without a valid amount are dropped
                cur.append({
                    "qty": _to_int(row.get("qty"), 0),
                    "date": str(row.get("date") or ""),
                    "amount": str(amt),
                })
            if cur:
                valid.append(cur)
        if valid:
            return valid

    # Case 2: flat list — may be block specs or already-expanded rows.
    # NOTE(review): by operator precedence the per-item test reads as
    #   "frequency" in x OR ("qty" in x AND date parses AND amount present)
    # i.e. a block spec passes on the frequency key alone, while an expanded
    # row must have qty + parseable date + amount. This looks intentional
    # but confirm it is not meant to be ("frequency" in x or "qty" in x)
    # AND the date/amount checks.
    if isinstance(parsed, list) and parsed:
        if all(isinstance(x, dict) and ("frequency" in x or "qty" in x and _to_date_any(x.get("date")) is not None and x.get("amount") is not None) for x in parsed):
            has_frequency = any(isinstance(x, dict) and x.get("frequency") not in (None, "") for x in parsed)
            if has_frequency:
                return _gen_calendarios_from_blocks(raw)
            cur = []
            for row in parsed:
                if not isinstance(row, dict):
                    continue
                amt = _to_decimal(row.get("amount"))
                if amt is None:
                    continue
                cur.append({
                    "qty": _to_int(row.get("qty"), 0),
                    "date": str(row.get("date") or ""),
                    "amount": str(amt),
                })
            return [cur] if cur else []

    # Case 3: a single dict — block spec or single installment.
    if isinstance(parsed, dict):
        if parsed.get("frequency") not in (None, ""):
            return _gen_calendarios_from_blocks(raw)
        amt = _to_decimal(parsed.get("amount"))
        if amt is not None:
            return [[{
                "qty": _to_int(parsed.get("qty"), 0),
                "date": str(parsed.get("date") or ""),
                "amount": str(amt),
            }]]

    # Fallback: try classic block expansion.
    return _gen_calendarios_from_blocks(raw)
|
|
|
|
pre_calendarios = _normalize_only_cuotas_pre(raw_cuotas_pre)
# POST calendars intentionally disabled in this version.
post_calendarios = None

# All the values build_rows needs, pulled from Rocketbot variables.
data = {
    "nombre_comprador": _gvs("nombre_comprador", ""),
    "numero_documento": _gvs("numero_documento", ""),
    "domicilio": _gvs("domicilio", ""),
    "telefono": _gvs("telefono", ""),
    "email": _gvs("email", ""),
    "proyecto": _gvs("proyecto", ""),
    "producto_odoo": _gvs("producto_odoo", ""),
    "valor_total_compra": _gv("valor_total_compra", None),
    # Accept both spellings of the variable (pago_senia / pago_sena).
    "pago_senia": _gv("pago_senia", _gv("pago_sena", None)),
    "fecha_pago_sena": _gv("fecha_pago_sena", None),
    "pago_inicial": _gv("pago_inicial", None),
    "fecha_pago_inicial": _gv("fecha_pago_inicial", None),
    "refuerzos": _parse_any(_gv("refuerzos", None), []),
    "pre_calendarios": pre_calendarios,
    "post_calendarios": post_calendarios,
    "pre_qty": _gv("pre_qty", None),
    "pre_amount": _gv("pre_amount", None),
    "pre_date": _gv("pre_date", None),
    # Commented out so cuotas_post is not used.
    # "post_qty": _gv("post_qty", None),
    # "post_amount": _gv("post_amount", None),
    # "post_date": _gv("post_date", None),
    "post_qty": None,
    "post_amount": None,
    "post_date": None,
    "total_pagado": _gv("total_pagado", None),
    "saldo_a_pagar": _gv("saldo_a_pagar", None),
}

# Explicit normalization of the commonly used money fields to Decimal.
# If a variable is empty or missing it becomes 0 instead of None.
for k in (
    "valor_total_compra",
    "pago_senia",
    "pago_inicial",
    "pre_amount",
    "post_amount",
    "total_pagado",
    "saldo_a_pagar",
):
    data[k] = _to_decimal_zero(data.get(k))

# Build the table model and write it into the document at the marker.
rows, meta = build_rows(data)
insert_table_at_marker(docs_service, doc_id_final, marker, rows, meta)

# Record completion so a re-run on the same document becomes a no-op.
_sv("gdoc_proforma_done", "1")
_sv("gdoc_proforma_done_docid", doc_id_final)
|
|
|
|
except SystemExit:
    # Raised by the idempotency guard above — already done, not an error.
    pass
except Exception as e:
    # Surface the failure to Rocketbot status vars, then re-raise so the
    # bot still sees the exception. The inner try guards against SetVar
    # itself failing.
    try:
        SetVar("gdoc_status", "ERROR")
        SetVar("gdoc_error", str(e))
    except Exception:
        pass
    raise