Skip to main content

Changes not applied to Excel file after calculation using MS Graph API

I'm using Python to create a wrapper for MS Graph Excel API to handle calculation.

from typing import Any, Union
from urllib.parse import urljoin

import requests


class MsGraph:
    """Minimal wrapper around the Microsoft Graph v1.0 REST API.

    Covers the OneDrive file operations and workbook endpoints needed to
    upload an Excel file, force a recalculation inside a persisted workbook
    session, and download the result.
    """

    # Root of every Graph v1.0 endpoint; relative paths are joined onto it.
    _graph_uri = "https://graph.microsoft.com/v1.0/"

    def __init__(self, token: str, file_path: str):
        """Store the OAuth bearer token and the local Excel file path."""
        self.file_path = file_path
        self.token = token
        # Sent with every request; merged over any per-call headers.
        self.request_headers = {"Authorization": f"Bearer {self.token}"}

    def _call_api(self, endpoint: str, req_type="GET", payload=None, headers=None) -> Any:
        """Issue one Graph request and return the decoded response.

        Args:
            endpoint: Path relative to the Graph root, or a full Graph URL.
            req_type: HTTP verb ("GET", "POST", "PUT", "DELETE", ...).
            payload: Request body. Sent as JSON when the Content-Type header
                contains "json", otherwise as raw data.
            headers: Optional extra headers. The Authorization header is
                always merged in and wins on conflict. The caller's dict is
                not modified.

        Returns:
            The parsed JSON body, or the raw text for 204 No Content /
            302 Found responses, which carry no JSON.

        Raises:
            requests.HTTPError: For any 4xx/5xx response.
        """
        # Build a fresh dict so the caller's headers dict is never mutated.
        merged_headers = dict(headers or {})
        merged_headers.update(self.request_headers)
        endpoint = self._sanitize_endpoint(endpoint)

        kwargs = {"headers": merged_headers}

        # `is not None` (not truthiness) so an empty body is still sent.
        if payload is not None:
            # Default to "" so a missing Content-Type cannot raise TypeError.
            content_type = merged_headers.get("Content-Type", "")
            kwargs["json" if "json" in content_type else "data"] = payload

        resp = requests.request(req_type, endpoint, **kwargs)
        resp.raise_for_status()
        # Compare with `in`/`==`, never `is`: 204 and 302 are ints above the
        # CPython small-int cache, so identity comparison is unreliable.
        if resp.status_code in (requests.codes.no_content, requests.codes.found):
            return resp.text

        return resp.json()

    def _sanitize_endpoint(self, uri: str) -> str:
        """Return *uri* unchanged if already absolute, else join it onto the Graph root."""
        if uri.startswith(self._graph_uri):
            return uri
        return urljoin(self._graph_uri, uri)

    def list_onedrive_files_for_auth_user(self) -> Any:
        """List the children of the signed-in user's OneDrive root folder."""
        return self._call_api("me/drive/root/children")

    def get_onedrive_id_from_user(self) -> Any:
        """List the drives available to the signed-in user."""
        return self._call_api("me/drives")

    def get_onedrive_id_for_app(self) -> Any:
        """List the drives visible to the application identity."""
        return self._call_api("drives")

    def upload_to_onedrive(self, file_name: str, content: Union[bytes, str]) -> Any:
        """Upload *content* to the user's OneDrive root as *file_name*.

        Args:
            content: File content in text or binary form.

        Returns:
            The driveItem resource for the uploaded file (its "id" addresses
            the file in the other methods).
        """
        headers = {"Content-Type": "text/plain"}
        drive_uri = f"me/drive/root:/{file_name}:/content"
        return self._call_api(drive_uri, "PUT", content, headers)

    def excel_read(self, drive_item_id: Any) -> Any:
        """Return the worksheets collection of the given workbook."""
        return self._call_api(f"me/drive/items/{drive_item_id}/workbook/worksheets")

    def excel_create_session(self, drive_item_id: Any) -> Any:
        """Create a persisted workbook session; its "id" is the workbook-session-id."""
        headers = {"Content-Type": "application/json"}
        # persistChanges=True asks Graph to write session changes back to the file.
        payload = {"persistChanges": True}

        uri = f"me/drive/items/{drive_item_id}/workbook/createSession"
        return self._call_api(uri, "POST", payload, headers)

    def excel_close_session(self, drive_item_id: Any, persist_session_id: str) -> Any:
        """Close the workbook session identified by *persist_session_id*."""
        headers = {
            "Content-Type": "application/json",
            "workbook-session-id": persist_session_id
        }

        uri = f"me/drive/items/{drive_item_id}/workbook/closeSession"
        return self._call_api(uri, "POST", None, headers)

    def excel_calculate(self, drive_item_id: Any, persist_session_id: str, calc_type="FullRebuild") -> Any:
        """Trigger a workbook calculation inside an existing session.

        Args:
            calc_type: One of "Recalculate", "Full", "FullRebuild".

        Raises:
            AttributeError: If *calc_type* is not a supported value.
                (Kept as AttributeError for backward compatibility, although
                ValueError would be more conventional.)
        """
        supported_types = ["Recalculate", "Full", "FullRebuild"]
        if calc_type not in supported_types:
            raise AttributeError(
                f"calculation type '{calc_type}' need to be one of supported: {supported_types}"
            )
        headers = {
            "Content-Type": "application/json",
            "workbook-session-id": persist_session_id
        }
        payload = {"calculationType": calc_type}

        calc_uri = f"me/drive/items/{drive_item_id}/workbook/application/calculate"
        return self._call_api(calc_uri, "POST", payload, headers)

    def download_from_onedrive(self, drive_item_id: Any, file_path: str) -> None:
        """Download the drive item's raw content into *file_path*.

        Raises:
            requests.HTTPError: For any 4xx/5xx response (prevents silently
                writing an error body to disk).
        """
        # _sanitize_endpoint avoids the double slash that `_graph_uri + "/me"`
        # would produce (the root already ends with "/").
        uri = self._sanitize_endpoint(f"me/drive/items/{drive_item_id}/content")
        response = requests.get(uri, headers=self.request_headers)
        response.raise_for_status()
        with open(file_path, "wb") as handle:
            handle.write(response.content)

    def delete_from_onedrive(self, drive_item_id: Any) -> Any:
        """Delete the given drive item."""
        drive_uri = f"me/drive/items/{drive_item_id}"
        return self._call_api(drive_uri, "DELETE")

    def read_excel_from_path(self) -> bytes:
        """Read and return the bytes of the local file at self.file_path."""
        with open(self.file_path, "rb") as excel_file:
            return excel_file.read()

I managed to authorize and get tokens. The file uploads correctly, the persistent session is created correctly, and the calculation triggers without any errors, but when I then download the Excel file the formulas have not been re-evaluated and I still see the old data (I check this programmatically using Apache POI).

        # End-to-end flow: upload -> create persisted session -> calculate
        # -> close session -> download result -> delete remote copy.
        msgraph = MsGraph(token, file_path)
        excel_file_content = msgraph.read_excel_from_path()

        file_name = os.path.basename(file_path)
        # upload_to_onedrive returns the created driveItem; its "id" is used
        # to address the file in every call below.
        drive_item = msgraph.upload_to_onedrive(file_name, excel_file_content)
        # The createSession response "id" is the workbook-session-id header value.
        persist_session_response = msgraph.excel_create_session(drive_item["id"])
        msgraph.excel_calculate(drive_item["id"], persist_session_response["id"])
        msgraph.excel_close_session(drive_item["id"], persist_session_response["id"])
        # NOTE(review): the download happens immediately after closeSession;
        # if downloaded formulas look stale, confirm the service has flushed
        # the persisted session back to the file before this point.
        msgraph.download_from_onedrive(drive_item["id"], file_path)
        msgraph.delete_from_onedrive(drive_item["id"])

Excel formulas seem to be filled correctly, because when I open the file with the Excel Online GUI and trigger re-evaluation/calculation with F9, everything gets updated correctly. Not sure what I'm doing wrong. I tried { persistChanges: true } in the session-creation body (and false, and without creating a session at all).



source https://stackoverflow.com/questions/73057052/changes-not-applied-to-excel-file-after-calculation-using-ms-graph-api

Comments

Popular posts from this blog

How to split a rinex file if I need 24 hours data

Trying to divide rinex file using the command gfzrnx but getting this error. While doing that getting this error msg 'gfzrnx' is not recognized as an internal or external command Trying to split rinex file using the command gfzrnx. also install'gfzrnx'. my doubt is I need to run this program in 'gfzrnx' or in 'cmdprompt'. I am expecting a rinex file with 24 hrs or 1 day data.I Have 48 hrs data in RINEX format. Please help me to solve this issue. source https://stackoverflow.com/questions/75385367/how-to-split-a-rinex-file-if-i-need-24-hours-data

ValueError: X has 10 features, but LinearRegression is expecting 1 features as input

So, I am trying to predict with the model but it's throwing an error saying the input has 10 features while it expects only 1. I am confused — can anyone help me with it? More importantly, it's not working for me even though it works perfectly fine when my friend runs it — does anyone know the reason for that? cv = KFold(n_splits = 10) all_loss = [] for i in range(9): # 1st for loop over polynomial orders poly_order = i X_train = make_polynomial(x, poly_order) loss_at_order = [] # initiate a set to collect loss for CV for train_index, test_index in cv.split(X_train): print('TRAIN:', train_index, 'TEST:', test_index) X_train_cv, X_test_cv = X_train[train_index], X_test[test_index] t_train_cv, t_test_cv = t[train_index], t[test_index] reg.fit(X_train_cv, t_train_cv) loss_at_order.append(np.mean((t_test_cv - reg.predict(X_test_cv))**2)) # collect loss at fold all_loss.append(np.mean(loss_at_order)) # collect loss at order plt.plot(np.log(al...

Sorting large arrays of big numeric strings

I was solving the bigSorting() problem from HackerRank: Consider an array of numeric strings where each string is a positive number with anywhere from to digits. Sort the array's elements in non-decreasing, or ascending order of their integer values and return the sorted array. I know it works as follows: def bigSorting(unsorted): return sorted(unsorted, key=int) But I didn't guess this approach earlier. Initially I tried the below: def bigSorting(unsorted): int_unsorted = [int(i) for i in unsorted] int_sorted = sorted(int_unsorted) return [str(i) for i in int_sorted] However, for some of the test cases, it was showing "time limit exceeded". Why is that? PS: I don't know exactly what those test cases were, as HackerRank does not reveal all test cases. source https://stackoverflow.com/questions/73007397/sorting-large-arrays-of-big-numeric-stings