Skip to main content

KeyError: 'SQLALCHEMY_TRACK_MODIFICATIONS' — I keep getting this error even after trying the solutions from previous posts

I was trying to make a dashboard that would have an API for retrieving data from the database; app.py would contain all the analytics code, and then I would design the dashboard itself. The problem is that I was following a tutorial on APIs and everything was going well until I got this error. I looked at the answers to similar errors on Stack Overflow but couldn't make much sense of them.

http://192.168.224.111:5000/dataapi/jsondata

This is the address which when loaded is causing this error

Here is the code:-

dataapi.py

from flask import Flask, request, jsonify 
from flask_sqlalchemy import SQLAlchemy 
from flask_marshmallow import Marshmallow 
from flask_restful import Resource, Api


# --- Application and extension setup ----------------------------------------
# NOTE(review): this module builds its OWN Flask app.  swagger.yml's
# operationId "dataapi.DataManager.get" makes connexion import this module,
# so the connexion app in app.py and this app are two separate instances.
# Presumably the KeyError happens when config set on one app is read through
# the other — verify which app actually serves the failing request.
dataapi = Flask(__name__) 
api = Api(dataapi) 
# SQLite database file; created next to the app on first use.
dataapi.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///jsondata.db' 
# Disable the modification-tracking signal machinery (saves memory).
dataapi.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False 
db = SQLAlchemy(dataapi) 
ma = Marshmallow(dataapi)

class Data(db.Model):
    """SQLAlchemy model for one insight record from the JSON dataset.

    Most columns are strings even when the sample payload holds numbers,
    because the source JSON uses empty strings ("") for missing values
    (see the sample records at the bottom of the post).
    """
    # Surrogate auto-increment primary key; not part of the JSON payload.
    id = db.Column(db.Integer, primary_key=True)
    end_year = db.Column(db.String(32))
    intensity = db.Column(db.String(32))
    sector = db.Column(db.String(32))
    topic = db.Column(db.String(32))
    # NOTE(review): declared Integer, but the sample payload supplies a
    # string ("Annual Energy Outlook") — SQLite stores it anyway due to its
    # dynamic typing; confirm the intended column type.
    insight = db.Column(db.Integer)
    url = db.Column(db.String(1000))
    region = db.Column(db.String(50))
    start_year = db.Column(db.String(50))
    impact = db.Column(db.String(50))
    added = db.Column(db.String(50))
    published = db.Column(db.String(50))
    country = db.Column(db.String(50))
    relevance = db.Column(db.Integer)
    pestle = db.Column(db.String(50))
    source = db.Column(db.String(50))
    title = db.Column(db.String(100))
    likelihood = db.Column(db.Integer)
    
    def __init__(self, end_year, intensity, sector, topic, insight, url, region, start_year, impact, added, published, country, relevance, pestle, source, title, likelihood):
        """Positional constructor taking every column except the auto id."""
        self.end_year = end_year
        self.intensity = intensity
        self.sector = sector
        self.topic = topic
        self.insight = insight
        self.url = url
        self.region = region
        self.start_year = start_year
        self.impact = impact
        self.added = added
        self.published = published
        self.country = country
        self.relevance = relevance
        self.pestle = pestle
        self.source = source
        self.title =title
        self.likelihood =likelihood


class DataSchema(ma.Schema):
    """Marshmallow schema that serializes Data rows to JSON.

    The field list mirrors the columns of the Data model one-for-one.
    """
    class Meta:
        # Bug fix: the original listed 'liklihood' (typo), which matches no
        # model attribute, so the 'likelihood' value was silently dropped
        # from every serialized record.
        fields = ('id', 'end_year', 'intensity', 'sector', 'topic', 'insight', 'url', 'region', 'start_year', 'impact', 'added', 'published', 'country', 'relevance', 'pestle', 'source', 'title', 'likelihood')

# Create tables for all models defined above (no-op if they already exist).
# NOTE(review): Flask-SQLAlchemy 3.x requires an application context here
# (`with dataapi.app_context(): db.create_all()`); this bare call only works
# on older versions — confirm the installed version.
db.create_all()

# Serializer instances: one for a single row, one for lists of rows.
data_schema = DataSchema() 
datas_schema = DataSchema(many=True)

class DataManager(Resource):
    """RESTful CRUD endpoint for Data rows (mounted at /dataapi/jsondata)."""

    # JSON keys shared by post() and put(); the order matches the positional
    # parameters of Data.__init__, so Data(*values) stays correct.
    FIELDS = ('end_year', 'intensity', 'sector', 'topic', 'insight', 'url',
              'region', 'start_year', 'impact', 'added', 'published',
              'country', 'relevance', 'pestle', 'source', 'title',
              'likelihood')

    @staticmethod
    def get():
        """Return the record selected by ?id=<pk>, or all records if no id."""
        record_id = request.args.get('id')  # idiomatic: no try/except needed
        if not record_id:
            return jsonify(datas_schema.dump(Data.query.all()))
        record = Data.query.get(record_id)
        if record is None:
            # Was: dumped None and returned {}; now an explicit message.
            return jsonify({'Message': f'No data with ID {record_id}.'})
        return jsonify(data_schema.dump(record))

    @staticmethod
    def post():
        """Bulk-insert the JSON array from the request body.

        Replaces the original `while True` + bare `except:` pattern, which
        used an exception to end the loop and therefore swallowed *every*
        error (including real bugs) while always reporting success.  A
        malformed element now rolls back the whole batch and reports why.
        """
        records = request.json or []
        try:
            for record in records:
                values = [record[field] for field in DataManager.FIELDS]
                db.session.add(Data(*values))
        except (KeyError, TypeError) as exc:
            db.session.rollback()  # nothing partial is persisted
            return jsonify({'Message': f'Malformed payload: {exc!r}'})
        db.session.commit()  # one commit for the whole batch, not per row
        return jsonify({'Message': f'{len(records)} records inserted.'})

    @staticmethod
    def put():
        """Overwrite every field of the record selected by ?id=<pk>."""
        record_id = request.args.get('id')
        if not record_id:
            return jsonify({ 'Message': 'Must provide the data ID' })

        record = Data.query.get(record_id)
        if record is None:
            # Was: unhandled AttributeError when the id did not exist.
            return jsonify({'Message': f'No data with ID {record_id}.'})

        try:
            # Field names double as attribute names, so setattr replaces the
            # original 17 paired assignment lines.
            for field in DataManager.FIELDS:
                setattr(record, field, request.json[field])
        except (KeyError, TypeError) as exc:
            db.session.rollback()
            return jsonify({'Message': f'Malformed payload: {exc!r}'})

        db.session.commit()
        return jsonify({
            'Message': 'data altered.'
        })

    @staticmethod
    def delete():
        """Delete the record selected by ?id=<pk>."""
        record_id = request.args.get('id')
        if not record_id:
            return jsonify({ 'Message': 'Must provide the data ID' })

        record = Data.query.get(record_id)
        if record is None:
            # Was: db.session.delete(None) -> unhandled ORM error.
            return jsonify({'Message': f'No data with ID {record_id}.'})

        db.session.delete(record)
        db.session.commit()

        return jsonify({
            'Message': f'Data {str(record_id)} deleted.'
        })


# Mount the resource: GET/POST/PUT/DELETE on this one route all dispatch to
# the matching DataManager static method.
api.add_resource(DataManager, '/dataapi/jsondata')

if __name__ == '__main__':
    # Debug server for running dataapi.py directly (bypassing app.py/connexion).
    dataapi.run(debug=True)

app.py

from flask import render_template
import connexion

# connexion wraps a Flask app and wires HTTP routes from the OpenAPI spec.
# Loading swagger.yml resolves its operationId, which imports dataapi.py
# (and therefore constructs that module's own, separate Flask app).
app = connexion.App(__name__, specification_dir="./")
app.add_api("swagger.yml")

@app.route("/")
def home():
    """Render the static landing page (templates/home.html)."""
    return render_template("home.html")

if __name__ == "__main__":
    # Bind to all interfaces so the LAN address (192.168.x.x) is reachable.
    app.run(host="0.0.0.0", port=5000, debug=True)

swagger.yml

openapi: 3.0.0
info:
  title: "API to return Data"
  # Typos fixed: "dels" -> "deals", "visalization" -> "visualization".
  description: "this is the api which deals with the data being used to create the visualization"
  version: "1.0.0"

servers:
  - url: "/dataapi"

paths:
  /jsondata:
    get:
      # connexion resolves this dotted path by importing the dataapi module
      # and calling DataManager.get — loading this spec imports dataapi.py.
      operationId: "dataapi.DataManager.get"
      tags:
        - "Data"
      summary: "Read the data in the file"
      responses:
        "200":
          description: "Successfully read the data"

home.html

<!DOCTYPE html>
<!-- Minimal placeholder page rendered by app.py's home() route ("/"). -->
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta http-equiv="X-UA-Compatible" content="IE=edge">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Hello world</title>
</head>
<body>
    Hello World
</body>
</html>

Just in case here is the data in the database

{
            "end_year": "",
            "intensity": 6,
            "sector": "Energy",
            "topic": "gas",
            "insight": "Annual Energy Outlook",
            "url": "http://www.eia.gov/outlooks/aeo/pdf/0383(2017).pdf",
            "region": "Northern America",
            "start_year": "",
            "impact": "",
            "added": "January, 20 2017 03:51:25",
            "published": "January, 09 2017 00:00:00",
            "country": "United States of America",
            "relevance": 2,
            "pestle": "Industries",
            "source": "EIA",
            "title": "U.S. natural gas consumption is expected to increase during much of the projection period.",
            "likelihood": 3
        },
        {
            "end_year": "",
            "intensity": 6,
            "sector": "Energy",
            "topic": "oil",
            "insight": "Annual Energy Outlook",
            "url": "http://www.eia.gov/outlooks/aeo/pdf/0383(2017).pdf",
            "region": "Northern America",
            "start_year": "",
            "impact": "",
            "added": "January, 20 2017 03:51:24",
            "published": "January, 09 2017 00:00:00",
            "country": "United States of America",
            "relevance": 2,
            "pestle": "Industries",
            "source": "EIA",
            "title": "Reference case U.S. crude oil production is projected to recover from recent declines.",
            "likelihood": 3
        },
        {
            "end_year": "",
            "intensity": 6,
            "sector": "Energy",
            "topic": "consumption",
            "insight": "Annual Energy Outlook",
            "url": "http://www.eia.gov/outlooks/aeo/pdf/0383(2017).pdf",
            "region": "Northern America",
            "start_year": "",
            "impact": "",
            "added": "January, 20 2017 03:51:23",
            "published": "January, 09 2017 00:00:00",
            "country": "United States of America",
            "relevance": 2,
            "pestle": "Industries",
            "source": "EIA",
            "title": "U.S. petroleum consumption is projected to remain below the 2005 level.",
            "likelihood": 3
        }

Please help me figure out why I am getting this error, and please also suggest any modifications to my code.



source https://stackoverflow.com/questions/76274057/keyerror-sqlalchemy-track-modifications-i-keep-getting-this-error-even-afte

Comments

Popular posts from this blog

ValueError: X has 10 features, but LinearRegression is expecting 1 features as input

So, I am trying to predict with the model but it's throwing an error saying it has 10 features while it expects only 1. I am confused — can anyone help me with it? More importantly, it's not working for me, yet it works perfectly fine when my friend runs it. Does anyone know the reason for that? cv = KFold(n_splits = 10) all_loss = [] for i in range(9): # 1st for loop over polynomial orders poly_order = i X_train = make_polynomial(x, poly_order) loss_at_order = [] # initiate a set to collect loss for CV for train_index, test_index in cv.split(X_train): print('TRAIN:', train_index, 'TEST:', test_index) X_train_cv, X_test_cv = X_train[train_index], X_test[test_index] t_train_cv, t_test_cv = t[train_index], t[test_index] reg.fit(X_train_cv, t_train_cv) loss_at_order.append(np.mean((t_test_cv - reg.predict(X_test_cv))**2)) # collect loss at fold all_loss.append(np.mean(loss_at_order)) # collect loss at order plt.plot(np.log(al...

Sorting large arrays of big numeric stings

I was solving bigSorting() problem from hackerrank: Consider an array of numeric strings where each string is a positive number with anywhere from to digits. Sort the array's elements in non-decreasing, or ascending order of their integer values and return the sorted array. I know it works as follows: def bigSorting(unsorted): return sorted(unsorted, key=int) But I didnt guess this approach earlier. Initially I tried below: def bigSorting(unsorted): int_unsorted = [int(i) for i in unsorted] int_sorted = sorted(int_unsorted) return [str(i) for i in int_sorted] However, for some of the test cases, it was showing time limit exceeded. Why is it so? PS: I dont know exactly what those test cases were as hacker rank does not reveal all test cases. source https://stackoverflow.com/questions/73007397/sorting-large-arrays-of-big-numeric-stings

How to load Javascript with imported modules?

I am trying to import modules from tensorflowjs, and below is my code. test.html <!DOCTYPE html> <html lang="en"> <head> <meta charset="UTF-8"> <title>Document</title </head> <body> <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@2.0.0/dist/tf.min.js"></script> <script type="module" src="./test.js"></script> </body> </html> test.js import * as tf from "./node_modules/@tensorflow/tfjs"; import {loadGraphModel} from "./node_modules/@tensorflow/tfjs-converter"; const MODEL_URL = './model.json'; const model = await loadGraphModel(MODEL_URL); const cat = document.getElementById('cat'); model.execute(tf.browser.fromPixels(cat)); Besides, I run the server using python -m http.server in my command prompt(Windows 10), and this is the error prompt in the console log of my browser: Failed to loa...