I am implementing pagination via an AJAX call in Django. I have everything working on my dev server, but when I move to my outward-facing server to show the client, the GET requests for the next page still route back to my localhost.
This is what I get in the JavaScript console:
"GET http://127.0.0.1:8001/uploadedwords/?page=2&title=indextest"
This is what it should be:
"GET http://nameofmywebsite.com/uploadedwords/?page=2&title=indextest"
Is this a caching issue? Is it because I still have debug mode on? How does Django pagination decide which URL to send requests to?
Here is my views.py code; I am using Django REST Framework:
def uploadedword_list(request):
    """
    List all uploaded words for a text, or create a new one.
    """
    from mainsite.serializers import PaginatedUploadedWordSerializer

    if request.method == 'GET':
        submitted = SubmittedTextFile.objects.get(title=request.GET['title'])
        text = NlpParseText.objects.get(text=submitted)
        queryset = UploadedWord.objects.filter(user=request.user, text=text)
        paginator = Paginator(queryset, 500)
        # .get() avoids a KeyError when 'page' is absent; the
        # PageNotAnInteger branch below then serves page 1.
        page = request.GET.get('page')
        try:
            words = paginator.page(page)
        except PageNotAnInteger:
            # If page is not an integer, deliver the first page.
            words = paginator.page(1)
        except EmptyPage:
            # If page is out of range (e.g. 9999), deliver the last page.
            words = paginator.page(paginator.num_pages)
        serializer_context = {'request': request}
        serializer = PaginatedUploadedWordSerializer(words,
                                                     context=serializer_context)
        return JSONResponse(serializer.data)
    elif request.method == 'POST':
        data = JSONParser().parse(request)
        serializer = UploadedWordSerializer(data=data)
        if serializer.is_valid():
            serializer.save()
            return JSONResponse(serializer.data, status=201)
        return JSONResponse(serializer.errors, status=400)
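For what it's worth, DRF's paginated serializers build the next/previous links from the incoming request itself (essentially request.build_absolute_uri()), not from any setting, so the links carry whatever Host header the request arrived with. A minimal sketch of the mechanism:

# Sketch: pagination links inherit the request's Host header. If the
# proxy in front of Django forwards "Host: 127.0.0.1:8001", every
# generated link points back at localhost.
def next_page_url(request, page_number):
    # scheme + Host header + current path, with the query string replaced
    return request.build_absolute_uri('?page=%d' % page_number)

So the usual fix lives in the web-server layer rather than in Django: have the front-end proxy pass the public host through (for nginx, proxy_set_header Host $host;), or set USE_X_FORWARDED_HOST = True if the proxy sends X-Forwarded-Host. DEBUG has no effect on how these links are built.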
I am trying to implement a load-more button, so I am writing this view to load JSON data, but I do not understand why I am getting this error: No user profile found matching the query. Raised by: members.views.UserProfileUpdate.
Here is my view:
class PostJsonListView(View):
    def get(self, request, *args, **kwargs):
        upper = kwargs.get('num_posts')
        lower = upper - 3
        posts = list(Blog.objects.values()[lower:upper])
        # count() avoids loading every row just to get the total
        posts_size = Blog.objects.count()
        max_size = upper >= posts_size
        return JsonResponse({'data': posts, 'max': max_size}, safe=False)
This is my blog app's urls.py:
path('posts-json/<int:num_posts>',PostJsonListView.as_view(),name='json-view')
This is my members app's urls.py:
path('<slug:slug>/', UserProfileUpdate.as_view(), name='update-profile'),
If I enter any wrong URL, such as http://127.0.0.1:8000/sassassdsadasdd/, it gives me the same error: No user profile found matching the query. Raised by: members.views.UserProfileUpdate.
In your UserProfileUpdate view you are using the slug to fetch the profile object like this:
Profile.objects.get(username=slug)
But you should use the get_object_or_404 shortcut function instead:
from django.shortcuts import get_object_or_404
get_object_or_404(Profile, username=slug)
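In context, the lookup could live in the view's get_object (the class below assumes UserProfileUpdate is an UpdateView on your Profile model; adjust the names to your code):

from django.shortcuts import get_object_or_404
from django.views.generic import UpdateView

class UserProfileUpdate(UpdateView):
    model = Profile

    def get_object(self, queryset=None):
        # Raises Http404 (a clean "not found" response) instead of
        # letting Profile.DoesNotExist bubble up as a 500 error.
        return get_object_or_404(Profile, username=self.kwargs['slug'])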
Reference
I have a model named VisitorInfo in models.py which is as follows:
class VisitorInfo(models.Model):
    # To maintain clarity I am not showing all the fields.
    visitor_id = models.CharField(max_length=120)
    pages_visited = models.ManyToManyField(Page, blank=True)
    time_zone = models.CharField(max_length=120, null=True, blank=True)
    ip = models.CharField(max_length=120, null=True, blank=True)
    device_type = models.CharField(max_length=120, null=True)
The following is the view where I am using this model:
class Live_Now(LoginRequiredMixin, TemplateView):
    template_name = 'dashboard/live_now.html'

    def get_context_data(self, **kwargs):
        context = super(Live_Now, self).get_context_data(**kwargs)
        search = self.request.GET.get('q')
        if search:
            # Q_filter is built elsewhere (omitted from the question)
            if VisitorInfo.objects.filter(Q_filter).filter(active=True).count() > 0:
                context['list_no'] = VisitorInfo.objects.filter(Q_filter).filter(active=True)
            else:
                context['list_no'] = "N"
        return context
What I need is this: as soon as the live_now.html page loads and the Live_Now view runs, a new tab should open with the following URL.
url = 54.161.109.227:5000/visitor_id
I know that to open a new tab I have to put a script in live_now.html, like this:
<script type="text/javascript">
    window.open(url); // the URL written above
</script>
But how do I fetch the visitor_id of the visitor at runtime? Fetching visitor_id is a job for a Python function, while opening the new tab is JavaScript's job. How do I pass the value of a Python variable into a JavaScript program?
For the time being I am fetching the visitor_id of the first visitor, using the following function:
def myfunc():
    visitors = VisitorInfo.objects.all()
    for visitor in visitors[:1]:
        visitorId = visitor.visitor_id
    return str(visitorId)
You're making this much harder than it needs to be. If you need your JS to have a particular value, then pass it there.
def get_context_data(self, **kwargs):
    ... existing code ...
    context['visitor_id'] = Visitor.objects.first().user_id
    return context
...
url = "54.161.109.227:5000/{{ visitor_id }}";
window.open(url)
Of course you can just as easily pass the whole Visitor object to the template and do {{ visitor.user_id }}, it's up to you.
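One caveat with the snippet above: first() returns None on an empty table, so the attribute access would crash. A guarded version, using the model and field names from the question:

def get_context_data(self, **kwargs):
    context = super(Live_Now, self).get_context_data(**kwargs)
    # first() returns None on an empty queryset, so guard before
    # dereferencing; the field name follows the VisitorInfo model above.
    visitor = VisitorInfo.objects.first()
    context['visitor_id'] = visitor.visitor_id if visitor else ''
    return context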
I am new to Django and I'm making a demo chat website with Django 2.0. My goal is to save people's photos when they sign up, and to run a face-authentication Python script on the backend (which I have already built with the open-source face_recognition library) to recognise users when they log in. My script currently uses cv2 to take a photo and sends it to the face-recognition engine.
I have to save users' photos in a directory on the server side, with each image named after the user who signed up, so the authenticator can loop over the faces in that folder to find a match. When it finds one, I can return the name, query my database, and create a session for that particular user. (I know this is slow and resource-intensive, but since it is a demo I think I can get by; please also suggest a faster approach to face-based authentication if there is one.)
My User model looks like this:
class User(models.Model):
    username = models.CharField(max_length=100)
    name = models.CharField(max_length=100)
    age = models.CharField(max_length=100)
    image = models.ImageField()  # ImageField, not ImageFile, which does not exist
Can anybody experienced in Django guide me through the steps required to do this?
I assume that you have installed OpenCV (cv2 for Python), NumPy, and face_recognition.
utils.py
def recognize_face(id_image, q_image):
    """
    :param id_image: image of the face in the ID
    :param q_image: image of the face from the cam
    :return: True if the two faces match
    """
    q_face_encoding = face_recognition.face_encodings(id_image)[0]
    face_locations = face_recognition.face_locations(q_image)
    face_encodings = face_recognition.face_encodings(q_image, face_locations)
    # Initialise before the loop so an image with no faces returns False
    result = False
    # Loop through each face in this image
    for face_encoding in face_encodings:
        # See if the face is a match for the known face(s)
        match = face_recognition.compare_faces([q_face_encoding], face_encoding)
        if match[0]:
            result = True
            break
    return result
def _grab_image(path=None, stream=None, url=None):
    # if the path is not None, then load the image from disk
    if path is not None:
        image = cv2.imread(path)
    # otherwise, the image does not reside on disk
    else:
        # if the URL is not None, then download the image
        if url is not None:
            resp = requests.get(url)
            data = resp.content  # raw bytes (resp.text would be a decoded str)
        # if the stream is not None, then the image has been uploaded
        elif stream is not None:
            data = stream.read()
        # convert the image to a NumPy array and then read it into
        # OpenCV format
        image = np.asarray(bytearray(data), dtype="uint8")
        image = cv2.imdecode(image, cv2.IMREAD_COLOR)
    # return the image
    return image
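For a quick local check of the two helpers above (the file paths are placeholders):

# Load a known face and a query face from disk, then compare them.
id_img = _grab_image(path='known_faces/alice.jpg')    # placeholder path
q_img = _grab_image(path='captures/webcam_shot.jpg')  # placeholder path
print(recognize_face(id_img, q_img))  # True if the faces match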
views.py
class FaceVerifyAPIView(APIView):
    http_method_names = ['get', 'post', 'options']
    parser_classes = (parsers.MultiPartParser, parsers.FormParser)
    renderer_classes = (renderers.JSONRenderer,)

    def post(self, request, *args, **kwargs):
        serializer = FaceDectSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        data = serializer.validated_data
        id_image = data['id_image']
        q_image = data['q_image']  # or from request.user.image
        id_image = _grab_image(stream=id_image)
        q_image = _grab_image(stream=q_image)
        result = recognize_face(id_image, q_image)
        return JsonResponse({'message': str(result)}, status=200)

    def get(self, request, *args, **kwargs):
        # 405: this endpoint only does useful work on POST
        return JsonResponse({'message': "You're here, but use the POST method"}, status=405)
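To exercise the endpoint from a script (the URL and file names below are placeholders):

import requests

# POST the two images as multipart form data, matching the serializer's
# id_image / q_image fields.
with open('id.jpg', 'rb') as id_f, open('cam.jpg', 'rb') as q_f:
    resp = requests.post('http://localhost:8000/api/face-verify/',
                         files={'id_image': id_f, 'q_image': q_f})
print(resp.json())  # {'message': 'True'} on a match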
To capture a photo of the face in the browser, take a look at this reference.
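For the naming requirement in the question (saving each photo under the user's name), one option is an upload_to callable on the ImageField; the helper below is a hypothetical sketch, not part of the answer above:

import os

def user_photo_path(instance, filename):
    # Hypothetical helper: store each photo as MEDIA_ROOT/faces/<name>.<ext>
    ext = os.path.splitext(filename)[1]
    return os.path.join('faces', instance.name + ext)

class User(models.Model):
    username = models.CharField(max_length=100)
    name = models.CharField(max_length=100)
    age = models.CharField(max_length=100)
    image = models.ImageField(upload_to=user_photo_path)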
So I'm building a web crawler that logs into my bank account and gathers data about my spending. I originally planned to use only Scrapy, but that didn't work because the First Merit page uses JavaScript to log in, so I piled Selenium on top.
My code logs in (you first need to input the username, then the password on a separate page, rather than together as on most sites) through a series of yielded Requests with specific callback functions that handle the next step.
import scrapy
from scrapy import Request
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
import selenium
import time

class LoginSpider(scrapy.Spider):
    name = 'www.firstmerit.com'
    # allowed_domains = ['https://www.firstmeritib.com']
    start_urls = ['https://www.firstmeritib.com/AccountHistory.aspx?a=1']

    def __init__(self):
        self.driver = webdriver.Firefox()

    def parse(self, response):
        self.driver.get(response.url)
        # Obtain the fields I need to fill in
        username = WebDriverWait(self.driver, 10).until(
            lambda driver: self.driver.find_element_by_xpath('//*[@id="txtUsername"]'))
        login_button = WebDriverWait(self.driver, 10).until(
            lambda driver: self.driver.find_element_by_xpath('//*[@id="btnLogin"]'))
        # The actual interaction
        username.send_keys("username")
        login_button.click()
        # Logging in is split across two functions because the website asks
        # for the username first, then redirects to a password page where I
        # can finally enter my account (after inputting the password).
        yield Request(url=self.driver.current_url,
                      callback=self.password_handling,
                      meta={'dont_redirect': True,
                            'handle_httpstatus_list': [302],
                            'cookiejar': response})

    def password_handling(self, response):
        print("^^^^^^")
        print(response.url)
        password = WebDriverWait(self.driver, 10).until(
            lambda driver: self.driver.find_element_by_xpath('//*[@id="MainContent_txtPassword"]'))
        login_button2 = WebDriverWait(self.driver, 10).until(
            lambda driver: self.driver.find_element_by_xpath('//*[@id="MainContent_btnLogin"]'))
        password.send_keys("password")
        login_button2.click()
        print("*****")
        print(self.driver.current_url)
        print("*****")
        yield Request(url=self.driver.current_url,
                      callback=self.after_login,  # dont_filter=True,
                      meta={'dont_redirect': True,
                            'handle_httpstatus_list': [302],
                            'cookiejar': response.meta['cookiejar']})

    def after_login(self, response):
        print("***")
        print(response.url)
        print("***")
        print(response.body)
        if "Account Activity" in response.body:
            self.logger.error("Login failed")
            return
        else:
            print("you got through!")
            print()
The issue is that once I finally get to my account page, where all my spending is displayed, I can't actually access the HTML data. I've handled the 302 redirections, but the meta options seem to take me to the page through Selenium without letting me scrape it.
Instead of getting all the data from response.body in the after_login function, I get the following:
<html><head><title>Object moved</title></head><body>
<h2>Object moved to here.</h2>
</body></html>
How do I manage to actually get that information so I can scrape it?
Is this redirection put in place by the bank to protect accounts from being crawled?
Thank you!
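One common cause of that "Object moved" stub is that the Scrapy request does not carry the session cookies Selenium acquired during login (the cookiejar meta key also expects an identifier, not a response object). A sketch of handing Selenium's cookies to the follow-up request, untested against this particular bank:

from scrapy import Request

def password_handling(self, response):
    # ... the Selenium typing/clicking shown above, then: copy the
    # authenticated session cookies out of the Selenium driver and
    # attach them to the Scrapy request explicitly.
    cookies = {c['name']: c['value'] for c in self.driver.get_cookies()}
    yield Request(url=self.driver.current_url,
                  callback=self.after_login,
                  cookies=cookies,
                  dont_filter=True)

Alternatively, since Selenium is already sitting on the authenticated page, you can skip the second fetch entirely and parse self.driver.page_source instead of response.body.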
I'm new to Scrapy and Python, and I am trying to scrape data off the following start URL.
After login, this is my start URL:
start_urls = ["http://www.flightstats.com/go/HistoricalFlightStatus/flightStatusByFlight.do?"]
(a) From there I need to interact with the webpage to select "by airport", and then make the airport, date, and time-period selections. How can I do that? I would like to loop over all time periods and past dates.
I have used Firebug to see the source; I cannot show it here as I do not have enough points to post images.
I read a post mentioning the use of Splinter.
(b) After the selections, it leads me to a page with links to the eventual pages holding the information I want. How do I collect those links and make Scrapy look into every one to extract the information?
Using rules? Where should I insert the rules / LinkExtractor function?
I am willing to try this myself; I just hope for pointers to posts that can guide me. I am a student and have spent more than a week on this: I have done the Scrapy tutorial, the Python tutorial, read the Scrapy documentation, and searched previous posts on Stack Overflow, but I did not manage to find any that cover this.
A million thanks.
Here is my code so far to log in, and the items to scrape via XPath from the eventual target site:
import scrapy
from tutorial.items import FlightItem
from scrapy.http import FormRequest, Request

class flightSpider(scrapy.Spider):
    name = "flight"
    allowed_domains = ["flightstats.com"]
    login_page = 'https://www.flightstats.com/go/Login/login_input.do;jsessionid=0DD6083A334AADE3FD6923ACB8DDCAA2.web1:8009?'
    start_urls = [
        "http://www.flightstats.com/go/HistoricalFlightStatus/flightStatusByFlight.do?"]

    def init_request(self):
        # This function is called before crawling starts (note:
        # init_request/initialized are hooks from Scrapy's InitSpider).
        return Request(url=self.login_page, callback=self.login)

    def login(self, response):
        # Generate a login request.
        return FormRequest.from_response(
            response,
            formdata={'loginForm_email': 'marvxxxxxx@hotmail.com',
                      'password': 'xxxxxxxx'},
            callback=self.check_login_response)

    def check_login_response(self, response):
        # Check the response returned by the login request to see
        # whether we are successfully logged in.
        if "Sign Out" in response.body:
            self.log("\n\n\nSuccessfully logged in. Let's start crawling!\n\n\n")
            # Now the crawling can begin..
            return self.initialized()  # ****THIS LINE FIXED THE LAST PROBLEM*****
        else:
            self.log("\n\n\nFailed, Bad times :(\n\n\n")
            # Something went wrong; we couldn't log in, so nothing happens.

    def parse(self, response):
        for sel in response.xpath('/html/body/div[2]/div[2]/div'):
            # item class name fixed to match the import above
            item = FlightItem()
            item['flight_number'] = sel.xpath('./div[1]/div[1]/h2').extract()
            item['aircraft_make'] = sel.xpath('./div[4]/div[2]/div[2]/div[2]').extract()
            item['dep_date'] = sel.xpath('./div[2]/div[1]/div').extract()
            item['dep_airport'] = sel.xpath('./div[1]/div[2]/div[2]/div[1]').extract()
            item['arr_airport'] = sel.xpath('./div[1]/div[2]/div[2]/div[2]').extract()
            item['dep_gate_scheduled'] = sel.xpath('./div[2]/div[2]/div[1]/div[2]/div[2]').extract()
            item['dep_gate_actual'] = sel.xpath('./div[2]/div[2]/div[1]/div[3]/div[2]').extract()
            item['dep_runway_actual'] = sel.xpath('./div[2]/div[2]/div[2]/div[3]/div[2]').extract()
            item['dep_terminal'] = sel.xpath('./div[2]/div[2]/div[3]/div[2]/div[1]').extract()
            item['dep_gate'] = sel.xpath('./div[2]/div[2]/div[3]/div[2]/div[2]').extract()
            item['arr_gate_scheduled'] = sel.xpath('./div[3]/div[2]/div[1]/div[2]/div[2]').extract()
            item['arr_gate_actual'] = sel.xpath('./div[3]/div[2]/div[1]/div[3]/div[2]').extract()
            item['arr_terminal'] = sel.xpath('./div[3]/div[2]/div[3]/div[2]/div[1]').extract()
            item['arr_gate'] = sel.xpath('./div[3]/div[2]/div[3]/div[2]/div[2]').extract()
            yield item
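For (a) and (b), a sketch of the usual Scrapy pattern: loop over the dates and submit the search form once per combination with FormRequest.from_response, then follow each result link to a detail page. The form field names and the results XPath below are placeholders; read the real ones from the page source:

import datetime
import scrapy

# Methods to add to the spider class above:

def parse_search_page(self, response):
    # (a) Submit the by-airport search once per date; 'airport' and
    # 'date' are placeholder field names -- take the real ones from
    # the form in the page source.
    start = datetime.date(2015, 1, 1)
    for offset in range(30):
        day = start + datetime.timedelta(days=offset)
        yield scrapy.FormRequest.from_response(
            response,
            formdata={'airport': 'JFK',          # placeholder value
                      'date': day.isoformat()},  # placeholder format
            callback=self.parse_results)

def parse_results(self, response):
    # (b) Follow every link in the results list to a flight-detail page;
    # the XPath is a placeholder for the real results selector. Each
    # detail page is then handled by the existing parse() item XPaths.
    for href in response.xpath('//table//a/@href').extract():
        yield scrapy.Request(response.urljoin(href), callback=self.parse)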