Dataset columns:
content: string (lengths 85 to 101k)
title: string (lengths 0 to 150)
question: string (lengths 15 to 48k)
answers: sequence
answers_scores: sequence
non_answers: sequence
non_answers_scores: sequence
tags: sequence
name: string (lengths 35 to 137)
Q: Django many to many model get none I have been dealing with a project for a few days and today I encountered an error. I wanted to write it here as I have no idea how to solve it. My first model: from django.db import models # Create your models here. class Video(models.Model): title = models.CharField(max_length=100) video_slug = models.SlugField(unique=True) description = models.TextField(max_length=500) image = models.ImageField(upload_to='images/' , blank=True, null=True) video = models.FileField(upload_to='videos/', blank=True, null=True) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) category = models.ManyToManyField('categories.Category', blank=True) def __str__(self): return self.title def get_absolute_url(self): return "/video/" + self.video_slug class Meta: verbose_name_plural = "Videos" ordering = ['-created_at'] get_latest_by = 'created_at' secondary model: class Category(models.Model): id = models.AutoField(primary_key=True) name = models.CharField(max_length=100) category_slug = models.SlugField(max_length=100, unique=True) image = models.ImageField(upload_to='category', blank=True, null=True) description = models.TextField(blank=True) created = models.DateTimeField(auto_now_add=True) updated = models.DateTimeField(auto_now=True) def __str__(self): return self.name def get_absolute_url(self): return "/category/" + self.category_slug class Meta: verbose_name_plural = "categories" ordering = ['name'] and view.py ... class VideoListView(ListAPIView): queryset = Video.objects.all() serializer_class = VideoSerializer When i send get request i got this [ { ... "category": "categories.Category.None", ... } ] What should i do? Please help me. A: Im assuming your mistake is you are using an empty Serializer and not including the queryset instance. So something like this will fix your issue: queryset = Video.objects.all() serializer_class = VideoSerializer(queryset, many=True)
Django many to many model get none
I have been dealing with a project for a few days and today I encountered an error. I wanted to write it here as I have no idea how to solve it. My first model: from django.db import models # Create your models here. class Video(models.Model): title = models.CharField(max_length=100) video_slug = models.SlugField(unique=True) description = models.TextField(max_length=500) image = models.ImageField(upload_to='images/' , blank=True, null=True) video = models.FileField(upload_to='videos/', blank=True, null=True) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) category = models.ManyToManyField('categories.Category', blank=True) def __str__(self): return self.title def get_absolute_url(self): return "/video/" + self.video_slug class Meta: verbose_name_plural = "Videos" ordering = ['-created_at'] get_latest_by = 'created_at' secondary model: class Category(models.Model): id = models.AutoField(primary_key=True) name = models.CharField(max_length=100) category_slug = models.SlugField(max_length=100, unique=True) image = models.ImageField(upload_to='category', blank=True, null=True) description = models.TextField(blank=True) created = models.DateTimeField(auto_now_add=True) updated = models.DateTimeField(auto_now=True) def __str__(self): return self.name def get_absolute_url(self): return "/category/" + self.category_slug class Meta: verbose_name_plural = "categories" ordering = ['name'] and view.py ... class VideoListView(ListAPIView): queryset = Video.objects.all() serializer_class = VideoSerializer When i send get request i got this [ { ... "category": "categories.Category.None", ... } ] What should i do? Please help me.
[ "Im assuming your mistake is you are using an empty Serializer and not including the queryset instance. So something like this will fix your issue:\nqueryset = Video.objects.all()\nserializer_class = VideoSerializer(queryset, many=True)\n\n" ]
[ 2 ]
[]
[]
[ "django", "django_models", "django_rest_framework", "orm", "python" ]
stackoverflow_0074671896_django_django_models_django_rest_framework_orm_python.txt
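The question above never shows VideoSerializer, so the exact cause can only be guessed at; "categories.Category.None" is typically what appears when the related manager is rendered as text instead of being serialized as a relation. Below is a minimal sketch of a serializer that renders the many-to-many field, assuming Django REST Framework; the field list and the StringRelatedField choice are illustrative assumptions, not the asker's actual code:

# serializers.py -- illustrative sketch for the Video/Category models above
from rest_framework import serializers
from .models import Video

class VideoSerializer(serializers.ModelSerializer):
    # Render each related Category through its __str__ (the category name)
    # instead of serializing the related manager itself.
    category = serializers.StringRelatedField(many=True)

    class Meta:
        model = Video
        fields = ["id", "title", "video_slug", "description", "category"]

With this in place the view can keep serializer_class = VideoSerializer (the class itself, not an instance); ListAPIView instantiates it with many=True on its own.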
Q: Is there any way to skip the phone number request in Telethon? Every time my session is invalid I get this output: "Please enter your phone (or bot token): " Is there any way to bypass or ignore it? I tried to edit the Telethon source code, but I'm not good at it. A: So, for everyone who has the same problem: client = TelegramClient(f"session", api_id, api_hash) client.connect() print(client) if client.is_user_authorized(): print("VALID") else: print('NO VALID') Everything was much easier than I thought.
Is there any way to skip the phone number request in Telethon?
Every time if my session is invalid i got this output: "Please enter your phone (or bot token): " Is there anyway to pass it or ignore it? I tried to edit telethon source code, but im bad at it))
[ "So, for everyone who have the same problem\nclient = TelegramClient(f\"session\", api_id, api_hash)\nclient.connect()\nprint(client)\nif client.is_user_authorized():\n print(\"VALID\")\nelse:\n print('NO VALID')\n\nEverything was much easier, than i thought\n" ]
[ 0 ]
[ "Telethon project implements Telegram API and there is no way to skip it since it's required by protocol.\nAs an option I can advice you to check if authenticating as a bot instead of a normal user can solve your problem depending on your task.\n" ]
[ -1 ]
[ "python", "telethon" ]
stackoverflow_0074671835_python_telethon.txt
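A self-contained sketch of the approach in the answer above, assuming Telethon's synchronous wrapper; api_id and api_hash are placeholders. The point is that connect() opens the connection without prompting, whereas start() is what asks for the phone number:

# Sketch: validate an existing session file without the interactive prompt.
from telethon.sync import TelegramClient

api_id = 123456                 # placeholder -- use your own credentials
api_hash = "0123456789abcdef"   # placeholder

client = TelegramClient("session", api_id, api_hash)
client.connect()                 # connects only; client.start() is what prompts
if client.is_user_authorized():
    print("VALID")
else:
    print("NO VALID")            # session missing or revoked -- log in again
client.disconnect()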
Q: I'm doing some basic logic function + decimal rounding stuff, and this doesn't seem to work...why? temperature=int(input("What temperature are you?")) if temperature>=37 and temperature<50: print("your temperature is healthy, as it is" , "%.2f" %temperature) else: print("You said your temperature was" , "%.2f" %temperature , "You are unhealthy") #why is this not working??? When I input the temperature as a whole number, it's fine. When I type in 37.888888, the terminal spits out an error. I tried adjusting it and it didn't work. A: temperature=int(input("What temperature are you?")) When you pass a string to int(), it must be a whole number. Decimals like 37.8 aren't allowed. I think you could just use float() instead of int().
I'm doing some basic logic function + decimal rounding stuff, and this doesn't seem to work...why?
temperature=int(input("What temperature are you?")) if temperature>=37 and temperature<50: print("your temperature is healthy, as it is" , "%.2f" %temperature) else: print("You said your temperature was" , "%.2f" %temperature , "You are unhealthy") #why is this not working??? when I input the temperature at a whole number, it's fine. When I type in 37.888888, the terminal spits out error. I tried adjusting it and it didn't work
[ "temperature=int(input(\"What temperature are you?\"))\n\nWhen you pass a string to int(), it must be a whole number. Decimals like 37.8 aren't allowed.\nI think you could just use float() instead of int().\n" ]
[ 0 ]
[]
[]
[ "decimal", "logic", "python", "variables" ]
stackoverflow_0074672016_decimal_logic_python_variables.txt
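A runnable version of the suggested fix, keeping the original messages; the only change is parsing the input with float() so decimal values are accepted:

# Sketch: float() accepts both whole numbers and decimals such as 37.888888,
# whereas int("37.888888") raises ValueError.
temperature = float(input("What temperature are you? "))
if 37 <= temperature < 50:
    print("your temperature is healthy, as it is", "%.2f" % temperature)
else:
    print("You said your temperature was", "%.2f" % temperature, "You are unhealthy")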
Q: tensorflow MDA custom loss and ValueError: No gradients provided for any variable I would like to use the MDA (mean direction accuracy) as a custom loss function for a tensorflow neural network. I am trying to implement this as described in here: Custom Mean Directional Accuracy loss function in Keras def mda(y_true, y_pred): s = K.equal(K.sign(y_true[1:] - y_true[:-1]), K.sign(y_pred[1:] - y_pred[:-1])) return K.mean(K.cast(s, K.floatx())) The network works fine but when I try to fit my data I am getting this error: ValueError: No gradients provided for any variable I think that this is because I am loosing the gradient info from my pred tensor but I don't know how can implement this.... or if this makes any sense at all.... Finally I want to predict is if some numeric series is going up or down, that is why this function made sense to me. A: It looks like the error you're seeing is because the mda() function you've defined doesn't have any differentiable operations. Because of this, TensorFlow doesn't know how to compute the gradients of the function, and it's unable to optimize the weights of your neural network using backpropagation. To fix this, you'll need to make sure that your mda() function uses only differentiable operations. This will allow TensorFlow to compute the gradients of the function and use them to optimize the weights of your network. One way to do this would be to use the tf.math.sign() function instead of K.sign(), and the tf.math.reduce_mean() function instead of K.mean() in your mda() function. Both of these functions are differentiable, so TensorFlow will be able to compute the gradients of your mda() function and use them to optimize the weights of your network. Here's an example of how you could modify your mda() function to use differentiable operations: import tensorflow as tf def mda(y_true, y_pred): s = tf.equal(tf.math.sign(y_true[1:] - y_true[:-1]), tf.math.sign(y_pred[1:] - y_pred[:-1])) return tf.math.reduce_mean(tf.cast(s, tf.float32)) This should allow you to use the mda() function as a custom loss function for your TensorFlow neural network. A: The problem is that with K.equal and K.cast, you change numbers into bools. As a result, no gradient can be calculated. You could replace them with a calculation; using the fact that when two numbers are equal, their difference is zero, and that since sign returns only [-1, 0, 1], the absolute difference can only be 0, 1 or 2: def mda(y_true, y_pred): d = K.abs(K.sign(y_true[1:] - y_true[:-1]) - (K.sign(y_pred[1:] - y_pred[:-1]))) s = (1. - d) * (d - 1.) * (d - 2.) / 2. return K.mean(s) s is equal 1 when your K.equal is true, and 0 otherwise A: Thanks Reda and AndrzeO for your answers my question. As AndrzejO mention, equals transform the data to boolean so no gradient there. I implemented this other solution as an alternative to AndrzejO solution: def mda_custom_loss(y_true, y_pred): res = tf.math.sign(y_true[1:] - y_true[:-1]) - tf.math.sign(y_pred[1:] - y_pred[:-1]) s = tf.math.abs(tf.math.sign(res)) return 1 - tf.math.reduce_mean(tf.math.sign(s))
tensorflow MDA custom loss and ValueError: No gradients provided for any variable
I would like to use the MDA (mean direction accuracy) as a custom loss function for a tensorflow neural network. I am trying to implement this as described in here: Custom Mean Directional Accuracy loss function in Keras def mda(y_true, y_pred): s = K.equal(K.sign(y_true[1:] - y_true[:-1]), K.sign(y_pred[1:] - y_pred[:-1])) return K.mean(K.cast(s, K.floatx())) The network works fine but when I try to fit my data I am getting this error: ValueError: No gradients provided for any variable I think that this is because I am loosing the gradient info from my pred tensor but I don't know how can implement this.... or if this makes any sense at all.... Finally I want to predict is if some numeric series is going up or down, that is why this function made sense to me.
[ "It looks like the error you're seeing is because the mda() function you've defined doesn't have any differentiable operations. Because of this, TensorFlow doesn't know how to compute the gradients of the function, and it's unable to optimize the weights of your neural network using backpropagation.\nTo fix this, you'll need to make sure that your mda() function uses only differentiable operations. This will allow TensorFlow to compute the gradients of the function and use them to optimize the weights of your network.\nOne way to do this would be to use the tf.math.sign() function instead of K.sign(), and the tf.math.reduce_mean() function instead of K.mean() in your mda() function. Both of these functions are differentiable, so TensorFlow will be able to compute the gradients of your mda() function and use them to optimize the weights of your network.\nHere's an example of how you could modify your mda() function to use differentiable operations:\nimport tensorflow as tf\n\ndef mda(y_true, y_pred):\n s = tf.equal(tf.math.sign(y_true[1:] - y_true[:-1]),\n tf.math.sign(y_pred[1:] - y_pred[:-1]))\n return tf.math.reduce_mean(tf.cast(s, tf.float32))\n\n\nThis should allow you to use the mda() function as a custom loss function for your TensorFlow neural network.\n", "The problem is that with K.equal and K.cast, you change numbers into bools. As a result, no gradient can be calculated.\nYou could replace them with a calculation; using the fact that when two numbers are equal, their difference is zero, and that since sign returns only [-1, 0, 1], the absolute difference can only be 0, 1 or 2:\ndef mda(y_true, y_pred):\n d = K.abs(K.sign(y_true[1:] - y_true[:-1]) - (K.sign(y_pred[1:] - y_pred[:-1])))\n s = (1. - d) * (d - 1.) * (d - 2.) / 2.\nreturn K.mean(s)\n\ns is equal 1 when your K.equal is true, and 0 otherwise\n", "Thanks Reda and AndrzeO for your answers my question. As AndrzejO mention, equals transform the data to boolean so no gradient there.\nI implemented this other solution as an alternative to AndrzejO solution:\ndef mda_custom_loss(y_true, y_pred):\n res = tf.math.sign(y_true[1:] - y_true[:-1]) - tf.math.sign(y_pred[1:] - y_pred[:-1])\n s = tf.math.abs(tf.math.sign(res))\n return 1 - tf.math.reduce_mean(tf.math.sign(s))\n\n" ]
[ 1, 1, 0 ]
[]
[]
[ "keras", "loss_function", "python", "tensorflow" ]
stackoverflow_0074671602_keras_loss_function_python_tensorflow.txt
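For completeness, a sketch of how any of the loss functions above is wired into training; the tiny model is a placeholder, and only the loss=... argument matters here. Whether the sign-based loss yields useful gradients in practice is a separate question, as the answers discuss.

# Usage sketch: plugging the custom MDA-style loss into model.compile().
import tensorflow as tf

def mda_custom_loss(y_true, y_pred):
    res = tf.math.sign(y_true[1:] - y_true[:-1]) - tf.math.sign(y_pred[1:] - y_pred[:-1])
    s = tf.math.abs(tf.math.sign(res))
    return 1 - tf.math.reduce_mean(tf.math.sign(s))

# Placeholder model: 10 input features, one regression output.
model = tf.keras.Sequential([
    tf.keras.Input(shape=(10,)),
    tf.keras.layers.Dense(32, activation="relu"),
    tf.keras.layers.Dense(1),
])
model.compile(optimizer="adam", loss=mda_custom_loss)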
Q: Cannot get data from request.get_json(force=True) import requests import numpy as np import json from flask import Flask, request, jsonify url = 'http://localhost:5000/api' dat = np.genfromtxt('/home/panos/Manti_Milk/BigData/50_0_50_3.5_3-3.dat') d1 = dat[:,0] data = {"w0": d1[0], "w1": d1[1], "w2": d1[2], "w3": d1[3], "w4": d1[4], "w5": d1[5], "w6": d1[6], "w7": d1[7], "w8": d1[8], "w9": d1[9], "w10": d1[10], "w11": d1[11], "w12": d1[12], "w13": d1[13], "w14": d1[14], "w15": d1[15]} jsondata = json.dumps(data, indent=4) r = request.post(url, json = jsondata) @app.route('/api',methods=['POST','GET']) def predict(): jsondata = request.get_json(force=True) dummy = json.loads(jsondata) arr = np.fromiter(dummy.values(), dtype=float).reshape(16,1) return {"data": arr} if __name__ == '__main__': app.run(port=5000, debug=True) It returns Bad Request Failed to decode JSON object: Expecting value: line 1 column 1 (char 0) When setting force=False, returns "Null" Any Help? I have read several questoins/answers where it should work. But this is not the case! A: You do not need to decode json data after request.get_json(), it is already the Python dict. So the line dummy = json.loads(jsondata) is unnecessary. @app.route('/api',methods=['POST','GET']) def predict(): jsondata = request.get_json(force=True) arr = np.fromiter(jsondata.values(), dtype=float) EDIT: First file - server.py: import numpy as np from flask import Flask, request app = Flask(__name__) @app.route('/api', methods=['POST', 'GET']) def predict(): jsondata = request.get_json(force=True) arr = np.fromiter(jsondata.values(), dtype=float).reshape(16, 1) print(arr) # do whatever you want here return "1" app.run(debug=True) Second file - client.py: import requests import numpy as np dat = np.genfromtxt('data.dat') d1 = dat[:, 0] data = {"w0": d1[0], "w1": d1[1], "w2": d1[2], "w3": d1[3], "w4": d1[4], "w5": d1[5], "w6": d1[6], "w7": d1[7], "w8": d1[8], "w9": d1[9], "w10": d1[10], "w11": d1[11], "w12": d1[12], "w13": d1[13], "w14": d1[14], "w15": d1[15]} requests.post("http://127.0.0.1:5000/api", json=data) Then execute them separately (from different console tabs): At first, start the server in [1] tab: $ python server.py * Running on http://127.0.0.1:5000 Press CTRL+C to quit * Restarting with stat * Debugger is active! And after that run request from client in another [2] tab: $ python client.py Then you will see desired output in server tab [1]: [[2297.] [1376.] [1967.] [2414.] [2012.] [2348.] [2293.] [1800.] [2011.] [2340.] [1949.] [2015.] [2338.] [1866.] [1461.] [2158.]]
Cannot get data from request.get_json(force=True)
import requests import numpy as np import json from flask import Flask, request, jsonify url = 'http://localhost:5000/api' dat = np.genfromtxt('/home/panos/Manti_Milk/BigData/50_0_50_3.5_3-3.dat') d1 = dat[:,0] data = {"w0": d1[0], "w1": d1[1], "w2": d1[2], "w3": d1[3], "w4": d1[4], "w5": d1[5], "w6": d1[6], "w7": d1[7], "w8": d1[8], "w9": d1[9], "w10": d1[10], "w11": d1[11], "w12": d1[12], "w13": d1[13], "w14": d1[14], "w15": d1[15]} jsondata = json.dumps(data, indent=4) r = request.post(url, json = jsondata) @app.route('/api',methods=['POST','GET']) def predict(): jsondata = request.get_json(force=True) dummy = json.loads(jsondata) arr = np.fromiter(dummy.values(), dtype=float).reshape(16,1) return {"data": arr} if __name__ == '__main__': app.run(port=5000, debug=True) It returns Bad Request Failed to decode JSON object: Expecting value: line 1 column 1 (char 0) When setting force=False, returns "Null" Any Help? I have read several questoins/answers where it should work. But this is not the case!
[ "You do not need to decode json data after request.get_json(), it is already the Python dict. So the line dummy = json.loads(jsondata) is unnecessary.\[email protected]('/api',methods=['POST','GET'])\ndef predict():\n jsondata = request.get_json(force=True)\n arr = np.fromiter(jsondata.values(), dtype=float)\n\nEDIT:\nFirst file - server.py:\nimport numpy as np\nfrom flask import Flask, request\n\n\napp = Flask(__name__)\n\[email protected]('/api', methods=['POST', 'GET'])\ndef predict():\n jsondata = request.get_json(force=True)\n arr = np.fromiter(jsondata.values(), dtype=float).reshape(16, 1)\n print(arr) # do whatever you want here\n return \"1\"\n\n\napp.run(debug=True)\n\nSecond file - client.py:\nimport requests\nimport numpy as np\n\ndat = np.genfromtxt('data.dat')\nd1 = dat[:, 0]\n\ndata = {\"w0\": d1[0], \"w1\": d1[1], \"w2\": d1[2], \"w3\": d1[3], \"w4\": d1[4],\n \"w5\": d1[5], \"w6\": d1[6], \"w7\": d1[7], \"w8\": d1[8], \"w9\": d1[9], \"w10\": d1[10],\n \"w11\": d1[11], \"w12\": d1[12], \"w13\": d1[13], \"w14\": d1[14], \"w15\": d1[15]}\n\nrequests.post(\"http://127.0.0.1:5000/api\", json=data)\n\nThen execute them separately (from different console tabs):\nAt first, start the server in [1] tab:\n$ python server.py\n\n * Running on http://127.0.0.1:5000\nPress CTRL+C to quit\n * Restarting with stat\n * Debugger is active!\n\nAnd after that run request from client in another [2] tab:\n$ python client.py\n\nThen you will see desired output in server tab [1]:\n[[2297.]\n [1376.]\n [1967.]\n [2414.]\n [2012.]\n [2348.]\n [2293.]\n [1800.]\n [2011.]\n [2340.]\n [1949.]\n [2015.]\n [2338.]\n [1866.]\n [1461.]\n [2158.]]\n\n" ]
[ 0 ]
[]
[]
[ "flask", "python" ]
stackoverflow_0074670424_flask_python.txt
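One detail worth making explicit from the question above: the original client calls json.dumps() and then passes the result to requests' json= argument, which serializes the payload a second time, so the server ends up with a JSON-encoded string rather than an object. Passing the dict directly, as the answer's client.py does, avoids that; a minimal client sketch with placeholder values:

# Client sketch: let requests serialize the dict exactly once.
import requests

data = {"w0": 2297.0, "w1": 1376.0}   # placeholder subset of the 16 values
r = requests.post("http://localhost:5000/api", json=data)   # NOT json=json.dumps(data)
print(r.status_code)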
Q: How can I get my python code to be more efficient? I've been struggling for the past couple days with getting my python code to be more efficient, while also getting the run time to be with in the given specifications of the problem below ( 3 seconds, for any given input). Was told that linear time may help, but was hoping I can get some help on how I'd approach it with my existing code here, would really appreciate the help. This is the given problem: *Dan has a list of problems suitable for Assignment 4. The difficulties of these problems are stored in a list of integers a. The i-th problem’s difficulty is represented by a[i] (the higher the integer, the more difficult the problem). Dan is too busy eating saltines to worry about Assignment 4 decisions, so he asks Michael the TA to select at least two problems from the list for the assignment. Since there are many possible subsets of the problems to consider and Michael has a life, he decides to consider only sublists (definition follows) of the list of problems. To make grading the assignment easier, Michael wants to pick problems that don’t vary too much in difficulty. What is the smallest difference between the difficulties of the most difficult selected problem and the least difficult selected problem he can achieve by selecting a sublist of length at least 2 of the original list of problems? Definition: A sublist of a list a is any list you can obtain by removing some (possibly 0) elements from the start of a and then removing some (possibly 0) elements from the end of it. (It’s like the definition of segment from lecture.) .* Input The input consists of a single line containing the integers in the list a, separated by single spaces. Output Print a single integer indicating the smallest difference in difficulties Michael can achieve. Constraints 2 <= len(a) <= 500000 1 <= a[i] <= 10**9 Time Limit: Your program must finish running on any valid input within 3 seconds Sample Input 1 10 6 9 1 Sample Output 1 3 My code: import time # import time module arr = list(map(int, input().split(" "))) st = time.time() diff = 10**9 for i in range(len(arr)-1): max_ele = min_ele = arr[i] for j in range(i+1, len(arr)): max_ele = max(max_ele, arr[j]) min_ele = min(min_ele, arr[j]) if max_ele - min_ele <= diff: diff = max_ele - min_ele print(diff) # end = time.time() - st #print(end) ``` ` A: It looks like your current approach is to iterate through all possible pairs of elements in the list and calculate the difference between the maximum and minimum element in each pair. This approach will take quadratic time, which might not efficient enough to solve this problem within the given time constraints. To make your code more efficient, you can sort the list and then iterate through the sorted list to find the smallest difference between the maximum and minimum element in any sublist of length at least 2. Since the list is sorted, you can simply keep track of the minimum and maximum element you have seen so far, and then update the minimum and maximum as you iterate through the list. This approach will take linear time, which should be fast enough to solve this problem within the time constraints. 
Here is an example of how you can implement this approach: import time # import time module arr = list(map(int, input().split(" "))) # sort the list in ascending order arr.sort() # initialize the minimum and maximum elements we have seen so far min_ele = arr[0] max_ele = arr[1] # initialize the smallest difference between the maximum and minimum element # in any sublist of length at least 2 diff = max_ele - min_ele # iterate through the list, starting from the second element for i in range(1, len(arr)): # update the minimum and maximum elements min_ele = min(min_ele, arr[i]) max_ele = max(max_ele, arr[i]) # update the smallest difference diff = min(diff, max_ele - min_ele) # print the smallest difference print(diff) I hope this helps! Let me know if you have any other questions. A: You can think of the sublists or segments as sliding windows over the array. Because the minimum length is two, the question is equivalent to asking for the minimum difference in a sublist of exactly two, ie consecutive elements. In python you can do this with a list, or better generator, comprehension diff=min((abs(a-b) for a,b in zip(arr[:-1],arr[1:]))) As a loop diff=10**9 for a,b in zip(arr[:-1],arr[1:]): diff=min(diff,abs(a-b)) return diff
How can I get my python code to be more efficient?
I've been struggling for the past couple days with getting my python code to be more efficient, while also getting the run time to be with in the given specifications of the problem below ( 3 seconds, for any given input). Was told that linear time may help, but was hoping I can get some help on how I'd approach it with my existing code here, would really appreciate the help. This is the given problem: *Dan has a list of problems suitable for Assignment 4. The difficulties of these problems are stored in a list of integers a. The i-th problem’s difficulty is represented by a[i] (the higher the integer, the more difficult the problem). Dan is too busy eating saltines to worry about Assignment 4 decisions, so he asks Michael the TA to select at least two problems from the list for the assignment. Since there are many possible subsets of the problems to consider and Michael has a life, he decides to consider only sublists (definition follows) of the list of problems. To make grading the assignment easier, Michael wants to pick problems that don’t vary too much in difficulty. What is the smallest difference between the difficulties of the most difficult selected problem and the least difficult selected problem he can achieve by selecting a sublist of length at least 2 of the original list of problems? Definition: A sublist of a list a is any list you can obtain by removing some (possibly 0) elements from the start of a and then removing some (possibly 0) elements from the end of it. (It’s like the definition of segment from lecture.) .* Input The input consists of a single line containing the integers in the list a, separated by single spaces. Output Print a single integer indicating the smallest difference in difficulties Michael can achieve. Constraints 2 <= len(a) <= 500000 1 <= a[i] <= 10**9 Time Limit: Your program must finish running on any valid input within 3 seconds Sample Input 1 10 6 9 1 Sample Output 1 3 My code: import time # import time module arr = list(map(int, input().split(" "))) st = time.time() diff = 10**9 for i in range(len(arr)-1): max_ele = min_ele = arr[i] for j in range(i+1, len(arr)): max_ele = max(max_ele, arr[j]) min_ele = min(min_ele, arr[j]) if max_ele - min_ele <= diff: diff = max_ele - min_ele print(diff) # end = time.time() - st #print(end) ``` `
[ "It looks like your current approach is to iterate through all possible pairs of elements in the list and calculate the difference between the maximum and minimum element in each pair. This approach will take quadratic time, which might not efficient enough to solve this problem within the given time constraints.\nTo make your code more efficient, you can sort the list and then iterate through the sorted list to find the smallest difference between the maximum and minimum element in any sublist of length at least 2. Since the list is sorted, you can simply keep track of the minimum and maximum element you have seen so far, and then update the minimum and maximum as you iterate through the list. This approach will take linear time, which should be fast enough to solve this problem within the time constraints.\nHere is an example of how you can implement this approach:\nimport time # import time module\narr = list(map(int, input().split(\" \")))\n\n# sort the list in ascending order\narr.sort()\n\n# initialize the minimum and maximum elements we have seen so far\nmin_ele = arr[0]\nmax_ele = arr[1]\n\n# initialize the smallest difference between the maximum and minimum element\n# in any sublist of length at least 2\ndiff = max_ele - min_ele\n\n# iterate through the list, starting from the second element\nfor i in range(1, len(arr)):\n # update the minimum and maximum elements\n min_ele = min(min_ele, arr[i])\n max_ele = max(max_ele, arr[i])\n\n # update the smallest difference\n diff = min(diff, max_ele - min_ele)\n\n# print the smallest difference\nprint(diff)\n\nI hope this helps! Let me know if you have any other questions.\n", "You can think of the sublists or segments as sliding windows over the array. Because the minimum length is two, the question is equivalent to asking for the minimum difference in a sublist of exactly two, ie consecutive elements. In python you can do this with a list, or better generator, comprehension\ndiff=min((abs(a-b) for a,b in zip(arr[:-1],arr[1:])))\n\nAs a loop\ndiff=10**9\nfor a,b in zip(arr[:-1],arr[1:]):\n diff=min(diff,abs(a-b))\nreturn diff\n\n" ]
[ 1, 0 ]
[]
[]
[ "processing_efficiency", "python" ]
stackoverflow_0074671865_processing_efficiency_python.txt
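A complete linear-time version of the second answer's idea: every sublist of length at least 2 contains an adjacent pair, and a pair of adjacent elements is itself a valid sublist, so the answer is simply the smallest adjacent difference:

# Sketch: one pass over adjacent pairs; runs comfortably within 3 s
# for len(arr) up to 500000.
arr = list(map(int, input().split()))
print(min(abs(a - b) for a, b in zip(arr, arr[1:])))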
Q: Question on exponential function and random variable I am trying to understand the following code. Could someone explain what each step essentially means? (especially the 1st, 2nd and 4th line of code) X = stats.expon(scale=10) xs = X.rvs(100000) plt.figure(figsize=(10, 4)) plt.hist(xs, bins=100, color="navy") plt.xlim(0, 80); This was a sample code from a data science course and I am trying to understand the syntax. A: This code is using the expon() function from the stats module in the Python library scipy to generate random samples from an exponential distribution with a scale parameter of 10. The expon() function returns an object representing the exponential distribution, which can then be used to generate random samples using the rvs() method. The first line of code, X = stats.expon(scale=10), creates an exponential distribution object with a scale parameter of 10 and assigns it to the variable X. The second line, xs = X.rvs(100000), generates 100000 random samples from the exponential distribution represented by X and assigns them to the variable xs. The last two lines use the plt.hist() and plt.xlim() functions from the matplotlib library to create a histogram of the generated samples and set the x-axis limits to 0 and 80, respectively. These lines create a figure with a size of 10 x 4 inches and plot the samples from the xs variable in a histogram with 100 bins. You should see something like this (representing exponential distribution)
Question on exponential function and random variable
I am trying to understand the following code. Could someone explain what each step essentially means? (especially the 1st, 2nd and 4th line of code) X = stats.expon(scale=10) xs = X.rvs(100000) plt.figure(figsize=(10, 4)) plt.hist(xs, bins=100, color="navy") plt.xlim(0, 80); This was a sample code from a data science course and I am trying to understand the syntax.
[ "This code is using the expon() function from the stats module in the Python library scipy to generate random samples from an exponential distribution with a scale parameter of 10. The expon() function returns an object representing the exponential distribution, which can then be used to generate random samples using the rvs() method.\nThe first line of code, X = stats.expon(scale=10), creates an exponential distribution object with a scale parameter of 10 and assigns it to the variable X.\nThe second line, xs = X.rvs(100000), generates 100000 random samples from the exponential distribution represented by X and assigns them to the variable xs.\nThe last two lines use the plt.hist() and plt.xlim() functions from the matplotlib library to create a histogram of the generated samples and set the x-axis limits to 0 and 80, respectively. These lines create a figure with a size of 10 x 4 inches and plot the samples from the xs variable in a histogram with 100 bins.\nYou should see something like this (representing exponential distribution)\n\n" ]
[ 2 ]
[]
[]
[ "exponential", "matplotlib", "python" ]
stackoverflow_0074672077_exponential_matplotlib_python.txt
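For reference, the snippet in the question runs as-is only after adding the two imports the course material leaves implicit; a self-contained version, assuming scipy and matplotlib are installed:

from scipy import stats
import matplotlib.pyplot as plt

X = stats.expon(scale=10)       # exponential distribution object, mean 10
xs = X.rvs(100000)              # draw 100000 random samples
plt.figure(figsize=(10, 4))     # 10 x 4 inch figure
plt.hist(xs, bins=100, color="navy")
plt.xlim(0, 80)
plt.show()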
Q: Return dataframes containing unique column pairs in Pandas? I am trying to use pandas to select rows based on unique column pairs. For example with the dataframe below read out of a csv: col1 col2 col3 0 1 10 [a, b, c, d] 1 1 10 [e, f, g, h] 2 2 11 [a, b, c, d] 3 3 12 [i, j, k, l] 4 3 12 [e, f, g, h] 5 5 14 [a, b, c, d] 6 3 10 [m, n, o, p] This will give me the unique pairs out of col1, col2 df_unique = df['col1', 'col2'].drop_duplicates() However, I am not sure how to use each row in df_unique to return a dataframe containing rows that match. I believe that I could use merge here, but uncertain about the method to use to go about it. df.merge(df_unique, on=['col1', 'col2'], how='left') Something like below, but a for loop seems like an inefficient way to do this: for ['col1','col2'] in df_unique: df_dict['col1, 'col2'] = df.merge(some_subframe, on=['col1', 'col2'], how='left') Resulting in dataframes like so: df_uniq_list[(1,10)] col1 col2 col3 0 1 10 [a, b, c, d] 1 1 10 [e, f, g, h] df_uniq_list[(2,11)] col1 col2 col3 2 2 11 [a, b, c, d] df_uniq_list[(3,12)] col1 col2 col3 3 3 12 [i, j, k, l] 4 3 12 [e, f, g, h] A: You could try with df_uniq_list = dict([*df.groupby(['col1','col2'])]) df_uniq_list[(1,10)] col1 col2 col3 0 1 10 [a, b, c, d] 1 1 10 [e, f, g, h]
Return dataframes containing unique column pairs in Pandas?
I am trying to use pandas to select rows based on unique column pairs. For example with the dataframe below read of of an csv: col1 col2 col3 0 1 10 [a, b, c, d] 1 1 10 [e, f, g, h] 2 2 11 [a, b, c, d] 3 3 12 [i, j, k, l] 4 3 12 [e, f, g, h] 5 5 14 [a, b, c, d] 6 3 10 [m, n, o, p] This will give me the unique pairs out of col1, col2 df_unique = df['col1', 'col2'].drop_duplicates() However, I am not sure about how to use each row in df_unique to return a dataframe containing rows that match. I believe that I could use merge here, but uncertain about the method to use to go about it. df.merge(df_unique, on=['col1', 'col2'], how='left') Something like below but an for loop seems like an inefficient way to do this: for ['col1','col2'] in df_unique: df_dict['col1, 'col2'] = df.merge(some_subframe, on=['col1', 'col2'], how='left') Resulting in dataframes like so: df_uniq_list[(1,10)] col1 col2 col3 0 1 10 [a, b, c, d] 1 1 10 [e, f, g, h] df_uniq_list[(2,11)] col1 col2 col3 2 2 11 [a, b, c, d] df_uniq_list[(3,12)] col1 col2 col3 3 3 12 [i, j, k, l] 4 3 12 [e, f, g, h]
[ "You could try with\ndf_uniq_list = dict([*df.groupby(['col1','col2'])])\ndf_uniq_list[(1,10)]\n col1 col2 col3 \n0 1 10 [a, b, c, d]\n1 1 10 [e, f, g, h]\n\n" ]
[ 0 ]
[]
[]
[ "pandas", "python", "set_comprehension" ]
stackoverflow_0074672035_pandas_python_set_comprehension.txt
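A self-contained sketch of the groupby answer using the question's sample data; groupby(['col1', 'col2']) yields one (key, sub-DataFrame) pair per unique column pair, so the drop_duplicates/merge loop is unnecessary:

import pandas as pd

df = pd.DataFrame({
    "col1": [1, 1, 2, 3, 3, 5, 3],
    "col2": [10, 10, 11, 12, 12, 14, 10],
    "col3": [list("abcd"), list("efgh"), list("abcd"),
             list("ijkl"), list("efgh"), list("abcd"), list("mnop")],
})

# One sub-DataFrame per unique (col1, col2) pair, keyed by the pair itself.
df_uniq_list = {key: group for key, group in df.groupby(["col1", "col2"])}
print(df_uniq_list[(1, 10)])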
Q: Use selenium to pull the link from an href which is an attribute of a class element that has a random class name? Here is the element. Be aware I slimmed it down, there is much more in the </div>: <a class="123abc456def" download="" href="https://www.downloadme.com/1jk43jkls.txt role="menuitem" tabindex="-1"><div></div></a> The class name is a random string of characters so I can't use that as an identifier. I want to grab the href link. How can I do that with Selenium in Python? I tried the following but they did not work: link_elm = driver.find_element(By.CSS_SELECTOR, "//download[@href]") link_elm = driver.find_element(By.XPATH, "//a[@href]")
Use selenium to pull the link from an href which is an attribute of a class element that has a random class name?
Here is the element. Be aware I slimmed it down, there is much more in the </div>: <a class="123abc456def" download="" href="https://www.downloadme.com/1jk43jkls.txt role="menuitem" tabindex="-1"><div></div></a> The class name is a random string of characters so I cant use that as an identifier. I want to grab the href link. How can I do that with selenium in python? I tried the following but they did not work: link_elm = driver.find_element(By.CSS_SELECTOR, "//download[@href]") link_elm = driver.find_element(By.XPATH, "//a[@href]")
[ "Figured it out. I used the xpath, searched for an a class element, and then searched for if it contained part of the url. Then I pulled the href link with get_attribute\nlink = driver.find_element(By.XPATH,\"//a[@class and contains(@href, 'downloadme')]\").get_attribute(\"href\")\n\n" ]
[ 0 ]
[]
[]
[ "class", "element", "href", "python", "selenium" ]
stackoverflow_0074672012_class_element_href_python_selenium.txt
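The same locator with the setup it assumes spelled out; the Chrome driver and the page URL below are placeholders:

from selenium import webdriver
from selenium.webdriver.common.by import By

driver = webdriver.Chrome()                        # any WebDriver works here
driver.get("https://example.com/some-page")        # placeholder URL
link = driver.find_element(
    By.XPATH, "//a[@class and contains(@href, 'downloadme')]"
).get_attribute("href")
print(link)
driver.quit()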
Q: Sorting a List by frequency of occurrence in a list I have a list of integers(or could be even strings), which I would like to sort by the frequency of occurrences in Python, for instance: a = [1, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 5] Here the element 5 appears 4 times in the list, 4 appears 3 times. So the output sorted list would be : result = [5, 5, 5, 5, 3, 3, 3, 4, 4, 4, 1, 1, 2] I tried using a.count(), but it gives the number of occurrence of the element. I would like to sort it. Any idea how to do it ? Thanks A: from collections import Counter print [item for items, c in Counter(a).most_common() for item in [items] * c] # [5, 5, 5, 5, 3, 3, 3, 4, 4, 4, 1, 1, 2] Or even better (efficient) implementation from collections import Counter from itertools import repeat, chain print list(chain.from_iterable(repeat(i, c) for i,c in Counter(a).most_common())) # [5, 5, 5, 5, 3, 3, 3, 4, 4, 4, 1, 1, 2] Or from collections import Counter print sorted(a, key=Counter(a).get, reverse=True) # [5, 5, 5, 5, 3, 3, 3, 4, 4, 4, 1, 1, 2] If you prefer in-place sort a.sort(key=Counter(a).get, reverse=True) A: Using Python 3.3 and the built in sorted function, with the count as the key: >>> a = [1,1,2,3,3,3,4,4,4,5,5,5,5] >>> sorted(a,key=a.count) [2, 1, 1, 3, 3, 3, 4, 4, 4, 5, 5, 5, 5] >>> sorted(a,key=a.count,reverse=True) [5, 5, 5, 5, 3, 3, 3, 4, 4, 4, 1, 1, 2] A: In [15]: a = [1,1,2,3,3,3,4,4,4,5,5,5,5] In [16]: counts = collections.Counter(a) In [17]: list(itertools.chain.from_iterable([[k for _ in range(counts[k])] for k in sorted(counts, key=counts.__getitem__, reverse=True)])) Out[17]: [5, 5, 5, 5, 3, 3, 3, 4, 4, 4, 1, 1, 2] Alternatively: answer = [] for k in sorted(counts, key=counts.__getitem__, reverse=True): answer.extend([k for _ in range(counts[k])]) Of course, [k for _ in range(counts[k])] can be replaced with [k]*counts[k]. So line 17 becomes list(itertools.chain.from_iterable([[k]*counts[k] for k in sorted(counts, key=counts.__getitem__, reverse=True)])) A: If you happen to be using numpy already, or if using it is an option, here's another alternative: In [309]: import numpy as np In [310]: a = [1, 2, 3, 3, 1, 3, 5, 4, 4, 4, 5, 5, 5] In [311]: vals, counts = np.unique(a, return_counts=True) In [312]: order = np.argsort(counts)[::-1] In [313]: np.repeat(vals[order], counts[order]) Out[313]: array([5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 1, 1, 2]) That result is a numpy array. If you want to end up with a Python list, call the array's tolist() method: In [314]: np.repeat(vals[order], counts[order]).tolist() Out[314]: [5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 1, 1, 2] A: Not interesting way... a = [1,1,2,3,3,3,4,4,4,5,5,5,5] from collections import Counter result = [] for v, times in sorted(Counter(a).iteritems(), key=lambda x: x[1], reverse=True): result += [v] * times One liner: reduce(lambda a, b: a + [b[0]] * b[1], sorted(Counter(a).iteritems(), key=lambda x: x[1], reverse=True), []) A: Occurrence in array and within a sets of equal size: rev=True arr = [6, 6, 5, 2, 9, 2, 5, 9, 2, 5, 6, 5, 4, 6, 9, 1, 2, 3, 4, 7 ,8 ,8, 8, 2] print arr arr.sort(reverse=rev) ARR = {} for n in arr: if n not in ARR: ARR[n] = 0 ARR[n] += 1 arr=[] for k,v in sorted(ARR.iteritems(), key=lambda (k,v): (v,k), reverse=rev): arr.extend([k]*v) print arr Results: [6, 6, 5, 2, 9, 2, 5, 9, 2, 5, 6, 5, 4, 6, 9, 1, 2, 3, 4, 7, 8, 8, 8, 2] [2, 2, 2, 2, 2, 6, 6, 6, 6, 5, 5, 5, 5, 9, 9, 9, 8, 8, 8, 4, 4, 7, 3, 1] A: Dart Solution String sortedString = ''; Map map = {}; for (int i = 0; i < s.length; i++) { map[s[i]] = (map[s[i]] ?? 
0) + 1; // OR // map.containsKey(s[i]) // ? map.update(s[i], (value) => ++value) // : map.addAll({s[i]: 1}); } var sortedByValueMap = Map.fromEntries( map.entries.toList()..sort((e1, e2) => e1.value.compareTo(e2.value))); sortedByValueMap.forEach((key, value) { sortedString += key * value; }); return sortedString.split('').reversed.join();
Sorting a List by frequency of occurrence in a list
I have a list of integers(or could be even strings), which I would like to sort by the frequency of occurrences in Python, for instance: a = [1, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 5] Here the element 5 appears 4 times in the list, 4 appears 3 times. So the output sorted list would be : result = [5, 5, 5, 5, 3, 3, 3, 4, 4, 4, 1, 1, 2] I tried using a.count(), but it gives the number of occurrence of the element. I would like to sort it. Any idea how to do it ? Thanks
[ "from collections import Counter\nprint [item for items, c in Counter(a).most_common() for item in [items] * c]\n# [5, 5, 5, 5, 3, 3, 3, 4, 4, 4, 1, 1, 2]\n\nOr even better (efficient) implementation\nfrom collections import Counter\nfrom itertools import repeat, chain\nprint list(chain.from_iterable(repeat(i, c) for i,c in Counter(a).most_common()))\n# [5, 5, 5, 5, 3, 3, 3, 4, 4, 4, 1, 1, 2]\n\nOr\nfrom collections import Counter\nprint sorted(a, key=Counter(a).get, reverse=True)\n# [5, 5, 5, 5, 3, 3, 3, 4, 4, 4, 1, 1, 2]\n\nIf you prefer in-place sort\na.sort(key=Counter(a).get, reverse=True)\n\n", "Using Python 3.3 and the built in sorted function, with the count as the key:\n>>> a = [1,1,2,3,3,3,4,4,4,5,5,5,5]\n>>> sorted(a,key=a.count)\n[2, 1, 1, 3, 3, 3, 4, 4, 4, 5, 5, 5, 5]\n>>> sorted(a,key=a.count,reverse=True)\n[5, 5, 5, 5, 3, 3, 3, 4, 4, 4, 1, 1, 2]\n\n", "In [15]: a = [1,1,2,3,3,3,4,4,4,5,5,5,5]\n\nIn [16]: counts = collections.Counter(a)\n\nIn [17]: list(itertools.chain.from_iterable([[k for _ in range(counts[k])] for k in sorted(counts, key=counts.__getitem__, reverse=True)]))\nOut[17]: [5, 5, 5, 5, 3, 3, 3, 4, 4, 4, 1, 1, 2]\n\nAlternatively:\nanswer = []\nfor k in sorted(counts, key=counts.__getitem__, reverse=True):\n answer.extend([k for _ in range(counts[k])])\n\nOf course, [k for _ in range(counts[k])] can be replaced with [k]*counts[k].\nSo line 17 becomes \nlist(itertools.chain.from_iterable([[k]*counts[k] for k in sorted(counts, key=counts.__getitem__, reverse=True)]))\n\n", "If you happen to be using numpy already, or if using it is an option, here's another alternative:\nIn [309]: import numpy as np\n\nIn [310]: a = [1, 2, 3, 3, 1, 3, 5, 4, 4, 4, 5, 5, 5]\n\nIn [311]: vals, counts = np.unique(a, return_counts=True)\n\nIn [312]: order = np.argsort(counts)[::-1]\n\nIn [313]: np.repeat(vals[order], counts[order])\nOut[313]: array([5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 1, 1, 2])\n\nThat result is a numpy array. If you want to end up with a Python list, call the array's tolist() method:\nIn [314]: np.repeat(vals[order], counts[order]).tolist()\nOut[314]: [5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 1, 1, 2]\n\n", "Not interesting way...\na = [1,1,2,3,3,3,4,4,4,5,5,5,5]\n\nfrom collections import Counter\nresult = []\nfor v, times in sorted(Counter(a).iteritems(), key=lambda x: x[1], reverse=True):\n result += [v] * times\n\nOne liner:\nreduce(lambda a, b: a + [b[0]] * b[1], sorted(Counter(a).iteritems(), key=lambda x: x[1], reverse=True), [])\n\n", "Occurrence in array and within a sets of equal size:\nrev=True\n\narr = [6, 6, 5, 2, 9, 2, 5, 9, 2, 5, 6, 5, 4, 6, 9, 1, 2, 3, 4, 7 ,8 ,8, 8, 2]\nprint arr\n\narr.sort(reverse=rev)\n\nARR = {}\nfor n in arr:\n if n not in ARR:\n ARR[n] = 0\n ARR[n] += 1\n\narr=[]\nfor k,v in sorted(ARR.iteritems(), key=lambda (k,v): (v,k), reverse=rev):\n arr.extend([k]*v)\nprint arr\n\nResults:\n[6, 6, 5, 2, 9, 2, 5, 9, 2, 5, 6, 5, 4, 6, 9, 1, 2, 3, 4, 7, 8, 8, 8, 2]\n[2, 2, 2, 2, 2, 6, 6, 6, 6, 5, 5, 5, 5, 9, 9, 9, 8, 8, 8, 4, 4, 7, 3, 1]\n\n", "Dart Solution\nString sortedString = '';\nMap map = {};\nfor (int i = 0; i < s.length; i++) {\n map[s[i]] = (map[s[i]] ?? 0) + 1;\n // OR \n // map.containsKey(s[i])\n // ? map.update(s[i], (value) => ++value)\n // : map.addAll({s[i]: 1});\n}\nvar sortedByValueMap = Map.fromEntries(\n map.entries.toList()..sort((e1, e2) => e1.value.compareTo(e2.value)));\nsortedByValueMap.forEach((key, value) {\n sortedString += key * value;\n});\nreturn sortedString.split('').reversed. Join();\n\n" ]
[ 37, 8, 3, 1, 0, 0, 0 ]
[]
[]
[ "list", "python", "sorting" ]
stackoverflow_0023429426_list_python_sorting.txt
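The most compact of the approaches above as a runnable snippet; sorted() is stable, so values with equal counts keep their first-appearance order (the 3s before the 4s here):

from collections import Counter

a = [1, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 5]
result = sorted(a, key=Counter(a).get, reverse=True)
print(result)   # [5, 5, 5, 5, 3, 3, 3, 4, 4, 4, 1, 1, 2]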
Q: How to split a row into 2 rows by setting a delimiter? How can I split this line: Basis of the Consolidated <> Financial Statements by setting <> as a delimiter and create a new row, without affecting the other columns? The data looks like this. I need to do this in Python code, and I tried this but it's not working: for i in range(len(df5)): df5['text'].iloc[i]=str(df5['text'].iloc[i]) if((math.isnan(df5['note_number'].iloc[i]==False)): a=str(df5['text'].iloc[i]) a=a.strip() u=a.split('<>') if j<=(len(df5)): j=i+1 df5['text'].iloc[i]=u[0] df5['text'].iloc[i]=u[1]
How to split a row into 2 rows by setting a delimiter?
How can I split this line: Basis of the Consolidated <> Financial Statements by setting <> as a delimiter and create a new row which should not affect other columns? The data looks like this. I need to do this in Python code and I tried this but its not working: for i in range(len(df5)): df5['text'].iloc[i]=str(df5['text'].iloc[i]) if((math.isnan(df5['note_number'].iloc[i]==False)): a=str(df5['text'].iloc[i]) a=a.strip() u=a.split('<>') if j<=(len(df5)): j=i+1 df5['text'].iloc[i]=u[0] df5['text'].iloc[i]=u[1]
[ "To split a row into two rows by a delimiter such as \"<>\" in Python, you can use the split() method of the str class. The split() method splits a string into a list of substrings based on the specified delimiter and returns the resulting list.\n# Define the row\nrow = \"Basis of the Consolidated<>Financial Statements\"\n\n# Split the row into two rows by the \"<>\" delimiter\nrows = row.split(\"<>\")\n\n# Print the resulting list\nprint(rows) # Output: ['Basis of the Consolidated', 'Financial Statements']\n\nYou can then use the rows list to access the two rows separately, for example to add a row or to process them further in your code.\n" ]
[ 0 ]
[]
[]
[ "dataframe", "pandas", "python" ]
stackoverflow_0074669736_dataframe_pandas_python.txt
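Since the question is really about a DataFrame rather than a single string, a hedged pandas sketch: split the text column on '<>' and explode each piece into its own row, which leaves the other columns untouched. The frame below is a stand-in for the asker's df5, whose full contents are not shown:

import pandas as pd

df5 = pd.DataFrame({
    "note_number": [1.0, 2.0],
    "text": ["Basis of the Consolidated<>Financial Statements", "Other note"],
})

df5 = (
    df5.assign(text=df5["text"].str.split("<>"))   # list of parts per row
       .explode("text")                            # one row per part
       .reset_index(drop=True)
)
print(df5)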
Q: How to extract a specific text when web scraping for this situation I need to scrape texts from a website, but could not figure out a way to scrape a specific text for this situation: <td valign="top" class="testo_normale"> <font face="Geneva"> <i>W. Richard Bowen</i> <br> "Water engineering for the promotion of peace" <br> "1(2009)1-6" <br> "DOI: " <br> "Received:26/08/2008; Accepted: 25/11/2008; " So in the above example, I want to only get Water engineering and 1(2009)1-6 I tried to do that all day but I either get all the texts having tag <br> : "W. Richard Bowen" "Water engineering for the promotion of peace" "1(2009)1-6" "DOI: " "Received:26/08/2008; Accepted: 25/11/2008;" or I get empty output. here is website I'm trying to scrape, and a picture of what I want to scrape This is my code: from bs4 import BeautifulSoup import requests r = requests.get('https://www.deswater.com/vol.php?vol=1&oth=1|1-3|January|2009') soup = BeautifulSoup(r.content, 'html.parser') s = soup.find('td', class_='testo_normale') lines = s.find_all('br') for line in lines: print(line.text.strip()) A: You can apply split() method like: from bs4 import BeautifulSoup html =''' <td valign="top" class="testo_normale"> <font face="Geneva"> <i>W. Richard Bowen</i> <br> "Water engineering for the promotion of peace" <br> "1(2009)1-6" <br> "DOI: " <br> "Received:26/08/2008; Accepted: 25/11/2008; " ''' soup= BeautifulSoup(html, 'lxml') txt = soup.select_one('.testo_normale font') print(' '.join(' '.join(txt.get_text(strip=True).split('"')).strip().split(':')[0].split()[3:-1])) #OR for u in soup.select('.testo_normale font'): txt = ' '.join(' '.join(u.get_text(strip=True).split('"')).strip().split(':')[0].split()[3:-1]) print(txt) Output: Water engineering for the promotion of peace 1(2009)1-6 Update with full working code: from bs4 import BeautifulSoup import requests r = requests.get('https://www.deswater.com/vol.php?vol=1&oth=1|1-3|January|2009') soup = BeautifulSoup(r.content, 'html.parser') for u in soup.select('font[face="Geneva, Arial, Helvetica, san-serif"]')[6:]: txt = u.contents[2:-1] for i in txt: print(i.get_text(strip=True)) Output: Editorial and Obituary for Sidney Loeb by Miriam Balaban 1(2009)vii-viii Water engineering for the promotion of peace 1(2009)1-6 Modeling the permeate transient response to perturbations from steady state in a nanofiltration process 1(2009)7-16 Modeling the effect of anti-scalant on CaCO3 precipitation in continuous flow 1(2009)17-24 Alternative primary energy for power desalting plants in Kuwait: the nuclear option I 1(2009)25-41 Alternative primary energy for power desalting plants in Kuwait: the nuclear option II The steam cycle and its combination with desalting units 1(2009)42-57 Potential applications of quarry dolomite for post treatment of desalinated water 1(2009)58-67 Salinity tolerance evaluation methodology for desalination plant discharge 1(2009)68-74 Studies on a water-based absortion heat transformer for desalination using MED 1(2009)75-81 Estimation of stream compositions in reverse osmosis seawater desalination systems 1(2009)82-87 Genetic algorithm-based optimization of a multi-stage flash desalination plant 1(2009)88-106 Numerical simulation on a dynamic mixing process in ducts of a rotary pressure exchanger for SWRO 1(2009)107-113 Simulation of an autonomous, two-stage solar organic Rankine cycle system for reverse osmosis desalination 1(2009)114-127 Experiment and optimal parameters of a solar heating system study on an absorption solar desalination unit 
1(2009)128-138 Roles of various mixed liquor constituents in membrane filtration of activated sludge 1(2009)139-149 Natural organic matter fouling using a cellulose acetate copolymer ultrafiltration membrane 1(2009)150-156 Progress of enzyme immobilization and its potential application 1(2009)157-171 Investigating microbial activities of constructed wetlands with respect to nitrate and sulfate reduction 1(2009)172-179 Membrane fouling caused by soluble microbial products in an activated sludge system under starvation 1(2009)180-185 Characterization of an ultrafiltration membrane modified by sorption of branched polyethyleneimine 1(2009)186-193 Combined humic substance coagulation and membrane filtration under saline conditions 1(2009)194-200 Preparation, characterization and performance of phenolphthalein polyethersulfone ultrafiltration hollow fiber membranes 1(2009)201-207 Application of coagulants in pretreatment of fish wastewater using factorial design 1(2009)208-214 Performance analysis of a trihybrid NF/RO/MSF desalination plant 1(2009)215-222 Nitrogen speciation by microstill flow injection analysis 1(2009)223-231 Wastewater from a mountain village treated with a constructed wetland 1(2009)232-236 The influence of various operating conditions on specific cake resistance in the crossflow microfiltration of yeast suspensions 1(2009)237-247 On-line monitoring of floc formation in various flocculants for piggery wastewater treatment 1(2009)248-258 Rigorous steady-state modeling of MSFBR desalination systems 1(2009)259-276 Detailed numerical simulations of flow mechanics and membrane performance in spacer-filled channels, flat and curved 1(2009)277-288 Removal of polycyclic aromatic hydrocarbons from Ismailia Canal water by chlorine, chlorine dioxide and ozone 1(2009)289-298 Water resources management to satisfy high water demand in the arid Sharm El Sheikh, the Red Sea, Egypt 1(2009)299-306 Effect of storage of NF membranes on fouling deposits and cleaning efficiency 1(2009)307-311 Laboratory studies and CFD modeling of photocatalytic degradation of colored textile wastewater by titania nanoparticles 1(2009)312-317 Startup operation and process control of a two-stage sequencing batch reactor (TSSBR) for biological nitrogen removal via nitrite 1(2009)318-325 A: To extact ANY text in the position of 'Water engineering' which is what I think you want, you can write a regex function like the following: import re def extract_text(string): pattern = r'<br>\s*(.*?)\s*(?:<br>|<)' regex = re.compile(pattern) matches = regex.finditer(string) texts = [] for match in matches: texts.append(match.group(1)) return texts string = """ <td valign="top" class="testo_normale"> <font face="Geneva"> <i>Mariam B</i> <br> "some other text" <br> "1(2009)1-6" <br>""" text = extract_text(string) print(text) The regular expression consists of the following parts: <br>: This matches the tag literally. This indicates that the text we are looking for is preceded by this tag in the string. \s*: This matches any whitespace characters (space, tab, newline, etc.) zero or more times. This allows the <br> tag to be followed by any amount of whitespace, including none at all. (.*?): This is a capturing group that matches any sequence of characters (except a newline) zero or more times, as few times as possible. This is the part of the regular expression that actually captures the text we are looking for. The ? after the * makes the * "lazy", which means it will match as few characters as possible. 
This is necessary to prevent the regular expression from matching too much text. \s*: This is the same as the second \s* in the pattern, and it allows the text we are looking for to be followed by any amount of whitespace, including none at all. (?:<br>|<): This is a non-capturing group that matches either a <br> tag or a < character. This indicates that the text we are looking for is followed by one of these two patterns in the string. This regular expression will match any sequence of characters that is preceded by a <br> tag and followed by a <br> or < tag. For example, in the given string <td valign="top" class="testo_normale"> ... <br>"Water engineering" <br>"1(2009)1-6"<br>", it will match the text Water engineering because it is preceded by <br> and followed by <br>. Note that this regular expression is not perfect and may not work in all cases. For example, if the text you are looking for contains a < or <br> character, this regular expression will not match it correctly. You may need to adjust the regular expression pattern to handle such cases.
How to extract a specific text when web scraping for this situation
I need to scrape texts from a website, but could not figure out a way to scrape a specific text for this situation: <td valign="top" class="testo_normale"> <font face="Geneva"> <i>W. Richard Bowen</i> <br> "Water engineering for the promotion of peace" <br> "1(2009)1-6" <br> "DOI: " <br> "Received:26/08/2008; Accepted: 25/11/2008; " So in the above example, I want to only get Water engineering and 1(2009)1-6 I tried to do that all day but I either get all the texts having tag <br> : "W. Richard Bowen" "Water engineering for the promotion of peace" "1(2009)1-6" "DOI: " "Received:26/08/2008; Accepted: 25/11/2008;" or I get empty output. here is website I'm trying to scrape, and a picture of what I want to scrape This is my code: from bs4 import BeautifulSoup import requests r = requests.get('https://www.deswater.com/vol.php?vol=1&oth=1|1-3|January|2009') soup = BeautifulSoup(r.content, 'html.parser') s = soup.find('td', class_='testo_normale') lines = s.find_all('br') for line in lines: print(line.text.strip())
[ "You can apply split() method like:\nfrom bs4 import BeautifulSoup\n\nhtml ='''\n\n<td valign=\"top\" class=\"testo_normale\">\n <font face=\"Geneva\">\n <i>W. Richard Bowen</i>\n <br>\n \"Water engineering for the promotion of peace\" \n <br>\n \"1(2009)1-6\"\n <br>\n \"DOI: \"\n <br>\n \"Received:26/08/2008; Accepted: 25/11/2008; \"\n \n'''\n\nsoup= BeautifulSoup(html, 'lxml')\n\ntxt = soup.select_one('.testo_normale font')\nprint(' '.join(' '.join(txt.get_text(strip=True).split('\"')).strip().split(':')[0].split()[3:-1]))\n\n#OR \n\nfor u in soup.select('.testo_normale font'):\n txt = ' '.join(' '.join(u.get_text(strip=True).split('\"')).strip().split(':')[0].split()[3:-1])\n print(txt)\n\nOutput:\nWater engineering for the promotion of peace 1(2009)1-6\n\nUpdate with full working code:\nfrom bs4 import BeautifulSoup\nimport requests\nr = requests.get('https://www.deswater.com/vol.php?vol=1&oth=1|1-3|January|2009')\nsoup = BeautifulSoup(r.content, 'html.parser')\n\nfor u in soup.select('font[face=\"Geneva, Arial, Helvetica, san-serif\"]')[6:]:\n txt = u.contents[2:-1]\n for i in txt:\n print(i.get_text(strip=True))\n\nOutput:\nEditorial and Obituary for Sidney Loeb by Miriam Balaban\n\n1(2009)vii-viii\nWater engineering for the promotion of peace\n\n1(2009)1-6\nModeling the permeate transient response to perturbations from steady state in a nanofiltration process\n\n1(2009)7-16\nModeling the effect of anti-scalant on CaCO3 precipitation in continuous flow\n\n1(2009)17-24\nAlternative primary energy for power desalting plants in Kuwait: the nuclear option I\n\n1(2009)25-41\nAlternative primary energy for power desalting plants in Kuwait: the nuclear\noption II The steam cycle and its combination with desalting units\n\n1(2009)42-57\nPotential applications of quarry dolomite for post treatment of desalinated water\n\n1(2009)58-67\nSalinity tolerance evaluation methodology for desalination plant discharge\n\n1(2009)68-74\nStudies on a water-based absortion heat transformer for desalination using MED\n\n1(2009)75-81\nEstimation of stream compositions in reverse osmosis seawater desalination systems\n\n1(2009)82-87\nGenetic algorithm-based optimization of a multi-stage flash desalination plant\n\n1(2009)88-106\nNumerical simulation on a dynamic mixing process in ducts of a rotary pressure exchanger for SWRO\n\n1(2009)107-113\nSimulation of an autonomous, two-stage solar organic Rankine cycle system for reverse osmosis desalination\n\n1(2009)114-127\nExperiment and optimal parameters of a solar heating system study on an absorption solar desalination unit\n\n1(2009)128-138\nRoles of various mixed liquor constituents in membrane filtration of activated sludge\n\n1(2009)139-149\nNatural organic matter fouling using a cellulose acetate copolymer ultrafiltration membrane\n\n1(2009)150-156\nProgress of enzyme immobilization and its potential application\n\n1(2009)157-171\nInvestigating microbial activities of constructed wetlands with respect to nitrate and sulfate reduction\n\n1(2009)172-179\nMembrane fouling caused by soluble microbial products in an activated sludge system under starvation\n\n1(2009)180-185\nCharacterization of an ultrafiltration membrane modified by sorption of branched polyethyleneimine\n\n1(2009)186-193\nCombined humic substance coagulation and membrane filtration under saline conditions\n\n1(2009)194-200\nPreparation, characterization and performance of phenolphthalein polyethersulfone ultrafiltration hollow fiber membranes\n\n1(2009)201-207\nApplication of coagulants in 
pretreatment of fish wastewater using factorial design\n\n1(2009)208-214\nPerformance analysis of a trihybrid NF/RO/MSF desalination plant\n\n1(2009)215-222\nNitrogen speciation by microstill flow injection analysis\n\n1(2009)223-231\nWastewater from a mountain village treated with a constructed wetland\n\n1(2009)232-236\nThe influence of various operating conditions on specific cake resistance in the crossflow microfiltration of yeast suspensions\n\n1(2009)237-247\nOn-line monitoring of floc formation in various flocculants for piggery wastewater treatment\n\n1(2009)248-258\nRigorous steady-state modeling of MSFBR desalination systems\n\n1(2009)259-276\nDetailed numerical simulations of flow mechanics and membrane performance in spacer-filled channels, flat and curved\n\n1(2009)277-288\nRemoval of polycyclic aromatic hydrocarbons from Ismailia Canal water by chlorine, chlorine dioxide and ozone\n\n1(2009)289-298\nWater resources management to satisfy high water demand in the arid Sharm El Sheikh, the Red Sea, Egypt\n\n1(2009)299-306\nEffect of storage of NF membranes on fouling deposits and cleaning efficiency\n\n1(2009)307-311\nLaboratory studies and CFD modeling of photocatalytic degradation of colored textile wastewater by titania nanoparticles\n\n1(2009)312-317\nStartup operation and process control of a two-stage sequencing batch reactor (TSSBR) for biological nitrogen removal via nitrite\n\n1(2009)318-325\n\n", "To extact ANY text in the position of 'Water engineering' which is what I think you want, you can write a regex function like the following:\nimport re\n\ndef extract_text(string):\n pattern = r'<br>\\s*(.*?)\\s*(?:<br>|<)'\n regex = re.compile(pattern)\n matches = regex.finditer(string)\n texts = []\n for match in matches:\n texts.append(match.group(1))\n return texts\n\nstring = \"\"\"\n<td valign=\"top\" class=\"testo_normale\">\n <font face=\"Geneva\">\n <i>Mariam B</i>\n <br>\n \"some other text\" \n <br>\n \"1(2009)1-6\"\n <br>\"\"\"\n\ntext = extract_text(string)\nprint(text)\n\n\nThe regular expression consists of the following parts:\n<br>: This matches the tag literally. This indicates that the text we are looking for is preceded by this tag in the string.\n\\s*: This matches any whitespace characters (space, tab, newline, etc.) zero or more times. This allows the <br> tag to be followed by any amount of whitespace, including none at all.\n(.*?): This is a capturing group that matches any sequence of characters (except a newline) zero or more times, as few times as possible. This is the part of the regular expression that actually captures the text we are looking for. The ? after the * makes the * \"lazy\", which means it will match as few characters as possible. This is necessary to prevent the regular expression from matching too much text.\n\\s*: This is the same as the second \\s* in the pattern, and it allows the text we are looking for to be followed by any amount of whitespace, including none at all.\n(?:<br>|<): This is a non-capturing group that matches either a <br> tag or a < character. This indicates that the text we are looking for is followed by one of these two patterns in the string.\nThis regular expression will match any sequence of characters that is preceded by a <br> tag and followed by a <br> or < tag. For example, in the given string <td valign=\"top\" class=\"testo_normale\"> ... 
<br>\"Water engineering\" <br>\"1(2009)1-6\"<br>\", it will match the text Water engineering because it is preceded by <br> and followed by <br>.\nNote that this regular expression is not perfect and may not work in all cases. For example, if the text you are looking for contains a < or <br> character, this regular expression will not match it correctly. You may need to adjust the regular expression pattern to handle such cases.\n" ]
[ 1, 0 ]
[]
[]
[ "beautifulsoup", "python", "web_scraping" ]
stackoverflow_0074672015_beautifulsoup_python_web_scraping.txt
Q: Unable to iterate over nested loop to calculate sum I am unable find correct logic to find summation. I have binary_values and function as: binary_values =[[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]] def f(x): m = np.matrix([0.5,-0.5, 0.3]) w = m.transpose() Y = np.dot(x,w) return Y f(x) I have to find summation of 1) f(1,0,0)-f(0,0,0))+(f(1,0,1)-f(0,0,1))+(f(1,1,0)-f(0,1,0))+(f(1,1,1)-f(0,1,1)) #this is for whenever "1" comes in 0th position inside nested loop. 2) (f(0,1,0)-f(0,0,0))+(f(0,1,1)-f(0,0,1))+(f(1,1,0)-f(1,0,0))+(f(1,1,1)-f(1,0,1)) #this is for whenever "1" comes in 1st position 3) (f(0,0,1)-f(0,0,0))+(f(0,1,1)-f(0,1,0))+(f(1,0,1)-f(1,0,0))+(f(1,1,1)-f(1,1,0)) #this is for whenever "1" comes in 2nd position The code I tried: sum = 0 for i in binary_values: for j in i: if(binary_values[i][0] == 1): sum = (f(1,0,0)-f(0,0,0))+(f(1,0,1)-f(0,0,1))+(f(1,1,0)- f(0,1,0))+(f(1,1,1)-f(0,1,1)) elif(binary_values[i][1] == 1): sum = (f(0,1,0)-f(0,0,0))+(f(0,1,1)-f(0,0,1))+(f(1,1,0)- f(1,0,0))+(f(1,1,1)-f(1,0,1)) elif(binary_values[i][2] == 1): sum = (f(0,0,1)-f(0,0,0))+(f(0,1,1)-f(0,1,0))+(f(1,0,1)- f(1,0,0))+(f(1,1,1)-f(1,1,0)) else: print("Error") print(sum) Please suggest me the better logic A: The problem you described is because you do: for i in binary _values # Will put the value of i to a inner list item like [0,1,1] then you do: for j in i # makes j a value inside the inner list like # 0 - first iteration , 1 - second iteration, 1 - third iteration and then you try to use i as a position variable for binary_values, but this will not work because i will contain a part of the binary_values list as described. Does the following code do what you wanted? As well i think if you want to sum up all of them, you will need the += operator to add up the sums... But feel free to describe your problem further and i will edit my answer as well. Edit Answer (see comments): import numpy as np binary_values = [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]] def f(x): m =np.matrix([0.5,-0.5, 0.3]) w = m.transpose() Y = np.dot(x,w) return Y[0,0] sums = {"sum1":0,"sum2":0,"sum3":0,"fullsum1":0,"fullsum2":0,"fullsum3":0,"allsum":0} for i in binary_values: if(i[0] == 1): sums["sum1"] = (f([1,0,0])-f([0,0,0]))+(f([1,0,1])-f([0,0,1]))+(f([1,1,0])-f([0,1,0]))+(f([1,1,1])-f([0,1,1])) if(i[1] == 1): sums["sum2"] = (f([0,1,0])-f([0,0,0]))+(f([0,1,1])-f([0,0,1]))+(f([1,1,0])-f([1,0,0]))+(f([1,1,1])-f([1,0,1])) if(i[2] == 1): sums["sum3"] = (f([0,0,1])-f([0,0,0]))+(f([0,1,1])-f([0,1,0]))+(f([1,0,1])-f([1,0,0]))+(f([1,1,1])-f([1,1,0])) sums["fullsum1"] += sums["sum1"] sums["fullsum2"] += sums["sum2"] sums["fullsum3"] += sums["sum3"] sums["allsum"] += sums["sum1"] + sums["sum2"] + sums["sum3"] print("For:", i) print(f" Result Sum i[1] = %8.2f ; Result Sum i[2] = %8.2f ; Result Sum i[3] = %8.2f" % (sums["sum1"],sums["sum2"],sums["sum3"])) print(f" Sum of i[1] = %8.2f ; Sum of i[2] = %8.2f ; Sum of i[3] = %8.2f" % (sums["fullsum1"],sums["fullsum2"],sums["fullsum3"])) print(f" All sum up = %8.2f" % (sums["allsum"])) print() print("Final:") print(f"Result Sum i[1] = %8.2f ; Result Sum i[2] = %8.2f ; Result Sum i[3] = %8.2f" % (sums["sum1"],sums["sum2"],sums["sum3"])) print(f" Sum of i[1] = %8.2f ; Sum of i[2] = %8.2f ; Sum of i[3] = %8.2f" % (sums["fullsum1"],sums["fullsum2"],sums["fullsum3"])) print(f" All sum up = %8.2f" % (sums["allsum"])) Edit: There was another mistake, i forget to add to my answer. 
In your code you tried to call the function f(x) with 3 values like (1,0,0), but the argument list takes only a single value for x. So you could put the three values into a single list to pass them in that way... Otherwise it will end up in a: TypeError: f() takes 1 positional argument but 3 were given
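A more compact way to express the same position-wise sums, offered as a hedged sketch rather than a replacement for the answer above: for every input vector that has a 1 in position p, add f(vector) minus f(vector with bit p cleared). The weights are copied from the question; only the loop structure is new, and np.array is used instead of np.matrix so f returns a plain float.

import numpy as np

binary_values = [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1],
                 [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]]

def f(x):
    # same weights as in the question, returned as a scalar
    w = np.array([0.5, -0.5, 0.3])
    return float(np.dot(x, w))

# sums[p] accumulates f(x) - f(x with bit p cleared) over all x whose bit p is 1
sums = [0.0, 0.0, 0.0]
for x in binary_values:
    for p in range(3):
        if x[p] == 1:
            cleared = list(x)
            cleared[p] = 0
            sums[p] += f(x) - f(cleared)

print(sums)  # roughly [2.0, -2.0, 1.2]: four differences per position, each equal to that position's weight

These three totals are exactly the three summations written out in the question.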
Unable to iterate over nested loop to calculate sum
I am unable find correct logic to find summation. I have binary_values and function as: binary_values =[[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]] def f(x): m = np.matrix([0.5,-0.5, 0.3]) w = m.transpose() Y = np.dot(x,w) return Y f(x) I have to find summation of 1) f(1,0,0)-f(0,0,0))+(f(1,0,1)-f(0,0,1))+(f(1,1,0)-f(0,1,0))+(f(1,1,1)-f(0,1,1)) #this is for whenever "1" comes in 0th position inside nested loop. 2) (f(0,1,0)-f(0,0,0))+(f(0,1,1)-f(0,0,1))+(f(1,1,0)-f(1,0,0))+(f(1,1,1)-f(1,0,1)) #this is for whenever "1" comes in 1st position 3) (f(0,0,1)-f(0,0,0))+(f(0,1,1)-f(0,1,0))+(f(1,0,1)-f(1,0,0))+(f(1,1,1)-f(1,1,0)) #this is for whenever "1" comes in 2nd position The code I tried: sum = 0 for i in binary_values: for j in i: if(binary_values[i][0] == 1): sum = (f(1,0,0)-f(0,0,0))+(f(1,0,1)-f(0,0,1))+(f(1,1,0)- f(0,1,0))+(f(1,1,1)-f(0,1,1)) elif(binary_values[i][1] == 1): sum = (f(0,1,0)-f(0,0,0))+(f(0,1,1)-f(0,0,1))+(f(1,1,0)- f(1,0,0))+(f(1,1,1)-f(1,0,1)) elif(binary_values[i][2] == 1): sum = (f(0,0,1)-f(0,0,0))+(f(0,1,1)-f(0,1,0))+(f(1,0,1)- f(1,0,0))+(f(1,1,1)-f(1,1,0)) else: print("Error") print(sum) Please suggest me the better logic
[ "The problem you described is because you do:\nfor i in binary _values # Will put the value of i to a inner list item like [0,1,1]\n\nthen you do:\nfor j in i # makes j a value inside the inner list like\n # 0 - first iteration , 1 - second iteration, 1 - third iteration \n\nand then you try to use i as a position variable for binary_values, but this will not work because i will contain a part of the binary_values list as described.\nDoes the following code do what you wanted? As well i think if you want to sum up all of them, you will need the += operator to add up the sums... But feel free to describe your problem further and i will edit my answer as well.\nEdit Answer (see comments):\nimport numpy as np\n\nbinary_values = [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]]\n\ndef f(x):\n m =np.matrix([0.5,-0.5, 0.3])\n w = m.transpose()\n Y = np.dot(x,w)\n return Y[0,0]\n\nsums = {\"sum1\":0,\"sum2\":0,\"sum3\":0,\"fullsum1\":0,\"fullsum2\":0,\"fullsum3\":0,\"allsum\":0}\n\nfor i in binary_values:\n if(i[0] == 1):\n sums[\"sum1\"] = (f([1,0,0])-f([0,0,0]))+(f([1,0,1])-f([0,0,1]))+(f([1,1,0])-f([0,1,0]))+(f([1,1,1])-f([0,1,1]))\n if(i[1] == 1):\n sums[\"sum2\"] = (f([0,1,0])-f([0,0,0]))+(f([0,1,1])-f([0,0,1]))+(f([1,1,0])-f([1,0,0]))+(f([1,1,1])-f([1,0,1]))\n if(i[2] == 1):\n sums[\"sum3\"] = (f([0,0,1])-f([0,0,0]))+(f([0,1,1])-f([0,1,0]))+(f([1,0,1])-f([1,0,0]))+(f([1,1,1])-f([1,1,0]))\n\n sums[\"fullsum1\"] += sums[\"sum1\"]\n sums[\"fullsum2\"] += sums[\"sum2\"]\n sums[\"fullsum3\"] += sums[\"sum3\"]\n\n sums[\"allsum\"] += sums[\"sum1\"] + sums[\"sum2\"] + sums[\"sum3\"]\n\n print(\"For:\", i)\n print(f\" Result Sum i[1] = %8.2f ; Result Sum i[2] = %8.2f ; Result Sum i[3] = %8.2f\" % (sums[\"sum1\"],sums[\"sum2\"],sums[\"sum3\"]))\n print(f\" Sum of i[1] = %8.2f ; Sum of i[2] = %8.2f ; Sum of i[3] = %8.2f\" % (sums[\"fullsum1\"],sums[\"fullsum2\"],sums[\"fullsum3\"]))\n print(f\" All sum up = %8.2f\" % (sums[\"allsum\"]))\n print()\n\nprint(\"Final:\")\nprint(f\"Result Sum i[1] = %8.2f ; Result Sum i[2] = %8.2f ; Result Sum i[3] = %8.2f\" % (sums[\"sum1\"],sums[\"sum2\"],sums[\"sum3\"]))\nprint(f\" Sum of i[1] = %8.2f ; Sum of i[2] = %8.2f ; Sum of i[3] = %8.2f\" % (sums[\"fullsum1\"],sums[\"fullsum2\"],sums[\"fullsum3\"]))\nprint(f\" All sum up = %8.2f\" % (sums[\"allsum\"]))\n\nEdit:\nThere was another mistake, i forget to add to my answer.\nIn your code you tried to call the function f(x) with 3 values like (1,0,0), but the argument list contains takes only a single value for x. So you could the three values into a single list to pass them in this way... Otherwise it will end up into a:\nTypeError: f() takes 1 positional argument but 3 were given\n\n" ]
[ 0 ]
[]
[]
[ "numpy", "python" ]
stackoverflow_0074671984_numpy_python.txt
Q: Getting strange and unexpected output from python while loop I made a simple while loop to increase a number. And then I made a completely separate if condition to print a statement under certain circumstances. I don't understand why the two are being joined together..... Write a program whose input is two integers. Output the first integer and subsequent increments of 5 as long as the value is less than or equal to the second integer. Ex: If the input is: -15 10 the output is: -15 -10 -5 0 5 10 Ex: If the second integer is less than the first as in: 20 5 the output is: Second integer can't be less than the first. For coding simplicity, output a space after every integer, including the last. My code: ''' Type your code here. ''' firstNum = int(input()) secondNum = int(input()) while firstNum <= secondNum: print(firstNum, end=" ") firstNum +=5 if firstNum > secondNum: print("Second integer can't be less than the first.") Enter program input (optional) -15 10 Program output displayed here -15 -10 -5 0 5 10 Second integer can't be less than the first. A: Your while loop is ensuring firstNum > secondNum by the time it finishes running. Then, you check to see if firstNum > secondNum (which it is), and your print statement gets executed. A: a = int(input()) b = int(input()) if b < a: print("Second integer can't be less than the first.",end="") while a <= b: print(a, end=" ") a = a + 5 print("")
Getting strange and unexpected output from python while loop
I made a simple while loop to increase a number. And then I made a completely separate if condition to print a statement under certain circumstances. I don't understand why the two are being joined together..... Write a program whose input is two integers. Output the first integer and subsequent increments of 5 as long as the value is less than or equal to the second integer. Ex: If the input is: -15 10 the output is: -15 -10 -5 0 5 10 Ex: If the second integer is less than the first as in: 20 5 the output is: Second integer can't be less than the first. For coding simplicity, output a space after every integer, including the last. My code: ''' Type your code here. ''' firstNum = int(input()) secondNum = int(input()) while firstNum <= secondNum: print(firstNum, end=" ") firstNum +=5 if firstNum > secondNum: print("Second integer can't be less than the first.") Enter program input (optional) -15 10 Program output displayed here -15 -10 -5 0 5 10 Second integer can't be less than the first.
[ "Your while loop is ensuring firstNum > secondNum by the time it finishes running. Then, you check to see if firstNum > secondNum (which it is), and your print statement gets executed.\n", "a = int(input())\nb = int(input())\nif b < a:\n print(\"Second integer can't be less than the first.\",end=\"\")\nwhile a <= b:\n print(a, end=\" \")\na = a + 5\nprint(\"\")\n\n" ]
[ 2, 0 ]
[]
[]
[ "python", "while_loop" ]
stackoverflow_0072081945_python_while_loop.txt
Q: Sorting a Dataframe with alternating positive and negative values in one column please help me sort df into df1, in other words, I am trying to sort df by col3 ensuring that the values in col3 alternate from positive to negative: df (original dataframe) col1 col2 col3 0 1 -1 -38 1 2 -2 45 2 3 -3 79 3 4 -4 -55 4 5 -5 31 5 6 -6 38 6 7 -7 -45 7 8 -8 -79 8 9 -9 55 9 10 -10 -31 10 11 -11 55 11 12 -12 -55 desired dataframe col1 col2 col3 0 5 -5 31 1 10 -10 -31 2 6 -6 38 3 1 -1 -38 4 2 -2 45 5 7 -7 -45 6 9 -9 55 7 4 -4 -55 8 11 -11 55 9 12 -12 -55 10 3 -3 79 11 8 -8 -79 I tried sorting by col3 and using a lambda function as key and got the below result which is not what I want ` # first, we need to import the Pandas library import pandas as pd # create a sample DataFrame with three columns df = pd.DataFrame({'col1': [1, 2, 3, 4, 5,6,7,8,9,10,11,12], 'col2': [-1, -2, -3, -4, -5,-6,-7,-8,-9,-10,-11,-12], \ 'col3': [-38,45,79,-55,31,38,-45,-79,55,-31,55,-55]}) # sort the 'col3' column in ascending order by the absolute value of each element df = df.sort_values(by='col3', key=lambda x: abs(x)) ` col1 col2 col3 4 5 -5 31 9 10 -10 -31 0 1 -1 -38 5 6 -6 38 1 2 -2 45 6 7 -7 -45 3 4 -4 -55 8 9 -9 55 10 11 -11 55 11 12 -12 -55 2 3 -3 79 7 8 -8 -79 A: One way using pandas.DataFrame.groupby then sort_values with multiple colums: keys = ["abs", "order", "sign"] s = df["col3"] df["abs"] = s.abs() df["order"] = df.groupby(["abs", "col3"]).cumcount() # If you want positive to come first df["sign"] = s.lt(0) # If you want negative to come first # df["sign"] = s.gt(0) new_df = df.sort_values(keys).drop(keys, axis=1) print(new_df) Output (Positive first): col1 col2 col3 4 5 -5 31 9 10 -10 -31 5 6 -6 38 0 1 -1 -38 1 2 -2 45 6 7 -7 -45 8 9 -9 55 3 4 -4 -55 10 11 -11 55 11 12 -12 -55 2 3 -3 79 7 8 -8 -79 Output (Negative first): col1 col2 col3 9 10 -10 -31 4 5 -5 31 0 1 -1 -38 5 6 -6 38 6 7 -7 -45 1 2 -2 45 3 4 -4 -55 8 9 -9 55 11 12 -12 -55 10 11 -11 55 7 8 -8 -79 2 3 -3 79
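As a quick usage check of the approach in the answer above, run on the exact sample frame from the question (a sketch; the only addition is reset_index to match the numbering of the desired output). The cumcount column is what keeps the repeated ±55 rows alternating instead of collapsing onto the same sort key.

import pandas as pd

df = pd.DataFrame({'col1': list(range(1, 13)),
                   'col2': [-i for i in range(1, 13)],
                   'col3': [-38, 45, 79, -55, 31, 38, -45, -79, 55, -31, 55, -55]})

keys = ["abs", "order", "sign"]
df["abs"] = df["col3"].abs()
# numbers repeated (abs, col3) pairs so duplicates such as the two 55s and two -55s
# keep their original relative order
df["order"] = df.groupby(["abs", "col3"]).cumcount()
df["sign"] = df["col3"].lt(0)          # positive before negative within each magnitude

result = df.sort_values(keys).drop(columns=keys).reset_index(drop=True)
print(result)                          # matches the desired dataframe in the question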
Sorting a Dataframe with alternating positive and negative values in one column
please help me sort df into df1, in other words, I am trying to sort df by col3 ensuring that the values in col3 alternate from positive to negative: df (original dataframe) col1 col2 col3 0 1 -1 -38 1 2 -2 45 2 3 -3 79 3 4 -4 -55 4 5 -5 31 5 6 -6 38 6 7 -7 -45 7 8 -8 -79 8 9 -9 55 9 10 -10 -31 10 11 -11 55 11 12 -12 -55 desired dataframe col1 col2 col3 0 5 -5 31 1 10 -10 -31 2 6 -6 38 3 1 -1 -38 4 2 -2 45 5 7 -7 -45 6 9 -9 55 7 4 -4 -55 8 11 -11 55 9 12 -12 -55 10 3 -3 79 11 8 -8 -79 I tried sorting by col3 and using a lambda function as key and got the below result which is not what I want ` # first, we need to import the Pandas library import pandas as pd # create a sample DataFrame with three columns df = pd.DataFrame({'col1': [1, 2, 3, 4, 5,6,7,8,9,10,11,12], 'col2': [-1, -2, -3, -4, -5,-6,-7,-8,-9,-10,-11,-12], \ 'col3': [-38,45,79,-55,31,38,-45,-79,55,-31,55,-55]}) # sort the 'col3' column in ascending order by the absolute value of each element df = df.sort_values(by='col3', key=lambda x: abs(x)) ` col1 col2 col3 4 5 -5 31 9 10 -10 -31 0 1 -1 -38 5 6 -6 38 1 2 -2 45 6 7 -7 -45 3 4 -4 -55 8 9 -9 55 10 11 -11 55 11 12 -12 -55 2 3 -3 79 7 8 -8 -79
[ "One way using pandas.DataFrame.groupby then sort_values with multiple colums:\nkeys = [\"abs\", \"order\", \"sign\"]\n\ns = df[\"col3\"]\n\ndf[\"abs\"] = s.abs()\ndf[\"order\"] = df.groupby([\"abs\", \"col3\"]).cumcount()\n\n# If you want positive to come first\ndf[\"sign\"] = s.lt(0)\n\n# If you want negative to come first\n# df[\"sign\"] = s.gt(0)\n\nnew_df = df.sort_values(keys).drop(keys, axis=1)\nprint(new_df)\n\nOutput (Positive first):\n col1 col2 col3\n4 5 -5 31\n9 10 -10 -31\n5 6 -6 38\n0 1 -1 -38\n1 2 -2 45\n6 7 -7 -45\n8 9 -9 55\n3 4 -4 -55\n10 11 -11 55\n11 12 -12 -55\n2 3 -3 79\n7 8 -8 -79\n\nOutput (Negative first):\n col1 col2 col3\n9 10 -10 -31\n4 5 -5 31\n0 1 -1 -38\n5 6 -6 38\n6 7 -7 -45\n1 2 -2 45\n3 4 -4 -55\n8 9 -9 55\n11 12 -12 -55\n10 11 -11 55\n7 8 -8 -79\n2 3 -3 79\n\n" ]
[ 0 ]
[]
[]
[ "pandas", "python" ]
stackoverflow_0074672188_pandas_python.txt
Q: pydantic.error_wrappers.ValidationError: 11 validation errors for For Trip type=value_error.missing Im getting this error with my pydantic schema, but oddly it is generating the object correctly, and sending it to the SQLAlchemy models, then it suddenly throws error for all elements in the model. response -> id field required (type=value_error.missing) response -> date field required (type=value_error.missing) response -> time field required (type=value_error.missing) response -> price field required (type=value_error.missing) response -> distance field required (type=value_error.missing) response -> origin_id field required (type=value_error.missing) response -> destination_id field required (type=value_error.missing) response -> driver_id field required (type=value_error.missing) response -> passenger_id field required (type=value_error.missing) response -> vehicle_id field required (type=value_error.missing) response -> status field required (type=value_error.missing) i must say that all the fields should have values. And the error trace do not references any part of my code so i dont even know where to debug. Im a noob in SQLAlchemy/pydantic here are some parts of the code class Trip(BaseModel): id: int date: str time: str price: float distance: float origin_id: int destination_id: int driver_id: int passenger_id: int vehicle_id: int status: Status class Config: orm_mode = True class TripDB(Base): __tablename__ = 'trip' __table_args__ = {'extend_existing': True} id = Column(Integer, primary_key=True, index=True) date = Column(DateTime, nullable=False) time = Column(String(64), nullable=False) price = Column(Float, nullable=False) distance = Column(Float, nullable=False) status = Column(String(64), nullable=False) origin_id = Column( Integer, ForeignKey('places.id'), nullable=False) destination_id = Column( Integer, ForeignKey('places.id'), nullable=False) origin = relationship("PlaceDB", foreign_keys=[origin_id]) destination = relationship("PlaceDB", foreign_keys=[destination_id]) driver_id = Column( Integer, ForeignKey('driver.id'), nullable=False) vehicle_id = Column( Integer, ForeignKey('vehicle.id'), nullable=False) passenger_id = Column( Integer, ForeignKey('passenger.id'), nullable=False) def create_trip(trip: Trip, db: Session): origin = db.query(models.PlaceDB).filter(models.PlaceDB.id == trip.origin_id).first() destination = db.query(models.PlaceDB).filter(models.PlaceDB.id == trip.destination_id).first() db_trip = TripDB( id=(trip.id or None), date=trip.date or None, time=trip.time or None, price=trip.price or None, distance=trip.distance or None, origin_id=trip.origin_id or None, destination_id=(trip.destination_id or None), status=trip.status or None, driver_id=trip.driver_id or None, passenger_id=trip.passenger_id or None, vehicle_id=trip.vehicle_id or None, origin=origin, destination=destination) try: db.add(db_trip) db.commit() db.refresh(db_trip) return db_trip except: return "Somethig went wrong" A: It seems like a bug on the pydantic model, it happened to me as well, and i was not able to fix it, but indeed if you just skip the type check in the route it works fine A: It seems like there is a conflict in your schema and create_trip function. Have you checked whether you are passing the correct param to your schema? You have defined most of the fields as not nullable, and you are passing None as an alternative value in db.add() command. I had a similar problem with my code, and I figured that naming and type convention between schema and server.py. 
After matching the field names and type in both file, I resolved the error of the field required (type=value_error.missing). # project/schema.py from pydantic import BaseModel # here I had made mistake by using target_URL in server.py # variable name and type should be same in both schema and server class URLBase(BaseModel): target_url: str class URL(URLBase): is_active: bool clicks: int class Config: orm_mode = True class URLInfo(URL): url: str admin_url: str # project/server.py @app.post('/url', response_model=schema.URLInfo) def create_short_url(url: schema.URLBase, db: Session = Depends(get_db)): if not validators.url(url.target_url): raise bad_request_message(message="Your provided URL is not valid!") chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" key = "".join(secrets.choice(chars) for _ in range(5)) secret_key = "".join(secrets.choice(chars) for _ in range(8)) db_url = models.URL(target_url=url.target_url, key=key, secret_key=secret_key) db.add(db_url) db.commit() db.refresh(db_url) db_url.url = key db_url.admin_url = secret_key return db_url A: Please check the return statement, i had similar issue and got the issue reolved by correcting my return statement. I see return statement missing or wrong in create_trip function.
pydantic.error_wrappers.ValidationError: 11 validation errors for For Trip type=value_error.missing
Im getting this error with my pydantic schema, but oddly it is generating the object correctly, and sending it to the SQLAlchemy models, then it suddenly throws error for all elements in the model. response -> id field required (type=value_error.missing) response -> date field required (type=value_error.missing) response -> time field required (type=value_error.missing) response -> price field required (type=value_error.missing) response -> distance field required (type=value_error.missing) response -> origin_id field required (type=value_error.missing) response -> destination_id field required (type=value_error.missing) response -> driver_id field required (type=value_error.missing) response -> passenger_id field required (type=value_error.missing) response -> vehicle_id field required (type=value_error.missing) response -> status field required (type=value_error.missing) i must say that all the fields should have values. And the error trace do not references any part of my code so i dont even know where to debug. Im a noob in SQLAlchemy/pydantic here are some parts of the code class Trip(BaseModel): id: int date: str time: str price: float distance: float origin_id: int destination_id: int driver_id: int passenger_id: int vehicle_id: int status: Status class Config: orm_mode = True class TripDB(Base): __tablename__ = 'trip' __table_args__ = {'extend_existing': True} id = Column(Integer, primary_key=True, index=True) date = Column(DateTime, nullable=False) time = Column(String(64), nullable=False) price = Column(Float, nullable=False) distance = Column(Float, nullable=False) status = Column(String(64), nullable=False) origin_id = Column( Integer, ForeignKey('places.id'), nullable=False) destination_id = Column( Integer, ForeignKey('places.id'), nullable=False) origin = relationship("PlaceDB", foreign_keys=[origin_id]) destination = relationship("PlaceDB", foreign_keys=[destination_id]) driver_id = Column( Integer, ForeignKey('driver.id'), nullable=False) vehicle_id = Column( Integer, ForeignKey('vehicle.id'), nullable=False) passenger_id = Column( Integer, ForeignKey('passenger.id'), nullable=False) def create_trip(trip: Trip, db: Session): origin = db.query(models.PlaceDB).filter(models.PlaceDB.id == trip.origin_id).first() destination = db.query(models.PlaceDB).filter(models.PlaceDB.id == trip.destination_id).first() db_trip = TripDB( id=(trip.id or None), date=trip.date or None, time=trip.time or None, price=trip.price or None, distance=trip.distance or None, origin_id=trip.origin_id or None, destination_id=(trip.destination_id or None), status=trip.status or None, driver_id=trip.driver_id or None, passenger_id=trip.passenger_id or None, vehicle_id=trip.vehicle_id or None, origin=origin, destination=destination) try: db.add(db_trip) db.commit() db.refresh(db_trip) return db_trip except: return "Somethig went wrong"
[ "It seems like a bug on the pydantic model, it happened to me as well, and i was not able to fix it, but indeed if you just skip the type check in the route it works fine\n", "It seems like there is a conflict in your schema and create_trip function. Have you checked whether you are passing the correct param to your schema? You have defined most of the fields as not nullable, and you are passing None as an alternative value in db.add() command.\nI had a similar problem with my code, and I figured that naming and type convention between schema and server.py. After matching the field names and type in both file, I resolved the error of the field required (type=value_error.missing).\n# project/schema.py\nfrom pydantic import BaseModel\n\n# here I had made mistake by using target_URL in server.py\n# variable name and type should be same in both schema and server\nclass URLBase(BaseModel):\n target_url: str\n\n\nclass URL(URLBase):\n is_active: bool\n clicks: int\n\n class Config:\n orm_mode = True\n\n\nclass URLInfo(URL):\n url: str\n admin_url: str\n\n# project/server.py\[email protected]('/url', response_model=schema.URLInfo)\ndef create_short_url(url: schema.URLBase, db: Session = Depends(get_db)):\n if not validators.url(url.target_url):\n raise bad_request_message(message=\"Your provided URL is not valid!\")\n chars = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n key = \"\".join(secrets.choice(chars) for _ in range(5))\n secret_key = \"\".join(secrets.choice(chars) for _ in range(8))\n db_url = models.URL(target_url=url.target_url, key=key, secret_key=secret_key)\n db.add(db_url)\n db.commit()\n db.refresh(db_url)\n db_url.url = key\n db_url.admin_url = secret_key\n\n return db_url\n\n", "Please check the return statement, i had similar issue and got the issue reolved by correcting my return statement. I see return statement missing or wrong in create_trip function.\n" ]
[ 2, 0, 0 ]
[]
[]
[ "pydantic", "python", "sqlalchemy" ]
stackoverflow_0072476094_pydantic_python_sqlalchemy.txt
Q: Pagination in Flask I'm trying to display 5 record per page. However, I not sure how to configure the li class. For instance, click the 2 and it will redirect to next 5 record on second page and click previous it will redirect from 2 to 1 to first page. manageSmartphone.html <div class="clearfix"> <div class="hint-text">Showing <b>5</b> out of <b>{{ total }}</b> entries</div> <ul class="pagination"> <li class="page-item"><a href="#">Previous</a></li> <li class="page-item active"><a href="#" class="page-link" id="page" name="page">1</a></li> <li class="page-item"><a href="#" class="page-link" id="page" name="page">2</a></li> <li class="page-item"><a href="#" class="page-link" id="page" name="page">3</a></li> <li class="page-item"><a href="#" class="page-link" id="page" name="page">4</a></li> <li class="page-item"><a href="#" class="page-link" id="page" name="page">5</a></li> <li class="page-item"><a href="#" class="page-link" id="page" name="page">Next</a></li> </ul> </div> app.py @app.route('/manageSmartphone', methods = ['GET','POST']) def manageSmartphone(): conn = get_db_connection() page = request.args.get('page', type=int, default=1) limit= 5 offset = page*limit - limit smartphones = conn.execute('SELECT * FROM Smartphone').fetchall() total = len(smartphones) smartphones = conn.execute("SELECT * FROM Smartphone LIMIT ? OFFSET ?", (limit, offset)).fetchall() return render_template('manageSmartphone.html', smartphones = smartphones , total = total) A: Implementing pagination can be challenging for several reasons, I would suggest you using the paginate() method of the Flask-SQLAlchemy lib This method takes a page parameter and a per_page parameter that you can use to specify the current page and the number of records to display per page Here is an example of how you could use the paginate() method to implement pagination that displays 5 records per page in Flask: from flask import Flask, render_template from flask_sqlalchemy import SQLAlchemy from flask_sqlalchemy import Pagination app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///database.db' db = SQLAlchemy(app) # Define a model representing a record in the database class Record(db.Model): id = db.Column(db.Integer, primary_key=True) brand = db.Column(db.String(80), nullable=False) model = db.Column(db.String(80), nullable=False) price = db.Column(db.Integer, nullable=False) @app.route('/') def index(): # Get the current page from the request parameters page = request.args.get('page', 1, type=int) # Use the paginate() method to get the records for the current page records = Record.query.paginate(page=page, per_page=5) # Render the records on a template return render_template('index.html', records=records)
Pagination in Flask
I'm trying to display 5 record per page. However, I not sure how to configure the li class. For instance, click the 2 and it will redirect to next 5 record on second page and click previous it will redirect from 2 to 1 to first page. manageSmartphone.html <div class="clearfix"> <div class="hint-text">Showing <b>5</b> out of <b>{{ total }}</b> entries</div> <ul class="pagination"> <li class="page-item"><a href="#">Previous</a></li> <li class="page-item active"><a href="#" class="page-link" id="page" name="page">1</a></li> <li class="page-item"><a href="#" class="page-link" id="page" name="page">2</a></li> <li class="page-item"><a href="#" class="page-link" id="page" name="page">3</a></li> <li class="page-item"><a href="#" class="page-link" id="page" name="page">4</a></li> <li class="page-item"><a href="#" class="page-link" id="page" name="page">5</a></li> <li class="page-item"><a href="#" class="page-link" id="page" name="page">Next</a></li> </ul> </div> app.py @app.route('/manageSmartphone', methods = ['GET','POST']) def manageSmartphone(): conn = get_db_connection() page = request.args.get('page', type=int, default=1) limit= 5 offset = page*limit - limit smartphones = conn.execute('SELECT * FROM Smartphone').fetchall() total = len(smartphones) smartphones = conn.execute("SELECT * FROM Smartphone LIMIT ? OFFSET ?", (limit, offset)).fetchall() return render_template('manageSmartphone.html', smartphones = smartphones , total = total)
[ "Implementing pagination can be challenging for several reasons,\nI would suggest you using the paginate() method of the Flask-SQLAlchemy lib\nThis method takes a page parameter and a per_page parameter that you can use to specify the current page and the number of records to display per page\nHere is an example of how you could use the paginate() method to implement pagination that displays 5 records per page in Flask:\nfrom flask import Flask, render_template\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_sqlalchemy import Pagination\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///database.db'\ndb = SQLAlchemy(app)\n\n# Define a model representing a record in the database\nclass Record(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n brand = db.Column(db.String(80), nullable=False)\n model = db.Column(db.String(80), nullable=False)\n price = db.Column(db.Integer, nullable=False)\n\[email protected]('/')\ndef index():\n # Get the current page from the request parameters\n page = request.args.get('page', 1, type=int)\n\n # Use the paginate() method to get the records for the current page\n records = Record.query.paginate(page=page, per_page=5)\n\n # Render the records on a template\n return render_template('index.html', records=records)\n\n" ]
[ 1 ]
[]
[]
[ "flask", "html", "python" ]
stackoverflow_0074666012_flask_html_python.txt
Q: Python: Print string in reverse Write a program that takes in a line of text as input, and outputs that line of text in reverse. The program repeats, ending when the user enters "Done", "done", or "d" for the line of text. Ex: If the input is: Hello there Hey done then the output is: ereht olleH yeH I have already the code like this. I don't understand what I have done wrong. Please help. word = str(input()) the_no_word = ['Done', 'done', 'd'] while word == "Done" and word == "done" and word == "d": break print(word[-1::-1]) A: This may work for you: word = "" the_no_word = ['Done', 'done', 'd'] while word not in the_no_word: word = str(input()) print(word[-1::-1]) You need to get the user input into word after every loop and check if word is not in the list of the_no_word. Let me know if this is what you were looking for. A: You could do it like this: while (word := input()) not in ['Done', 'done', 'd']: print(word[::-1]) A: var1 = str(input()) bad_word = ['done', 'd', 'Done'] while var1 not in bad_word: print(var1[::-1]) var1 = str(input()) Just did the problem and used this answer. A: This should work for you! word = str(input()) the_no_word = ['Done', 'done', 'd'] while word not in the_no_word: print(word[-1::-1]) word = str(input())
Python: Print string in reverse
Write a program that takes in a line of text as input, and outputs that line of text in reverse. The program repeats, ending when the user enters "Done", "done", or "d" for the line of text. Ex: If the input is: Hello there Hey done then the output is: ereht olleH yeH I have already the code like this. I don't understand what I have done wrong. Please help. word = str(input()) the_no_word = ['Done', 'done', 'd'] while word == "Done" and word == "done" and word == "d": break print(word[-1::-1])
[ "This may work for you:\nword = \"\"\nthe_no_word = ['Done', 'done', 'd']\nwhile word not in the_no_word:\n word = str(input())\n print(word[-1::-1])\n\nYou need to get the user input into word after every loop and check if word is not in the list of the_no_word. Let me know if this is what you were looking for.\n", "You could do it like this:\nwhile (word := input()) not in ['Done', 'done', 'd']:\n print(word[::-1])\n\n", "var1 = str(input())\nbad_word = ['done', 'd', 'Done']\nwhile var1 not in bad_word:\n print(var1[::-1])\n var1 = str(input())\n\nJust did the problem and used this answer.\n", "This should work for you!\nword = str(input())\nthe_no_word = ['Done', 'done', 'd']\nwhile word not in the_no_word:\n print(word[-1::-1])\nword = str(input())\n\n" ]
[ 0, 0, 0, 0 ]
[ "string = str(input())\n\nno_words = ['Done','done','d']\nwhile string not in no_words:\n if string in no_words:\n print()\n else:\n print(string[-1::-1])\n string = str(input())\n\n" ]
[ -2 ]
[ "performance", "python", "python_3.x" ]
stackoverflow_0071360039_performance_python_python_3.x.txt
Q: How to remove fractioned Items and add sales to another row So basically my POS reports don't add up split bills. If you look at df.Item there are items with fractions (1/2, 1/3, etc). I want to drop those lines but add the sales to the proper row. Item Outlet1 Outlet2 Outlet3 Outlet4 2 AIR GIN 162.0 NaN 189.0 54.0 3 AIR GIN 1/3 NaN NaN NaN 9.0 4 AIR VODKA 468.0 NaN 585.0 144.0 5 AIR VODKA 1/2 NaN NaN 18.0 NaN Example output: Item Outlet1 Outlet2 Outlet3 Outlet4 2 AIR GIN 162.0 NaN 189.0 63.0 3 AIR VODKA 468.0 NaN 603.0 144.0 I'm not sure where to start, New to python. A: I'm assuming the data doesn't contain any duplicate items. It looks like it's total sales over a certain period, but just the itemization is messed up. In that case, you can simply remove the fractions with .str.replace(), then group and sum. df['Item'] = df['Item'].str.replace(r'\s+\d+/\d+$', '', regex=True) df.groupby('Item').sum(min_count=1) # `min_count` to respect NaNs Outlet1 Outlet2 Outlet3 Outlet4 Item AIR GIN 162.0 NaN 189.0 63.0 AIR VODKA 468.0 NaN 603.0 144.0 Then you can .reset_index() if you want. P.S. Before your edit, you also had two other columns that weren't really relevant to the problem, but change the process. You could just include them in the groupby: df.groupby(['Category', 'SubCategory', 'Item'])
How to remove fractioned Items and add sales to another row
So basically my POS reports don't add up split bills. If you look at df.Item there are items with fractions (1/2, 1/3, etc). I want to drop those lines but add the sales to the proper row. Item Outlet1 Outlet2 Outlet3 Outlet4 2 AIR GIN 162.0 NaN 189.0 54.0 3 AIR GIN 1/3 NaN NaN NaN 9.0 4 AIR VODKA 468.0 NaN 585.0 144.0 5 AIR VODKA 1/2 NaN NaN 18.0 NaN Example output: Item Outlet1 Outlet2 Outlet3 Outlet4 2 AIR GIN 162.0 NaN 189.0 63.0 3 AIR VODKA 468.0 NaN 603.0 144.0 I'm not sure where to start, New to python.
[ "I'm assuming the data doesn't contain any duplicate items. It looks like it's total sales over a certain period, but just the itemization is messed up.\nIn that case, you can simply remove the fractions with .str.replace(), then group and sum.\ndf['Item'] = df['Item'].str.replace(r'\\s+\\d+/\\d+$', '', regex=True)\n\ndf.groupby('Item').sum(min_count=1) # `min_count` to respect NaNs\n\n Outlet1 Outlet2 Outlet3 Outlet4\nItem \nAIR GIN 162.0 NaN 189.0 63.0\nAIR VODKA 468.0 NaN 603.0 144.0\n\nThen you can .reset_index() if you want.\n\nP.S. Before your edit, you also had two other columns that weren't really relevant to the problem, but change the process. You could just include them in the groupby:\ndf.groupby(['Category', 'SubCategory', 'Item'])\n\n" ]
[ 0 ]
[]
[]
[ "dataframe", "pandas", "python" ]
stackoverflow_0074671929_dataframe_pandas_python.txt
Q: Scraping Table Data from Multiple URLS, but first link is repeating I'm looking to iterate through the URL with "count" as variables between 1 and 65. Right now, I'm close but really struggling to figure out the last piece. I'm receiving the same table (from variable 1) 65 times, instead of receiving the different tables. import requests import pandas as pd url = 'https://basketball.realgm.com/international/stats/2023/Averages/Qualified/All/player/All/desc/{count}' res = [] for count in range(1, 65): html = requests.get(url).content df_list = pd.read_html(html) df = df_list[-1] res.append(df) print(res) df.to_csv('my data.csv') Any thoughts? A: A few errors: Your URL was templated incorrectly. It remains at .../{count} literally, without substituting or updating from the loop variable. If you want to get page 1 to 65, use range(1, 66) Unless you want to export only the last dataframe, you need to concatenate all of them first # No count here, we will add it later url = 'https://basketball.realgm.com/international/stats/2023/Averages/Qualified/All/player/All/desc' res = [] for count in range(1, 66): # pd.read_html accepts a URL too so no need to make a separate request df_list = pd.read_html(f"{url}/{count}") res.append(df_list[-1]) pd.concat(res).to_csv('my data.csv')
Scraping Table Data from Multiple URLS, but first link is repeating
I'm looking to iterate through the URL with "count" as variables between 1 and 65. Right now, I'm close but really struggling to figure out the last piece. I'm receiving the same table (from variable 1) 65 times, instead of receiving the different tables. import requests import pandas as pd url = 'https://basketball.realgm.com/international/stats/2023/Averages/Qualified/All/player/All/desc/{count}' res = [] for count in range(1, 65): html = requests.get(url).content df_list = pd.read_html(html) df = df_list[-1] res.append(df) print(res) df.to_csv('my data.csv') Any thoughts?
[ "A few errors:\n\nYour URL was templated incorrectly. It remains at .../{count} literally, without substituting or updating from the loop variable.\nIf you want to get page 1 to 65, use range(1, 66)\nUnless you want to export only the last dataframe, you need to concatenate all of them first\n\n# No count here, we will add it later\nurl = 'https://basketball.realgm.com/international/stats/2023/Averages/Qualified/All/player/All/desc'\nres = []\n\nfor count in range(1, 66):\n # pd.read_html accepts a URL too so no need to make a separate request\n df_list = pd.read_html(f\"{url}/{count}\")\n res.append(df_list[-1])\n\npd.concat(res).to_csv('my data.csv')\n\n" ]
[ 1 ]
[]
[]
[ "dataframe", "loops", "pandas", "python", "python_requests" ]
stackoverflow_0074672238_dataframe_loops_pandas_python_python_requests.txt
Q: Consume a docker container inside Django docker container? Connecting two docker containers I have a Django container and I want to consume another DL container inside it? For example, I have a Django app that predicting images classes and I want to make the prediction using a docker container and not a python library. That Django app will be containerised as well. In production, I will have three docker containers: Django container + Postgres container + YoloV5 container. How can I link the Django with the YoloV5 so that the prediction inside the Django will be done using the YoloV5? I want to connect a deep learning container with Django container to make prediction using the DL container and not a python package. A: The easiest way to do this is to make a network call to the other container. You may find it simplest to wrap the YoloV5 code in a very thin web layer, e.g. using Flask, to create an API. Then call that in your Django container when you need it using requests. A: As suggested by Nick and others, the solution is: by calling the YoloV5 docker container inside Django container using host.docker.internal. I mean that inside Django container (views.py) I used host.docker.internal to call the YoloV5 container.
Consume a docker container inside Django docker container? Connecting two docker containers
I have a Django container and I want to consume another DL container inside it? For example, I have a Django app that predicting images classes and I want to make the prediction using a docker container and not a python library. That Django app will be containerised as well. In production, I will have three docker containers: Django container + Postgres container + YoloV5 container. How can I link the Django with the YoloV5 so that the prediction inside the Django will be done using the YoloV5? I want to connect a deep learning container with Django container to make prediction using the DL container and not a python package.
[ "The easiest way to do this is to make a network call to the other container. You may find it simplest to wrap the YoloV5 code in a very thin web layer, e.g. using Flask, to create an API. Then call that in your Django container when you need it using requests.\n", "As suggested by Nick and others, the solution is: by calling the YoloV5 docker container inside Django container using host.docker.internal. I mean that inside Django container (views.py) I used host.docker.internal to call the YoloV5 container.\n" ]
[ 0, 0 ]
[]
[]
[ "deep_learning", "django", "docker", "docker_compose", "python" ]
stackoverflow_0074634267_deep_learning_django_docker_docker_compose_python.txt
Q: Invalid Shape Error when trying to leverage Keras's VGG16 pretrained model I am trying to leverage kera's VGG16 model in my own image classification problem. My code is heavily based upon Francois Chollet's example (Chapter 8 of Deep Learning in Python - code). I have three classes I'm trying to predict. Directory structure: data/ training/ class_1 class_2 class_3 Note: this my first time working with Keras so I may just be doing something wrong. My call to model.fit() fails with: ValueError: Shapes (32, 1) and (32, 3) are incompatible. See the bottom of this question for the full error messages. If I look at the output from .summary() calls, I don't see a layer of dimension (32, 1). import pathlib import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers from tensorflow.keras.utils import image_dataset_from_directory DATA_DIR = pathlib.Path('./data/') batch_size = 32 img_width = image_height = 256 train_dataset = image_dataset_from_directory( DATA_DIR / "training", image_size=img_width_height, batch_size=batch_size) validation_dataset = image_dataset_from_directory( DATA_DIR / "validation", image_size=img_width_height, batch_size=batch_size) # Found 128400 files belonging to 3 classes. # Found 15600 files belonging to 3 classes. vgg16_convolution_base = keras.applications.vgg16.VGG16( weights="imagenet", include_top=False, input_shape=(img_width, image_height, 3)) vgg16_convolution_base.summary() # block3_conv3 (Conv2D) (None, 64, 64, 256) 590080 # block3_pool (MaxPooling2D) (None, 32, 32, 256) 0 # block4_conv1 (Conv2D) (None, 32, 32, 512) 1180160 # block4_conv2 (Conv2D) (None, 32, 32, 512) 2359808 # block4_conv3 (Conv2D) (None, 32, 32, 512) 2359808 # block4_pool (MaxPooling2D) (None, 16, 16, 512) 0 # block5_conv1 (Conv2D) (None, 16, 16, 512) 2359808 # block5_conv2 (Conv2D) (None, 16, 16, 512) 2359808 # block5_conv3 (Conv2D) (None, 16, 16, 512) 2359808 # block5_pool (MaxPooling2D) (None, 8, 8, 512) 0 def get_features_and_labels(dataset): all_features = [] all_labels = [] for images, labels in dataset: preprocessed_images = keras.applications.vgg16.preprocess_input(images) features = vgg16_convolution_base.predict(preprocessed_images) all_features.append(features) all_labels.append(labels) return np.concatenate(all_features), np.concatenate(all_labels) train_features, train_labels = get_features_and_labels(train_dataset) val_features, val_labels = get_features_and_labels(validation_dataset) print(train_features.shape) print(train_labels.shape) # (128400, 8, 8, 512) # (128400,) print(val_features.shape) print(val_labels.shape) # (15600, 8, 8, 512) # (15600,) inputs = keras.Input(shape=(8, 8, 512)) x = layers.Flatten()(inputs) x = layers.Dense(256)(x) x = layers.Dropout(0.5)(x) outputs = layers.Dense(3, activation="softmax")(x) model = keras.Model(inputs, outputs) model.compile(loss="categorical_crossentropy", optimizer="rmsprop", metrics=["accuracy"]) model.summary() # input_4 (InputLayer) [(None, 8, 8, 512)] 0 # flatten_1 (Flatten) (None, 32768) 0 # dense_2 (Dense) (None, 256) 8388864 # dropout_1 (Dropout) (None, 256) 0 # dense_3 (Dense) (None, 3) 771 # ================================================================ # Total params: 8,389,635 # Trainable params: 8,389,635 history = model.fit( train_features, train_labels, epochs=20, validation_data=(val_features, val_labels) My call to model.fit() fails with: ValueError: Shapes (32, 1) and (32, 3) are incompatible ... 
File "C:\Users\x\anaconda3\lib\site-packages\keras\losses.py", line 1990, in categorical_crossentropy return backend.categorical_crossentropy( File "C:\Users\x\anaconda3\lib\site-packages\keras\backend.py", line 5529, in categorical_crossentropy target.shape.assert_is_compatible_with(output.shape) full traceback A: The categorical_crossentropy loss for 3 classes together with the batch size of 32 dictate the shape of labels (for each bach) to be (32, 3). The labels are currently ordinal: 0, 1, and 2. One can use the SparseCategoricalCrossentropy loss for ordinal labels: loss= tf.keras.losses.SparseCategoricalCrossentropy() Alternatively, one can still use the categorical_crossentropy loss, but in conjunction with the one-hot encoded labels (1, 0, 0) for 0, (0, 1, 0) for 1, and (0, 0, 1) for 2. The following code snippet can accomplish such an encoding: #one-hot encoding num_class = len(set(train_labels)) train_labels=tf.one_hot(indices=train_labels, depth=num_class) val_labels=tf.one_hot(indices=val_labels, depth=num_class) The nature of data (ordered or unordered) helps determining whether one-hot encoding is preferred or ordinal.
Invalid Shape Error when trying to leverage Keras's VGG16 pretrained model
I am trying to leverage kera's VGG16 model in my own image classification problem. My code is heavily based upon Francois Chollet's example (Chapter 8 of Deep Learning in Python - code). I have three classes I'm trying to predict. Directory structure: data/ training/ class_1 class_2 class_3 Note: this my first time working with Keras so I may just be doing something wrong. My call to model.fit() fails with: ValueError: Shapes (32, 1) and (32, 3) are incompatible. See the bottom of this question for the full error messages. If I look at the output from .summary() calls, I don't see a layer of dimension (32, 1). import pathlib import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers from tensorflow.keras.utils import image_dataset_from_directory DATA_DIR = pathlib.Path('./data/') batch_size = 32 img_width = image_height = 256 train_dataset = image_dataset_from_directory( DATA_DIR / "training", image_size=img_width_height, batch_size=batch_size) validation_dataset = image_dataset_from_directory( DATA_DIR / "validation", image_size=img_width_height, batch_size=batch_size) # Found 128400 files belonging to 3 classes. # Found 15600 files belonging to 3 classes. vgg16_convolution_base = keras.applications.vgg16.VGG16( weights="imagenet", include_top=False, input_shape=(img_width, image_height, 3)) vgg16_convolution_base.summary() # block3_conv3 (Conv2D) (None, 64, 64, 256) 590080 # block3_pool (MaxPooling2D) (None, 32, 32, 256) 0 # block4_conv1 (Conv2D) (None, 32, 32, 512) 1180160 # block4_conv2 (Conv2D) (None, 32, 32, 512) 2359808 # block4_conv3 (Conv2D) (None, 32, 32, 512) 2359808 # block4_pool (MaxPooling2D) (None, 16, 16, 512) 0 # block5_conv1 (Conv2D) (None, 16, 16, 512) 2359808 # block5_conv2 (Conv2D) (None, 16, 16, 512) 2359808 # block5_conv3 (Conv2D) (None, 16, 16, 512) 2359808 # block5_pool (MaxPooling2D) (None, 8, 8, 512) 0 def get_features_and_labels(dataset): all_features = [] all_labels = [] for images, labels in dataset: preprocessed_images = keras.applications.vgg16.preprocess_input(images) features = vgg16_convolution_base.predict(preprocessed_images) all_features.append(features) all_labels.append(labels) return np.concatenate(all_features), np.concatenate(all_labels) train_features, train_labels = get_features_and_labels(train_dataset) val_features, val_labels = get_features_and_labels(validation_dataset) print(train_features.shape) print(train_labels.shape) # (128400, 8, 8, 512) # (128400,) print(val_features.shape) print(val_labels.shape) # (15600, 8, 8, 512) # (15600,) inputs = keras.Input(shape=(8, 8, 512)) x = layers.Flatten()(inputs) x = layers.Dense(256)(x) x = layers.Dropout(0.5)(x) outputs = layers.Dense(3, activation="softmax")(x) model = keras.Model(inputs, outputs) model.compile(loss="categorical_crossentropy", optimizer="rmsprop", metrics=["accuracy"]) model.summary() # input_4 (InputLayer) [(None, 8, 8, 512)] 0 # flatten_1 (Flatten) (None, 32768) 0 # dense_2 (Dense) (None, 256) 8388864 # dropout_1 (Dropout) (None, 256) 0 # dense_3 (Dense) (None, 3) 771 # ================================================================ # Total params: 8,389,635 # Trainable params: 8,389,635 history = model.fit( train_features, train_labels, epochs=20, validation_data=(val_features, val_labels) My call to model.fit() fails with: ValueError: Shapes (32, 1) and (32, 3) are incompatible ... 
File "C:\Users\x\anaconda3\lib\site-packages\keras\losses.py", line 1990, in categorical_crossentropy return backend.categorical_crossentropy( File "C:\Users\x\anaconda3\lib\site-packages\keras\backend.py", line 5529, in categorical_crossentropy target.shape.assert_is_compatible_with(output.shape) full traceback
[ "The categorical_crossentropy loss for 3 classes together with the batch size of 32 dictate the shape of labels (for each bach) to be (32, 3).\nThe labels are currently ordinal: 0, 1, and 2. One can use the SparseCategoricalCrossentropy loss for ordinal labels:\nloss= tf.keras.losses.SparseCategoricalCrossentropy()\n\nAlternatively, one can still use the categorical_crossentropy loss, but in conjunction with the one-hot encoded labels (1, 0, 0) for 0, (0, 1, 0) for 1, and (0, 0, 1) for 2. The following code snippet can accomplish such an encoding:\n#one-hot encoding\nnum_class = len(set(train_labels))\ntrain_labels=tf.one_hot(indices=train_labels, depth=num_class)\nval_labels=tf.one_hot(indices=val_labels, depth=num_class)\n\nThe nature of data (ordered or unordered) helps determining whether one-hot encoding is preferred or ordinal.\n" ]
[ 0 ]
[]
[]
[ "deep_learning", "keras", "python", "vgg_net" ]
stackoverflow_0074667517_deep_learning_keras_python_vgg_net.txt
Q: Why is the InverseMelScale torchaudio function so slow? I am currently exploring and learning machine learning for music/audio generation and I am already failing in the first steps. My idea is to use image-based learning algorithms on audio. To do so, I want to convert the audio into a MEL spectrogram and then apply the machine learning stuff. Then, when the model is trained, it obviously should generate music again, which will be MEL spectrogram. So I have to convert the MEL spectrogram back to audio. Generating the MEL spectrogram is straight forward using pytorch's torchaudio framwork: waveform, _ = torchaudio.load(os.path.join(folder, "drums.mp3"), normalize=True, format="mp3") waveform = waveform.to(device) mel_spectrogram_transform = torchaudio.transforms.MelSpectrogram(sample_rate=44100, hop_length=512, n_fft=2048, n_mels=512, f_max=16384).to(device) mel_spectrogram = mel_spectrogram_transform(waveform) There are some more pre-processing steps in order to be able to save the spectrogram as an image, but I skip it here for brevity. What makes me headaches is the inverse step. torchaudio has a function for that, InverseMelScale. But it is painstakingly slow. Here is the code: inverse_melscale_transform = torchaudio.transforms.InverseMelScale(sample_rate=44100, n_mels=512, n_stft=2048 // 2 + 1).to(device) mel_spectrogram = mel_spectrogram.to(device) spectrogram = inverse_melscale_transform(mel_spectrogram) Again, I leave out the some more steps here, e.g., using GriffinLim to get the actual audio from spectrogram. Here is what I did so far: I ran the code on my MacBook Pro (Intel), which took forever. I then tested it on a AMD Ryzen server with 256 cores, where I was able to get the result within a couple of minutes. Now my idea was to utilize a GPU, a Titan XP in this case, to get the result even faster, but even after 30 minutes of computing with 100% GPU utilization, there is no result in sight. What am I doing wrong? Why is the AMD Ryzen so much faster? A: Currently, InverseMelScale is implemented as inference using SGD, that is inside of InverseMelScale, loss function is defined and optimizer run. This implementation is not only inefficient, but also no accurate. For the reason this implementation was picked, you can check out https://github.com/pytorch/audio/pull/366. It is suggested to use L-BFGS-B optimizer https://github.com/pytorch/audio/issues/2643
Why is the InverseMelScale torchaudio function so slow?
I am currently exploring and learning machine learning for music/audio generation and I am already failing in the first steps. My idea is to use image-based learning algorithms on audio. To do so, I want to convert the audio into a MEL spectrogram and then apply the machine learning stuff. Then, when the model is trained, it obviously should generate music again, which will be MEL spectrogram. So I have to convert the MEL spectrogram back to audio. Generating the MEL spectrogram is straight forward using pytorch's torchaudio framwork: waveform, _ = torchaudio.load(os.path.join(folder, "drums.mp3"), normalize=True, format="mp3") waveform = waveform.to(device) mel_spectrogram_transform = torchaudio.transforms.MelSpectrogram(sample_rate=44100, hop_length=512, n_fft=2048, n_mels=512, f_max=16384).to(device) mel_spectrogram = mel_spectrogram_transform(waveform) There are some more pre-processing steps in order to be able to save the spectrogram as an image, but I skip it here for brevity. What makes me headaches is the inverse step. torchaudio has a function for that, InverseMelScale. But it is painstakingly slow. Here is the code: inverse_melscale_transform = torchaudio.transforms.InverseMelScale(sample_rate=44100, n_mels=512, n_stft=2048 // 2 + 1).to(device) mel_spectrogram = mel_spectrogram.to(device) spectrogram = inverse_melscale_transform(mel_spectrogram) Again, I leave out the some more steps here, e.g., using GriffinLim to get the actual audio from spectrogram. Here is what I did so far: I ran the code on my MacBook Pro (Intel), which took forever. I then tested it on a AMD Ryzen server with 256 cores, where I was able to get the result within a couple of minutes. Now my idea was to utilize a GPU, a Titan XP in this case, to get the result even faster, but even after 30 minutes of computing with 100% GPU utilization, there is no result in sight. What am I doing wrong? Why is the AMD Ryzen so much faster?
[ "Currently, InverseMelScale is implemented as inference using SGD, that is inside of InverseMelScale, loss function is defined and optimizer run.\nThis implementation is not only inefficient, but also no accurate.\nFor the reason this implementation was picked, you can check out https://github.com/pytorch/audio/pull/366.\nIt is suggested to use L-BFGS-B optimizer\nhttps://github.com/pytorch/audio/issues/2643\n" ]
[ 0 ]
[]
[]
[ "gpu", "python", "pytorch", "spectrogram" ]
stackoverflow_0074447735_gpu_python_pytorch_spectrogram.txt
Q: How can I get the text in a textbox in customtkinter? I am building a text editor but I can't save the file because I can't get the text within the textbox. Even though in the entry widget I can use .get() to get the text, when I tried .get() on the textbox it displayed an error that it isn't an option. A: The customtkinter library is still under development and is updated frequently. .get() support for the textbox was added a couple of days ago, so you can now use it. Make sure your customtkinter library is up to date (pip3 install customtkinter --upgrade). Example code: from tkinter import * import customtkinter def getText(): print(textbox.get('1.0', END)) root = customtkinter.CTk() textbox = customtkinter.CTkTextbox(root) button = customtkinter.CTkButton(root, command=getText) textbox.pack(pady=30, padx=20) button.pack(pady=30, padx=20) root.mainloop()
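Since the goal is a text editor that saves its contents, a minimal save handler could look like the sketch below. The file name and the "end-1c" index (which drops the trailing newline that Tk appends) are assumptions on top of the answer above, not part of it.

import customtkinter

def save_file():
    # "end-1c" reads everything except the automatic trailing newline.
    contents = textbox.get("1.0", "end-1c")
    with open("my_document.txt", "w", encoding="utf-8") as f:
        f.write(contents)

root = customtkinter.CTk()
textbox = customtkinter.CTkTextbox(root)
save_button = customtkinter.CTkButton(root, text="Save", command=save_file)
textbox.pack(pady=30, padx=20)
save_button.pack(pady=10, padx=20)
root.mainloop()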
How can I get the text in a textbox in customtkinter?
I am building a text editor but I can't save the file because I can't get the text within the textbox. Even thought in the entry widget I can use .get() to get the text. I tried .get() but it displays an error that it isn't an option.
[ "Customtkinter library is still under-development and it's getting updated consistently.\n.get() function support was added couple of days ago, you can now use it. Make sure your customtkinter library is up-to-date. (pip3 install customtkinter --upgrade)\nExample code:\nfrom pytube import *\nfrom tkinter import *\nimport customtkinter\n\ndef getText():\n print(textbox.get('1.0', END))\n\nroot = customtkinter.CTk()\n\ntextbox = customtkinter.CTkTextbox(root)\nbutton = customtkinter.CTkButton(root, command=getText)\ntextbox.pack(pady=30, padx=20)\nbutton.pack(pady=30, padx=20)\n\nroot.mainloop()\n\n\n" ]
[ 0 ]
[]
[]
[ "customtkinter", "python", "tkinter" ]
stackoverflow_0074616256_customtkinter_python_tkinter.txt
Q: How to read a big tif file in python? I'm loading a tiff file from http://oceancolor.gsfc.nasa.gov/DOCS/DistFromCoast/ from PIL import Image im = Image.open('GMT_intermediate_coast_distance_01d.tif') The data is large (im.size=(36000, 18000), 1.3GB) and conventional conversion doesn't work; i.e., imarray.shape returns () import numpy as np imarray=np.zeros(im.size) imarray=np.array(im) How can I convert this tiff file to a numpy.array? A: You may not have enough RAM for this image. You'll need at least a bit more than 1.3GB of free memory. I don't know what you're doing with the image, but rather than reading the whole thing into memory I recommend reading it bit by bit if possible, to avoid blowing up your computer. You can use Image.getdata(), which returns one pixel at a time. Also read more about Image.open at this link: http://www.pythonware.com/library/pil/handbook/ A: So far I have tested many alternatives, but only gdal always worked, even with huge 16-bit images. You can open an image with something like this: from osgeo import gdal import numpy as np ds = gdal.Open("name.tif") channel = np.array(ds.GetRasterBand(1).ReadAsArray()) A: I had huge tif files between 1 and 3 GB and managed to finally open them with Image.open() after manually changing the value of MAX_IMAGE_PIXELS inside the Image.py source code to an arbitrarily large number: import numpy as np from PIL import Image im = np.asarray(Image.open("location/image.tif")) A: For Python 32 bit, version 2.7, you are limited by the number of bytes you can add to the stack at a given time. One option is to read in the image in parts, resize the individual chunks, and reassemble them into an image that requires less RAM. I recommend using the packages libtiff and opencv for that. import os os.environ["PATH"] += os.pathsep + "C:\\Program Files (x86)\\GnuWin32\\bin" import numpy as np import libtiff import cv2 tif = libtiff.TIFF.open("HUGETIFFILE.tif", 'r') width = tif.GetField("ImageWidth") height = tif.GetField("ImageLength") bits = tif.GetField('BitsPerSample') sample_format = tif.GetField('SampleFormat') ResizeFactor = 10 #Reduce Image Size by 10 Chunks = 8 #Read Image in 8 Chunks to prevent Memory Error (can be increased for # bigger files) ReadStrip = tif.ReadEncodedStrip typ = tif.get_numpy_type(bits, sample_format) #ReadStrip newarr = np.zeros((1, width/ResizeFactor), typ) for ii in range(0,Chunks): pos = 0 arr = np.empty((height/Chunks, width), typ) size = arr.nbytes for strip in range((ii*tif.NumberOfStrips()/Chunks),((ii+1)*tif.NumberOfStrips()/Chunks)): elem = ReadStrip(strip, arr.ctypes.data + pos, max(size-pos, 0)) pos = pos + elem resized = cv2.resize(arr, (0,0), fx=float(1)/float(ResizeFactor), fy=float(1)/float(ResizeFactor)) # Now remove the large array to free up Memory for the next chunk del arr # Finally recombine the individual resized chunks into the final resized image. newarr = np.vstack((newarr,resized)) newarr = np.delete(newarr, (0), axis=0) cv2.imwrite('resized.tif', newarr) A: You can try to use the 'dask' library: import dask_image.imread ds = dask_image.imread.imread('name.tif')
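As a gentler variant of the third answer above, Pillow's MAX_IMAGE_PIXELS limit can usually be overridden at runtime instead of patching Image.py by hand; it exists mainly as a decompression-bomb guard. The sketch below assumes you trust the file and have enough RAM to hold the decoded array; whether np.asarray succeeds for a given TIFF still depends on its compression and your Pillow version.

from PIL import Image
import numpy as np

# Disable the decompression-bomb check for this process instead of editing Image.py.
Image.MAX_IMAGE_PIXELS = None

with Image.open("GMT_intermediate_coast_distance_01d.tif") as im:
    imarray = np.asarray(im)

print(imarray.shape, imarray.dtype)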
How to read a big tif file in python?
I'm loading a tiff file from http://oceancolor.gsfc.nasa.gov/DOCS/DistFromCoast/ from PIL import Image im = Image.open('GMT_intermediate_coast_distance_01d.tif') The data is large (im.size=(36000, 18000) 1.3GB) and conventional conversion doesn't work; i.e, imarray.shape returns () import numpy as np imarray=np.zeros(im.size) imarray=np.array(im) How can I convert this tiff file to a numpy.array?
[ "May you dont have too much Ram for this image.You'll need at least some more than 1.3GB free memory.\nI don't know what you're doing with the image and you read the entire into your memory but i recommend you to read it bit by bit if its possible to avoid blowing up your computer. \nYou can use Image.getdata() which returns one pixel per time.\nAlso read some more for Image.open on this link : \nhttp://www.pythonware.com/library/pil/handbook/\n", "So far I have tested many alternatives but only gdal worked always even with huge 16bit images. \nYou can open an image with something like this:\nfrom osgeo import gdal\nimport numpy as np\nds = gdal.Open(\"name.tif\")\nchannel = np.array(ds.GetRasterBand(1).ReadAsArray())\n\n", "I had huge tif files between 1 and 3 GB and managed to finally open them with Image.open() after manually changing the value of MAX_IMAGE_PIXELS inside the Image.py source code to an arbitrarily large number:\nfrom PIL import Image\nim = np.asarray(Image.open(\"location/image.tif\")\n\n", "For Python 32 bit, version 2.7 you are limited by the number of bytes you can add to the stack at a given time. One option is to read in the image in parts and then resize the individual chunks and reassemble them into a image that requires less RAM.\nI recommend using the packages libtiff and opencv for that.\nimport os\nos.environ[\"PATH\"] += os.pathsep + \"C:\\\\Program Files (x86)\\\\GnuWin32\\\\bin\"\nimport numpy as np\nimport libtiff\nimport cv2\n\ntif = libtiff.TIFF.open(\"HUGETIFFILE.tif\", 'r')\nwidth = tif.GetField(\"ImageWidth\")\nheight = tif.GetField(\"ImageLength\")\nbits = tif.GetField('BitsPerSample')\nsample_format = tif.GetField('SampleFormat')\n\nResizeFactor = 10 #Reduce Image Size by 10\nChunks = 8 #Read Image in 8 Chunks to prevent Memory Error (can be increased for \n# bigger files)\n\nReadStrip = tif.ReadEncodedStrip\ntyp = tif.get_numpy_type(bits, sample_format)\n\n\n#ReadStrip\nnewarr = np.zeros((1, width/ResizeFactor), typ)\nfor ii in range(0,Chunks):\n pos = 0\n arr = np.empty((height/Chunks, width), typ)\n size = arr.nbytes\n for strip in range((ii*tif.NumberOfStrips()/Chunks),((ii+1)*tif.NumberOfStrips()/Chunks)):\n elem = ReadStrip(strip, arr.ctypes.data + pos, max(size-pos, 0))\n pos = pos + elem\n\n resized = cv2.resize(arr, (0,0), fx=float(1)/float(ResizeFactor), fy=float(1)/float(ResizeFactor))\n\n # Now remove the large array to free up Memory for the next chunk\n del arr\n # Finally recombine the individual resized chunks into the final resized image.\n newarr = np.vstack((newarr,resized))\n\nnewarr = np.delete(newarr, (0), axis=0)\ncv2.imwrite('resized.tif', newarr)\n\n", "you can try to use 'dask' library:\nimport dask_image.imread\n\nds = dask_image.imread.imread('name.tif')\n\n" ]
[ 4, 3, 2, 1, 0 ]
[]
[]
[ "numpy", "python", "python_imaging_library", "tiff" ]
stackoverflow_0030465635_numpy_python_python_imaging_library_tiff.txt
Q: Blinking box when I attempt to execute my code I am new to programming, and I am working with web scraping YouTube video using pytube. When I execute the code below, I get the boldly lined box. It seems to want some input but I'm not sure what to do next. When I press 'enter' without typing anything else, I get the following error message: https://www.youtube.com/RJH6_fx9aT8 --------------------------------------------------------------------------- RegexMatchError Traceback (most recent call last) <ipython-input-4-724a0c70ced1> in <module> 1 link = input ('https://www.youtube.com/RJH6_fx9aT8') ----> 2 yt = YouTube(link) 2 frames /usr/local/lib/python3.7/dist-packages/pytube/helpers.py in regex_search(pattern, string, group) 32 results = regex.search(string) 33 if not results: ---> 34 raise RegexMatchError(caller="regex_search", pattern=pattern) 35 36 logger.debug("matched regex search: %s", pattern) RegexMatchError: regex_search: could not find match for (?:v=|\/)([0-9A-Za-z_-]{11}).* A: https://www.youtube.com/RJH6_fx9aT8 isn't a valid YouTube URL, but https://www.youtube.com/watch?v=RJH6_fx9aT8 is a valid YouTube URL.
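For completeness, a small sketch with a well-formed watch URL; it assumes a current pytube release, and the stream-selection call is the commonly documented one rather than anything from the original post.

from pytube import YouTube

link = "https://www.youtube.com/watch?v=RJH6_fx9aT8"
yt = YouTube(link)

# Print some metadata, then download the highest-resolution progressive stream.
print(yt.title)
yt.streams.get_highest_resolution().download()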
Blinking box when I attempt to execute my code
I am new to programming, and I am working with web scraping YouTube video using pytube. When I execute the code below, I get the boldly lined box. It seems to want some input but I'm not sure what to do next. When I press 'enter' without typing anything else, I get the following error message: https://www.youtube.com/RJH6_fx9aT8 --------------------------------------------------------------------------- RegexMatchError Traceback (most recent call last) <ipython-input-4-724a0c70ced1> in <module> 1 link = input ('https://www.youtube.com/RJH6_fx9aT8') ----> 2 yt = YouTube(link) 2 frames /usr/local/lib/python3.7/dist-packages/pytube/helpers.py in regex_search(pattern, string, group) 32 results = regex.search(string) 33 if not results: ---> 34 raise RegexMatchError(caller="regex_search", pattern=pattern) 35 36 logger.debug("matched regex search: %s", pattern) RegexMatchError: regex_search: could not find match for (?:v=|\/)([0-9A-Za-z_-]{11}).*
[ "https://www.youtube.com/RJH6_fx9aT8 isn't a valid YouTube URL, but https://www.youtube.com/watch?v=RJH6_fx9aT8 is a valid YouTube URL.\n" ]
[ 0 ]
[]
[]
[ "python", "pytube", "web_scraping", "youtube" ]
stackoverflow_0074324936_python_pytube_web_scraping_youtube.txt
Q: How to make popouts with CTkInput encode I'm working on a password manager and had a structure like this: def popUp(text): answer = simpledialog.askstring("input string", text) return answer And it works perfectly, but I want to make the popups look better with CustomTkinter. When I changed it to def popUp(text): answer = customtkinter.CTkInputDialog("input string", text) return answer I got an error: AttributeError: 'CTkInputDialog' object has no attribute 'encode' I expected the popups to work correctly. A: You should check the wiki page before opening a question here: https://github.com/TomSchimansky/CustomTkinter/wiki/CTkInputDialog The syntax should be like this: def getInput(): answer = customtkinter.CTkInputDialog(text = "input string") print(answer.get_input()) root = customtkinter.CTk() button = customtkinter.CTkButton(root, command=getInput) button.pack(pady=30, padx=20) root.mainloop()
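Folding the answer back into the original popUp helper might look like the sketch below; the dialog title is an assumption, and get_input() blocks until the dialog is closed (it returns None if the dialog is cancelled), per the customtkinter wiki linked above.

import customtkinter

def popUp(text):
    # CTkInputDialog takes keyword arguments; get_input() returns the entered string.
    dialog = customtkinter.CTkInputDialog(text=text, title="Password manager")
    return dialog.get_input()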
How to make popouts with CTkInput encode
I'm working on password manager and had structure like that: def popUp(text): answer = simpledialocusg.askstring("input string", text) return answer And it works perfectly, but I want to make popouts looks better with Custom Tkinter. When I made def popUp(text): answer = customtkinter.CTkInputDialog("input string", text) return answer I got an error: AttributeError: 'CTkInputDialog' object has no attribute 'encode' Expect popouts works correctly
[ "You should check the wiki page before opening a question here.\nhttps://github.com/TomSchimansky/CustomTkinter/wiki/CTkInputDialog\nThe syntax should be like this:\ndef getInput():\n answer = customtkinter.CTkInputDialog(text = \"input string\")\n print(answer.get_input())\n\nroot = customtkinter.CTk()\n\nbutton = customtkinter.CTkButton(root, command=getInput)\nbutton.pack(pady=30, padx=20)\n\nroot.mainloop()\n\n" ]
[ 0 ]
[]
[]
[ "customtkinter", "python", "tkinter" ]
stackoverflow_0074641655_customtkinter_python_tkinter.txt
Q: How to pin Youtube comments with python automatically I need to find a way to pin comments on YouTube automatically. I have checked the YouTube API v3 documentation but it does not have this feature. Does anyone have an idea? A: To initialize the automatic mechanism, you first need to open your web browser's Web Developer Tools Network tab and pin an ad hoc comment; you should notice an XHR request to the perform_comment_action endpoint. Right-click this request and copy it as cURL. Notice the last field, actions, in the JSON-encoded --data-raw argument. Decode this base64-encoded field, modify the first plaintext argument Ug...Ag to the id of the comment you want to pin, re-encode the field in base64, and then execute the cURL request, and that's it! Note that there is no need to modify any other parameter to pin a comment on a different video from the one the ad hoc comment was posted on.
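A rough Python sketch of the decode-modify-re-encode step described above. It assumes, as the answer states, that the comment id is visible as plain text inside the decoded actions value; the function and variable names are placeholders, not a documented API, and you still have to replay the rest of the copied cURL request (headers, cookies, body) yourself.

import base64

def swap_comment_id(actions_b64: str, old_id: str, new_id: str) -> str:
    # The actions parameter is URL-safe base64; restore padding before decoding.
    padded = actions_b64 + "=" * (-len(actions_b64) % 4)
    raw = base64.urlsafe_b64decode(padded)
    # Swap the pinned comment id, which appears as plain text in the decoded bytes.
    raw = raw.replace(old_id.encode(), new_id.encode())
    return base64.urlsafe_b64encode(raw).decode().rstrip("=")

# Example usage with placeholder ids:
# new_actions = swap_comment_id(copied_actions, old_comment_id, new_comment_id)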
How to pin Youtube comments with python automatically
I need to find a way to pin comments in YouTube automatically. I have checked YouTube API v3 documentation but it does not have this feature. Is there any idea?
[ "To initialize the automatic mechanism, you first need to open your web-browser Web Developer Tools Network tab, then pin an ad hoc comment, you should notice a XHR request to perform_comment_action endpoint. Right-click this request and copy it as cURL. Notice the last field actions in the JSON encoded --data-raw argument. Decode this base64 encoded field and modify the first plaintext argument Ug...Ag to the comment id you want to pin and re-encode the field in base64 and then execute the cURL request and that's it!\nNote that there is no need to modify any other parameter for pinning a comment on another video than the ad hoc comment is posted on.\n" ]
[ 0 ]
[]
[]
[ "api", "comments", "python", "youtube" ]
stackoverflow_0073444163_api_comments_python_youtube.txt
Q: Python - Adding Custom Values Into A Table From Web Scraping I wrote a basic web scraper that returns values as nested lists like the one below: results = [['a', 'b', 'c'], ['a', 'b', 'c'], ['a', 'b', 'c']] But I want to add 2 custom values when they get pushed into the lists, so it looks like the example below: results = [['customvalue1', 'customvalue2', 'a', 'b', 'c'], ['customvalue1', 'customvalue2', 'a', 'b', 'c'], ['customvalue1', 'customvalue2', 'a', 'b', 'c']] Specifically, I want 'customvalue1' to be the current date in dd/mm/yyyy format, and 'customvalue2' to be a string that I define. I tried to create the custom values within the for loop right before the append method, but haven't had luck so far. A: One way to achieve this in Python: import datetime # Define customvalue2 customvalue2 = "mystring" results = [] # Get current date today = datetime.datetime.now() # Loop over the data you want to add to the results list for data in [['a', 'b', 'c'], ['a', 'b', 'c'], ['a', 'b', 'c']]: # Format current date as dd/mm/yyyy customvalue1 = today.strftime("%d/%m/%Y") # Create a new list with the custom values and the data entry = [customvalue1, customvalue2] + data # Append the entry to the results list results.append(entry)
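If the custom values should be attached inside the existing scraping loop, the same idea also collapses to a single comprehension. 'results' mirrors the name from the question, while 'label' and 'scraped' are placeholders for your own string and your scraper's output.

from datetime import date

label = "mystring"                          # customvalue2: any string you define
today = date.today().strftime("%d/%m/%Y")   # customvalue1: current date as dd/mm/yyyy

scraped = [['a', 'b', 'c'], ['a', 'b', 'c'], ['a', 'b', 'c']]
results = [[today, label, *row] for row in scraped]
print(results)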
Python - Adding Custom Values Into A Table From Web Scraping
I wrote a basic web scraper that returns values into nested lists like the one below: results = [['a', 'b', 'c'], ['a', 'b', 'c'], ['a', 'b', 'c']] But I want to add 2 custom values when they get pushed into the lists to look like something below: results = [['customvalue1', 'customvalue2', 'a', 'b', 'c'], ['customvalue1', 'customvalue2', 'a', 'b', 'c'], ['customvalue1', 'customvalue2', 'a', 'b', 'c']] Specifically, I want 'customvalue1' to be the current date in dd/mm/yyyy format, and 'customvalue2' to be a string that I define. I tried to create the custom values within the for loop right before the append method, but haven' had luck so far.
[ "One of the ways to achieve it in the case of python.\nimport datetime\n\n# Define customvalue2\ncustomvalue2 = \"mystring\"\n\nresults = []\n\n# Get current date\ntoday = datetime.datetime.now()\n\n# Loop over the data you want to add to the results list\nfor data in [['a', 'b', 'c'], ['a', 'b', 'c'], ['a', 'b', 'c']]:\n # Format current date as dd/mm/yyyy\n customvalue1 = today.strftime(\"%d/%m/%Y\")\n\n # Create a new list with the custom values and the data\n entry = [customvalue1, customvalue2] + data\n\n # Append the entry to the results list\n results.append(entry)\n\n" ]
[ 0 ]
[]
[]
[ "list", "nested", "python", "web_scraping" ]
stackoverflow_0074672411_list_nested_python_web_scraping.txt
Q: "detail": "CSRF Failed: CCSRF token missing." when sending post data from angular 13 to django connected database i need to send the post data from angular to DRF through angular form but geeting the error i checked almost all the answers available on the internet but did not found and useful answer. "detail": "CSRF Failed: CSRF token missing." //post logic sources.service.ts import { Injectable } from '@angular/core'; import { sources } from './sources'; import { HttpClient } from '@angular/common/http'; import { Observable , of, throwError } from 'rxjs'; import { catchError, retry } from 'rxjs/operators'; import { HttpHeaders } from '@angular/common/http'; const httpOptions = { headers: new HttpHeaders({ 'Content-Type': 'application/json', // Authorization: 'my-auth-token', cookieName: 'csrftoken', headerName: 'X-CSRFToken', // X-CSRFToken: 'sjd8q2x8hgjkvs1GJcOOcgnVGEkdP8f02shB', // headerName: 'X-CSRFToken', // headerName: , }) }; @Injectable({ providedIn: 'root' }) export class SourcesService { API_URL = 'http://127.0.0.1:8000/sourceapi.api'; constructor(private http: HttpClient) { } /** GET sources from the server */ Sources() : Observable<sources[]> { return this.http.get<sources[]>(this.API_URL); } /** POST: add a new source to the server */ // addSource(data: object) : Observable<object>{ // return this.http.post<object>(this.API_URL,data, httpOptions); // } addSource(source : sources[]): Observable<sources[]>{ return this.http.post<sources[]> (this.API_URL, source, httpOptions); //console.log(user); } } //add-source.component.ts import { Component, OnInit } from '@angular/core'; import { sources } from '../sources'; import { SourcesService } from '../sources.service'; import { FormGroup, FormControl, ReactiveFormsModule} from '@angular/forms'; @Component({ selector: 'app-add-source', templateUrl: './add-source.component.html', styleUrls: ['./add-source.component.css'] }) export class AddSourceComponent implements OnInit { // a form for entering and validating data sourceForm = new FormGroup({ name : new FormControl(), url : new FormControl(), client : new FormControl(), }); constructor(private sourcesService: SourcesService) { } ngOnInit(): void { } sourceData_post: any; saveSource(){ if(this.validate_form()){ this.sourceData_post = this.sourceForm.value; this.sourcesService.addSource(this.sourceData_post).subscribe((source)=>{ alert('source added'); }); } else{ alert('please fill from correctly'); } } validate_form(){ const formData = this.sourceForm.value; if(formData.name == null){ return false; }else if(formData.url == null){ return false; }else{ return true; } } } // add-source.component.html <div class="bread-crumb"> <div> <span>Add Source</span> </div> </div> <div class="container flex"> <div class="form"> <form action="" [formGroup]="sourceForm" (ngSubmit)="saveSource()"> <table> <tr> <td>Source Name:</td> <td> <input class="input" type="text" formControlName="name"> </td> </tr> <tr> <td>Source URL:</td> <td> <input class="input" type="text" formControlName="url"> </td> </tr> <tr> <td>Source client:</td> <td> <input class="input" type="text" formControlName="client"> </td> </tr> <tr> <td colspan="2"> <div class="center"> <button type="submit">submit</button> </div> </td> </tr> </table> </form> </div> </div> i tried imports: [ BrowserModule, AppRoutingModule, HttpClientModule, Ng2SearchPipeModule, FormsModule, ReactiveFormsModule, HttpClientXsrfModule, HttpClientXsrfModule.withOptions({ cookieName: 'XSRF-TOKEN', headerName: 'X-XSRF-TOKEN', }) but did not help Note :- 
this is angular 13 A: (Partial answer) You get this error message because the CSRF protection is activated by default and you don't send the CSRF token. Someone wrote a good description of what CSRF is here On the first GET request, the server sends you the CSRF token in a cookie, and you have to send it back on every request, as a cookie AND as a request header. The server will check that the CSRF value in the cookie matches with the CSRF value that is in the header. It can be tedious to repeat that on every request so Angular has a builtin module for that : HttpClientXsrfModule that you configured here : HttpClientXsrfModule.withOptions({ cookieName: 'XSRF-TOKEN', headerName: 'X-XSRF-TOKEN', }) One problem is that you override this behavior by setting again the header by hand here : const httpOptions = { headers: new HttpHeaders({ 'Content-Type': 'application/json', cookieName: 'csrftoken', headerName: 'X-CSRFToken', }) }; [...] addSource(source : sources[]): Observable<sources[]>{ return this.http.post<sources[]> (this.API_URL, source, httpOptions); You don't need that. Just leave it like this : addSource(source : sources[]): Observable<sources[]>{ return this.http.post<sources[]> (this.API_URL, source); Another problem is that the name for the CSRF header/cookie is not standard. It can be CSRF, XSRF, or whatever you want. Of course, if you send it as CSRF and the server expects it as XSRF, it will not be detected. As I can see from the comments on the question, the server sends you that Set-Cookie: csrftoken=sjd8q2xsdfgfhjgfnVGEkdP8f02shB So we are sure that the cookie name is csrftoken. So it should be the same in the configuration of the HttpClientXsrfModule. Can you try like this HttpClientXsrfModule.withOptions({ cookieName: 'csrftoken', // << This one is certain headerName: 'X-XSRF-TOKEN', // << For this one, I don't know yet }) Can you try this with different values for the headerName ? Preferably csrftoken also ? header name and cookie name are often the same. Update : According to the Django documentation, the default CSRF header name is HTTP_X_CSRFTOKEN. So you can try this : HttpClientXsrfModule.withOptions({ cookieName: 'csrftoken', headerName: 'HTTP_X_CSRFTOKEN', }) A: The logic on the front-end side was correct, the reason for showing csrf token missing was from the Django rest framework. once i removed the @api_view from my views.py and returned the json response it worked.
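A minimal sketch of what the last answer describes on the Django side: a plain view that skips the DRF @api_view wrapper and returns JSON directly. The view name and payload are placeholders, ensure_csrf_cookie is only there so the Angular HttpClientXsrfModule receives the csrftoken cookie on the first GET, and the exact CSRF behaviour still depends on which middleware and authentication classes your project enables.

# views.py - plain Django view instead of a DRF @api_view
import json
from django.http import JsonResponse
from django.views.decorators.csrf import ensure_csrf_cookie

@ensure_csrf_cookie  # make sure the csrftoken cookie reaches the Angular app
def source_api(request):
    if request.method == "POST":
        data = json.loads(request.body)
        # ... create the Source object from `data` here (placeholder) ...
        return JsonResponse({"status": "created", "received": data})
    # GET: return whatever list the Angular app expects (placeholder payload)
    return JsonResponse({"sources": []})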
"detail": "CSRF Failed: CCSRF token missing." when sending post data from angular 13 to django connected database
i need to send the post data from angular to DRF through angular form but geeting the error i checked almost all the answers available on the internet but did not found and useful answer. "detail": "CSRF Failed: CSRF token missing." //post logic sources.service.ts import { Injectable } from '@angular/core'; import { sources } from './sources'; import { HttpClient } from '@angular/common/http'; import { Observable , of, throwError } from 'rxjs'; import { catchError, retry } from 'rxjs/operators'; import { HttpHeaders } from '@angular/common/http'; const httpOptions = { headers: new HttpHeaders({ 'Content-Type': 'application/json', // Authorization: 'my-auth-token', cookieName: 'csrftoken', headerName: 'X-CSRFToken', // X-CSRFToken: 'sjd8q2x8hgjkvs1GJcOOcgnVGEkdP8f02shB', // headerName: 'X-CSRFToken', // headerName: , }) }; @Injectable({ providedIn: 'root' }) export class SourcesService { API_URL = 'http://127.0.0.1:8000/sourceapi.api'; constructor(private http: HttpClient) { } /** GET sources from the server */ Sources() : Observable<sources[]> { return this.http.get<sources[]>(this.API_URL); } /** POST: add a new source to the server */ // addSource(data: object) : Observable<object>{ // return this.http.post<object>(this.API_URL,data, httpOptions); // } addSource(source : sources[]): Observable<sources[]>{ return this.http.post<sources[]> (this.API_URL, source, httpOptions); //console.log(user); } } //add-source.component.ts import { Component, OnInit } from '@angular/core'; import { sources } from '../sources'; import { SourcesService } from '../sources.service'; import { FormGroup, FormControl, ReactiveFormsModule} from '@angular/forms'; @Component({ selector: 'app-add-source', templateUrl: './add-source.component.html', styleUrls: ['./add-source.component.css'] }) export class AddSourceComponent implements OnInit { // a form for entering and validating data sourceForm = new FormGroup({ name : new FormControl(), url : new FormControl(), client : new FormControl(), }); constructor(private sourcesService: SourcesService) { } ngOnInit(): void { } sourceData_post: any; saveSource(){ if(this.validate_form()){ this.sourceData_post = this.sourceForm.value; this.sourcesService.addSource(this.sourceData_post).subscribe((source)=>{ alert('source added'); }); } else{ alert('please fill from correctly'); } } validate_form(){ const formData = this.sourceForm.value; if(formData.name == null){ return false; }else if(formData.url == null){ return false; }else{ return true; } } } // add-source.component.html <div class="bread-crumb"> <div> <span>Add Source</span> </div> </div> <div class="container flex"> <div class="form"> <form action="" [formGroup]="sourceForm" (ngSubmit)="saveSource()"> <table> <tr> <td>Source Name:</td> <td> <input class="input" type="text" formControlName="name"> </td> </tr> <tr> <td>Source URL:</td> <td> <input class="input" type="text" formControlName="url"> </td> </tr> <tr> <td>Source client:</td> <td> <input class="input" type="text" formControlName="client"> </td> </tr> <tr> <td colspan="2"> <div class="center"> <button type="submit">submit</button> </div> </td> </tr> </table> </form> </div> </div> i tried imports: [ BrowserModule, AppRoutingModule, HttpClientModule, Ng2SearchPipeModule, FormsModule, ReactiveFormsModule, HttpClientXsrfModule, HttpClientXsrfModule.withOptions({ cookieName: 'XSRF-TOKEN', headerName: 'X-XSRF-TOKEN', }) but did not help Note :- this is angular 13
[ "(Partial answer)\nYou get this error message because the CSRF protection is activated by default and you don't send the CSRF token. Someone wrote a good description of what CSRF is here\nOn the first GET request, the server sends you the CSRF token in a cookie, and you have to send it back on every request, as a cookie AND as a request header. The server will check that the CSRF value in the cookie matches with the CSRF value that is in the header.\nIt can be tedious to repeat that on every request so Angular has a builtin module for that : HttpClientXsrfModule that you configured here :\nHttpClientXsrfModule.withOptions({\n cookieName: 'XSRF-TOKEN',\n headerName: 'X-XSRF-TOKEN',\n})\n\nOne problem is that you override this behavior by setting again the header by hand here :\nconst httpOptions = {\n headers: new HttpHeaders({\n 'Content-Type': 'application/json',\n cookieName: 'csrftoken',\n headerName: 'X-CSRFToken',\n })\n};\n[...]\n\naddSource(source : sources[]): Observable<sources[]>{\n return this.http.post<sources[]> (this.API_URL, source, httpOptions);\n\nYou don't need that. Just leave it like this :\naddSource(source : sources[]): Observable<sources[]>{\n return this.http.post<sources[]> (this.API_URL, source);\n\nAnother problem is that the name for the CSRF header/cookie is not standard. It can be CSRF, XSRF, or whatever you want. Of course, if you send it as CSRF and the server expects it as XSRF, it will not be detected.\nAs I can see from the comments on the question, the server sends you that\n\nSet-Cookie: csrftoken=sjd8q2xsdfgfhjgfnVGEkdP8f02shB\n\nSo we are sure that the cookie name is csrftoken. So it should be the same in the configuration of the HttpClientXsrfModule. Can you try like this\nHttpClientXsrfModule.withOptions({\n cookieName: 'csrftoken', // << This one is certain\n headerName: 'X-XSRF-TOKEN', // << For this one, I don't know yet\n})\n\nCan you try this with different values for the headerName ? Preferably csrftoken also ? header name and cookie name are often the same.\nUpdate :\nAccording to the Django documentation, the default CSRF header name is HTTP_X_CSRFTOKEN. So you can try this :\nHttpClientXsrfModule.withOptions({\n cookieName: 'csrftoken',\n headerName: 'HTTP_X_CSRFTOKEN',\n})\n\n", "The logic on the front-end side was correct, the reason for showing csrf token missing was from the Django rest framework.\nonce i removed the @api_view from my views.py and returned the json response it worked.\n" ]
[ 0, 0 ]
[ "you need to exempt csrf in views.py\nfrom django.views.decorators.csrf import csrf_exempt\n\nand then\n@csrf_exempt\ndef index(request):\npass\n\n" ]
[ -1 ]
[ "angular", "angular_fullstack", "csrf", "django", "python" ]
stackoverflow_0074598711_angular_angular_fullstack_csrf_django_python.txt
Q: multiprocessing vs multithreading vs asyncio I found that in Python 3.4 there are few different libraries for multiprocessing/threading: multiprocessing vs threading vs asyncio. But I don't know which one to use or is the "recommended one". Do they do the same thing, or are different? If so, which one is used for what? I want to write a program that uses multicores in my computer. But I don't know which library I should learn. A: TL;DR Making the Right Choice: We have walked through the most popular forms of concurrency. But the question remains - when should choose which one? It really depends on the use cases. From my experience (and reading), I tend to follow this pseudo code: if io_bound: if io_very_slow: print("Use Asyncio") else: print("Use Threads") else: print("Multi Processing") CPU Bound => Multi Processing I/O Bound, Fast I/O, Limited Number of Connections => Multi Threading I/O Bound, Slow I/O, Many connections => Asyncio Reference [NOTE]: If you have a long call method (e.g. a method containing a sleep time or lazy I/O), the best choice is asyncio, Twisted or Tornado approach (coroutine methods), that works with a single thread as concurrency. asyncio works on Python3.4 and later. Tornado and Twisted are ready since Python2.7 uvloop is ultra fast asyncio event loop (uvloop makes asyncio 2-4x faster). [UPDATE (2019)]: Japranto (GitHub) is a very fast pipelining HTTP server based on uvloop. A: They are intended for (slightly) different purposes and/or requirements. CPython (a typical, mainline Python implementation) still has the global interpreter lock so a multi-threaded application (a standard way to implement parallel processing nowadays) is suboptimal. That's why multiprocessing may be preferred over threading. But not every problem may be effectively split into [almost independent] pieces, so there may be a need in heavy interprocess communications. That's why multiprocessing may not be preferred over threading in general. asyncio (this technique is available not only in Python, other languages and/or frameworks also have it, e.g. Boost.ASIO) is a method to effectively handle a lot of I/O operations from many simultaneous sources w/o need of parallel code execution. So it's just a solution (a good one indeed!) for a particular task, not for parallel processing in general. A: In multiprocessing you leverage multiple CPUs to distribute your calculations. Since each of the CPUs runs in parallel, you're effectively able to run multiple tasks simultaneously. You would want to use multiprocessing for CPU-bound tasks. An example would be trying to calculate a sum of all elements of a huge list. If your machine has 8 cores, you can "cut" the list into 8 smaller lists and calculate the sum of each of those lists separately on separate core and then just add up those numbers. You'll get a ~8x speedup by doing that. In (multi)threading you don't need multiple CPUs. Imagine a program that sends lots of HTTP requests to the web. If you used a single-threaded program, it would stop the execution (block) at each request, wait for a response, and then continue once received a response. The problem here is that your CPU isn't really doing work while waiting for some external server to do the job; it could have actually done some useful work in the meantime! The fix is to use threads - you can create many of them, each responsible for requesting some content from the web. 
The nice thing about threads is that, even if they run on one CPU, the CPU from time to time "freezes" the execution of one thread and jumps to executing the other one (it's called context switching and it happens constantly at non-deterministic intervals). So if your task is I/O bound - use threading. asyncio is essentially threading where not the CPU but you, as a programmer (or actually your application), decide where and when does the context switch happen. In Python you use an await keyword to suspend the execution of your coroutine (defined using async keyword). A: This is the basic idea: Is it IO-BOUND ? -----------> USE asyncio IS IT CPU-HEAVY ? ---------> USE multiprocessing ELSE ? ----------------------> USE threading So basically stick to threading unless you have IO/CPU problems. A: Many of the answers suggest how to choose only 1 option, but why not be able to use all 3? In this answer I explain how you can use asyncio to manage combining all 3 forms of concurrency instead as well as easily swap between them later if need be. The short answer Many developers that are first-timers to concurrency in Python will end up using processing.Process and threading.Thread. However, these are the low-level APIs which have been merged together by the high-level API provided by the concurrent.futures module. Furthermore, spawning processes and threads has overhead, such as requiring more memory, a problem which plagued one of the examples I showed below. To an extent, concurrent.futures manages this for you so that you cannot as easily do something like spawn a thousand processes and crash your computer by only spawning a few processes and then just re-using those processes each time one finishes. These high-level APIs are provided through concurrent.futures.Executor, which are then implemented by concurrent.futures.ProcessPoolExecutor and concurrent.futures.ThreadPoolExecutor. In most cases, you should use these over the multiprocessing.Process and threading.Thread, because it's easier to change from one to the other in the future when you use concurrent.futures and you don't have to learn the detailed differences of each. Since these share a unified interfaces, you'll also find that code using multiprocessing or threading will often use concurrent.futures. asyncio is no exception to this, and provides a way to use it via the following code: import asyncio from concurrent.futures import Executor from functools import partial from typing import Any, Callable, Optional, TypeVar T = TypeVar("T") async def run_in_executor( executor: Optional[Executor], func: Callable[..., T], /, *args: Any, **kwargs: Any, ) -> T: """ Run `func(*args, **kwargs)` asynchronously, using an executor. If the executor is None, use the default ThreadPoolExecutor. """ return await asyncio.get_running_loop().run_in_executor( executor, partial(func, *args, **kwargs), ) # Example usage for running `print` in a thread. async def main(): await run_in_executor(None, print, "O" * 100_000) asyncio.run(main()) In fact it turns out that using threading with asyncio was so common that in Python 3.9 they added asyncio.to_thread(func, *args, **kwargs) to shorten it for the default ThreadPoolExecutor. The long answer Are there any disadvantages to this approach? Yes. With asyncio, the biggest disadvantage is that asynchronous functions aren't the same as synchronous functions. This can trip up new users of asyncio a lot and cause a lot of rework to be done if you didn't start programming with asyncio in mind from the beginning. 
Another disadvantage is that users of your code will also become forced to use asyncio. All of this necessary rework will often leave first-time asyncio users with a really sour taste in their mouth. Are there any non-performance advantages to this? Yes. Similar to how using concurrent.futures is advantageous over threading.Thread and multiprocessing.Process for its unified interface, this approach can be considered a further abstraction from an Executor to an asynchronous function. You can start off using asyncio, and if later you find a part of it you need threading or multiprocessing, you can use asyncio.to_thread or run_in_executor. Likewise, you may later discover that an asynchronous version of what you're trying to run with threading already exists, so you can easily step back from using threading and switch to asyncio instead. Are there any performance advantages to this? Yes... and no. Ultimately it depends on the task. In some cases, it may not help (though it likely does not hurt), while in other cases it may help a lot. The rest of this answer provides some explanations as to why using asyncio to run an Executor may be advantageous. - Combining multiple executors and other asynchronous code asyncio essentially provides significantly more control over concurrency at the cost of you need to take control of the concurrency more. If you want to simultaneously run some code using a ThreadPoolExecutor along side some other code using a ProcessPoolExecutor, it is not so easy managing this using synchronous code, but it is very easy with asyncio. import asyncio from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor async def with_processing(): with ProcessPoolExecutor() as executor: tasks = [...] for task in asyncio.as_completed(tasks): result = await task ... async def with_threading(): with ThreadPoolExecutor() as executor: tasks = [...] for task in asyncio.as_completed(tasks): result = await task ... async def main(): await asyncio.gather(with_processing(), with_threading()) asyncio.run(main()) How does this work? Essentially asyncio asks the executors to run their functions. Then, while an executor is running, asyncio will go run other code. For example, the ProcessPoolExecutor starts a bunch of processes, and then while waiting for those processes to finish, the ThreadPoolExecutor starts a bunch of threads. asyncio will then check in on these executors and collect their results when they are done. Furthermore, if you have other code using asyncio, you can run them while waiting for the processes and threads to finish. - Narrowing in on what sections of code needs executors It is not common that you will have many executors in your code, but what is a common problem that I have seen when people use threads/processes is that they will shove the entirety of their code into a thread/process, expecting it to work. For example, I once saw the following code (approximately): from concurrent.futures import ThreadPoolExecutor import requests def get_data(url): return requests.get(url).json()["data"] urls = [...] with ThreadPoolExecutor() as executor: for data in executor.map(get_data, urls): print(data) The funny thing about this piece of code is that it was slower with concurrency than without. Why? Because the resulting json was large, and having many threads consume a huge amount of memory was disastrous. Luckily the solution was simple: from concurrent.futures import ThreadPoolExecutor import requests urls = [...] 
with ThreadPoolExecutor() as executor: for response in executor.map(requests.get, urls): print(response.json()["data"]) Now only one json is unloaded into memory at a time, and everything is fine. The lesson here? You shouldn't try to just slap all of your code into threads/processes, you should instead focus in on what part of the code actually needs concurrency. But what if get_data was not a function as simple as this case? What if we had to apply the executor somewhere deep in the middle of the function? This is where asyncio comes in: import asyncio import requests async def get_data(url): # A lot of code. ... # The specific part that needs threading. response = await asyncio.to_thread(requests.get, url, some_other_params) # A lot of code. ... return data urls = [...] async def main(): tasks = [get_data(url) for url in urls] for task in asyncio.as_completed(tasks): data = await task print(data) asyncio.run(main()) Attempting the same with concurrent.futures is by no means pretty. You could use things such as callbacks, queues, etc., but it would be significantly harder to manage than basic asyncio code. A: Already a lot of good answers. Can't elaborate more on the when to use each one. This is more an interesting combination of two. Multiprocessing + asyncio: https://pypi.org/project/aiomultiprocess/. The use case for which it was designed was highio, but still utilizing as many of the cores available. Facebook used this library to write some kind of python based File server. Asyncio allowing for IO bound traffic, but multiprocessing allowing multiple event loops and threads on multiple cores. Ex code from the repo: import asyncio from aiohttp import request from aiomultiprocess import Pool async def get(url): async with request("GET", url) as response: return await response.text("utf-8") async def main(): urls = ["https://jreese.sh", ...] async with Pool() as pool: async for result in pool.map(get, urls): ... # process result if __name__ == '__main__': # Python 3.7 asyncio.run(main()) # Python 3.6 # loop = asyncio.get_event_loop() # loop.run_until_complete(main()) Just and addition here, would not working in say jupyter notebook very well, as the notebook already has a asyncio loop running. Just a little note for you to not pull your hair out. A: Multiprocessing can be run parallelly. Multithreading and asyncio cannot be run parallelly. With Intel(R) Core(TM) i7-8700K CPU @ 3.70GHz and 32.0 GB RAM, I timed how many prime numbers are between 2 and 100000 with 2 processes, 2 threads and 2 asyncio tasks as shown below. *This is CPU bound calculation: Multiprocessing Multithreading asyncio 23.87 seconds 45.24 seconds 44.77 seconds Because multiprocessing can be run parallelly so multiprocessing is double more faster than multithreading and asyncio as shown above. I used 3 sets of code below: Multiprocessing: # "process_test.py" from multiprocessing import Process import time start_time = time.time() def test(): num = 100000 primes = 0 for i in range(2, num + 1): for j in range(2, i): if i % j == 0: break else: primes += 1 print(primes) if __name__ == "__main__": # This is needed to run processes on Windows process_list = [] for _ in range(0, 2): # 2 processes process = Process(target=test) process_list.append(process) for process in process_list: process.start() for process in process_list: process.join() print(round((time.time() - start_time), 2), "seconds") # 23.87 seconds Result: ... 
9592 9592 23.87 seconds Multithreading: # "thread_test.py" from threading import Thread import time start_time = time.time() def test(): num = 100000 primes = 0 for i in range(2, num + 1): for j in range(2, i): if i % j == 0: break else: primes += 1 print(primes) thread_list = [] for _ in range(0, 2): # 2 threads thread = Thread(target=test) thread_list.append(thread) for thread in thread_list: thread.start() for thread in thread_list: thread.join() print(round((time.time() - start_time), 2), "seconds") # 45.24 seconds Result: ... 9592 9592 45.24 seconds Asyncio: # "asyncio_test.py" import asyncio import time start_time = time.time() async def test(): num = 100000 primes = 0 for i in range(2, num + 1): for j in range(2, i): if i % j == 0: break else: primes += 1 print(primes) async def call_tests(): tasks = [] for _ in range(0, 2): # 2 asyncio tasks tasks.append(test()) await asyncio.gather(*tasks) asyncio.run(call_tests()) print(round((time.time() - start_time), 2), "seconds") # 44.77 seconds Result: ... 9592 9592 44.77 seconds A: Multiprocessing Each process has its own Python interpreter and can run on a separate core of a processor. Python multiprocessing is a package that supports spawning processes using an API similar to the threading module. The multiprocessing package offers true parallelism, effectively side-stepping the Global Interpreter Lock by using sub processes instead of threads. Use multiprocessing when you have CPU intensive tasks. Multithreading Python multithreading allows you to spawn multiple threads within the process. These threads can share the same memory and resources of the process. In CPython due to Global interpreter lock at any given time only a single thread can run, hence you cannot utilize multiple cores. Multithreading in Python does not offer true parallelism due to GIL limitation. Asyncio Asyncio works on co-operative multitasking concepts. Asyncio tasks run on the same thread so there is no parallelism, but it provides better control to the developer instead of the OS which is the case in multithreading. There is a nice discussion on this link regarding the advantages of asyncio over threads. There is a nice blog by Lei Mao on Python concurrency here Multiprocessing VS Threading VS AsyncIO in Python Summary A: I’m not a professional Python user, but as a student in computer architecture I think I can share some of my considerations when choosing between multi processing and multi threading. Besides, some of the other answers (even among those with higher votes) are misusing technical terminology, so I thinks it’s also necessary to make some clarification on those as well, and I’ll do it first. The fundamental difference between multiprocessing and multithreading is whether they share the same memory space. Threads share access to the same virtual memory space, so it is efficient and easy for threads to exchange their computation results (zero copy, and totally user-space execution). Processes on the other hand have separate virtual memory spaces. They cannot directly read or write the other process’ memory space, just like a person cannot read or alter the mind of another person without talking to him. (Allowing so would be a violation of memory protection and defeat the purpose of using virtual memory. ) To exchange data between processes, they have to rely on the operating system’s facility (e.g. message passing), and for more than one reasons this is more costly to do than the “shared memory” scheme used by threads. 
One reason is that invoking the OS’ message passing mechanism requires making a system call which will switch the code execution from user mode to kernel mode, which is time consuming; another reason is likely that OS message passing scheme will have to copy the data bytes from the senders’ memory space to the receivers’ memory space, so non-zero copy cost. It is incorrect to say a multithread program can only use one CPU. The reason why many people say so is due to an artifact of the CPython implementation: global interpreter lock (GIL). Because of the GIL, threads in a CPython process are serialized. As a result, it appears that the multithreaded python program only uses one CPU. But multi thread computer programs in general are not restricted to one core, and for Python, implementations that do not use the GIL can indeed run many threads concurrently, that is, run on more than one CPU at the same time. (See https://wiki.python.org/moin/GlobalInterpreterLock). Given that CPython is the predominant implementation of Python, it’s understandable why multithreaded python programs are commonly equated to being bound to a single core. With Python with GIL, the only way to unleash the power of multicores is to use multiprocessing (there are exceptions to this as mentioned below). But your problem better be easily partition-able into parallel sub-problems that have minimal intercommunication, otherwise a lot of inter-process communication will have to take place and as explained above, the overhead of using the OS’ message passing mechanism will be costly, sometimes so costly the benefits of parallel processing are totally offset. If the nature of your problem requires intense communication between concurrent routines, multithreading is the natural way to go. Unfortunately with CPython, true, effectively concurrent multithreading is not possible due to the GIL. In this case you should realize Python is not the optimal tool for your project and consider using another language. There’s one alternative solution, that is to implement the concurrent processing routines in an external library written in C (or other languages), and import that module to Python. The CPython GIL will not bother to block the threads spawned by that external library. So, with the burdens of GIL, is multithreading in CPython any good? It still offers benefits though, as other answers have mentioned, if you’re doing IO or network communication. In these cases the relevant computation is not done by your CPU but done by other devices (in the case of IO, the disk controller and DMA (direct memory access) controller will transfer the data with minimal CPU participation; in the case of networking, the NIC (network interface card) and DMA will take care of much of the task without CPU’s participation), so once a thread delegates such task to the NIC or disk controller, the OS can put that thread to a sleeping state and switch to other threads of the same program to do useful work. In my understanding, the asyncio module is essentially a specific case of multithreading for IO operations. So: CPU-intensive programs, that can easily be partitioned to run on multiple processes with limited communication: Use multithreading if GIL does not exist (eg Jython), or use multiprocess if GIL is present (eg CPython). CPU-intensive programs, that requires intensive communication between concurrent routines: Use multithreading if GIL does not exist, or use another programming language. Lot’s of IO: asyncio
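As a small illustration of the "external C library" point above: many compiled extensions drop the GIL while they work, so threads can scale even in CPython for that part of the workload. The sketch below relies on CPython's hashlib releasing the GIL when hashing buffers larger than a couple of kilobytes, which holds for current CPython releases but is stated here as an assumption.

import hashlib
import time
from concurrent.futures import ThreadPoolExecutor

data = b"x" * (256 * 1024 * 1024)  # 256 MB buffer; hashing it is CPU-heavy

def work():
    # hashlib releases the GIL for large buffers, so several of these calls
    # can genuinely run in parallel on separate cores.
    return hashlib.sha256(data).hexdigest()

start = time.time()
for _ in range(4):
    work()
print("sequential:", round(time.time() - start, 2), "seconds")

start = time.time()
with ThreadPoolExecutor(max_workers=4) as pool:
    list(pool.map(lambda _: work(), range(4)))
print("4 threads: ", round(time.time() - start, 2), "seconds")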
multiprocessing vs multithreading vs asyncio
I found that in Python 3.4 there are few different libraries for multiprocessing/threading: multiprocessing vs threading vs asyncio. But I don't know which one to use or is the "recommended one". Do they do the same thing, or are different? If so, which one is used for what? I want to write a program that uses multicores in my computer. But I don't know which library I should learn.
[ "TL;DR\nMaking the Right Choice:\n\nWe have walked through the most popular forms of concurrency. But the question remains - when should choose which one? It really depends on the use cases. From my experience (and reading), I tend to follow this pseudo code:\n\nif io_bound:\n if io_very_slow:\n print(\"Use Asyncio\")\n else:\n print(\"Use Threads\")\nelse:\n print(\"Multi Processing\")\n\n\n\nCPU Bound => Multi Processing\nI/O Bound, Fast I/O, Limited Number of Connections => Multi Threading\nI/O Bound, Slow I/O, Many connections => Asyncio\n\n\nReference\n\n[NOTE]:\n\nIf you have a long call method (e.g. a method containing a sleep time or lazy I/O), the best choice is asyncio, Twisted or Tornado approach (coroutine methods), that works with a single thread as concurrency.\nasyncio works on Python3.4 and later.\nTornado and Twisted are ready since Python2.7\nuvloop is ultra fast asyncio event loop (uvloop makes asyncio 2-4x faster).\n\n\n[UPDATE (2019)]:\n\nJapranto (GitHub) is a very fast pipelining HTTP server based on uvloop.\n\n", "They are intended for (slightly) different purposes and/or requirements. CPython (a typical, mainline Python implementation) still has the global interpreter lock so a multi-threaded application (a standard way to implement parallel processing nowadays) is suboptimal. That's why multiprocessing may be preferred over threading. But not every problem may be effectively split into [almost independent] pieces, so there may be a need in heavy interprocess communications. That's why multiprocessing may not be preferred over threading in general.\nasyncio (this technique is available not only in Python, other languages and/or frameworks also have it, e.g. Boost.ASIO) is a method to effectively handle a lot of I/O operations from many simultaneous sources w/o need of parallel code execution. So it's just a solution (a good one indeed!) for a particular task, not for parallel processing in general.\n", "In multiprocessing you leverage multiple CPUs to distribute your calculations. Since each of the CPUs runs in parallel, you're effectively able to run multiple tasks simultaneously. You would want to use multiprocessing for CPU-bound tasks. An example would be trying to calculate a sum of all elements of a huge list. If your machine has 8 cores, you can \"cut\" the list into 8 smaller lists and calculate the sum of each of those lists separately on separate core and then just add up those numbers. You'll get a ~8x speedup by doing that.\nIn (multi)threading you don't need multiple CPUs. Imagine a program that sends lots of HTTP requests to the web. If you used a single-threaded program, it would stop the execution (block) at each request, wait for a response, and then continue once received a response. The problem here is that your CPU isn't really doing work while waiting for some external server to do the job; it could have actually done some useful work in the meantime! The fix is to use threads - you can create many of them, each responsible for requesting some content from the web. The nice thing about threads is that, even if they run on one CPU, the CPU from time to time \"freezes\" the execution of one thread and jumps to executing the other one (it's called context switching and it happens constantly at non-deterministic intervals). So if your task is I/O bound - use threading.\nasyncio is essentially threading where not the CPU but you, as a programmer (or actually your application), decide where and when does the context switch happen. 
In Python you use an await keyword to suspend the execution of your coroutine (defined using async keyword).\n", "This is the basic idea:\n\nIs it IO-BOUND ? -----------> USE asyncio\nIS IT CPU-HEAVY ? ---------> USE multiprocessing\nELSE ? ----------------------> USE threading\n\nSo basically stick to threading unless you have IO/CPU problems.\n", "Many of the answers suggest how to choose only 1 option, but why not be able to use all 3? In this answer I explain how you can use asyncio to manage combining all 3 forms of concurrency instead as well as easily swap between them later if need be.\nThe short answer\n\nMany developers that are first-timers to concurrency in Python will end up using processing.Process and threading.Thread. However, these are the low-level APIs which have been merged together by the high-level API provided by the concurrent.futures module. Furthermore, spawning processes and threads has overhead, such as requiring more memory, a problem which plagued one of the examples I showed below. To an extent, concurrent.futures manages this for you so that you cannot as easily do something like spawn a thousand processes and crash your computer by only spawning a few processes and then just re-using those processes each time one finishes.\nThese high-level APIs are provided through concurrent.futures.Executor, which are then implemented by concurrent.futures.ProcessPoolExecutor and concurrent.futures.ThreadPoolExecutor. In most cases, you should use these over the multiprocessing.Process and threading.Thread, because it's easier to change from one to the other in the future when you use concurrent.futures and you don't have to learn the detailed differences of each.\nSince these share a unified interfaces, you'll also find that code using multiprocessing or threading will often use concurrent.futures. asyncio is no exception to this, and provides a way to use it via the following code:\nimport asyncio\nfrom concurrent.futures import Executor\nfrom functools import partial\nfrom typing import Any, Callable, Optional, TypeVar\n\nT = TypeVar(\"T\")\n\nasync def run_in_executor(\n executor: Optional[Executor],\n func: Callable[..., T],\n /,\n *args: Any,\n **kwargs: Any,\n) -> T:\n \"\"\"\n Run `func(*args, **kwargs)` asynchronously, using an executor.\n\n If the executor is None, use the default ThreadPoolExecutor.\n \"\"\"\n return await asyncio.get_running_loop().run_in_executor(\n executor,\n partial(func, *args, **kwargs),\n )\n\n# Example usage for running `print` in a thread.\nasync def main():\n await run_in_executor(None, print, \"O\" * 100_000)\n\nasyncio.run(main())\n\nIn fact it turns out that using threading with asyncio was so common that in Python 3.9 they added asyncio.to_thread(func, *args, **kwargs) to shorten it for the default ThreadPoolExecutor.\nThe long answer\n\nAre there any disadvantages to this approach?\nYes. With asyncio, the biggest disadvantage is that asynchronous functions aren't the same as synchronous functions. This can trip up new users of asyncio a lot and cause a lot of rework to be done if you didn't start programming with asyncio in mind from the beginning.\nAnother disadvantage is that users of your code will also become forced to use asyncio. All of this necessary rework will often leave first-time asyncio users with a really sour taste in their mouth.\nAre there any non-performance advantages to this?\nYes. 
Similar to how using concurrent.futures is advantageous over threading.Thread and multiprocessing.Process for its unified interface, this approach can be considered a further abstraction from an Executor to an asynchronous function. You can start off using asyncio, and if later you find a part of it you need threading or multiprocessing, you can use asyncio.to_thread or run_in_executor. Likewise, you may later discover that an asynchronous version of what you're trying to run with threading already exists, so you can easily step back from using threading and switch to asyncio instead.\nAre there any performance advantages to this?\nYes... and no. Ultimately it depends on the task. In some cases, it may not help (though it likely does not hurt), while in other cases it may help a lot. The rest of this answer provides some explanations as to why using asyncio to run an Executor may be advantageous.\n- Combining multiple executors and other asynchronous code\nasyncio essentially provides significantly more control over concurrency at the cost of you need to take control of the concurrency more. If you want to simultaneously run some code using a ThreadPoolExecutor along side some other code using a ProcessPoolExecutor, it is not so easy managing this using synchronous code, but it is very easy with asyncio.\nimport asyncio\nfrom concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor\n\nasync def with_processing():\n with ProcessPoolExecutor() as executor:\n tasks = [...]\n for task in asyncio.as_completed(tasks):\n result = await task\n ...\n\nasync def with_threading():\n with ThreadPoolExecutor() as executor:\n tasks = [...]\n for task in asyncio.as_completed(tasks):\n result = await task\n ...\n\nasync def main():\n await asyncio.gather(with_processing(), with_threading())\n\nasyncio.run(main())\n\nHow does this work? Essentially asyncio asks the executors to run their functions. Then, while an executor is running, asyncio will go run other code. For example, the ProcessPoolExecutor starts a bunch of processes, and then while waiting for those processes to finish, the ThreadPoolExecutor starts a bunch of threads. asyncio will then check in on these executors and collect their results when they are done. Furthermore, if you have other code using asyncio, you can run them while waiting for the processes and threads to finish.\n- Narrowing in on what sections of code needs executors\nIt is not common that you will have many executors in your code, but what is a common problem that I have seen when people use threads/processes is that they will shove the entirety of their code into a thread/process, expecting it to work. For example, I once saw the following code (approximately):\nfrom concurrent.futures import ThreadPoolExecutor\nimport requests\n\ndef get_data(url):\n return requests.get(url).json()[\"data\"]\n\nurls = [...]\n\nwith ThreadPoolExecutor() as executor:\n for data in executor.map(get_data, urls):\n print(data)\n\nThe funny thing about this piece of code is that it was slower with concurrency than without. Why? Because the resulting json was large, and having many threads consume a huge amount of memory was disastrous. 
Luckily the solution was simple:\nfrom concurrent.futures import ThreadPoolExecutor\nimport requests\n\nurls = [...]\n\nwith ThreadPoolExecutor() as executor:\n for response in executor.map(requests.get, urls):\n print(response.json()[\"data\"])\n\nNow only one json is unloaded into memory at a time, and everything is fine.\nThe lesson here?\n\nYou shouldn't try to just slap all of your code into threads/processes, you should instead focus in on what part of the code actually needs concurrency.\n\nBut what if get_data was not a function as simple as this case? What if we had to apply the executor somewhere deep in the middle of the function? This is where asyncio comes in:\nimport asyncio\nimport requests\n\nasync def get_data(url):\n # A lot of code.\n ...\n # The specific part that needs threading.\n response = await asyncio.to_thread(requests.get, url, some_other_params)\n # A lot of code.\n ...\n return data\n\nurls = [...]\n\nasync def main():\n tasks = [get_data(url) for url in urls]\n for task in asyncio.as_completed(tasks):\n data = await task\n print(data)\n\nasyncio.run(main())\n\nAttempting the same with concurrent.futures is by no means pretty. You could use things such as callbacks, queues, etc., but it would be significantly harder to manage than basic asyncio code.\n", "Already a lot of good answers. Can't elaborate more on the when to use each one. This is more an interesting combination of two. Multiprocessing + asyncio: https://pypi.org/project/aiomultiprocess/.\nThe use case for which it was designed was highio, but still utilizing as many of the cores available. Facebook used this library to write some kind of python based File server. Asyncio allowing for IO bound traffic, but multiprocessing allowing multiple event loops and threads on multiple cores.\nEx code from the repo:\nimport asyncio\nfrom aiohttp import request\nfrom aiomultiprocess import Pool\n\nasync def get(url):\n async with request(\"GET\", url) as response:\n return await response.text(\"utf-8\")\n\nasync def main():\n urls = [\"https://jreese.sh\", ...]\n async with Pool() as pool:\n async for result in pool.map(get, urls):\n ... # process result\n \nif __name__ == '__main__':\n # Python 3.7\n asyncio.run(main())\n \n # Python 3.6\n # loop = asyncio.get_event_loop()\n # loop.run_until_complete(main())\n\nJust and addition here, would not working in say jupyter notebook very well, as the notebook already has a asyncio loop running. Just a little note for you to not pull your hair out.\n", "\nMultiprocessing can be run parallelly.\n\nMultithreading and asyncio cannot be run parallelly.\n\n\nWith Intel(R) Core(TM) i7-8700K CPU @ 3.70GHz and 32.0 GB RAM, I timed how many prime numbers are between 2 and 100000 with 2 processes, 2 threads and 2 asyncio tasks as shown below. 
*This is CPU bound calculation:\n\n\n\n\nMultiprocessing\nMultithreading\nasyncio\n\n\n\n\n23.87 seconds\n45.24 seconds\n44.77 seconds\n\n\n\n\nBecause multiprocessing can be run parallelly so multiprocessing is double more faster than multithreading and asyncio as shown above.\nI used 3 sets of code below:\nMultiprocessing:\n# \"process_test.py\"\n\nfrom multiprocessing import Process\nimport time\nstart_time = time.time()\n\ndef test():\n num = 100000\n primes = 0\n for i in range(2, num + 1):\n for j in range(2, i):\n if i % j == 0:\n break\n else:\n primes += 1\n print(primes)\n\nif __name__ == \"__main__\": # This is needed to run processes on Windows\n process_list = []\n\n for _ in range(0, 2): # 2 processes\n process = Process(target=test)\n process_list.append(process)\n\n for process in process_list:\n process.start()\n\n for process in process_list:\n process.join()\n\n print(round((time.time() - start_time), 2), \"seconds\") # 23.87 seconds\n\nResult:\n...\n9592\n9592\n23.87 seconds\n\nMultithreading:\n# \"thread_test.py\"\n\nfrom threading import Thread\nimport time\nstart_time = time.time()\n\ndef test():\n num = 100000\n primes = 0\n for i in range(2, num + 1):\n for j in range(2, i):\n if i % j == 0:\n break\n else:\n primes += 1\n print(primes)\n\nthread_list = []\n\nfor _ in range(0, 2): # 2 threads\n thread = Thread(target=test)\n thread_list.append(thread)\n \nfor thread in thread_list:\n thread.start()\n\nfor thread in thread_list:\n thread.join()\n\nprint(round((time.time() - start_time), 2), \"seconds\") # 45.24 seconds\n\nResult:\n...\n9592\n9592\n45.24 seconds\n\nAsyncio:\n# \"asyncio_test.py\"\n\nimport asyncio\nimport time\nstart_time = time.time()\n\nasync def test():\n num = 100000\n primes = 0\n for i in range(2, num + 1):\n for j in range(2, i):\n if i % j == 0:\n break\n else:\n primes += 1\n print(primes)\n\nasync def call_tests():\n tasks = []\n\n for _ in range(0, 2): # 2 asyncio tasks\n tasks.append(test())\n\n await asyncio.gather(*tasks)\n\nasyncio.run(call_tests())\n\nprint(round((time.time() - start_time), 2), \"seconds\") # 44.77 seconds\n\nResult:\n...\n9592\n9592\n44.77 seconds\n\n", "Multiprocessing\nEach process has its own Python interpreter and can run on a separate core of a processor. Python multiprocessing is a package that supports spawning processes using an API similar to the threading module. The multiprocessing package offers true parallelism, effectively side-stepping the Global Interpreter Lock by using sub processes instead of threads.\nUse multiprocessing when you have CPU intensive tasks.\nMultithreading\nPython multithreading allows you to spawn multiple threads within the process. These threads can share the same memory and resources of the process. In CPython due to Global interpreter lock at any given time only a single thread can run, hence you cannot utilize multiple cores. Multithreading in Python does not offer true parallelism due to GIL limitation.\nAsyncio\nAsyncio works on co-operative multitasking concepts. 
Asyncio tasks run on the same thread so there is no parallelism, but it provides better control to the developer instead of the OS which is the case in multithreading.\nThere is a nice discussion on this link regarding the advantages of asyncio over threads.\nThere is a nice blog by Lei Mao on Python concurrency here\nMultiprocessing VS Threading VS AsyncIO in Python Summary\n", "I’m not a professional Python user, but as a student in computer architecture I think I can share some of my considerations when choosing between multi processing and multi threading. Besides, some of the other answers (even among those with higher votes) are misusing technical terminology, so I thinks it’s also necessary to make some clarification on those as well, and I’ll do it first.\nThe fundamental difference between multiprocessing and multithreading is whether they share the same memory space. Threads share access to the same virtual memory space, so it is efficient and easy for threads to exchange their computation results (zero copy, and totally user-space execution).\nProcesses on the other hand have separate virtual memory spaces. They cannot directly read or write the other process’ memory space, just like a person cannot read or alter the mind of another person without talking to him. (Allowing so would be a violation of memory protection and defeat the purpose of using virtual memory. ) To exchange data between processes, they have to rely on the operating system’s facility (e.g. message passing), and for more than one reasons this is more costly to do than the “shared memory” scheme used by threads. One reason is that invoking the OS’ message passing mechanism requires making a system call which will switch the code execution from user mode to kernel mode, which is time consuming; another reason is likely that OS message passing scheme will have to copy the data bytes from the senders’ memory space to the receivers’ memory space, so non-zero copy cost.\nIt is incorrect to say a multithread program can only use one CPU. The reason why many people say so is due to an artifact of the CPython implementation: global interpreter lock (GIL). Because of the GIL, threads in a CPython process are serialized. As a result, it appears that the multithreaded python program only uses one CPU.\nBut multi thread computer programs in general are not restricted to one core, and for Python, implementations that do not use the GIL can indeed run many threads concurrently, that is, run on more than one CPU at the same time. (See https://wiki.python.org/moin/GlobalInterpreterLock).\nGiven that CPython is the predominant implementation of Python, it’s understandable why multithreaded python programs are commonly equated to being bound to a single core.\nWith Python with GIL, the only way to unleash the power of multicores is to use multiprocessing (there are exceptions to this as mentioned below). But your problem better be easily partition-able into parallel sub-problems that have minimal intercommunication, otherwise a lot of inter-process communication will have to take place and as explained above, the overhead of using the OS’ message passing mechanism will be costly, sometimes so costly the benefits of parallel processing are totally offset. If the nature of your problem requires intense communication between concurrent routines, multithreading is the natural way to go. Unfortunately with CPython, true, effectively concurrent multithreading is not possible due to the GIL. 
In this case you should realize Python is not the optimal tool for your project and consider using another language.\nThere’s one alternative solution, that is to implement the concurrent processing routines in an external library written in C (or other languages), and import that module to Python. The CPython GIL will not bother to block the threads spawned by that external library.\nSo, with the burdens of GIL, is multithreading in CPython any good? It still offers benefits though, as other answers have mentioned, if you’re doing IO or network communication. In these cases the relevant computation is not done by your CPU but done by other devices (in the case of IO, the disk controller and DMA (direct memory access) controller will transfer the data with minimal CPU participation; in the case of networking, the NIC (network interface card) and DMA will take care of much of the task without CPU’s participation), so once a thread delegates such task to the NIC or disk controller, the OS can put that thread to a sleeping state and switch to other threads of the same program to do useful work.\nIn my understanding, the asyncio module is essentially a specific case of multithreading for IO operations.\nSo:\nCPU-intensive programs, that can easily be partitioned to run on multiple processes with limited communication: Use multithreading if GIL does not exist (eg Jython), or use multiprocess if GIL is present (eg CPython).\nCPU-intensive programs, that requires intensive communication between concurrent routines: Use multithreading if GIL does not exist, or use another programming language.\nLot’s of IO: asyncio\n" ]
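As a rough, untested sketch of the rule of thumb in the answers above (processes for CPU-bound work, the event loop for I/O-bound work), the snippet below combines a ProcessPoolExecutor with asyncio. The cpu_task and io_task functions, the worker count and the sleep times are made-up placeholders for the illustration, not code taken from any of the answers.

import asyncio
from concurrent.futures import ProcessPoolExecutor

def cpu_task(n):
    # CPU-bound: pure computation; separate processes sidestep GIL contention.
    return sum(i * i for i in range(n))

async def io_task(delay):
    # I/O-bound: mostly waiting; a single event loop can juggle many of these.
    await asyncio.sleep(delay)
    return delay

async def main():
    loop = asyncio.get_running_loop()

    # Fan the CPU-bound work out to worker processes.
    with ProcessPoolExecutor() as pool:
        cpu_results = await asyncio.gather(
            *(loop.run_in_executor(pool, cpu_task, 200_000) for _ in range(4))
        )

    # Handle the I/O-bound work concurrently on the event loop itself.
    io_results = await asyncio.gather(*(io_task(0.1) for _ in range(100)))
    print(len(cpu_results), len(io_results))

if __name__ == "__main__":  # guard needed when spawning processes on Windows/macOS
    asyncio.run(main())

On a multi-core machine the CPU-bound part should scale roughly with the number of processes, while the 100 I/O-bound tasks should finish in about the time of a single sleep because they overlap on the event loop.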
[ 247, 140, 74, 39, 25, 7, 1, 0, 0 ]
[]
[]
[ "multiprocessing", "multithreading", "python", "python_3.x", "python_asyncio" ]
stackoverflow_0027435284_multiprocessing_multithreading_python_python_3.x_python_asyncio.txt
Q: How to filter the unique values I have 900k rows and 10 unique values. First 100k rows have only one unique value remaining are after 100k rows. I want 100k rows with all the unique values from the 900k rows. I cant able to find solution for this. A: A solution to the problem to this problem: Use the set() function to create a set of the unique values in your data. This will remove any duplicates. Use the random.sample() function to select a random sample of 1 lakh (100000) items from the set of unique values. Use the random.shuffle() function to shuffle the list of 1 lakh items. Use a for loop to iterate over the first 1 lakh rows of your data. For each row, add one of the shuffled unique values from step 3 to the row. import random # Sample data with 9 lakh (900000) rows and 10 unique values data = [ ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], # ... ] # Create a set of the unique values in the data unique_values = set([val for row in data for val in row]) # Select a random sample of 100000 items from the set of unique values sample = random.sample(unique_values, 100000) # Shuffle the list of unique values random.shuffle(sample) # Iterate over the first 100000 rows of the data for i in range(100000): # Add one of the shuffled unique values to the row data[i].append(sample[i]) # The first 100000 rows of data now have all the unique values This approach will randomly select 1 lakh unique values from the original data and add them to the first 1 lakh rows. You can adjust the code to fit your specific needs. With pandas: import pandas as pd # Load the DataFrame df = pd.read_csv("data.csv") # Select one lakh rows with replacement sample = df.sample(n=100000, replace=True) #Remove any duplicate rows df_unique = df.drop_duplicates() # Select one lakh rows without replacement sample = df_unique.sample(n=100000, replace=False)
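Following on from the answer above, a stratified selection with groupby is another way to guarantee that the 100k rows cover every one of the 10 values. This is only a hedged sketch: the column name "value", the helper name, and the assumption of a unique index (e.g. the default RangeIndex) are placeholders invented for the illustration, so adjust them to the real DataFrame.

import pandas as pd

def sample_covering_all_values(df, column, n_rows):
    # Take an (almost) equal share of rows from each unique value, then top up
    # randomly so the result has exactly n_rows while still covering every value.
    groups = df.groupby(column, group_keys=False)
    per_group = max(1, n_rows // groups.ngroups)
    covered = groups.apply(lambda g: g.sample(min(len(g), per_group), random_state=0))
    if len(covered) < n_rows:
        top_up = df.drop(covered.index).sample(n_rows - len(covered), random_state=0)
        covered = pd.concat([covered, top_up])
    return covered.head(n_rows)

# Usage sketch (the column name is an assumption):
# subset = sample_covering_all_values(df, "value", 100_000)
# subset["value"].nunique()   # should now equal df["value"].nunique()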
How to filter the unique values
I have 900k rows and 10 unique values. The first 100k rows contain only one of the unique values; the other values only appear after the first 100k rows. I want to select 100k rows that include all the unique values from the 900k rows. I can't find a solution for this.
[ "A solution to the problem to this problem:\n\nUse the set() function to create a set of the unique values in your\ndata. This will remove any duplicates.\n\nUse the random.sample() function to select a random sample of 1 lakh (100000) items from the set of unique values.\n\nUse the random.shuffle() function to shuffle the list of 1 lakh items.\n\nUse a for loop to iterate over the first 1 lakh rows of your data. For each row, add one of the shuffled unique values from step 3 to the row.\nimport random\n\n # Sample data with 9 lakh (900000) rows and 10 unique values\n data = [\n ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'],\n ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'],\n ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'],\n # ...\n ]\n\n # Create a set of the unique values in the data\n unique_values = set([val for row in data for val in row])\n\n # Select a random sample of 100000 items from the set of unique values\n sample = random.sample(unique_values, 100000)\n\n # Shuffle the list of unique values\n random.shuffle(sample)\n\n # Iterate over the first 100000 rows of the data\n for i in range(100000):\n # Add one of the shuffled unique values to the row\n data[i].append(sample[i])\n\n # The first 100000 rows of data now have all the unique values\n\n\n\nThis approach will randomly select 1 lakh unique values from the original data and add them to the first 1 lakh rows. You can adjust the code to fit your specific needs.\nWith pandas:\nimport pandas as pd\n\n# Load the DataFrame\ndf = pd.read_csv(\"data.csv\")\n\n# Select one lakh rows with replacement\nsample = df.sample(n=100000, replace=True) \n#Remove any duplicate rows\ndf_unique = df.drop_duplicates()\n \n# Select one lakh rows without replacement\nsample = df_unique.sample(n=100000, replace=False)\n\n" ]
[ 0 ]
[]
[]
[ "filter", "pandas", "python", "unique", "unique_values" ]
stackoverflow_0074672464_filter_pandas_python_unique_unique_values.txt
Q: Python Pandas equivalent in JavaScript With this CSV example: Source,col1,col2,col3 foo,1,2,3 bar,3,4,5 The standard method I use Pandas is this: Parse CSV Select columns into a data frame (col1 and col3) Process the column (e.g. avarage the values of col1 and col3) Is there a JavaScript library that does that like Pandas? A: This wiki will summarize and compare many pandas-like Javascript libraries. In general, you should check out the d3 Javascript library. d3 is very useful "swiss army knife" for handling data in Javascript, just like pandas is helpful for Python. You may see d3 used frequently like pandas, even if d3 is not exactly a DataFrame/Pandas replacement (i.e. d3 doesn't have the same API; d3 does not have Series / DataFrame classes with methods that match the pandas behavior) Ahmed's answer explains how d3 can be used to achieve some DataFrame functionality, and some of the libraries below were inspired by things like LearnJsData which uses d3 and lodash. As for DataFrame-style data transformation (splitting, joining, group by etc) , here is a quick list of some of the Javascript libraries. Note some libraries are Node.js aka Server-side Javascript, some are browser-compatible aka client-side Javascript, and some are Typescript. So use the option that's right for you. danfo-js (browser-support AND NodeJS-support) From Vignesh's answer danfo (which is often imported and aliased as dfd); has a basic DataFrame-type data structure, with the ability to plot directly Built by the team at Tensorflow: "One of the main goals of Danfo.js is to bring data processing, machine learning and AI tools to JavaScript developers. ... Open-source libraries like Numpy and Pandas..." pandas is built on top of numpy; likewise danfo-js is built on tensorflow-js please note danfo may not (yet?) support multi-column indexes pandas-js UPDATE The pandas-js repo has not been updated in awhile From STEEL and Feras' answers "pandas.js is an open source (experimental) library mimicking the Python pandas library. It relies on Immutable.js as the NumPy logical equivalent. The main data objects in pandas.js are, like in Python pandas, the Series and the DataFrame." dataframe-js "DataFrame-js provides an immutable data structure for javascript and datascience, the DataFrame, which allows to work on rows and columns with a sql and functional programming inspired api." data-forge Seen in Ashley Davis' answer "JavaScript data transformation and analysis toolkit inspired by Pandas and LINQ." Note the old data-forge JS repository is no longer maintained; now a new repository uses Typescript jsdataframe "Jsdataframe is a JavaScript data wrangling library inspired by data frame functionality in R and Python Pandas." dataframe "explore data by grouping and reducing." SQL Frames "DataFrames meet SQL, in the Browser" "SQL Frames is a low code data management framework that can be directly embedded in the browser to provide rich data visualization and UX. Complex DataFrames can be composed using familiar SQL constructs. With its powerful built-in analytics engine, data sources can come in any shape, form and frequency and they can be analyzed directly within the browser. It allows scaling to big data backends by transpiling the composed DataFrame logic to SQL." 
Then after coming to this question, checking other answers here and doing more searching, I found options like: Apache Arrow in JS Thanks to user Back2Basics suggestion: "Apache Arrow is a columnar memory layout specification for encoding vectors and table-like containers of flat and nested data. Apache Arrow is the emerging standard for large in-memory columnar data (Spark, Pandas, Drill, Graphistry, ...)" polars Polars is a blazingly fast DataFrames library implemented in Rust using Apache Arrow Columnar Format as memory model. Observable At first glance, seems like a JS alternative to the IPython/Jupyter "notebooks" Observable's page promises: "Reactive programming", a "Community", on a "Web Platform" See 5 minute intro here portal.js (formerly recline; from Rufus' answer) MAY BE OUTDATED: Does not use a "DataFrame" API MAY BE OUTDATED: Instead emphasizes its "Multiview" (the UI) API, (similar to jQuery/DOM model) which doesn't require jQuery but does require a browser! More examples MAY BE OUTDATED: Also emphasizes its MVC-ish architecture; including back-end stuff (i.e. database connections) js-data Really more of an ORM! Most of its modules correspond to different data storage questions (js-data-mongodb, js-data-redis, js-data-cloud-datastore), sorting, filtering, etc. On plus-side does work on Node.js as a first-priority; "Works in Node.js and in the Browser." miso (another suggestion from Rufus) Impressive backers like Guardian and bocoup. AlaSQL "AlaSQL" is an open source SQL database for Javascript with a strong focus on query speed and data source flexibility for both relational data and schemaless data. It works in your browser, Node.js, and Cordova." Some thought experiments: "Scaling a DataFrame in Javascript" - Gary Sieling Here are the criteria we used to consider the above choices General Criteria Language (NodeJS vs browser JS vs Typescript) Dependencies (i.e. if it uses an underlying library / AJAX/remote API's) Actively supported (active user-base, active source repository, etc) Size/speed of JS library Panda's criterias in its R comparison Performance Functionality/flexibility Ease-of-use Similarity to Pandas / Dataframe API's Specifically hits on their main features Data-science emphasis Built-in visualization functions Demonstrated integration in combination with other tools like Jupyter (interactive notebooks), etc A: I've been working on a data wrangling library for JavaScript called data-forge. It's inspired by LINQ and Pandas. It can be installed like this: npm install --save data-forge Your example would work like this: var csvData = "Source,col1,col2,col3\n" + "foo,1,2,3\n" + "bar,3,4,5\n"; var dataForge = require('data-forge'); var dataFrame = dataForge.fromCSV(csvData) .parseInts([ "col1", "col2", "col3" ]) ; If your data was in a CSV file you could load it like this: var dataFrame = dataForge.readFileSync(fileName) .parseCSV() .parseInts([ "col1", "col2", "col3" ]) ; You can use the select method to transform rows. You can extract a column using getSeries then use the select method to transform values in that column. You get your data back out of the data-frame like this: var data = dataFrame.toArray(); To average a column: var avg = dataFrame.getSeries("col1").average(); There is much more you can do with this. You can find more documentation on npm. A: Ceaveat The following is applicable only to d3 v3, and not the latest d4v4! 
I am partial to d3.js, and while it won't be a total replacement for Pandas, if you spend some time learning its paradigm, it should be able to take care of all your data wrangling for you. (And if you wind up wanting to display results in the browser, it's ideally suited to that.) Example. My CSV file data.csv: name,age,color Mickey,65,black Donald,58,white Pluto,64,orange In the same directory, create an index.html containing the following: <!DOCTYPE html> <html> <head> <meta charset="utf-8"/> <title>My D3 demo</title> <script src="http://d3js.org/d3.v3.min.js" charset="utf-8"></script> </head> <body> <script charset="utf-8" src="demo.js"></script> </body> </html> and also a demo.js file containing the following: d3.csv('/data.csv', // How to format each row. Since the CSV file has a header, `row` will be // an object with keys derived from the header. function(row) { return {name : row.name, age : +row.age, color : row.color}; }, // Callback to run once all data's loaded and ready. function(data) { // Log the data to the JavaScript console console.log(data); // Compute some interesting results var averageAge = data.reduce(function(prev, curr) { return prev + curr.age; }, 0) / data.length; // Also, display it var ulSelection = d3.select('body').append('ul'); var valuesSelection = ulSelection.selectAll('li').data(data).enter().append('li').text( function(d) { return d.age; }); var totalSelection = ulSelection.append('li').text('Average: ' + averageAge); }); In the directory, run python -m SimpleHTTPServer 8181, and open http://localhost:8181 in your browser to see a simple listing of the ages and their average. This simple example shows a few relevant features of d3: Excellent support for ingesting online data (CSV, TSV, JSON, etc.) Data wrangling smarts baked in Data-driven DOM manipulation (maybe the hardest thing to wrap one's head around): your data gets transformed into DOM elements. A: Pandas.js at the moment is an experimental library, but seems very promising it uses under the hood immutable.js and NumpPy logic, both data objects series and DataFrame are there.. 10-Feb-2021 Update as @jarthur mentioned it seems no update on this repo for last 4 years A: @neversaint your wait is over. say welcome to Danfo.js which is pandas like Javascript library built on tensorflow.js and supports tensors out of the box. This means you can convert danfo data structure to Tensors. And you can do groupby, merging, joining, plotting and other data processing. A: Below is Python numpy and pandas ``` import numpy as np import pandas as pd data_frame = pd.DataFrame(np.random.randn(5, 4), ['A', 'B', 'C', 'D', 'E'], [1, 2, 3, 4]) data_frame[5] = np.random.randint(1, 50, 5) print(data_frame.loc[['C', 'D'], [2, 3]]) # axis 1 = Y | 0 = X data_frame.drop(5, axis=1, inplace=True) print(data_frame) ``` The same can be achieved in JavaScript* [numjs works only with Node.js] But D3.js has much advanced Data file set options. Both numjs and Pandas-js still in works.. import np from 'numjs'; import { DataFrame } from 'pandas-js'; const df = new DataFrame(np.random.randn(5, 4), ['A', 'B', 'C', 'D', 'E'], [1, 2, 3, 4]) // df /* 1 2 3 4 A 0.023126 1.078130 -0.521409 -1.480726 B 0.920194 -0.201019 0.028180 0.558041 C -0.650564 -0.505693 -0.533010 0.441858 D -0.973549 0.095626 -1.302843 1.109872 E -0.989123 -1.382969 -1.682573 -0.637132 */ A: I think the closest thing are libraries like: ReclineJS Miso Project Dataset Recline in particular has a Dataset object with a structure somewhat similar to Pandas data frames. 
It then allows you to connect your data with "Views" such as a data grid, graphing, maps etc. Views are usually thin wrappers around existing best of breed visualization libraries such as D3, Flot, SlickGrid etc. Here's an example for Recline: // Load some data var dataset = recline.Model.Dataset({ records: [ { value: 1, date: '2012-08-07' }, { value: 5, b: '2013-09-07' } ] // Load CSV data instead // (And Recline has support for many more data source types) // url: 'my-local-csv-file.csv', // backend: 'csv' }); // get an element from your HTML for the viewer var $el = $('#data-viewer'); var allInOneDataViewer = new recline.View.MultiView({ model: dataset, el: $el }); // Your new Data Viewer will be live! A: It's pretty easy to parse CSV in javascript because each line's already essentially a javascript array. If you load your csv into an array of strings (one per line) it's pretty easy to load an array of arrays with the values: var pivot = function(data){ var result = []; for (var i = 0; i < data.length; i++){ for (var j=0; j < data[i].length; j++){ if (i === 0){ result[j] = []; } result[j][i] = data[i][j]; } } return result; }; var getData = function() { var csvString = $(".myText").val(); var csvLines = csvString.split(/\n?$/m); var dataTable = []; for (var i = 0; i < csvLines.length; i++){ var values; eval("values = [" + csvLines[i] + "]"); dataTable[i] = values; } return pivot(dataTable); }; Then getData() returns a multidimensional array of values by column. I've demonstrated this in a jsFiddle for you. Of course, you can't do it quite this easily if you don't trust the input - if there could be script in your data which eval might pick up, etc. A: Here is an dynamic approach assuming an existing header on line 1. The csv is loaded with d3.js. function csvToColumnArrays(csv) { var mainObj = {}, header = Object.keys(csv[0]); for (var i = 0; i < header.length; i++) { mainObj[header[i]] = []; }; csv.map(function(d) { for (key in mainObj) { mainObj[key].push(d[key]) } }); return mainObj; } d3.csv(path, function(csv) { var df = csvToColumnArrays(csv); }); Then you are able to access each column of the data similar an R, python or Matlab dataframe with df.column_header[row_number]. A: Arquero is a library for handling relational data, with syntax similar to the popular R package dplyr (which is a sort of SQL-like). https://observablehq.com/@uwdata/introducing-arquero
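To make the comparison with the Python workflow in the question concrete, the sketch below simply strings together the data-forge calls quoted in the answer above (fromCSV, parseInts, getSeries, average) on the question's CSV. Treat it as an illustration of the shape of the API rather than a tested program, and check the method names against the data-forge version you install.

var dataForge = require('data-forge');

var csvData =
    "Source,col1,col2,col3\n" +
    "foo,1,2,3\n" +
    "bar,3,4,5\n";

// Parse the CSV and turn the numeric columns into integers.
var dataFrame = dataForge
    .fromCSV(csvData)
    .parseInts(["col1", "col2", "col3"]);

// Average the two columns of interest.
var avgCol1 = dataFrame.getSeries("col1").average(); // 2 for the sample data
var avgCol3 = dataFrame.getSeries("col3").average(); // 4 for the sample data

console.log(dataFrame.toArray());
console.log(avgCol1, avgCol3);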
Python Pandas equivalent in JavaScript
With this CSV example: Source,col1,col2,col3 foo,1,2,3 bar,3,4,5 The standard way I work in Pandas is this: Parse the CSV. Select columns into a data frame (col1 and col3). Process the columns (e.g. average the values of col1 and col3). Is there a JavaScript library that does that, like Pandas?
[ "This wiki will summarize and compare many pandas-like Javascript libraries.\nIn general, you should check out the d3 Javascript library. d3 is very useful \"swiss army knife\" for handling data in Javascript, just like pandas is helpful for Python. You may see d3 used frequently like pandas, even if d3 is not exactly a DataFrame/Pandas replacement (i.e. d3 doesn't have the same API; d3 does not have Series / DataFrame classes with methods that match the pandas behavior)\nAhmed's answer explains how d3 can be used to achieve some DataFrame functionality, and some of the libraries below were inspired by things like LearnJsData which uses d3 and lodash.\nAs for DataFrame-style data transformation (splitting, joining, group by etc) , here is a quick list of some of the Javascript libraries.\nNote some libraries are Node.js aka Server-side Javascript, some are browser-compatible aka client-side Javascript, and some are Typescript. So use the option that's right for you.\n\ndanfo-js (browser-support AND NodeJS-support)\n\nFrom Vignesh's answer\n\ndanfo (which is often imported and aliased as dfd); has a basic DataFrame-type data structure, with the ability to plot directly\n\nBuilt by the team at Tensorflow: \"One of the main goals of Danfo.js is to bring data processing, machine learning and AI tools to JavaScript developers. ... Open-source libraries like Numpy and Pandas...\"\n\npandas is built on top of numpy; likewise danfo-js is built on tensorflow-js\n\nplease note danfo may not (yet?) support multi-column indexes\n\n\n\npandas-js\n\nUPDATE The pandas-js repo has not been updated in awhile\nFrom STEEL and Feras' answers\n\"pandas.js is an open source (experimental) library mimicking the Python pandas library. It relies on Immutable.js as the NumPy logical equivalent. The main data objects in pandas.js are, like in Python pandas, the Series and the DataFrame.\"\n\n\ndataframe-js\n\n\"DataFrame-js provides an immutable data structure for javascript and datascience, the DataFrame, which allows to work on rows and columns with a sql and functional programming inspired api.\"\n\n\ndata-forge\n\nSeen in Ashley Davis' answer\n\"JavaScript data transformation and analysis toolkit inspired by Pandas and LINQ.\"\nNote the old data-forge JS repository is no longer maintained; now a new repository uses Typescript\n\n\njsdataframe\n\n\"Jsdataframe is a JavaScript data wrangling library inspired by data frame functionality in R and Python Pandas.\"\n\n\ndataframe\n\n\"explore data by grouping and reducing.\"\n\n\nSQL Frames\n\n\"DataFrames meet SQL, in the Browser\"\n\"SQL Frames is a low code data management framework that can be directly embedded in the browser to provide rich data visualization and UX. Complex DataFrames can be composed using familiar SQL constructs. With its powerful built-in analytics engine, data sources can come in any shape, form and frequency and they can be analyzed directly within the browser. It allows scaling to big data backends by transpiling the composed DataFrame logic to SQL.\"\n\n\n\nThen after coming to this question, checking other answers here and doing more searching, I found options like:\n\nApache Arrow in JS\n\nThanks to user Back2Basics suggestion:\n\"Apache Arrow is a columnar memory layout specification for encoding vectors and table-like containers of flat and nested data. 
Apache Arrow is the emerging standard for large in-memory columnar data (Spark, Pandas, Drill, Graphistry, ...)\"\n\n\npolars\n\nPolars is a blazingly fast DataFrames library implemented in Rust using Apache Arrow Columnar Format as memory model.\n\n\nObservable\n\nAt first glance, seems like a JS alternative to the IPython/Jupyter \"notebooks\"\nObservable's page promises: \"Reactive programming\", a \"Community\", on a \"Web Platform\"\nSee 5 minute intro here\n\n\nportal.js (formerly recline; from Rufus' answer)\n\nMAY BE OUTDATED: Does not use a \"DataFrame\" API\nMAY BE OUTDATED: Instead emphasizes its \"Multiview\" (the UI) API, (similar to jQuery/DOM model) which doesn't require jQuery but does require a browser! More examples\nMAY BE OUTDATED: Also emphasizes its MVC-ish architecture; including back-end stuff (i.e. database connections)\n\n\njs-data\n\nReally more of an ORM! Most of its modules correspond to different data storage questions (js-data-mongodb, js-data-redis, js-data-cloud-datastore), sorting, filtering, etc.\nOn plus-side does work on Node.js as a first-priority; \"Works in Node.js and in the Browser.\"\n\n\nmiso (another suggestion from Rufus)\n\nImpressive backers like Guardian and bocoup.\n\n\nAlaSQL\n\n\"AlaSQL\" is an open source SQL database for Javascript with a strong focus on query speed and data source flexibility for both relational data and schemaless data. It works in your browser, Node.js, and Cordova.\"\n\n\nSome thought experiments:\n\n\"Scaling a DataFrame in Javascript\" - Gary Sieling\n\n\n\nHere are the criteria we used to consider the above choices\n\nGeneral Criteria\n\nLanguage (NodeJS vs browser JS vs Typescript)\nDependencies (i.e. if it uses an underlying library / AJAX/remote API's)\nActively supported (active user-base, active source repository, etc)\nSize/speed of JS library\n\n\nPanda's criterias in its R comparison\n\nPerformance\nFunctionality/flexibility\nEase-of-use\n\n\nSimilarity to Pandas / Dataframe API's\n\nSpecifically hits on their main features\nData-science emphasis\nBuilt-in visualization functions\nDemonstrated integration in combination with other tools like Jupyter\n(interactive notebooks), etc\n\n\n\n", "I've been working on a data wrangling library for JavaScript called data-forge. It's inspired by LINQ and Pandas.\nIt can be installed like this: \nnpm install --save data-forge\n\nYour example would work like this:\nvar csvData = \"Source,col1,col2,col3\\n\" +\n \"foo,1,2,3\\n\" +\n \"bar,3,4,5\\n\";\n\nvar dataForge = require('data-forge');\nvar dataFrame = \n dataForge.fromCSV(csvData)\n .parseInts([ \"col1\", \"col2\", \"col3\" ])\n ;\n\nIf your data was in a CSV file you could load it like this:\nvar dataFrame = dataForge.readFileSync(fileName)\n .parseCSV()\n .parseInts([ \"col1\", \"col2\", \"col3\" ])\n ;\n\nYou can use the select method to transform rows.\nYou can extract a column using getSeries then use the select method to transform values in that column.\nYou get your data back out of the data-frame like this:\nvar data = dataFrame.toArray();\n\nTo average a column:\n var avg = dataFrame.getSeries(\"col1\").average();\n\nThere is much more you can do with this.\nYou can find more documentation on npm.\n", "Ceaveat The following is applicable only to d3 v3, and not the latest d4v4!\nI am partial to d3.js, and while it won't be a total replacement for Pandas, if you spend some time learning its paradigm, it should be able to take care of all your data wrangling for you. 
(And if you wind up wanting to display results in the browser, it's ideally suited to that.)\nExample. My CSV file data.csv:\nname,age,color\nMickey,65,black\nDonald,58,white\nPluto,64,orange\n\nIn the same directory, create an index.html containing the following:\n<!DOCTYPE html>\n<html>\n <head>\n <meta charset=\"utf-8\"/>\n <title>My D3 demo</title>\n\n <script src=\"http://d3js.org/d3.v3.min.js\" charset=\"utf-8\"></script>\n </head>\n <body>\n\n <script charset=\"utf-8\" src=\"demo.js\"></script>\n </body>\n</html>\n\nand also a demo.js file containing the following:\nd3.csv('/data.csv',\n\n // How to format each row. Since the CSV file has a header, `row` will be\n // an object with keys derived from the header.\n function(row) {\n return {name : row.name, age : +row.age, color : row.color};\n },\n\n // Callback to run once all data's loaded and ready.\n function(data) {\n // Log the data to the JavaScript console\n console.log(data);\n\n // Compute some interesting results\n var averageAge = data.reduce(function(prev, curr) {\n return prev + curr.age;\n }, 0) / data.length;\n\n // Also, display it\n var ulSelection = d3.select('body').append('ul');\n var valuesSelection =\n ulSelection.selectAll('li').data(data).enter().append('li').text(\n function(d) { return d.age; });\n var totalSelection =\n ulSelection.append('li').text('Average: ' + averageAge);\n });\n\nIn the directory, run python -m SimpleHTTPServer 8181, and open http://localhost:8181 in your browser to see a simple listing of the ages and their average.\nThis simple example shows a few relevant features of d3:\n\nExcellent support for ingesting online data (CSV, TSV, JSON, etc.)\nData wrangling smarts baked in\nData-driven DOM manipulation (maybe the hardest thing to wrap one's head around): your data gets transformed into DOM elements.\n\n", "Pandas.js\nat the moment is an experimental library, but seems very promising it uses under the hood immutable.js and NumpPy logic, both data objects series and DataFrame are there..\n10-Feb-2021 Update as @jarthur mentioned it seems no update on this repo for last 4 years\n", "@neversaint your wait is over. say welcome to Danfo.js which is pandas like Javascript library built on tensorflow.js and supports tensors out of the box. This means you can convert danfo data structure to Tensors. And you can do groupby, merging, joining, plotting and other data processing.\n", "Below is Python numpy and pandas\n```\nimport numpy as np\nimport pandas as pd\n\ndata_frame = pd.DataFrame(np.random.randn(5, 4), ['A', 'B', 'C', 'D', 'E'], [1, 2, 3, 4])\n\ndata_frame[5] = np.random.randint(1, 50, 5)\n\nprint(data_frame.loc[['C', 'D'], [2, 3]])\n\n# axis 1 = Y | 0 = X\ndata_frame.drop(5, axis=1, inplace=True)\n\nprint(data_frame)\n\n```\nThe same can be achieved in JavaScript* [numjs works only with Node.js]\nBut D3.js has much advanced Data file set options. 
Both numjs and Pandas-js still in works..\n\n\nimport np from 'numjs';\r\nimport { DataFrame } from 'pandas-js';\r\n\r\nconst df = new DataFrame(np.random.randn(5, 4), ['A', 'B', 'C', 'D', 'E'], [1, 2, 3, 4])\r\n\r\n// df\r\n/*\r\n\r\n 1 2 3 4\r\nA 0.023126 1.078130 -0.521409 -1.480726\r\nB 0.920194 -0.201019 0.028180 0.558041\r\nC -0.650564 -0.505693 -0.533010 0.441858\r\nD -0.973549 0.095626 -1.302843 1.109872\r\nE -0.989123 -1.382969 -1.682573 -0.637132\r\n\r\n*/\n\n\n\n", "I think the closest thing are libraries like:\n\nReclineJS\nMiso Project Dataset\n\nRecline in particular has a Dataset object with a structure somewhat similar to Pandas data frames. It then allows you to connect your data with \"Views\" such as a data grid, graphing, maps etc. Views are usually thin wrappers around existing best of breed visualization libraries such as D3, Flot, SlickGrid etc.\nHere's an example for Recline:\n\n// Load some data\nvar dataset = recline.Model.Dataset({\n records: [\n { value: 1, date: '2012-08-07' },\n { value: 5, b: '2013-09-07' }\n ]\n // Load CSV data instead\n // (And Recline has support for many more data source types)\n // url: 'my-local-csv-file.csv',\n // backend: 'csv'\n});\n\n// get an element from your HTML for the viewer\nvar $el = $('#data-viewer');\n\nvar allInOneDataViewer = new recline.View.MultiView({\n model: dataset,\n el: $el\n});\n// Your new Data Viewer will be live!\n\n", "It's pretty easy to parse CSV in javascript because each line's already essentially a javascript array. If you load your csv into an array of strings (one per line) it's pretty easy to load an array of arrays with the values:\nvar pivot = function(data){\n var result = [];\n for (var i = 0; i < data.length; i++){\n for (var j=0; j < data[i].length; j++){\n if (i === 0){\n result[j] = [];\n }\n result[j][i] = data[i][j];\n }\n }\n return result;\n};\n\nvar getData = function() {\n var csvString = $(\".myText\").val();\n var csvLines = csvString.split(/\\n?$/m);\n\n var dataTable = [];\n\n for (var i = 0; i < csvLines.length; i++){\n var values;\n eval(\"values = [\" + csvLines[i] + \"]\");\n dataTable[i] = values;\n }\n\n return pivot(dataTable);\n};\n\nThen getData() returns a multidimensional array of values by column.\nI've demonstrated this in a jsFiddle for you.\nOf course, you can't do it quite this easily if you don't trust the input - if there could be script in your data which eval might pick up, etc.\n", "Here is an dynamic approach assuming an existing header on line 1. The csv is loaded with d3.js. \nfunction csvToColumnArrays(csv) {\n\n var mainObj = {},\n header = Object.keys(csv[0]);\n\n for (var i = 0; i < header.length; i++) {\n\n mainObj[header[i]] = [];\n };\n\n csv.map(function(d) {\n\n for (key in mainObj) {\n mainObj[key].push(d[key])\n }\n\n }); \n\n return mainObj;\n\n}\n\n\nd3.csv(path, function(csv) {\n\n var df = csvToColumnArrays(csv); \n\n});\n\nThen you are able to access each column of the data similar an R, python or Matlab dataframe with df.column_header[row_number]. \n", "Arquero is a library for handling relational data, with syntax similar to the popular R package dplyr (which is a sort of SQL-like).\nhttps://observablehq.com/@uwdata/introducing-arquero\n" ]
[ 193, 11, 8, 7, 7, 6, 3, 1, 1, 0 ]
[]
[]
[ "javascript", "pandas", "python" ]
stackoverflow_0030610675_javascript_pandas_python.txt
Q: beautiful soup to grab data from table I had recently asked for help using Beautiful Soup to grab forex prices from a site. The data was hidden in a span, and I was lucky enough to get help from two people who were amazing and helped me work through it. I have since found a different site that I want to scrape from; this time there is no span, and the text is in tr and td elements of a table. https://www.wsj.com/market-data/quotes/fx/AUDNZD/historical-prices is the website. As you can see, the high and low prices go back, I believe, 30 days on this table. I would like to grab the whole table so I can use the data as needed for different calculations. When I attempt to grab the data it still just comes back as an empty list, and I have tried a lot of different places to grab it from. Can someone not only help me get what I want but also explain what I am doing wrong, so I can learn to use Beautiful Soup for myself and don't have to keep asking for help? Last time, when I grabbed from the span, it saved the data in a list of lists that I was able to use, save as variables for different days, and then do calculations with. That is what I am attempting to do again. '''import requests from bs4 import BeautifulSoup import re result = [] URL = "https://www.wsj.com/market-data/quotes/fx/AUDNZD/historical-prices" page = requests.get(URL) soup = BeautifulSoup(page.content, "html.parser") table = soup.select('cr_dataTable') print(table)''' I did not save all my attempts at the different ways I tried; I literally got down to this super basic attempt just to try to get a response back from somewhere I am grabbing, so I could then continue breaking it down to just the text. Everything I put in that soup.select() came back as an empty list, so I got to a point where I decided I must not be doing any of this right. The soup is grabbing the HTML, though. My find_all(), find() and soup.select() calls: nothing seemed to work or get a response back. Please advise on how I am going about this wrong. This simple code here should come back with lots of data for all the markup in the table, correct? Then I can go through it to grab the text I want? '''import requests from bs4 import BeautifulSoup import re result = [] URL = "https://www.wsj.com/market-data/quotes/fx/AUDNZD/historical-prices" page = requests.get(URL) soup = BeautifulSoup(page.content, "html.parser") table = soup.find('table', class_='cr_dataTable') print(table)''' It comes back None! A: You hadn't added headers, so the request was fetching the output served to robots.
Full Code import requests from bs4 import BeautifulSoup import json import os result = [] headers = { 'user-agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.99 Mobile Safari/537.36', } r = URL = "https://www.wsj.com/market-data/quotes/fx/AUDNZD/historical-prices" page = requests.get(URL, headers=headers) soup = BeautifulSoup(page.content, "html.parser") div = soup.find('div', {"id": "historical_data_table"}) table = div.find('table', {"class": "cr_dataTable"}) for i in table.findAll("tr"): row = i row = row.findAll("td") DATE = row[0].text OPEN = row[1].text HIGH = row[2].text LOW = row[3].text CLOSE = row[4].text output = {"DATE": DATE, "OPEN": OPEN, "HIGH": HIGH, "LOW": LOW, "CLOSE": CLOSE} result.append(output) if (os.path.exists("Data.json") == False): f = open("Data.json", "w") json.dump(result, f, indent=4) else: with open('Data.json', 'w') as f: json.dump(result, f, indent=4) Output [ { "DATE": "12/02/22", "OPEN": "1.0691", "HIGH": "1.0709", "LOW": "1.0568", "CLOSE": "1.0602" }, { "DATE": "12/01/22", "OPEN": "1.0768", "HIGH": "1.0792", "LOW": "1.0669", "CLOSE": "1.0692" }, { "DATE": "11/30/22", "OPEN": "1.0787", "HIGH": "1.0813", "LOW": "1.0737", "CLOSE": "1.0783" }, { "DATE": "11/29/22", "OPEN": "1.0794", "HIGH": "1.0820", "LOW": "1.0773", "CLOSE": "1.0788" }, { "DATE": "11/28/22", "OPEN": "1.0807", "HIGH": "1.0815", "LOW": "1.0752", "CLOSE": "1.0792" }, { "DATE": "11/25/22", "OPEN": "1.0805", "HIGH": "1.0822", "LOW": "1.0782", "CLOSE": "1.0804" }, { "DATE": "11/24/22", "OPEN": "1.0787", "HIGH": "1.0819", "LOW": "1.0765", "CLOSE": "1.0797" }, { "DATE": "11/23/22", "OPEN": "1.0801", "HIGH": "1.0837", "LOW": "1.0747", "CLOSE": "1.0781" }, { "DATE": "11/22/22", "OPEN": "1.0826", "HIGH": "1.0838", "LOW": "1.0781", "CLOSE": "1.0804" }, { "DATE": "11/21/22", "OPEN": "1.0891", "HIGH": "1.0891", "LOW": "1.0799", "CLOSE": "1.0828" }, { "DATE": "11/18/22", "OPEN": "1.0915", "HIGH": "1.0934", "LOW": "1.0833", "CLOSE": "1.0849" }, { "DATE": "11/17/22", "OPEN": "1.0964", "HIGH": "1.0981", "LOW": "1.0912", "CLOSE": "1.0917" }, { "DATE": "11/16/22", "OPEN": "1.0971", "HIGH": "1.0997", "LOW": "1.0941", "CLOSE": "1.0963" }, { "DATE": "11/15/22", "OPEN": "1.0995", "HIGH": "1.1002", "LOW": "1.0946", "CLOSE": "1.0975" }, { "DATE": "11/14/22", "OPEN": "1.0957", "HIGH": "1.1015", "LOW": "1.0953", "CLOSE": "1.0994" }, { "DATE": "11/11/22", "OPEN": "1.0987", "HIGH": "1.1046", "LOW": "1.0949", "CLOSE": "1.0965" }, { "DATE": "11/10/22", "OPEN": "1.0927", "HIGH": "1.0992", "LOW": "1.0913", "CLOSE": "1.0986" }, { "DATE": "11/09/22", "OPEN": "1.0927", "HIGH": "1.0975", "LOW": "1.0901", "CLOSE": "1.0929" }, { "DATE": "11/08/22", "OPEN": "1.0908", "HIGH": "1.0928", "LOW": "1.0882", "CLOSE": "1.0919" }, { "DATE": "11/07/22", "OPEN": "1.0863", "HIGH": "1.0977", "LOW": "1.0863", "CLOSE": "1.0910" }, { "DATE": "11/04/22", "OPEN": "1.0896", "HIGH": "1.0960", "LOW": "1.0877", "CLOSE": "1.0909" }, { "DATE": "11/03/22", "OPEN": "1.0914", "HIGH": "1.0937", "LOW": "1.0883", "CLOSE": "1.0898" }, { "DATE": "11/02/22", "OPEN": "1.0945", "HIGH": "1.0957", "LOW": "1.0902", "CLOSE": "1.0913" }, { "DATE": "11/01/22", "OPEN": "1.1003", "HIGH": "1.1033", "LOW": "1.0930", "CLOSE": "1.0944" }, { "DATE": "10/31/22", "OPEN": "1.1031", "HIGH": "1.1348", "LOW": "1.0989", "CLOSE": "1.1004" }, { "DATE": "10/28/22", "OPEN": "1.1070", "HIGH": "1.1084", "LOW": "1.1012", "CLOSE": "1.1032" }, { "DATE": "10/27/22", "OPEN": "1.1140", "HIGH": "1.1154", "LOW": "1.1058", 
"CLOSE": "1.1072" }, { "DATE": "10/26/22", "OPEN": "1.1130", "HIGH": "1.1176", "LOW": "1.1092", "CLOSE": "1.1133" }, { "DATE": "10/25/22", "OPEN": "1.1089", "HIGH": "1.1122", "LOW": "1.1065", "CLOSE": "1.1111" }, { "DATE": "10/24/22", "OPEN": "1.1124", "HIGH": "1.1124", "LOW": "1.1020", "CLOSE": "1.1085" }, { "DATE": "10/21/22", "OPEN": "1.1063", "HIGH": "1.1102", "LOW": "1.1044", "CLOSE": "1.1077" }, { "DATE": "10/20/22", "OPEN": "1.1056", "HIGH": "1.1094", "LOW": "1.1023", "CLOSE": "1.1062" }, { "DATE": "10/19/22", "OPEN": "1.1100", "HIGH": "1.1107", "LOW": "1.1052", "CLOSE": "1.1055" }, { "DATE": "10/18/22", "OPEN": "1.1151", "HIGH": "1.1210", "LOW": "1.1071", "CLOSE": "1.1101" }, { "DATE": "10/17/22", "OPEN": "1.1138", "HIGH": "1.1193", "LOW": "1.1137", "CLOSE": "1.1161" }, { "DATE": "10/14/22", "OPEN": "1.1176", "HIGH": "1.1191", "LOW": "1.1121", "CLOSE": "1.1151" }, { "DATE": "10/13/22", "OPEN": "1.1192", "HIGH": "1.1215", "LOW": "1.1157", "CLOSE": "1.1163" }, { "DATE": "10/12/22", "OPEN": "1.1235", "HIGH": "1.1244", "LOW": "1.1172", "CLOSE": "1.1188" }, { "DATE": "10/11/22", "OPEN": "1.1318", "HIGH": "1.1328", "LOW": "1.1195", "CLOSE": "1.1237" }, { "DATE": "10/10/22", "OPEN": "1.1367", "HIGH": "1.1370", "LOW": "1.1266", "CLOSE": "1.1317" }, { "DATE": "10/07/22", "OPEN": "1.1322", "HIGH": "1.1376", "LOW": "1.1301", "CLOSE": "1.1358" }, { "DATE": "10/06/22", "OPEN": "1.1309", "HIGH": "1.1355", "LOW": "1.1244", "CLOSE": "1.1327" }, { "DATE": "10/05/22", "OPEN": "1.1348", "HIGH": "1.1381", "LOW": "1.1242", "CLOSE": "1.1308" }, { "DATE": "10/04/22", "OPEN": "1.1386", "HIGH": "1.1426", "LOW": "1.1306", "CLOSE": "1.1349" }, { "DATE": "10/03/22", "OPEN": "1.1460", "HIGH": "1.1460", "LOW": "1.1362", "CLOSE": "1.1388" }, { "DATE": "09/30/22", "OPEN": "1.1387", "HIGH": "1.1444", "LOW": "1.1320", "CLOSE": "1.1439" }, { "DATE": "09/29/22", "OPEN": "1.1382", "HIGH": "1.1417", "LOW": "1.1346", "CLOSE": "1.1350" }, { "DATE": "09/28/22", "OPEN": "1.1417", "HIGH": "1.1495", "LOW": "1.1290", "CLOSE": "1.1385" }, { "DATE": "09/27/22", "OPEN": "1.1453", "HIGH": "1.1466", "LOW": "1.1370", "CLOSE": "1.1419" }, { "DATE": "09/26/22", "OPEN": "1.1365", "HIGH": "1.1465", "LOW": "1.1328", "CLOSE": "1.1454" }, { "DATE": "09/23/22", "OPEN": "1.1365", "HIGH": "1.1378", "LOW": "1.1323", "CLOSE": "1.1373" }, { "DATE": "09/22/22", "OPEN": "1.1329", "HIGH": "1.1373", "LOW": "1.1303", "CLOSE": "1.1366" }, { "DATE": "09/21/22", "OPEN": "1.1342", "HIGH": "1.1363", "LOW": "1.1315", "CLOSE": "1.1334" }, { "DATE": "09/20/22", "OPEN": "1.1284", "HIGH": "1.1365", "LOW": "1.1273", "CLOSE": "1.1347" }, { "DATE": "09/19/22", "OPEN": "1.1221", "HIGH": "1.1295", "LOW": "1.1206", "CLOSE": "1.1289" }, { "DATE": "09/16/22", "OPEN": "1.1232", "HIGH": "1.1256", "LOW": "1.1197", "CLOSE": "1.1218" }, { "DATE": "09/15/22", "OPEN": "1.1247", "HIGH": "1.1261", "LOW": "1.1212", "CLOSE": "1.1233" }, { "DATE": "09/14/22", "OPEN": "1.1255", "HIGH": "1.1255", "LOW": "1.1201", "CLOSE": "1.1239" }, { "DATE": "09/13/22", "OPEN": "1.1218", "HIGH": "1.1259", "LOW": "1.1194", "CLOSE": "1.1223" }, { "DATE": "09/12/22", "OPEN": "1.1186", "HIGH": "1.1240", "LOW": "1.1181", "CLOSE": "1.1225" }, { "DATE": "09/09/22", "OPEN": "1.1156", "HIGH": "1.1215", "LOW": "1.1139", "CLOSE": "1.1212" }, { "DATE": "09/08/22", "OPEN": "1.1142", "HIGH": "1.1157", "LOW": "1.1115", "CLOSE": "1.1151" }, { "DATE": "09/07/22", "OPEN": "1.1153", "HIGH": "1.1181", "LOW": "1.1134", "CLOSE": "1.1141" }, { "DATE": "09/06/22", "OPEN": "1.1150", "HIGH": "1.1173", "LOW": "1.1127", 
"CLOSE": "1.1152" }, { "DATE": "09/05/22", "OPEN": "1.1113", "HIGH": "1.1167", "LOW": "1.1113", "CLOSE": "1.1153" } ]
beautiful soup to grab data from table
I had recently asked for help using beautiful soup to grab forex prices from a site. the data was hidden in the span. I was lucky enough to get help from two people who were amazing and helped me work through it. I have since found a different site that i want to scrape from, this time there is no span the text is in tr and td from the table. https://www.wsj.com/market-data/quotes/fx/AUDNZD/historical-prices is the website.. as you can see the high and low prices go back i believe 30 days on this table i would like to grab the whole table so i can use the data as needed for different calculations when i attempt to grab the data its still just coming back as an empty list.. and i have tried alot of different places to grab it from. Can someone not only help me get what i want but explain what im doing wrong so i can learn to use the beautiful soup for myself so i dont have to keep asking for help. last time i grabbed from span it saved it in a list of lists that i was able to use and save as variables for differnt days and then do calculations with it. this is what i am attempting to do again. '''import requests from bs4 import BeautifulSoup import re result = [] URL = "https://www.wsj.com/market-data/quotes/fx/AUDNZD/historical-prices" page = requests.get(URL) soup = BeautifulSoup(page.content, "html.parser") table = soup.select('cr_dataTable') print(table)''' i did not save all my attempts at different ways i tried.. i literally got down to this super basic attempt to just try to get a response back from somewhere that im grabbing so i could then continue into breaking it down to just the text.. everything i put in that soup.select() came back empty list.. so i kinda just got to a point where i decided i must not be doing any of this right. the soup is grabbing the html though. my find_all and find() and soup.select .. nothing seemed to work or get a repsonse back. please advise on how i am going about this wrong.. this simple code here should come back with lots of data for all the code in the table correct.. then i can go through it to grab text and grab what i want?? '''import requests from bs4 import BeautifulSoup import re result = [] URL = "https://www.wsj.com/market-data/quotes/fx/AUDNZD/historical-prices" page = requests.get(URL) soup = BeautifulSoup(page.content, "html.parser") table = soup.find('table', class_='cr_dataTable') print(table)''' comes back none!
[ "You hadn't added headers thus the request was fetching output for robots.\nFull Code\nimport requests\nfrom bs4 import BeautifulSoup\nimport json\nimport os\nresult = []\nheaders = {\n 'user-agent':\n 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.99 Mobile Safari/537.36',\n}\nr = URL = \"https://www.wsj.com/market-data/quotes/fx/AUDNZD/historical-prices\"\npage = requests.get(URL, headers=headers)\nsoup = BeautifulSoup(page.content, \"html.parser\")\ndiv = soup.find('div', {\"id\": \"historical_data_table\"})\ntable = div.find('table', {\"class\": \"cr_dataTable\"})\nfor i in table.findAll(\"tr\"):\n row = i\n row = row.findAll(\"td\")\n DATE = row[0].text\n OPEN = row[1].text\n HIGH = row[2].text\n LOW = row[3].text\n CLOSE = row[4].text\n output = {\"DATE\": DATE, \"OPEN\": OPEN,\n \"HIGH\": HIGH, \"LOW\": LOW, \"CLOSE\": CLOSE}\n result.append(output)\nif (os.path.exists(\"Data.json\") == False):\n f = open(\"Data.json\", \"w\")\n json.dump(result, f, indent=4)\nelse:\n with open('Data.json', 'w') as f:\n json.dump(result, f, indent=4)\n\nOutput\n[\n {\n \"DATE\": \"12/02/22\",\n \"OPEN\": \"1.0691\",\n \"HIGH\": \"1.0709\",\n \"LOW\": \"1.0568\",\n \"CLOSE\": \"1.0602\"\n },\n {\n \"DATE\": \"12/01/22\",\n \"OPEN\": \"1.0768\",\n \"HIGH\": \"1.0792\",\n \"LOW\": \"1.0669\",\n \"CLOSE\": \"1.0692\"\n },\n {\n \"DATE\": \"11/30/22\",\n \"OPEN\": \"1.0787\",\n \"HIGH\": \"1.0813\",\n \"LOW\": \"1.0737\",\n \"CLOSE\": \"1.0783\"\n },\n {\n \"DATE\": \"11/29/22\",\n \"OPEN\": \"1.0794\",\n \"HIGH\": \"1.0820\",\n \"LOW\": \"1.0773\",\n \"CLOSE\": \"1.0788\"\n },\n {\n \"DATE\": \"11/28/22\",\n \"OPEN\": \"1.0807\",\n \"HIGH\": \"1.0815\",\n \"LOW\": \"1.0752\",\n \"CLOSE\": \"1.0792\"\n },\n {\n \"DATE\": \"11/25/22\",\n \"OPEN\": \"1.0805\",\n \"HIGH\": \"1.0822\",\n \"LOW\": \"1.0782\",\n \"CLOSE\": \"1.0804\"\n },\n {\n \"DATE\": \"11/24/22\",\n \"OPEN\": \"1.0787\",\n \"HIGH\": \"1.0819\",\n \"LOW\": \"1.0765\",\n \"CLOSE\": \"1.0797\"\n },\n {\n \"DATE\": \"11/23/22\",\n \"OPEN\": \"1.0801\",\n \"HIGH\": \"1.0837\",\n \"LOW\": \"1.0747\",\n \"CLOSE\": \"1.0781\"\n },\n {\n \"DATE\": \"11/22/22\",\n \"OPEN\": \"1.0826\",\n \"HIGH\": \"1.0838\",\n \"LOW\": \"1.0781\",\n \"CLOSE\": \"1.0804\"\n },\n {\n \"DATE\": \"11/21/22\",\n \"OPEN\": \"1.0891\",\n \"HIGH\": \"1.0891\",\n \"LOW\": \"1.0799\",\n \"CLOSE\": \"1.0828\"\n },\n {\n \"DATE\": \"11/18/22\",\n \"OPEN\": \"1.0915\",\n \"HIGH\": \"1.0934\",\n \"LOW\": \"1.0833\",\n \"CLOSE\": \"1.0849\"\n },\n {\n \"DATE\": \"11/17/22\",\n \"OPEN\": \"1.0964\",\n \"HIGH\": \"1.0981\",\n \"LOW\": \"1.0912\",\n \"CLOSE\": \"1.0917\"\n },\n {\n \"DATE\": \"11/16/22\",\n \"OPEN\": \"1.0971\",\n \"HIGH\": \"1.0997\",\n \"LOW\": \"1.0941\",\n \"CLOSE\": \"1.0963\"\n },\n {\n \"DATE\": \"11/15/22\",\n \"OPEN\": \"1.0995\",\n \"HIGH\": \"1.1002\",\n \"LOW\": \"1.0946\",\n \"CLOSE\": \"1.0975\"\n },\n {\n \"DATE\": \"11/14/22\",\n \"OPEN\": \"1.0957\",\n \"HIGH\": \"1.1015\",\n \"LOW\": \"1.0953\",\n \"CLOSE\": \"1.0994\"\n },\n {\n \"DATE\": \"11/11/22\",\n \"OPEN\": \"1.0987\",\n \"HIGH\": \"1.1046\",\n \"LOW\": \"1.0949\",\n \"CLOSE\": \"1.0965\"\n },\n {\n \"DATE\": \"11/10/22\",\n \"OPEN\": \"1.0927\",\n \"HIGH\": \"1.0992\",\n \"LOW\": \"1.0913\",\n \"CLOSE\": \"1.0986\"\n },\n {\n \"DATE\": \"11/09/22\",\n \"OPEN\": \"1.0927\",\n \"HIGH\": \"1.0975\",\n \"LOW\": \"1.0901\",\n \"CLOSE\": \"1.0929\"\n },\n {\n \"DATE\": \"11/08/22\",\n \"OPEN\": \"1.0908\",\n \"HIGH\": 
\"1.0928\",\n \"LOW\": \"1.0882\",\n \"CLOSE\": \"1.0919\"\n },\n {\n \"DATE\": \"11/07/22\",\n \"OPEN\": \"1.0863\",\n \"HIGH\": \"1.0977\",\n \"LOW\": \"1.0863\",\n \"CLOSE\": \"1.0910\"\n },\n {\n \"DATE\": \"11/04/22\",\n \"OPEN\": \"1.0896\",\n \"HIGH\": \"1.0960\",\n \"LOW\": \"1.0877\",\n \"CLOSE\": \"1.0909\"\n },\n {\n \"DATE\": \"11/03/22\",\n \"OPEN\": \"1.0914\",\n \"HIGH\": \"1.0937\",\n \"LOW\": \"1.0883\",\n \"CLOSE\": \"1.0898\"\n },\n {\n \"DATE\": \"11/02/22\",\n \"OPEN\": \"1.0945\",\n \"HIGH\": \"1.0957\",\n \"LOW\": \"1.0902\",\n \"CLOSE\": \"1.0913\"\n },\n {\n \"DATE\": \"11/01/22\",\n \"OPEN\": \"1.1003\",\n \"HIGH\": \"1.1033\",\n \"LOW\": \"1.0930\",\n \"CLOSE\": \"1.0944\"\n },\n {\n \"DATE\": \"10/31/22\",\n \"OPEN\": \"1.1031\",\n \"HIGH\": \"1.1348\",\n \"LOW\": \"1.0989\",\n \"CLOSE\": \"1.1004\"\n },\n {\n \"DATE\": \"10/28/22\",\n \"OPEN\": \"1.1070\",\n \"HIGH\": \"1.1084\",\n \"LOW\": \"1.1012\",\n \"CLOSE\": \"1.1032\"\n },\n {\n \"DATE\": \"10/27/22\",\n \"OPEN\": \"1.1140\",\n \"HIGH\": \"1.1154\",\n \"LOW\": \"1.1058\",\n \"CLOSE\": \"1.1072\"\n },\n {\n \"DATE\": \"10/26/22\",\n \"OPEN\": \"1.1130\",\n \"HIGH\": \"1.1176\",\n \"LOW\": \"1.1092\",\n \"CLOSE\": \"1.1133\"\n },\n {\n \"DATE\": \"10/25/22\",\n \"OPEN\": \"1.1089\",\n \"HIGH\": \"1.1122\",\n \"LOW\": \"1.1065\",\n \"CLOSE\": \"1.1111\"\n },\n {\n \"DATE\": \"10/24/22\",\n \"OPEN\": \"1.1124\",\n \"HIGH\": \"1.1124\",\n \"LOW\": \"1.1020\",\n \"CLOSE\": \"1.1085\"\n },\n {\n \"DATE\": \"10/21/22\",\n \"OPEN\": \"1.1063\",\n \"HIGH\": \"1.1102\",\n \"LOW\": \"1.1044\",\n \"CLOSE\": \"1.1077\"\n },\n {\n \"DATE\": \"10/20/22\",\n \"OPEN\": \"1.1056\",\n \"HIGH\": \"1.1094\",\n \"LOW\": \"1.1023\",\n \"CLOSE\": \"1.1062\"\n },\n {\n \"DATE\": \"10/19/22\",\n \"OPEN\": \"1.1100\",\n \"HIGH\": \"1.1107\",\n \"LOW\": \"1.1052\",\n \"CLOSE\": \"1.1055\"\n },\n {\n \"DATE\": \"10/18/22\",\n \"OPEN\": \"1.1151\",\n \"HIGH\": \"1.1210\",\n \"LOW\": \"1.1071\",\n \"CLOSE\": \"1.1101\"\n },\n {\n \"DATE\": \"10/17/22\",\n \"OPEN\": \"1.1138\",\n \"HIGH\": \"1.1193\",\n \"LOW\": \"1.1137\",\n \"CLOSE\": \"1.1161\"\n },\n {\n \"DATE\": \"10/14/22\",\n \"OPEN\": \"1.1176\",\n \"HIGH\": \"1.1191\",\n \"LOW\": \"1.1121\",\n \"CLOSE\": \"1.1151\"\n },\n {\n \"DATE\": \"10/13/22\",\n \"OPEN\": \"1.1192\",\n \"HIGH\": \"1.1215\",\n \"LOW\": \"1.1157\",\n \"CLOSE\": \"1.1163\"\n },\n {\n \"DATE\": \"10/12/22\",\n \"OPEN\": \"1.1235\",\n \"HIGH\": \"1.1244\",\n \"LOW\": \"1.1172\",\n \"CLOSE\": \"1.1188\"\n },\n {\n \"DATE\": \"10/11/22\",\n \"OPEN\": \"1.1318\",\n \"HIGH\": \"1.1328\",\n \"LOW\": \"1.1195\",\n \"CLOSE\": \"1.1237\"\n },\n {\n \"DATE\": \"10/10/22\",\n \"OPEN\": \"1.1367\",\n \"HIGH\": \"1.1370\",\n \"LOW\": \"1.1266\",\n \"CLOSE\": \"1.1317\"\n },\n {\n \"DATE\": \"10/07/22\",\n \"OPEN\": \"1.1322\",\n \"HIGH\": \"1.1376\",\n \"LOW\": \"1.1301\",\n \"CLOSE\": \"1.1358\"\n },\n {\n \"DATE\": \"10/06/22\",\n \"OPEN\": \"1.1309\",\n \"HIGH\": \"1.1355\",\n \"LOW\": \"1.1244\",\n \"CLOSE\": \"1.1327\"\n },\n {\n \"DATE\": \"10/05/22\",\n \"OPEN\": \"1.1348\",\n \"HIGH\": \"1.1381\",\n \"LOW\": \"1.1242\",\n \"CLOSE\": \"1.1308\"\n },\n {\n \"DATE\": \"10/04/22\",\n \"OPEN\": \"1.1386\",\n \"HIGH\": \"1.1426\",\n \"LOW\": \"1.1306\",\n \"CLOSE\": \"1.1349\"\n },\n {\n \"DATE\": \"10/03/22\",\n \"OPEN\": \"1.1460\",\n \"HIGH\": \"1.1460\",\n \"LOW\": \"1.1362\",\n \"CLOSE\": \"1.1388\"\n },\n {\n \"DATE\": \"09/30/22\",\n \"OPEN\": \"1.1387\",\n \"HIGH\": \"1.1444\",\n \"LOW\": \"1.1320\",\n 
\"CLOSE\": \"1.1439\"\n },\n {\n \"DATE\": \"09/29/22\",\n \"OPEN\": \"1.1382\",\n \"HIGH\": \"1.1417\",\n \"LOW\": \"1.1346\",\n \"CLOSE\": \"1.1350\"\n },\n {\n \"DATE\": \"09/28/22\",\n \"OPEN\": \"1.1417\",\n \"HIGH\": \"1.1495\",\n \"LOW\": \"1.1290\",\n \"CLOSE\": \"1.1385\"\n },\n {\n \"DATE\": \"09/27/22\",\n \"OPEN\": \"1.1453\",\n \"HIGH\": \"1.1466\",\n \"LOW\": \"1.1370\",\n \"CLOSE\": \"1.1419\"\n },\n {\n \"DATE\": \"09/26/22\",\n \"OPEN\": \"1.1365\",\n \"HIGH\": \"1.1465\",\n \"LOW\": \"1.1328\",\n \"CLOSE\": \"1.1454\"\n },\n {\n \"DATE\": \"09/23/22\",\n \"OPEN\": \"1.1365\",\n \"HIGH\": \"1.1378\",\n \"LOW\": \"1.1323\",\n \"CLOSE\": \"1.1373\"\n },\n {\n \"DATE\": \"09/22/22\",\n \"OPEN\": \"1.1329\",\n \"HIGH\": \"1.1373\",\n \"LOW\": \"1.1303\",\n \"CLOSE\": \"1.1366\"\n },\n {\n \"DATE\": \"09/21/22\",\n \"OPEN\": \"1.1342\",\n \"HIGH\": \"1.1363\",\n \"LOW\": \"1.1315\",\n \"CLOSE\": \"1.1334\"\n },\n {\n \"DATE\": \"09/20/22\",\n \"OPEN\": \"1.1284\",\n \"HIGH\": \"1.1365\",\n \"LOW\": \"1.1273\",\n \"CLOSE\": \"1.1347\"\n },\n {\n \"DATE\": \"09/19/22\",\n \"OPEN\": \"1.1221\",\n \"HIGH\": \"1.1295\",\n \"LOW\": \"1.1206\",\n \"CLOSE\": \"1.1289\"\n },\n {\n \"DATE\": \"09/16/22\",\n \"OPEN\": \"1.1232\",\n \"HIGH\": \"1.1256\",\n \"LOW\": \"1.1197\",\n \"CLOSE\": \"1.1218\"\n },\n {\n \"DATE\": \"09/15/22\",\n \"OPEN\": \"1.1247\",\n \"HIGH\": \"1.1261\",\n \"LOW\": \"1.1212\",\n \"CLOSE\": \"1.1233\"\n },\n {\n \"DATE\": \"09/14/22\",\n \"OPEN\": \"1.1255\",\n \"HIGH\": \"1.1255\",\n \"LOW\": \"1.1201\",\n \"CLOSE\": \"1.1239\"\n },\n {\n \"DATE\": \"09/13/22\",\n \"OPEN\": \"1.1218\",\n \"HIGH\": \"1.1259\",\n \"LOW\": \"1.1194\",\n \"CLOSE\": \"1.1223\"\n },\n {\n \"DATE\": \"09/12/22\",\n \"OPEN\": \"1.1186\",\n \"HIGH\": \"1.1240\",\n \"LOW\": \"1.1181\",\n \"CLOSE\": \"1.1225\"\n },\n {\n \"DATE\": \"09/09/22\",\n \"OPEN\": \"1.1156\",\n \"HIGH\": \"1.1215\",\n \"LOW\": \"1.1139\",\n \"CLOSE\": \"1.1212\"\n },\n {\n \"DATE\": \"09/08/22\",\n \"OPEN\": \"1.1142\",\n \"HIGH\": \"1.1157\",\n \"LOW\": \"1.1115\",\n \"CLOSE\": \"1.1151\"\n },\n {\n \"DATE\": \"09/07/22\",\n \"OPEN\": \"1.1153\",\n \"HIGH\": \"1.1181\",\n \"LOW\": \"1.1134\",\n \"CLOSE\": \"1.1141\"\n },\n {\n \"DATE\": \"09/06/22\",\n \"OPEN\": \"1.1150\",\n \"HIGH\": \"1.1173\",\n \"LOW\": \"1.1127\",\n \"CLOSE\": \"1.1152\"\n },\n {\n \"DATE\": \"09/05/22\",\n \"OPEN\": \"1.1113\",\n \"HIGH\": \"1.1167\",\n \"LOW\": \"1.1113\",\n \"CLOSE\": \"1.1153\"\n }\n]\n\n" ]
[ 0 ]
[]
[]
[ "beautifulsoup", "python" ]
stackoverflow_0074672389_beautifulsoup_python.txt
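A note on the record above: the crucial fix is the User-Agent header — without it the site returns a stripped-down page for bots, which is why every selector came back empty or None. Once the header is in place, the hand-rolled row loop can also be replaced by pandas' HTML-table parser. The following is only a sketch of that shortcut (it assumes the price table is the first table on the page, uses a made-up minimal User-Agent string, and needs lxml or html5lib installed for read_html):

import requests
import pandas as pd

URL = "https://www.wsj.com/market-data/quotes/fx/AUDNZD/historical-prices"
headers = {"user-agent": "Mozilla/5.0"}        # any browser-like UA avoids the bot page

page = requests.get(URL, headers=headers)
tables = pd.read_html(page.text)               # every <table> on the page becomes a DataFrame
prices = tables[0]                             # assumption: the historical-prices table is first
print(prices.head())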
Q: CUDA atomicAdd being run too many times I am trying to initialise a numpy matrix with a preset initial cache size, and then each CUDA thread with run atomicAdd at most once, hopefully as long as the accumulated sum is still within the initial cache size. The problem here is that when the initial cache size (500) is smaller than the number of threads (1024), it returns with very unexpected number that the accumulated sum becomes very large (1140850688). Could anyone kindly advise why it does not work? or how to make it works? Ideally I would like the "if (result_count[0] < InitialResultCacheSize - 1)" to stop atomicAdd from accumulating to over the initial cache size (InitialResultCacheSize). import os _path = r"C:\Program Files\Microsoft Visual Studio\2022\Community\VC\Tools\MSVC\14.33.31629\bin\Hostx64\x64" if os.system("cl.exe"): os.environ['PATH'] += ';' + _path if os.system("cl.exe"): raise RuntimeError("cl.exe still not found, path probably incorrect") import pycuda.driver as cuda import pycuda.autoinit from pycuda.compiler import SourceModule import pandas as pd import numpy as np RESULT_COLUMN_COUNT = 4 InitialResultCacheSize = 10000 # InitialResultCacheSize = 500 number_matrix = np.zeros(InitialResultCacheSize * RESULT_COLUMN_COUNT) number_matrix = number_matrix.astype(np.float32) number_matrix_gpu = cuda.mem_alloc(number_matrix.nbytes) cuda.memcpy_htod(number_matrix_gpu, number_matrix) result_count = np.int32(0) result_count_gpu = cuda.mem_alloc(result_count.nbytes) cuda.memcpy_htod(result_count_gpu, result_count) mod = SourceModule(""" #include <cstdlib> __global__ void test_cuda_utilisation(int InitialResultCacheSize, int RESULT_COLUMN_COUNT, float *number_matrix, int *result_count) { int result_index, result_index_offset; if (result_count[0] < InitialResultCacheSize - 1) { result_index = atomicAdd(result_count,1); result_index_offset = result_index * RESULT_COLUMN_COUNT; number_matrix[result_index_offset + 0] = result_index; number_matrix[result_index_offset + 1] = result_count[0]; number_matrix[result_index_offset + 2] = InitialResultCacheSize; number_matrix[result_index_offset + 3] = RESULT_COLUMN_COUNT; } } """) func = mod.get_function("test_cuda_utilisation") func(np.int32(InitialResultCacheSize), np.int32(RESULT_COLUMN_COUNT), number_matrix_gpu, result_count_gpu, block=(4,16,16)) result_count_out = np.empty_like(result_count) cuda.memcpy_dtoh(result_count_out, result_count_gpu) print('result_count_out = ' + str(result_count_out) + ' and InitialResultCacheSize is ' + str(InitialResultCacheSize)) number_matrix_out = np.empty((result_count_out, RESULT_COLUMN_COUNT), dtype=np.float32) cuda.memcpy_dtoh(number_matrix_out, number_matrix_gpu) print('number_matrix_out is with len ' + str(len(number_matrix_out)) + ' x ' + str(len(number_matrix_out[0]))) print(number_matrix_out) Result of InitialResultCacheSize = 10000 'cl.exe' is not recognized as an internal or external command, operable program or batch file. Microsoft (R) C/C++ Optimizing Compiler Version 19.33.31629 for x64 Copyright (C) Microsoft Corporation. All rights reserved. result_count_out = 1024 and InitialResultCacheSize is 10000 number_matrix_out is with len 1024 x 4 [[0.000e+00 1.024e+03 1.000e+04 4.000e+00] [1.000e+00 1.024e+03 1.000e+04 4.000e+00] [2.000e+00 1.024e+03 1.000e+04 4.000e+00] ... 
[1.021e+03 1.024e+03 1.000e+04 4.000e+00] [1.022e+03 1.024e+03 1.000e+04 4.000e+00] [1.023e+03 1.024e+03 1.000e+04 4.000e+00]] Result of InitialResultCacheSize = 500 'cl.exe' is not recognized as an internal or external command, operable program or batch file. Microsoft (R) C/C++ Optimizing Compiler Version 19.33.31629 for x64 Copyright (C) Microsoft Corporation. All rights reserved. result_count_out = 1140850688 and InitialResultCacheSize is 500 Traceback (most recent call last): File C:\PythonProjects\TradeAnalysis\Test\TestCUDAUtilisation.py:67 in <module> cuda.memcpy_dtoh(number_matrix_out, number_matrix_gpu) LogicError: cuMemcpyDtoH failed: invalid argument Trial on another approach, not sure if it is the legal behaviour approach import os # _path = r"D:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.28.29910\bin\Hostx64\x64" _path = r"C:\Program Files\Microsoft Visual Studio\2022\Community\VC\Tools\MSVC\14.33.31629\bin\Hostx64\x64" # _path = r"C:\Program Files\Microsoft Visual Studio\2022\Community\VC\Tools\MSVC\14.33.31629\bin\Hostx64" # _path = r"C:\Program Files\Microsoft Visual Studio\2022\Community\VC\Tools\MSVC\14.33.31629\bin\Hostx64\x64\" if os.system("cl.exe"): os.environ['PATH'] += ';' + _path if os.system("cl.exe"): raise RuntimeError("cl.exe still not found, path probably incorrect") import pycuda.driver as cuda import pycuda.autoinit from pycuda.compiler import SourceModule import pandas as pd import numpy as np RESULT_COLUMN_COUNT = 4 # InitialResultCacheSize = 10000 InitialResultCacheSize = 500 number_matrix = np.zeros(InitialResultCacheSize * RESULT_COLUMN_COUNT) number_matrix = number_matrix.astype(np.float32) number_matrix_gpu = cuda.mem_alloc(number_matrix.nbytes) cuda.memcpy_htod(number_matrix_gpu, number_matrix) result_count = 0 mod = SourceModule(""" #include <cstdlib> __global__ void test_cuda_utilisation(int InitialResultCacheSize, int RESULT_COLUMN_COUNT, int result_count, float *number_matrix) { int result_index, result_index_offset; result_index = atomicAdd(&result_count,1); if (result_index < InitialResultCacheSize - 1) { result_index_offset = result_index * RESULT_COLUMN_COUNT; number_matrix[result_index_offset + 0] = result_index; number_matrix[result_index_offset + 1] = result_count; number_matrix[result_index_offset + 2] = InitialResultCacheSize; number_matrix[result_index_offset + 3] = RESULT_COLUMN_COUNT; } } """) func = mod.get_function("test_cuda_utilisation") func(np.int32(InitialResultCacheSize), np.int32(RESULT_COLUMN_COUNT), np.int32(result_count), number_matrix_gpu, block=(4,16,16)) print('result_count = ' + str(result_count) + ' and InitialResultCacheSize = ' + str(InitialResultCacheSize)) which result with the below. Still cannot get the count of how many times atomicAdd has been run result_count = 0 and InitialResultCacheSize = 500 A: You changed several things, most incorrectly (e.g. you cannot do atomics on a local variable, you are not actually copying any results back to the host, etc.) in between your first and second postings, more than just the one thing I suggested you change. 
If we start with your first listing, here are the changes I suggest to address the usage of atomics: $ cat t35.py import pycuda.driver as cuda import pycuda.autoinit from pycuda.compiler import SourceModule import numpy as np RESULT_COLUMN_COUNT = 4 InitialResultCacheSize = 500 number_matrix = np.zeros(InitialResultCacheSize * RESULT_COLUMN_COUNT) number_matrix = number_matrix.astype(np.float32) number_matrix_gpu = cuda.mem_alloc(number_matrix.nbytes) cuda.memcpy_htod(number_matrix_gpu, number_matrix) result_count = np.zeros(1, dtype=np.int32) result_count_gpu = cuda.mem_alloc(result_count.nbytes) cuda.memcpy_htod(result_count_gpu, result_count) mod = SourceModule(""" #include <cstdlib> __global__ void test_cuda_utilisation(int InitialResultCacheSize, int RESULT_COLUMN_COUNT, float *number_matrix, int *result_count) { int result_index, result_index_offset; result_index = atomicAdd(result_count, 1); if (result_index < InitialResultCacheSize - 1) { result_index_offset = result_index * RESULT_COLUMN_COUNT; number_matrix[result_index_offset + 0] = result_index; number_matrix[result_index_offset + 1] = result_count[0]; number_matrix[result_index_offset + 2] = InitialResultCacheSize; number_matrix[result_index_offset + 3] = RESULT_COLUMN_COUNT; } } """) func = mod.get_function("test_cuda_utilisation") func(np.int32(InitialResultCacheSize), np.int32(RESULT_COLUMN_COUNT), number_matrix_gpu, result_count_gpu, block=(4,16,16)) result_count_out = np.empty_like(result_count) cuda.memcpy_dtoh(result_count_out, result_count_gpu) print('result_count_out = ' + str(result_count_out) + ' and InitialResultCacheSize is ' + str(InitialResultCacheSize)) if InitialResultCacheSize < result_count_out: result_count_out = InitialResultCacheSize number_matrix_out = np.empty((result_count_out, RESULT_COLUMN_COUNT), dtype=np.float32) cuda.memcpy_dtoh(number_matrix_out, number_matrix_gpu) print('number_matrix_out is with len ' + str(len(number_matrix_out)) + ' x ' + str(len(number_matrix_out[0]))) print(number_matrix_out) $ python t35.py result_count_out = [1024] and InitialResultCacheSize is 500 number_matrix_out is with len 500 x 4 [[ 0.00000000e+00 1.02400000e+03 5.00000000e+02 4.00000000e+00] [ 1.00000000e+00 1.02400000e+03 5.00000000e+02 4.00000000e+00] [ 2.00000000e+00 1.02400000e+03 5.00000000e+02 4.00000000e+00] ..., [ 4.97000000e+02 1.02400000e+03 5.00000000e+02 4.00000000e+00] [ 4.98000000e+02 1.02400000e+03 5.00000000e+02 4.00000000e+00] [ 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00]] $ You'll note that the reported output of the atomic variable is 1024, but since there are only 500 "slots" available for output, we are limiting ourselves (both in kernel/device code, and in your host code) to the number of output slots available. So 1024 atomic ops were done, one per thread, because you are launching 1024 threads. But we are limiting the kernel to only write to 500 rows of output, because that's all that are allocated. Likewise, when retrieving results to the host, we must acknowledge that if the reported number of atomic ops is larger than the InitialResultCacheSize, then we must limit ourselves to the lower number.
CUDA atomicAdd being run too many times
I am trying to initialise a numpy matrix with a preset initial cache size, and then each CUDA thread with run atomicAdd at most once, hopefully as long as the accumulated sum is still within the initial cache size. The problem here is that when the initial cache size (500) is smaller than the number of threads (1024), it returns with very unexpected number that the accumulated sum becomes very large (1140850688). Could anyone kindly advise why it does not work? or how to make it works? Ideally I would like the "if (result_count[0] < InitialResultCacheSize - 1)" to stop atomicAdd from accumulating to over the initial cache size (InitialResultCacheSize). import os _path = r"C:\Program Files\Microsoft Visual Studio\2022\Community\VC\Tools\MSVC\14.33.31629\bin\Hostx64\x64" if os.system("cl.exe"): os.environ['PATH'] += ';' + _path if os.system("cl.exe"): raise RuntimeError("cl.exe still not found, path probably incorrect") import pycuda.driver as cuda import pycuda.autoinit from pycuda.compiler import SourceModule import pandas as pd import numpy as np RESULT_COLUMN_COUNT = 4 InitialResultCacheSize = 10000 # InitialResultCacheSize = 500 number_matrix = np.zeros(InitialResultCacheSize * RESULT_COLUMN_COUNT) number_matrix = number_matrix.astype(np.float32) number_matrix_gpu = cuda.mem_alloc(number_matrix.nbytes) cuda.memcpy_htod(number_matrix_gpu, number_matrix) result_count = np.int32(0) result_count_gpu = cuda.mem_alloc(result_count.nbytes) cuda.memcpy_htod(result_count_gpu, result_count) mod = SourceModule(""" #include <cstdlib> __global__ void test_cuda_utilisation(int InitialResultCacheSize, int RESULT_COLUMN_COUNT, float *number_matrix, int *result_count) { int result_index, result_index_offset; if (result_count[0] < InitialResultCacheSize - 1) { result_index = atomicAdd(result_count,1); result_index_offset = result_index * RESULT_COLUMN_COUNT; number_matrix[result_index_offset + 0] = result_index; number_matrix[result_index_offset + 1] = result_count[0]; number_matrix[result_index_offset + 2] = InitialResultCacheSize; number_matrix[result_index_offset + 3] = RESULT_COLUMN_COUNT; } } """) func = mod.get_function("test_cuda_utilisation") func(np.int32(InitialResultCacheSize), np.int32(RESULT_COLUMN_COUNT), number_matrix_gpu, result_count_gpu, block=(4,16,16)) result_count_out = np.empty_like(result_count) cuda.memcpy_dtoh(result_count_out, result_count_gpu) print('result_count_out = ' + str(result_count_out) + ' and InitialResultCacheSize is ' + str(InitialResultCacheSize)) number_matrix_out = np.empty((result_count_out, RESULT_COLUMN_COUNT), dtype=np.float32) cuda.memcpy_dtoh(number_matrix_out, number_matrix_gpu) print('number_matrix_out is with len ' + str(len(number_matrix_out)) + ' x ' + str(len(number_matrix_out[0]))) print(number_matrix_out) Result of InitialResultCacheSize = 10000 'cl.exe' is not recognized as an internal or external command, operable program or batch file. Microsoft (R) C/C++ Optimizing Compiler Version 19.33.31629 for x64 Copyright (C) Microsoft Corporation. All rights reserved. result_count_out = 1024 and InitialResultCacheSize is 10000 number_matrix_out is with len 1024 x 4 [[0.000e+00 1.024e+03 1.000e+04 4.000e+00] [1.000e+00 1.024e+03 1.000e+04 4.000e+00] [2.000e+00 1.024e+03 1.000e+04 4.000e+00] ... [1.021e+03 1.024e+03 1.000e+04 4.000e+00] [1.022e+03 1.024e+03 1.000e+04 4.000e+00] [1.023e+03 1.024e+03 1.000e+04 4.000e+00]] Result of InitialResultCacheSize = 500 'cl.exe' is not recognized as an internal or external command, operable program or batch file. 
Microsoft (R) C/C++ Optimizing Compiler Version 19.33.31629 for x64 Copyright (C) Microsoft Corporation. All rights reserved. result_count_out = 1140850688 and InitialResultCacheSize is 500 Traceback (most recent call last): File C:\PythonProjects\TradeAnalysis\Test\TestCUDAUtilisation.py:67 in <module> cuda.memcpy_dtoh(number_matrix_out, number_matrix_gpu) LogicError: cuMemcpyDtoH failed: invalid argument Trial on another approach, not sure if it is the legal behaviour approach import os # _path = r"D:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.28.29910\bin\Hostx64\x64" _path = r"C:\Program Files\Microsoft Visual Studio\2022\Community\VC\Tools\MSVC\14.33.31629\bin\Hostx64\x64" # _path = r"C:\Program Files\Microsoft Visual Studio\2022\Community\VC\Tools\MSVC\14.33.31629\bin\Hostx64" # _path = r"C:\Program Files\Microsoft Visual Studio\2022\Community\VC\Tools\MSVC\14.33.31629\bin\Hostx64\x64\" if os.system("cl.exe"): os.environ['PATH'] += ';' + _path if os.system("cl.exe"): raise RuntimeError("cl.exe still not found, path probably incorrect") import pycuda.driver as cuda import pycuda.autoinit from pycuda.compiler import SourceModule import pandas as pd import numpy as np RESULT_COLUMN_COUNT = 4 # InitialResultCacheSize = 10000 InitialResultCacheSize = 500 number_matrix = np.zeros(InitialResultCacheSize * RESULT_COLUMN_COUNT) number_matrix = number_matrix.astype(np.float32) number_matrix_gpu = cuda.mem_alloc(number_matrix.nbytes) cuda.memcpy_htod(number_matrix_gpu, number_matrix) result_count = 0 mod = SourceModule(""" #include <cstdlib> __global__ void test_cuda_utilisation(int InitialResultCacheSize, int RESULT_COLUMN_COUNT, int result_count, float *number_matrix) { int result_index, result_index_offset; result_index = atomicAdd(&result_count,1); if (result_index < InitialResultCacheSize - 1) { result_index_offset = result_index * RESULT_COLUMN_COUNT; number_matrix[result_index_offset + 0] = result_index; number_matrix[result_index_offset + 1] = result_count; number_matrix[result_index_offset + 2] = InitialResultCacheSize; number_matrix[result_index_offset + 3] = RESULT_COLUMN_COUNT; } } """) func = mod.get_function("test_cuda_utilisation") func(np.int32(InitialResultCacheSize), np.int32(RESULT_COLUMN_COUNT), np.int32(result_count), number_matrix_gpu, block=(4,16,16)) print('result_count = ' + str(result_count) + ' and InitialResultCacheSize = ' + str(InitialResultCacheSize)) which result with the below. Still cannot get the count of how many times atomicAdd has been run result_count = 0 and InitialResultCacheSize = 500
[ "You changed several things, most incorrectly (e.g. you cannot do atomics on a local variable, you are not actually copying any results back to the host, etc.) in between your first and second postings, more than just the one thing I suggested you change.\nIf we start with your first listing, here are the changes I suggest to address the usage of atomics:\n$ cat t35.py\nimport pycuda.driver as cuda\nimport pycuda.autoinit\nfrom pycuda.compiler import SourceModule\nimport numpy as np\n\nRESULT_COLUMN_COUNT = 4\n\nInitialResultCacheSize = 500\n\nnumber_matrix = np.zeros(InitialResultCacheSize * RESULT_COLUMN_COUNT)\nnumber_matrix = number_matrix.astype(np.float32)\n\nnumber_matrix_gpu = cuda.mem_alloc(number_matrix.nbytes)\ncuda.memcpy_htod(number_matrix_gpu, number_matrix)\n\nresult_count = np.zeros(1, dtype=np.int32)\nresult_count_gpu = cuda.mem_alloc(result_count.nbytes)\ncuda.memcpy_htod(result_count_gpu, result_count)\n\nmod = SourceModule(\"\"\"\n #include <cstdlib>\n\n __global__ void test_cuda_utilisation(int InitialResultCacheSize, int RESULT_COLUMN_COUNT, float *number_matrix, int *result_count)\n {\n int result_index, result_index_offset;\n result_index = atomicAdd(result_count, 1);\n if (result_index < InitialResultCacheSize - 1) {\n result_index_offset = result_index * RESULT_COLUMN_COUNT;\n number_matrix[result_index_offset + 0] = result_index;\n number_matrix[result_index_offset + 1] = result_count[0];\n number_matrix[result_index_offset + 2] = InitialResultCacheSize;\n number_matrix[result_index_offset + 3] = RESULT_COLUMN_COUNT;\n }\n }\n \"\"\")\n\nfunc = mod.get_function(\"test_cuda_utilisation\")\nfunc(np.int32(InitialResultCacheSize), np.int32(RESULT_COLUMN_COUNT), number_matrix_gpu, result_count_gpu, block=(4,16,16))\n\nresult_count_out = np.empty_like(result_count)\ncuda.memcpy_dtoh(result_count_out, result_count_gpu)\nprint('result_count_out = ' + str(result_count_out) + ' and InitialResultCacheSize is ' + str(InitialResultCacheSize))\nif InitialResultCacheSize < result_count_out:\n result_count_out = InitialResultCacheSize\nnumber_matrix_out = np.empty((result_count_out, RESULT_COLUMN_COUNT), dtype=np.float32)\ncuda.memcpy_dtoh(number_matrix_out, number_matrix_gpu)\n\nprint('number_matrix_out is with len ' + str(len(number_matrix_out)) + ' x ' + str(len(number_matrix_out[0])))\nprint(number_matrix_out)\n$ python t35.py\nresult_count_out = [1024] and InitialResultCacheSize is 500\nnumber_matrix_out is with len 500 x 4\n[[ 0.00000000e+00 1.02400000e+03 5.00000000e+02 4.00000000e+00]\n [ 1.00000000e+00 1.02400000e+03 5.00000000e+02 4.00000000e+00]\n [ 2.00000000e+00 1.02400000e+03 5.00000000e+02 4.00000000e+00]\n ...,\n [ 4.97000000e+02 1.02400000e+03 5.00000000e+02 4.00000000e+00]\n [ 4.98000000e+02 1.02400000e+03 5.00000000e+02 4.00000000e+00]\n [ 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00]]\n$\n\nYou'll note that the reported output of the atomic variable is 1024, but since there are only 500 \"slots\" available for output, we are limiting ourselves (both in kernel/device code, and in your host code) to the number of output slots available.\nSo 1024 atomic ops were done, one per thread, because you are launching 1024 threads. But we are limiting the kernel to only write to 500 rows of output, because that's all that are allocated. Likewise, when retrieving results to the host, we must acknowledge that if the reported number of atomic ops is larger than the InitialResultCacheSize, then we must limit ourselves to the lower number.\n" ]
[ 1 ]
[]
[]
[ "cuda", "python" ]
stackoverflow_0074662945_cuda_python.txt
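One detail the answer above does not spell out: the garbage count 1140850688 from the failing run is exactly the bit pattern of the 32-bit float 512.0 reinterpreted as an integer. That is consistent with the unguarded kernel writing row values (result_index goes up to 1023) past the end of the 500-row number_matrix buffer and clobbering neighbouring device memory — apparently the result_count allocation. The adjacency of the two allocations is an assumption about this particular run; the bit-pattern identity itself is easy to verify:

import struct

# 512.0 packed as a 32-bit float, read back as a 32-bit int
print(struct.unpack("<i", struct.pack("<f", 512.0))[0])   # -> 1140850688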
Q: Failed to build ta-lib ERROR: Could not build wheels for ta-lib, which is required to install pyproject.toml-based project I'm getting below error, while pip installing ta-lib. I used command : !pip install ta-lib Please provide me solution. Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/ Collecting ta-lib Using cached TA-Lib-0.4.25.tar.gz (271 kB) Installing build dependencies ... done Getting requirements to build wheel ... done Installing backend dependencies ... done Preparing metadata (pyproject.toml) ... done Requirement already satisfied: numpy in /usr/local/lib/python3.8/dist-packages (from ta-lib) (1.21.6) Building wheels for collected packages: ta-lib error: subprocess-exited-with-error × Building wheel for ta-lib (pyproject.toml) did not run successfully. │ exit code: 1 ╰─> See above for output. note: This error originates from a subprocess, and is likely not a problem with pip. Building wheel for ta-lib (pyproject.toml) ... error ERROR: Failed building wheel for ta-lib Failed to build ta-lib ERROR: Could not build wheels for ta-lib, which is required to install pyproject.toml-based projects I tried following commands : pip install --upgrade pip setuptools wheel pip install pep517 !pip3 install --upgrade pip !pip install pyproject-toml pip install TA_Lib‑0.4.10‑cp35‑cp35m‑win_amd64.whl !pip install ta-lib A: https://www.lfd.uci.edu/~gohlke/pythonlibs/#ta-lib in this web download ta-lib.whl and then pip install gotch
Failed to build ta-lib ERROR: Could not build wheels for ta-lib, which is required to install pyproject.toml-based project
I'm getting below error, while pip installing ta-lib. I used command : !pip install ta-lib Please provide me solution. Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/ Collecting ta-lib Using cached TA-Lib-0.4.25.tar.gz (271 kB) Installing build dependencies ... done Getting requirements to build wheel ... done Installing backend dependencies ... done Preparing metadata (pyproject.toml) ... done Requirement already satisfied: numpy in /usr/local/lib/python3.8/dist-packages (from ta-lib) (1.21.6) Building wheels for collected packages: ta-lib error: subprocess-exited-with-error × Building wheel for ta-lib (pyproject.toml) did not run successfully. │ exit code: 1 ╰─> See above for output. note: This error originates from a subprocess, and is likely not a problem with pip. Building wheel for ta-lib (pyproject.toml) ... error ERROR: Failed building wheel for ta-lib Failed to build ta-lib ERROR: Could not build wheels for ta-lib, which is required to install pyproject.toml-based projects I tried following commands : pip install --upgrade pip setuptools wheel pip install pep517 !pip3 install --upgrade pip !pip install pyproject-toml pip install TA_Lib‑0.4.10‑cp35‑cp35m‑win_amd64.whl !pip install ta-lib
[ "https://www.lfd.uci.edu/~gohlke/pythonlibs/#ta-lib\nin this web download ta-lib.whl and then pip install\ngotch\n" ]
[ 0 ]
[]
[]
[ "algorithmic_trading", "artificial_intelligence", "python", "technical_indicator" ]
stackoverflow_0074651107_algorithmic_trading_artificial_intelligence_python_technical_indicator.txt
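A caveat on the answer above: the Gohlke .whl files are Windows binaries, while the traceback (Colab-style paths such as /usr/local/lib/python3.8/dist-packages and the !pip prefix) points to a Linux notebook. There the wheel build usually fails because the underlying TA-Lib C library is not installed, so the common fix is to build that library first and only then install the Python wrapper. A rough sketch for a Colab cell — the SourceForge URL is the mirror most guides cite and may have moved:

!wget -q http://prdownloads.sourceforge.net/ta-lib/ta-lib-0.4.0-src.tar.gz
!tar -xzf ta-lib-0.4.0-src.tar.gz
%cd ta-lib
!./configure --prefix=/usr
!make
!make install
%cd ..
!pip install ta-lib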
Q: I lose leading zeros when copy data from dataframe to openpyxl.workbook I use openpyxl and pandas to fill row color with specified condition. Everything works fine but in some cells I lose leading zeros (like 0345 -> output 345), I don't want that. How can I get the exact data? dt = pd.read_excel(file_luu, sheet_name="Sheet1") dt = pd.DataFrame(dt) dinhDanh = len(dt.columns) - 1 wb = load_workbook(file_luu) print(type(wb)) ws = wb['Sheet1'] for i in range(0, dt.shape[1]): ws.cell(row=1, column=i + 1).value = dt.columns[i] for row in range(dt.shape[0]): for col in range(dt.shape[1] ): ws.cell(row + 2, col + 1).value = str(dt.iat[row, col]) if (str(dt.iat[row, col]) != "nan") else " " if dt.iat[row, dinhDanh] == True: ws.cell(row + 2, col + 1).fill = PatternFill(start_color='FFD970', end_color='FFD970', fill_type="solid") # used hex code for brown color ws.delete_cols(1) ws.delete_cols(dinhDanh) wb.save(file_luu) Copy exactly all characters A: To prevent losing leading zeros when writing data to an Excel file with openpyxl and pandas, you can specify that the cell should be formatted as a string by setting the number_format property of the cell to @. This tells Excel that the cell should be treated as a string, and any leading zeros will be preserved. # Import the openpyxl Workbook and cell classes from openpyxl.workbook import Workbook from openpyxl.cell import Cell dt = pd.read_excel(file_luu, sheet_name="Sheet1") dt = pd.DataFrame(dt) dinhDanh = len(dt.columns) - 1 wb = load_workbook(file_luu) print(type(wb)) ws = wb['Sheet1'] for i in range(0, dt.shape[1]): ws.cell(row=1, column=i + 1).value = dt.columns[i] for row in range(dt.shape[0]): for col in range(dt.shape[1] ): # Create a cell with the value from the DataFrame, and specify that it should be formatted as a string cell = Cell(ws, row + 2, col + 1, value=dt.iat[row, col], number_format="@") # Set the cell's value and fill color ws.cell(row + 2, col + 1).value = str(dt.iat[row, col]) if (str(dt.iat[row, col]) != "nan") else " " if dt.iat[row, dinhDanh] == True: ws.cell(row + 2, col + 1).fill = PatternFill(start_color='FFD970', end_color='FFD970', fill_type="solid") # used hex code for brown color ws.delete_
I lose leading zeros when copy data from dataframe to openpyxl.workbook
I use openpyxl and pandas to fill row color with specified condition. Everything works fine but in some cells I lose leading zeros (like 0345 -> output 345), I don't want that. How can I get the exact data? dt = pd.read_excel(file_luu, sheet_name="Sheet1") dt = pd.DataFrame(dt) dinhDanh = len(dt.columns) - 1 wb = load_workbook(file_luu) print(type(wb)) ws = wb['Sheet1'] for i in range(0, dt.shape[1]): ws.cell(row=1, column=i + 1).value = dt.columns[i] for row in range(dt.shape[0]): for col in range(dt.shape[1] ): ws.cell(row + 2, col + 1).value = str(dt.iat[row, col]) if (str(dt.iat[row, col]) != "nan") else " " if dt.iat[row, dinhDanh] == True: ws.cell(row + 2, col + 1).fill = PatternFill(start_color='FFD970', end_color='FFD970', fill_type="solid") # used hex code for brown color ws.delete_cols(1) ws.delete_cols(dinhDanh) wb.save(file_luu) Copy exactly all characters
[ "To prevent losing leading zeros when writing data to an Excel file with openpyxl and pandas, you can specify that the cell should be formatted as a string by setting the number_format property of the cell to @. This tells Excel that the cell should be treated as a string, and any leading zeros will be preserved.\n# Import the openpyxl Workbook and cell classes\nfrom openpyxl.workbook import Workbook\nfrom openpyxl.cell import Cell\n\ndt = pd.read_excel(file_luu, sheet_name=\"Sheet1\")\ndt = pd.DataFrame(dt)\ndinhDanh = len(dt.columns) - 1\nwb = load_workbook(file_luu)\nprint(type(wb))\nws = wb['Sheet1']\nfor i in range(0, dt.shape[1]):\n ws.cell(row=1, column=i + 1).value = dt.columns[i]\nfor row in range(dt.shape[0]):\n for col in range(dt.shape[1] ):\n\n # Create a cell with the value from the DataFrame, and specify that it should be formatted as a string\n cell = Cell(ws, row + 2, col + 1, value=dt.iat[row, col], number_format=\"@\")\n\n # Set the cell's value and fill color\n ws.cell(row + 2, col + 1).value = str(dt.iat[row, col]) if (str(dt.iat[row, col]) != \"nan\") else \" \"\n if dt.iat[row, dinhDanh] == True:\n ws.cell(row + 2, col + 1).fill = PatternFill(start_color='FFD970', end_color='FFD970',\n fill_type=\"solid\") # used hex code for brown color\n\nws.delete_\n\n" ]
[ 0 ]
[]
[]
[ "openpyxl", "pandas", "python" ]
stackoverflow_0074672592_openpyxl_pandas_python.txt
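Note on the answer above: its snippet is cut off mid-statement and builds a Cell object that is never attached to the worksheet, but the underlying point is right — the zeros disappear because pandas parses "0345" as the number 345 before openpyxl ever sees it. A minimal sketch that keeps them (file and sheet names are placeholders): read every column as text and mark the written cells as text, too.

import pandas as pd
from openpyxl import load_workbook

dt = pd.read_excel("file.xlsx", sheet_name="Sheet1", dtype=str)   # "0345" stays a string

wb = load_workbook("file.xlsx")
ws = wb["Sheet1"]
for row in range(dt.shape[0]):
    for col in range(dt.shape[1]):
        cell = ws.cell(row=row + 2, column=col + 1)
        cell.value = dt.iat[row, col] if pd.notna(dt.iat[row, col]) else " "
        cell.number_format = "@"   # "@" is Excel's text format, so leading zeros survive
wb.save("file.xlsx")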
Q: Python combinations of multiple list of different sizes Am trying to swap items between multiple lists and I wanted to know if there is any method to generate combinations between multiple list of different size? For example, I have this 3 lists: a = [(0, 0), (1, 0), (2, 0)] b = [(0, 2), (1, 2), (2, 2)] c = [(0, 3), (1, 3)] Expected result: a : [(0, 3), (0, 2), (0, 0)] b : [(1, 3), (1, 2), (1, 0)] c : [(2, 2), (2, 0)] a : [(0, 3), (0, 2), (0, 0)] b : [(1, 3), (1, 2), (2, 0)] c : [(2, 2), (1, 0)] ... a : [(0, 3), (0, 2)] b : [(1, 3), (1, 2), (0, 0)] c : [(2, 2), (2, 0), (1, 0)] I found this code here (python combinations of multiple list): import itertools as it import numpy as np a = [(0, 0), (1, 0), (2, 0)] b = [(0, 2), (1, 2), (2, 2)] c = [(0, 3), (1, 3)] def combination(first, *rest): for i in it.product([first], *(it.permutations(j) for j in rest)): yield tuple(zip(*i)) for i in combination(c, b, a): print("a :", list(i[0])) print("b :", list(i[1])) print("c :", list(i[2])) It works perfectly fine if the list are the same size. A: Try adding None to your lists so that they all have the same length, use sympy.utilities.iterables.multiset_permutations instead of, it.permutations, and finally filter out None values from the output. That should generalize in a natural way your approach for lists of equal sizes: import itertools as it from sympy.utilities.iterables import multiset_permutations a = [(0, 0), (1, 0), (2, 0)] b = [(0, 2), (1, 2), (2, 2)] c = [(0, 3), (1, 3), None] def combination(first, *rest): for i in it.product([first], *(multiset_permutations(j) for j in rest)): yield tuple(zip(*i)) for i in combination(c, b, a): print("a :", [val for val in i[0] if val]) print("b :", [val for val in i[1] if val]) print("c :", [val for val in i[2] if val])
Python combinations of multiple list of different sizes
Am trying to swap items between multiple lists and I wanted to know if there is any method to generate combinations between multiple list of different size? For example, I have this 3 lists: a = [(0, 0), (1, 0), (2, 0)] b = [(0, 2), (1, 2), (2, 2)] c = [(0, 3), (1, 3)] Expected result: a : [(0, 3), (0, 2), (0, 0)] b : [(1, 3), (1, 2), (1, 0)] c : [(2, 2), (2, 0)] a : [(0, 3), (0, 2), (0, 0)] b : [(1, 3), (1, 2), (2, 0)] c : [(2, 2), (1, 0)] ... a : [(0, 3), (0, 2)] b : [(1, 3), (1, 2), (0, 0)] c : [(2, 2), (2, 0), (1, 0)] I found this code here (python combinations of multiple list): import itertools as it import numpy as np a = [(0, 0), (1, 0), (2, 0)] b = [(0, 2), (1, 2), (2, 2)] c = [(0, 3), (1, 3)] def combination(first, *rest): for i in it.product([first], *(it.permutations(j) for j in rest)): yield tuple(zip(*i)) for i in combination(c, b, a): print("a :", list(i[0])) print("b :", list(i[1])) print("c :", list(i[2])) It works perfectly fine if the list are the same size.
[ "Try\n\nadding None to your lists so that they all have the same length,\nuse sympy.utilities.iterables.multiset_permutations instead of,\nit.permutations, and\nfinally filter out None values from the output.\n\nThat should generalize in a natural way your approach for lists of equal sizes:\nimport itertools as it\nfrom sympy.utilities.iterables import multiset_permutations\n\na = [(0, 0), (1, 0), (2, 0)]\nb = [(0, 2), (1, 2), (2, 2)]\nc = [(0, 3), (1, 3), None]\n\ndef combination(first, *rest):\n for i in it.product([first], *(multiset_permutations(j) for j in rest)):\n yield tuple(zip(*i))\n\nfor i in combination(c, b, a):\n print(\"a :\", [val for val in i[0] if val])\n print(\"b :\", [val for val in i[1] if val])\n print(\"c :\", [val for val in i[2] if val])\n\n" ]
[ 0 ]
[]
[]
[ "combinations", "list", "python", "python_itertools" ]
stackoverflow_0074646518_combinations_list_python_python_itertools.txt
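For inputs whose lengths are not known up front, the padding step in the answer generalises with a small helper before the same multiset-permutation generator is reused — a sketch:

def pad_with_none(*lists):
    # Append None to each list so they all match the longest one
    longest = max(len(lst) for lst in lists)
    return [list(lst) + [None] * (longest - len(lst)) for lst in lists]

a, b, c = pad_with_none(a, b, c)   # c becomes [(0, 3), (1, 3), None]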
Q: How to take a sum (in denominator) for calculating group by weighted average in a dataframe? I have a data frame that looks like this. import pandas as pd import numpy as np data = [ ['A',1,2,3,4], ['A',5,6,7,8], ['A',9,10,11,12], ['B',13,14,15,16], ['B',17,18,19,20], ['B',21,22,23,24], ['B',25,26,27,28], ['C',29,30,31,32], ['C',33,34,35,36], ['C',37,38,39,40], ['D',13,14,15,0], ['D',0,18,19,0], ['D',0,0,23,0], ['D',0,0,0,0], ['E',13,14,15,0], ['E',0,18,19,0], ['F',0,0,23,0], ] df = pd.DataFrame(data, columns=['Name', 'num1', 'num2', 'num3', 'num4']) df Then I have the following code to calculate the group by weighted average. weights = [10,20,30,40] df=df.groupby('Name').agg(lambda g: sum(g*weights[:len(g)])/sum(weights[:len(g)])) The problem lies in sum(weights[:len(g)]) because all the groups do not have equal rows. As you can see above, group A has 3 rows, B has 4 rows, C has 3 rows, D has 4 rows, E has 2 rows and F has 1 row. Depending upon the rows, it needs to calculate the sum. Now, the above code returns me the weighted average by calculating For Group A, the first column calculates the weighted average as (1 X 10+5 X 20+9 X 30)/60 but it should calculate the weighted average as (1 X20+5 X 30+9 X 40)/90 For Group E, the first column calculates the weighted average as (13 X 10+0 X 20)/30 but it should calculate the weighted average as (13 X 30+0 X 40)/70 Current Result Expected result A: i edit your code little bit n = len(weights) df=df.groupby('Name').agg(lambda g: sum(g*weights[n-len(g):])/sum(weights[n-len(g):])) output(df): num1 num2 num3 num4 Name A 5.9 6.9 7.9 8.9 B 21.0 22.0 23.0 24.0 C 33.9 34.9 35.9 36.9 D 1.3 5.0 12.2 0.0 E 5.6 16.3 17.3 0.0 F 0.0 0.0 23.0 0.0 A: @PandaKim's solution suffices; for efficiency, depending on your data size, you may have to take a longer route: n = len(weights) pos = n - df.groupby('Name').size() pos = [weights[posn : n] for posn in pos] pos = np.concatenate(pos) (df .set_index('Name') .mul(pos, axis=0) .assign(wt = pos) .groupby('Name') .sum() .pipe(lambda df: df.filter(like='num') .div(df.wt, axis=0) ) ) num1 num2 num3 num4 Name A 5.888889 6.888889 7.888889 8.888889 B 21.000000 22.000000 23.000000 24.000000 C 33.888889 34.888889 35.888889 36.888889 D 1.300000 5.000000 12.200000 0.000000 E 5.571429 16.285714 17.285714 0.000000 F 0.000000 0.000000 23.000000 0.000000
How to take a sum (in denominator) for calculating group by weighted average in a dataframe?
I have a data frame that looks like this. import pandas as pd import numpy as np data = [ ['A',1,2,3,4], ['A',5,6,7,8], ['A',9,10,11,12], ['B',13,14,15,16], ['B',17,18,19,20], ['B',21,22,23,24], ['B',25,26,27,28], ['C',29,30,31,32], ['C',33,34,35,36], ['C',37,38,39,40], ['D',13,14,15,0], ['D',0,18,19,0], ['D',0,0,23,0], ['D',0,0,0,0], ['E',13,14,15,0], ['E',0,18,19,0], ['F',0,0,23,0], ] df = pd.DataFrame(data, columns=['Name', 'num1', 'num2', 'num3', 'num4']) df Then I have the following code to calculate the group by weighted average. weights = [10,20,30,40] df=df.groupby('Name').agg(lambda g: sum(g*weights[:len(g)])/sum(weights[:len(g)])) The problem lies in sum(weights[:len(g)]) because all the groups do not have equal rows. As you can see above, group A has 3 rows, B has 4 rows, C has 3 rows, D has 4 rows, E has 2 rows and F has 1 row. Depending upon the rows, it needs to calculate the sum. Now, the above code returns me the weighted average by calculating For Group A, the first column calculates the weighted average as (1 X 10+5 X 20+9 X 30)/60 but it should calculate the weighted average as (1 X20+5 X 30+9 X 40)/90 For Group E, the first column calculates the weighted average as (13 X 10+0 X 20)/30 but it should calculate the weighted average as (13 X 30+0 X 40)/70 Current Result Expected result
[ "i edit your code little bit\nn = len(weights)\ndf=df.groupby('Name').agg(lambda g: sum(g*weights[n-len(g):])/sum(weights[n-len(g):]))\n\noutput(df):\n num1 num2 num3 num4\nName \nA 5.9 6.9 7.9 8.9\nB 21.0 22.0 23.0 24.0\nC 33.9 34.9 35.9 36.9\nD 1.3 5.0 12.2 0.0\nE 5.6 16.3 17.3 0.0\nF 0.0 0.0 23.0 0.0\n\n", "@PandaKim's solution suffices; for efficiency, depending on your data size, you may have to take a longer route:\nn = len(weights)\npos = n - df.groupby('Name').size()\npos = [weights[posn : n] for posn in pos]\npos = np.concatenate(pos)\n(df\n.set_index('Name')\n.mul(pos, axis=0)\n.assign(wt = pos)\n.groupby('Name')\n.sum()\n.pipe(lambda df: df.filter(like='num')\n .div(df.wt, axis=0)\n )\n)\n\n num1 num2 num3 num4\nName\nA 5.888889 6.888889 7.888889 8.888889\nB 21.000000 22.000000 23.000000 24.000000\nC 33.888889 34.888889 35.888889 36.888889\nD 1.300000 5.000000 12.200000 0.000000\nE 5.571429 16.285714 17.285714 0.000000\nF 0.000000 0.000000 23.000000 0.000000\n\n" ]
[ 2, 0 ]
[]
[]
[ "data_science_experience", "dataframe", "pandas", "python" ]
stackoverflow_0074672338_data_science_experience_dataframe_pandas_python.txt
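The key idea in both answers is the slice weights[n - len(g):]: a group that is shorter than the weight vector uses the last weights rather than the first. A quick hand check against the expected value for group E, column num1:

weights = [10, 20, 30, 40]
e_num1 = [13, 0]                                         # group E, column num1
w = weights[len(weights) - len(e_num1):]                 # -> [30, 40]
print(sum(v * wi for v, wi in zip(e_num1, w)) / sum(w))  # -> 5.5714..., matching the output above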
Q: Can't get attribute to work on a method of a class Putting the "self.budget" attribute on the buy method returns error 'Shopper' object has no attribute 'budget', this alsoo happense when calling the gifts list to append the additional gift bought through the buy method. As such, both the list of gifts is not adjusted, the budget remains unchanged and the quantity is not updated. class Shopper(): def __init__(self, gifts, quantity, budget): self.buy(gifts, quantity) self.budget = budget self.quantity=0 self.gifts=gifts #list to store the gifts bought self.gifts=[] if self.budget < quantity * 100: self.budget = budget print("Insuffecient budget") else: self.gifts.extend(gifts) self.quantity+=quantity self.budget-=quantity*100 #method for buying an additional gift at certain quantity def buy(self, gift, quantit): if self.budget < quantity * 100: self.budget = budget print("Insuffecient budget") else: self.gifts.extend(gift) self.quantity+=quantity self.budget-=quantity*100 #the other part prints the list of gifts bought and the budget left, and the total number of gifts bought. It workds when the buy method is is empty or is removed due to attribute errors. #Input #Shopper1 = Shopper(['Toys', 'Clothes', 'Foods'], 10, 5000) #Shopper1.enlist("book", 1) #Gives error, "Attribute Error: 'Shopper' object has no attribute 'budget'"". A: It looks like you've named both classes the same thing, and the first definition doesn't have the 'budget' attribute.
Can't get attribute to work on a method of a class
Putting the "self.budget" attribute on the buy method returns error 'Shopper' object has no attribute 'budget', this alsoo happense when calling the gifts list to append the additional gift bought through the buy method. As such, both the list of gifts is not adjusted, the budget remains unchanged and the quantity is not updated. class Shopper(): def __init__(self, gifts, quantity, budget): self.buy(gifts, quantity) self.budget = budget self.quantity=0 self.gifts=gifts #list to store the gifts bought self.gifts=[] if self.budget < quantity * 100: self.budget = budget print("Insuffecient budget") else: self.gifts.extend(gifts) self.quantity+=quantity self.budget-=quantity*100 #method for buying an additional gift at certain quantity def buy(self, gift, quantit): if self.budget < quantity * 100: self.budget = budget print("Insuffecient budget") else: self.gifts.extend(gift) self.quantity+=quantity self.budget-=quantity*100 #the other part prints the list of gifts bought and the budget left, and the total number of gifts bought. It workds when the buy method is is empty or is removed due to attribute errors. #Input #Shopper1 = Shopper(['Toys', 'Clothes', 'Foods'], 10, 5000) #Shopper1.enlist("book", 1) #Gives error, "Attribute Error: 'Shopper' object has no attribute 'budget'"".
[ "It looks like you've named both classes the same thing, and the first definition doesn't have the 'budget' attribute.\n" ]
[ 0 ]
[]
[]
[ "class", "inheritance", "methods", "oop", "python" ]
stackoverflow_0074672632_class_inheritance_methods_oop_python.txt
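Beyond any duplicate class definition, the snippet as posted reproduces the error on its own: __init__ calls self.buy(gifts, quantity) on its first line, before self.budget and self.gifts exist, and buy also misspells its parameter (quantit vs quantity). A cleaned-up sketch that assigns the attributes first (the enlist call in the question presumably refers to this buy method):

class Shopper:
    def __init__(self, gifts, quantity, budget):
        self.budget = budget
        self.quantity = 0
        self.gifts = []             # gifts bought so far
        self.buy(gifts, quantity)   # safe now: buy() can read self.budget

    def buy(self, gifts, quantity):
        if self.budget < quantity * 100:
            print("Insufficient budget")
        else:
            self.gifts.extend(gifts)
            self.quantity += quantity
            self.budget -= quantity * 100

shopper1 = Shopper(["Toys", "Clothes", "Foods"], 10, 5000)
shopper1.buy(["book"], 1)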
Q: How does a for loop inside of an array (square brackets) work? I need to increase the size of a list using a for loop, and I figured out how to do it, I just don't understand the math and the logic behind it. from random import randint() random_values = randint(0,5) size = 5 list = [ random_values for i in range(size)] This will create a list(array) with 5 random values. I just don't understand the logic behind the for loop in the square brackets. How does this increase the size and add commas to the list(array)? Please let me know, it will help a lot. A: That's a "list comprehension" - a way of writing a for loop that generates a list. The term itself is from way back in the day, and I've never really comprehended why its called a comprehension, but lets just go with it. You start with an iterable on the right side of the for and an expression on the left: [expression for value in iterable]. Python will iterate the values from the iterator, run the expression on each, and build a list. It works the same as if you used a regular for loop and appended to an existing list.
How does a for loop inside of an array (square brackets) work?
I need to increase the size of a list using a for loop, and I figured out how to do it, I just don't understand the math and the logic behind it. from random import randint() random_values = randint(0,5) size = 5 list = [ random_values for i in range(size)] This will create a list(array) with 5 random values. I just don't understand the logic behind the for loop in the square brackets. How does this increase the size and add commas to the list(array)? Please let me know, it will help a lot.
[ "That's a \"list comprehension\" - a way of writing a for loop that generates a list. The term itself is from way back in the day, and I've never really comprehended why its called a comprehension, but lets just go with it.\nYou start with an iterable on the right side of the for and an expression on the left: [expression for value in iterable]. Python will iterate the values from the iterator, run the expression on each, and build a list. It works the same as if you used a regular for loop and appended to an existing list.\n" ]
[ 1 ]
[]
[]
[ "list_comprehension", "python" ]
stackoverflow_0074672606_list_comprehension_python.txt
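Two things worth adding about the question's snippet: from random import randint() is a syntax error (drop the parentheses), and because randint(0, 5) runs once before the comprehension, all five entries come out identical. Calling it inside the expression draws a fresh value per iteration; the long-hand loop below shows the equivalence:

from random import randint

size = 5

values = []                 # long-hand form
for i in range(size):
    values.append(randint(0, 5))

values = [randint(0, 5) for i in range(size)]   # same thing as a comprehension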
Q: Assign value numbers for alphabet in Python I have alphabets that I want to assign as follows: lowercase items a-z have value of 1-26 uppercase items A-Z have value of 27-52 What is the shortest way to implement this [a,B,h,R] Expected Output: [1,28,8,44] How can we go about doing this in Python Thank you A: The python string module is perfect for this. from string import ascii_letters print([ascii_letters.index(letter) + 1 for letter in ["a", "B", "h", "R"]]) A: I think I recognize an Advent of Code question! I developed the alphabet to score mapping as follows: import string from collections import OrderedDict lower_priorities = OrderedDict(zip(string.ascii_lowercase, range(1,27))) upper_priorities = OrderedDict(zip(string.ascii_uppercase, range(27,53))) You can then call the dictionary by the letter value you are interested in after checking whether is is uppercase or lowercase and then sorting it to the correct dictionary. Otherwise, combine the two dictionaries and just query the combined dictionary, i.e. lower_priorities["a"] would return 1. Loop through your array and obtain your outputs. Can't guarantee it's the shortest, but I can say it works! A: This is a way that you can implement what you want: print([ord(item) - 38 if ord(item) < 97 else ord(item) - 96 for item in ['a','B','h','R']]) converting each item into an int value and finding which positioning they are in (Capitalized letters come before lowercase) https://appdividend.com/2022/06/15/how-to-convert-python-char-to-int/
Assign value numbers for alphabet in Python
I have alphabets that I want to assign as follows: lowercase items a-z have value of 1-26 uppercase items A-Z have value of 27-52 What is the shortest way to implement this [a,B,h,R] Expected Output: [1,28,8,44] How can we go about doing this in Python Thank you
[ "The python string module is perfect for this.\nfrom string import ascii_letters\nprint([ascii_letters.index(letter) + 1 for letter in [\"a\", \"B\", \"h\", \"R\"]])\n\n", "I think I recognize an Advent of Code question! I developed the alphabet to score mapping as follows:\nimport string\nfrom collections import OrderedDict\nlower_priorities = OrderedDict(zip(string.ascii_lowercase, range(1,27)))\nupper_priorities = OrderedDict(zip(string.ascii_uppercase, range(27,53)))\n\nYou can then call the dictionary by the letter value you are interested in after checking whether is is uppercase or lowercase and then sorting it to the correct dictionary. Otherwise, combine the two dictionaries and just query the combined dictionary, i.e. lower_priorities[\"a\"] would return 1. Loop through your array and obtain your outputs. Can't guarantee it's the shortest, but I can say it works!\n", "This is a way that you can implement what you want:\nprint([ord(item) - 38 if ord(item) < 97 else ord(item) - 96 for item in ['a','B','h','R']])\n\nconverting each item into an int value and finding which positioning they are in (Capitalized letters come before lowercase)\nhttps://appdividend.com/2022/06/15/how-to-convert-python-char-to-int/\n" ]
[ 3, 1, 0 ]
[]
[]
[ "list", "python", "python_3.x" ]
stackoverflow_0074672541_list_python_python_3.x.txt
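A variant of the first answer that precomputes one lookup table and confirms the expected output:

from string import ascii_lowercase, ascii_uppercase

values = {ch: i + 1 for i, ch in enumerate(ascii_lowercase + ascii_uppercase)}
print([values[ch] for ch in ["a", "B", "h", "R"]])   # -> [1, 28, 8, 44]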
Q: ERROR:root:can't pickle fasttext_pybind.fasttext objects I am using gunicorn with multiple workers for my machine learning project. But the problem is when I send a train request only the worker getting the training request gets updated with the latest model after training is done. Here it is worth to mention that, to make the inference faster I have programmed to load the model once after each training. This is why, the only worker which is used for current training operation loads the latest model and the other workers still keeps the previously loaded model. Right now the model file (binary format) is loaded once after each training in a global dictionary variable where key is the model name and the value is the model file. Obviously, this problem won't occur if I program it to load the model every time from disk for each prediction, but I cannot do it, as it will make the prediction slower. I studied further on global variables and further investigation shows that, in a multi-processing environment, all the workers (processes) create their own copies of global variables. Apart from the binary model file, I also have some other global variables (in dictionary type) need to be synced across all processes. So, how to handle this situation? TL;DR: I need some approach which can help me to store variable which will be common across all the processes (workers). Any way to do this? With multiprocessing.Manager, dill etc.? Update 1: I have multiple machine learning algorithms in my project and they have their own model files, which are being loaded to memory in a dictionary where the key is the model name and the value is the corresponding model object. I need to share all of them (in other words, I need to share the dictionary). But some of the models are not pickle serializable like - FastText. So, when I try to use a proxy variable (in my case a dictionary to hold models) with multiprocessing.Manager I get error for those non-pickle-serializable object while assigning the loaded model file to this dictionary. Like: can't pickle fasttext_pybind.fasttext objects. More information on multiprocessing.Manager can be found here: Proxy Objects Following is the summary what I have done: import multiprocessing import fasttext mgr = multiprocessing.Manager() model_dict = mgr.dict() model_file = fasttext.load_model("path/to/model/file/which/is/in/.bin/format") model_dict["fasttext"] = model_file # This line throws this error Error: can't pickle fasttext_pybind.fasttext objects I printed the model_file which I am trying to assign, it is: <fasttext.FastText._FastText object at 0x7f86e2b682e8> Update 2: According to this answer I modified my code a little bit: import fasttext from multiprocessing.managers import SyncManager def Manager(): m = SyncManager() m.start() return m # As the model file has a type of "<fasttext.FastText._FastText object at 0x7f86e2b682e8>" so, using "fasttext.FastText._FastText" as the class of it SyncManager.register("fast", fasttext.FastText._FastText) # Now this is the Manager as a replacement of the old one. mgr = Manager() ft = mgr.fast() # This line gives error. This gives me EOFError. Update 3: I tried using dill both with multiprocessing and multiprocess. The summary of changes are as the following: import multiprocessing import multiprocess import dill # Any one of the following two lines mgr = multiprocessing.Manager() # Or, mgr = multiprocess.Manager() model_dict = mgr.dict() ... ... ... ... ... ... 
model_file = dill.dumps(model_file) # This line throws the error model_dict["fasttext"] = model_file ... ... ... ... ... ... # During loading model_file = dill.loads(model_dict["fasttext"]) But still getting the error: can't pickle fasttext_pybind.fasttext objects. Update 4: This time I am using another library called jsonpickle. It seems to be that serialization and de-serialization occurs properly (as it is not reporting any issue while running). But surprisingly enough, after de-serialization whenever I am making a prediction, it faces segmentation fault. More details and the steps to reproduce it can be found here: Segmentation fault (core dumped) Update 5: Tried cloudpickle, srsly, but couldn't make the program working. A: For the sake of completeness I am providing the solution that worked for me. All the approaches I have tried to serialize FastText went in vain. Finally, as @MedetTleukabiluly mentioned in the comment, I managed to share the message of loading the model from the disk with other workers with redis-pubsub. Obviously, it is not actually sharing the model from the same memory space, rather, just sharing the message to other workers to inform them they should load the model from the disk (as a new training just happened). Following is the general solution: # redis_pubsub.py import logging import os import fasttext import socket import threading import time """The whole purpose of GLOBAL_NAMESPACE is to keep the whole pubsub mechanism separate. As this might be a case another service also publishing in the same channel. """ GLOBAL_NAMESPACE = "SERVICE_0" def get_ip(): s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) try: # doesn't even have to be reachable s.connect(('10.255.255.255', 1)) IP = s.getsockname()[0] except Exception: IP = '127.0.0.1' finally: s.close() return IP class RedisPubSub: def __init__(self): self.redis_client = get_redis_client() #TODO: A SAMPLE METHOD WHICH CAN RETURN YOUR REDIS CLIENT (you have to implement) # Unique ID is used, to identify which worker from which server is the publisher. Just to avoid updating # getting a message which message is indeed sent by itself. self.unique_id = "IP_" + get_ip() + "__" + str(GLOBAL_NAMESPACE) + "__" + "PID_" + str(os.getpid()) def listen_to_channel_and_update_models(self, channel): try: pubsub = self.redis_client.pubsub() pubsub.subscribe(channel) except Exception as exception: logging.error(f"REDIS_ERROR: Model Update Listening: {exception}") while True: try: message = pubsub.get_message() # Successful operation gives 1 and unsuccessful gives 0 # ..we are not interested to receive these flags if message and message["data"] != 1 and message["data"] != 0: message = message["data"].decode("utf-8") message = str(message) splitted_msg = message.split("__SEPERATOR__") # Not only making sure the message is coming from another worker # but also we have to make sure the message sender and receiver (i.e, both of the workers) are under the same namespace if (splitted_msg[0] != self.unique_id) and (splitted_msg[0].split('__')[1] == GLOBAL_NAMESPACE): algo_name = splitted_msg[1] model_path = splitted_msg[2] # Fasttext if "fasttext" in algo_name: try: #TODO: YOU WILL GET THE LOADED NEW FILE IN model_file. USE IT TO UPDATE THE OLD ONE. 
model_file = fasttext.load_model(model_path + '.bin') except Exception as exception: logging.error(exception) else: logging.info(f"{algo_name} model is updated for process with unique_id: {self.unique_id} by process with unique_id: {splitted_msg[0]}") time.sleep(1) # sleeping for 1 second to avoid hammering the CPU too much except Exception as exception: time.sleep(1) logging.error(f"PUBSUB_ERROR: Model or component update: {exception}") def publish_to_channel(self, channel, algo_name, model_path): def _publish_to_channel(): try: message = self.unique_id + '__SEPERATOR__' + str(algo_name) + '__SEPERATOR__' + str(model_path) time.sleep(3) self.redis_client.publish(channel, message) except Exception as exception: logging.error(f"PUBSUB_ERROR: Model or component publishing: {exception}") # As the delay before pubsub can pause the next activities which are independent, hence, doing this publishing in another thread. thread = threading.Thread(target = _publish_to_channel) thread.start() Also you have to start the listener: from redis_pubsub import RedisPubSub pubsub = RedisPubSub() # start the listener: thread = threading.Thread(target = pubsub.listen_to_channel_and_update_models, args = ("sync-ml-models", )) thread.start() From fasttext training module, when you finish the training, publish this message to other workers, such that the other workers get a chance to re-load the model from the disk: # fasttext_api.py from redis_pubsub import RedisPubSub pubsub = RedisPubSub() pubsub.publish_to_channel(channel = "sync-ml-models", # a sample name for the channel algo_name = f"fasttext", model_path = "path/to/fasttext/model")
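The redis_pubsub.py snippet above leaves get_redis_client() as a TODO. A minimal sketch of such a helper, assuming a local Redis instance and the redis-py package (host, port and db values are assumptions to adjust for your setup), could be:

# redis_client.py (hypothetical helper assumed by redis_pubsub.py above)
import redis

def get_redis_client():
    # Connection details are assumptions; point these at your own Redis server.
    return redis.Redis(host="localhost", port=6379, db=0)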
ERROR:root:can't pickle fasttext_pybind.fasttext objects
I am using gunicorn with multiple workers for my machine learning project. But the problem is when I send a train request only the worker getting the training request gets updated with the latest model after training is done. Here it is worth to mention that, to make the inference faster I have programmed to load the model once after each training. This is why, the only worker which is used for current training operation loads the latest model and the other workers still keeps the previously loaded model. Right now the model file (binary format) is loaded once after each training in a global dictionary variable where key is the model name and the value is the model file. Obviously, this problem won't occur if I program it to load the model every time from disk for each prediction, but I cannot do it, as it will make the prediction slower. I studied further on global variables and further investigation shows that, in a multi-processing environment, all the workers (processes) create their own copies of global variables. Apart from the binary model file, I also have some other global variables (in dictionary type) need to be synced across all processes. So, how to handle this situation? TL;DR: I need some approach which can help me to store variable which will be common across all the processes (workers). Any way to do this? With multiprocessing.Manager, dill etc.? Update 1: I have multiple machine learning algorithms in my project and they have their own model files, which are being loaded to memory in a dictionary where the key is the model name and the value is the corresponding model object. I need to share all of them (in other words, I need to share the dictionary). But some of the models are not pickle serializable like - FastText. So, when I try to use a proxy variable (in my case a dictionary to hold models) with multiprocessing.Manager I get error for those non-pickle-serializable object while assigning the loaded model file to this dictionary. Like: can't pickle fasttext_pybind.fasttext objects. More information on multiprocessing.Manager can be found here: Proxy Objects Following is the summary what I have done: import multiprocessing import fasttext mgr = multiprocessing.Manager() model_dict = mgr.dict() model_file = fasttext.load_model("path/to/model/file/which/is/in/.bin/format") model_dict["fasttext"] = model_file # This line throws this error Error: can't pickle fasttext_pybind.fasttext objects I printed the model_file which I am trying to assign, it is: <fasttext.FastText._FastText object at 0x7f86e2b682e8> Update 2: According to this answer I modified my code a little bit: import fasttext from multiprocessing.managers import SyncManager def Manager(): m = SyncManager() m.start() return m # As the model file has a type of "<fasttext.FastText._FastText object at 0x7f86e2b682e8>" so, using "fasttext.FastText._FastText" as the class of it SyncManager.register("fast", fasttext.FastText._FastText) # Now this is the Manager as a replacement of the old one. mgr = Manager() ft = mgr.fast() # This line gives error. This gives me EOFError. Update 3: I tried using dill both with multiprocessing and multiprocess. The summary of changes are as the following: import multiprocessing import multiprocess import dill # Any one of the following two lines mgr = multiprocessing.Manager() # Or, mgr = multiprocess.Manager() model_dict = mgr.dict() ... ... ... ... ... ... model_file = dill.dumps(model_file) # This line throws the error model_dict["fasttext"] = model_file ... ... ... ... ... ... 
# During loading model_file = dill.loads(model_dict["fasttext"]) But still getting the error: can't pickle fasttext_pybind.fasttext objects. Update 4: This time I am using another library called jsonpickle. It seems to be that serialization and de-serialization occurs properly (as it is not reporting any issue while running). But surprisingly enough, after de-serialization whenever I am making a prediction, it faces segmentation fault. More details and the steps to reproduce it can be found here: Segmentation fault (core dumped) Update 5: Tried cloudpickle, srsly, but couldn't make the program working.
[ "For the sake of completeness I am providing the solution that worked for me. All the approaches I have tried to serialize FastText went in vain. Finally, as @MedetTleukabiluly mentioned in the comment, I managed to share the message of loading the model from the disk with other workers with redis-pubsub. Obviously, it is not actually sharing the model from the same memory space, rather, just sharing the message to other workers to inform them they should load the model from the disk (as a new training just happened). Following is the general solution:\n# redis_pubsub.py\n\nimport logging\nimport os\nimport fasttext\nimport socket\nimport threading\nimport time\n\n\"\"\"The whole purpose of GLOBAL_NAMESPACE is to keep the whole pubsub mechanism separate.\nAs this might be a case another service also publishing in the same channel.\n\"\"\"\nGLOBAL_NAMESPACE = \"SERVICE_0\"\n\ndef get_ip():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n # doesn't even have to be reachable\n s.connect(('10.255.255.255', 1))\n IP = s.getsockname()[0]\n except Exception:\n IP = '127.0.0.1'\n finally:\n s.close()\n return IP\n\n\nclass RedisPubSub:\n def __init__(self):\n self.redis_client = get_redis_client() #TODO: A SAMPLE METHOD WHICH CAN RETURN YOUR REDIS CLIENT (you have to implement)\n # Unique ID is used, to identify which worker from which server is the publisher. Just to avoid updating\n # getting a message which message is indeed sent by itself.\n self.unique_id = \"IP_\" + get_ip() + \"__\" + str(GLOBAL_NAMESPACE) + \"__\" + \"PID_\" + str(os.getpid())\n\n\n def listen_to_channel_and_update_models(self, channel):\n try:\n pubsub = self.redis_client.pubsub()\n pubsub.subscribe(channel)\n except Exception as exception:\n logging.error(f\"REDIS_ERROR: Model Update Listening: {exception}\")\n\n while True:\n try:\n message = pubsub.get_message()\n\n # Successful operation gives 1 and unsuccessful gives 0\n # ..we are not interested to receive these flags\n if message and message[\"data\"] != 1 and message[\"data\"] != 0: \n message = message[\"data\"].decode(\"utf-8\")\n message = str(message)\n splitted_msg = message.split(\"__SEPERATOR__\")\n\n\n # Not only making sure the message is coming from another worker\n # but also we have to make sure the message sender and receiver (i.e, both of the workers) are under the same namespace\n if (splitted_msg[0] != self.unique_id) and (splitted_msg[0].split('__')[1] == GLOBAL_NAMESPACE):\n algo_name = splitted_msg[1]\n model_path = splitted_msg[2]\n\n # Fasttext\n if \"fasttext\" in algo_name:\n try:\n #TODO: YOU WILL GET THE LOADED NEW FILE IN model_file. 
USE IT TO UPDATE THE OLD ONE.\n model_file = fasttext.load_model(model_path + '.bin')\n except Exception as exception:\n logging.error(exception)\n else:\n logging.info(f\"{algo_name} model is updated for process with unique_id: {self.unique_id} by process with unique_id: {splitted_msg[0]}\")\n\n\n time.sleep(1) # sleeping for 1 second to avoid hammering the CPU too much\n\n except Exception as exception:\n time.sleep(1)\n logging.error(f\"PUBSUB_ERROR: Model or component update: {exception}\")\n\n\n def publish_to_channel(self, channel, algo_name, model_path):\n def _publish_to_channel():\n try:\n message = self.unique_id + '__SEPERATOR__' + str(algo_name) + '__SEPERATOR__' + str(model_path)\n time.sleep(3)\n self.redis_client.publish(channel, message)\n except Exception as exception:\n logging.error(f\"PUBSUB_ERROR: Model or component publishing: {exception}\")\n\n # As the delay before pubsub can pause the next activities which are independent, hence, doing this publishing in another thread.\n thread = threading.Thread(target = _publish_to_channel)\n thread.start()\n\nAlso you have to start the listener:\nfrom redis_pubsub import RedisPubSub\npubsub = RedisPubSub()\n\n\n# start the listener:\nthread = threading.Thread(target = pubsub.listen_to_channel_and_update_models, args = (\"sync-ml-models\", ))\nthread.start()\n\nFrom fasttext training module, when you finish the training, publish this message to other workers, such that the other workers get a chance to re-load the model from the disk:\n# fasttext_api.py\n\nfrom redis_pubsub import RedisPubSub\npubsub = RedisPubSub()\n\npubsub.publish_to_channel(channel = \"sync-ml-models\", # a sample name for the channel\n algo_name = f\"fasttext\",\n model_path = \"path/to/fasttext/model\")\n\n\n" ]
[ 0 ]
[]
[]
[ "dill", "fasttext", "gunicorn", "multiprocessing", "python" ]
stackoverflow_0069430747_dill_fasttext_gunicorn_multiprocessing_python.txt
Q: Guard Node in torrc file Hello, I have some questions related to tor. How do I disable the Guard node in the torrc file or by using stem? Is there any method in stem where I can specify my Exit Node? I know a method in the torrc file but I don't know how to do it in stem or using the controller. For example, I want this because I want my entry node to change for every circuit and the Exit to stay the same: controller.set_options({'__DisablePredictedCircuits': '1', 'MaxOnionsPending': '0', 'newcircuitperiod': '999999999', 'maxcircuitdirtiness': '999999999'}) Also, if it is possible, it would be good to pass my nodes as an argument here: controller.new_circuit() A: To disable Guard nodes in the torrc file, you can add the following lines: UseEntryGuards 0 NumEntryGuards 0 To specify an Exit node in the torrc file, you can add the following line: ExitNodes $fingerprint where $fingerprint is the fingerprint of the Exit node you want to use. You can use the set_options method of the Controller class to set the UseEntryGuards, NumEntryGuards, and ExitNodes options as follows: from stem import Controller with Controller.from_port() as controller: controller.authenticate() controller.set_options({ 'UseEntryGuards': '0', 'NumEntryGuards': '0', 'ExitNodes': '$fingerprint', }) To specify the Exit node when creating a new circuit, you can use the extend_circuit method of the Controller class as follows: from stem import Controller with Controller.from_port() as controller: controller.authenticate() controller.new_circuit() controller.extend_circuit('$fingerprint') where $fingerprint is the fingerprint of the Exit node you want to use.
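On the last part of the question (passing nodes directly when building a circuit), here is a hedged sketch: stem's Controller.new_circuit also accepts a path of relay fingerprints, so an explicit entry and exit can be chosen per circuit. The fingerprints below are placeholders, and the control port 9051 is assumed.

from stem.control import Controller

ENTRY_FP = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'   # placeholder fingerprint
MIDDLE_FP = 'BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB'  # placeholder fingerprint
EXIT_FP = 'CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC'    # placeholder fingerprint

with Controller.from_port(port=9051) as controller:
    controller.authenticate()
    # Build a circuit through an explicit path: entry -> middle -> exit.
    circuit_id = controller.new_circuit([ENTRY_FP, MIDDLE_FP, EXIT_FP], await_build=True)
    print('Built circuit', circuit_id)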
Guard Node in torrc file
Hello, I have some questions related to tor. How do I disable the Guard node in the torrc file or by using stem? Is there any method in stem where I can specify my Exit Node? I know a method in the torrc file but I don't know how to do it in stem or using the controller. For example, I want this because I want my entry node to change for every circuit and the Exit to stay the same: controller.set_options({'__DisablePredictedCircuits': '1', 'MaxOnionsPending': '0', 'newcircuitperiod': '999999999', 'maxcircuitdirtiness': '999999999'}) Also, if it is possible, it would be good to pass my nodes as an argument here: controller.new_circuit()
[ "To disable Guard nodes in the torrc file, you can add the following lines:\nUseEntryGuards 0\nNumEntryGuards 0\n\nTo specify an Exit node in the torrc file, you can add the following line:\nExitNodes $fingerprint\n\nwhere $fingerprint is the fingerprint of the Exit node you want to use.\nYou can use the set_options method of the Controller class to set the UseEntryGuards, NumEntryGuards, and ExitNodes options as follows:\nfrom stem import Controller\n\nwith Controller.from_port() as controller:\n controller.authenticate()\n \n controller.set_options({\n 'UseEntryGuards': '0',\n 'NumEntryGuards': '0',\n 'ExitNodes': '$fingerprint',\n })\n\nTo specify the Exit node when creating a new circuit, you can use the extend_circuit method of the Controller class as follows:\nfrom stem import Controller\n\nwith Controller.from_port() as controller:\n controller.authenticate()\n \n controller.new_circuit()\n controller.extend_circuit('$fingerprint')\n\nwhere $fingerprint is the fingerprint of the Exit node you want to use.\n" ]
[ 0 ]
[]
[]
[ "python", "python_3.x", "stem" ]
stackoverflow_0074672521_python_python_3.x_stem.txt
Q: problem password_reset_key_message.txt` - dj-rest-auth I'm creating a project (an api), but I'm stuck on the next part. When sending the password reset mail, specifically password_reset_key_message.txt, I can't capture the user's 'key' and 'uid', I want to change the address. Email delivery work fine, my problem is with password_reset_key_message.txt. Packages django==4.0.7 dj-rest-auth==2.2.5 django-allauth==0.51.0 I'm looking for something like this: {% extends "account/email/base_message.txt" %} {% load i18n %} {% block content %}{% autoescape off %}{% blocktrans %}You're receiving this e-mail because you or someone else has requested a password for your user account. It can be safely ignored if you did not request a password reset. Click the link below to reset your password.{% endblocktrans %} --- https://fontend-project.com/password-reset?id={{ uid }}&key={{ key }} --- {% if username %} {% blocktrans %}In case you forgot, your username is {{ username }}.{% endblocktrans %}{% endif %}{% endautoescape %}{% endblock %} But i can't capture 'uid and key. By default it uses '{{ password_reset_url }}', but I want to change the address, I need 'uid' and 'key' which are provided in '{{ password_reset_url }}' and I can't capture them, which I can do in the template o message file 'email_confirmation_message.txt'. Help please, I have tried and could not find the solution. A: To resolve this issue, I did the following: In the settings.py of the main project, add a custom_password_serializer: config/settings.py REST_AUTH_SERIALIZERS = { 'PASSWORD_RESET_SERIALIZER': 'myapp.serializers.CustomPasswordResetSerializer' } Create the custom_password_serializer: config/serializers.py from dj_rest_auth.serializers import PasswordResetSerializer from myapp.forms import CustomResetForm class CustomPasswordResetSerializer(PasswordResetSerializer): """ Serializer for requesting a password reset e-mail. """ @property def password_reset_form_class(self): return CustomResetForm Add new forms.py config/forms.py from dj_rest_auth.forms import AllAuthPasswordResetForm from django.contrib.sites.shortcuts import get_current_site from allauth.account.forms import default_token_generator from allauth.account.adapter import get_adapter from allauth.account.utils import user_pk_to_url_str class CustomResetForm(AllAuthPasswordResetForm): def save(self, request, **kwargs): current_site = get_current_site(request) email = self.cleaned_data['email'] token_generator = kwargs.get('token_generator', default_token_generator) for user in self.users: temp_key = token_generator.make_token(user) uid = user_pk_to_url_str(user) context = { 'current_site': current_site, 'user': user, 'key': temp_key, 'uid': uid, } get_adapter(request).send_mail( 'account/email/password_reset_key', email, context ) return self.cleaned_data['email'] Now you can capture user, key, uid in the password_reset_key_message.txt file Remember that password_reset_key_message.txt must go in the following path backend_directory/api/templates/account/email/ Thanks to StephenSorriaux for this solution, Source. I hope this solution helps.
Problem with password_reset_key_message.txt - dj-rest-auth
I'm creating a project (an api), but I'm stuck on the next part. When sending the password reset mail, specifically password_reset_key_message.txt, I can't capture the user's 'key' and 'uid', I want to change the address. Email delivery work fine, my problem is with password_reset_key_message.txt. Packages django==4.0.7 dj-rest-auth==2.2.5 django-allauth==0.51.0 I'm looking for something like this: {% extends "account/email/base_message.txt" %} {% load i18n %} {% block content %}{% autoescape off %}{% blocktrans %}You're receiving this e-mail because you or someone else has requested a password for your user account. It can be safely ignored if you did not request a password reset. Click the link below to reset your password.{% endblocktrans %} --- https://fontend-project.com/password-reset?id={{ uid }}&key={{ key }} --- {% if username %} {% blocktrans %}In case you forgot, your username is {{ username }}.{% endblocktrans %}{% endif %}{% endautoescape %}{% endblock %} But i can't capture 'uid and key. By default it uses '{{ password_reset_url }}', but I want to change the address, I need 'uid' and 'key' which are provided in '{{ password_reset_url }}' and I can't capture them, which I can do in the template o message file 'email_confirmation_message.txt'. Help please, I have tried and could not find the solution.
[ "To resolve this issue, I did the following:\n\nIn the settings.py of the main project, add a custom_password_serializer:\n\nconfig/settings.py\n\nREST_AUTH_SERIALIZERS = {\n 'PASSWORD_RESET_SERIALIZER': 'myapp.serializers.CustomPasswordResetSerializer'\n}\n\n\n\nCreate the custom_password_serializer:\n\nconfig/serializers.py\nfrom dj_rest_auth.serializers import PasswordResetSerializer\nfrom myapp.forms import CustomResetForm\n\nclass CustomPasswordResetSerializer(PasswordResetSerializer):\n \"\"\"\n Serializer for requesting a password reset e-mail.\n \"\"\"\n @property\n def password_reset_form_class(self):\n return CustomResetForm\n\n\n\nAdd new forms.py\n\nconfig/forms.py\nfrom dj_rest_auth.forms import AllAuthPasswordResetForm\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom allauth.account.forms import default_token_generator\nfrom allauth.account.adapter import get_adapter\nfrom allauth.account.utils import user_pk_to_url_str\n\nclass CustomResetForm(AllAuthPasswordResetForm):\n\n def save(self, request, **kwargs):\n current_site = get_current_site(request)\n email = self.cleaned_data['email']\n token_generator = kwargs.get('token_generator', default_token_generator)\n\n for user in self.users:\n\n temp_key = token_generator.make_token(user)\n uid = user_pk_to_url_str(user)\n\n context = {\n 'current_site': current_site,\n 'user': user,\n 'key': temp_key,\n 'uid': uid,\n }\n get_adapter(request).send_mail(\n 'account/email/password_reset_key', email, context\n )\n return self.cleaned_data['email']\n\n\nNow you can capture user, key, uid in the password_reset_key_message.txt file\n\nRemember that password_reset_key_message.txt must go in the following path backend_directory/api/templates/account/email/\nThanks to StephenSorriaux for this solution, Source.\nI hope this solution helps.\n" ]
[ 0 ]
[]
[]
[ "api", "dj_rest_auth", "django_allauth", "python" ]
stackoverflow_0073476094_api_dj_rest_auth_django_allauth_python.txt
Q: python parallel and join threading not working? I need to run parallel and join threads in the following code: ` from threading import Thread import time def do_stuff(i): if i == 1: time.sleep(1) if i ==2: time.sleep(2) if i ==3: time.sleep(3) print(i) time.sleep(1) def thread1(i): worker1 = Thread(target = do_stuff, args=(i,), daemon= True) worker1.start() def thread2(i): worker2 = Thread(target = do_stuff, args=(i,), daemon= True) worker2.start() def thread3(i): worker3 = Thread(target = do_stuff, args=(i,), daemon= True) worker3.start() num_threads = 1000 for i in range(num_threads): worker11 = Thread(target = thread1, args=(1, ), daemon= True) worker11.start() worker22 = Thread(target = thread2, args=(2, ), daemon= True) worker22.start() worker33 = Thread(target = thread3, args=(3, ), daemon= True) worker33.start() while True: # Keep running Code L = 1 ` Image explaine the process results shows that join method not working and the parallel threads join with themselfs. reults: 312233 1 3 11 31 2311 32 I appreciate your help. run parllel and join multi_threads properly. add Queue to to the code. A: To run threads in parallel, you can use the Thread.start() method, which starts the execution of the thread's target function. The join() method, on the other hand, is used to wait for a thread to complete its execution. In your code, you are starting three threads in the main thread, and then calling join() on each of those threads. This causes the main thread to wait for each of those threads to complete their execution before starting the next thread. This is why your threads are not running in parallel. To fix this, you can move the calls to start() and join() to the main thread, after starting all three threads. This will allow all three threads to run in parallel. Here is how your code could look like with these changes: from threading import Thread import time def do_stuff(i): time.sleep(1) print(i) time.sleep(1) def thread1(i): worker = Thread(target = do_stuff, args=(i,), daemon= True) worker.start() def thread2(i): worker = Thread(target = do_stuff, args=(i,), daemon= True) worker.start() def thread3(i): worker = Thread(target = do_stuff, args=(i,), daemon= True) worker.start() num_threads = 10 for i in range(num_threads): worker1 = Thread(target = thread1, args=(1, ), daemon= True) worker1.start() worker2 = Thread(target = thread2, args=(2, ), daemon= True) worker2.start() worker3 = Thread(target = thread3, args=(3, ), daemon= True) worker3.start() # wait for all threads to complete their execution worker1.join() worker2.join() worker3.join() while True: # Keep running Code L = 1 As for adding a queue to your code, you can use the Queue class from the queue module to create a queue. You can then add items to the queue using the put() method, and retrieve items from the queue using the get() method. Here is an example of how you could use a queue in your code: from queue import Queue from threading import Thread import time def do_stuff(i, queue): time.sleep(1) queue.put(i) def thread1(i, queue): worker = Thread(target = do_stuff, args=(i, queue), daemon= True) worker.start() def thread2(i, queue): worker = Thread(target = do_stuff, args=(i, queue), daemon= True) worker.start() def thread3(i, queue): worker = Thread(target = do_stuff, args=(i, queue), daemon= True) worker.start() num_threads = 10 queue = Queue() for i in range(num_threads): worker1 = Thread(target = thread1, args=(1, queue), daemon= True) worker1
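As a separate minimal sketch of the usual pattern the question is after (not the answer's code): start every thread first, then join them all, and collect results through a Queue. The sleep times are just stand-ins for real work.

from queue import Queue
from threading import Thread
import time

def do_stuff(i, results):
    time.sleep(i)      # simulate work taking i seconds
    results.put(i)     # report the result back through the queue

results = Queue()
workers = [Thread(target=do_stuff, args=(i, results)) for i in (1, 2, 3)]

for w in workers:
    w.start()          # start everything first so the threads run in parallel
for w in workers:
    w.join()           # then wait for all of them to finish

while not results.empty():
    print(results.get())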
python parallel and join threading not working?
I need to run parallel and join threads in the following code: ` from threading import Thread import time def do_stuff(i): if i == 1: time.sleep(1) if i ==2: time.sleep(2) if i ==3: time.sleep(3) print(i) time.sleep(1) def thread1(i): worker1 = Thread(target = do_stuff, args=(i,), daemon= True) worker1.start() def thread2(i): worker2 = Thread(target = do_stuff, args=(i,), daemon= True) worker2.start() def thread3(i): worker3 = Thread(target = do_stuff, args=(i,), daemon= True) worker3.start() num_threads = 1000 for i in range(num_threads): worker11 = Thread(target = thread1, args=(1, ), daemon= True) worker11.start() worker22 = Thread(target = thread2, args=(2, ), daemon= True) worker22.start() worker33 = Thread(target = thread3, args=(3, ), daemon= True) worker33.start() while True: # Keep running Code L = 1 ` Image explaining the process. The results show that the join method is not working and the parallel threads join among themselves. results: 312233 1 3 11 31 2311 32 I appreciate your help. Run parallel and join multi_threads properly. Add a Queue to the code.
[ "To run threads in parallel, you can use the Thread.start() method, which starts the execution of the thread's target function. The join() method, on the other hand, is used to wait for a thread to complete its execution.\nIn your code, you are starting three threads in the main thread, and then calling join() on each of those threads. This causes the main thread to wait for each of those threads to complete their execution before starting the next thread. This is why your threads are not running in parallel.\nTo fix this, you can move the calls to start() and join() to the main thread, after starting all three threads. This will allow all three threads to run in parallel. Here is how your code could look like with these changes:\nfrom threading import Thread\nimport time\n\ndef do_stuff(i):\n time.sleep(1)\n print(i)\n time.sleep(1)\n\ndef thread1(i):\n worker = Thread(target = do_stuff, args=(i,), daemon= True)\n worker.start()\n\ndef thread2(i):\n worker = Thread(target = do_stuff, args=(i,), daemon= True)\n worker.start()\n \ndef thread3(i):\n worker = Thread(target = do_stuff, args=(i,), daemon= True)\n worker.start()\n\nnum_threads = 10\nfor i in range(num_threads):\n worker1 = Thread(target = thread1, args=(1, ), daemon= True)\n worker1.start()\n worker2 = Thread(target = thread2, args=(2, ), daemon= True)\n worker2.start()\n worker3 = Thread(target = thread3, args=(3, ), daemon= True)\n worker3.start()\n \n # wait for all threads to complete their execution\n worker1.join()\n worker2.join()\n worker3.join()\n\nwhile True:\n # Keep running Code\n L = 1\n\nAs for adding a queue to your code, you can use the Queue class from the queue module to create a queue. You can then add items to the queue using the put() method, and retrieve items from the queue using the get() method. Here is an example of how you could use a queue in your code:\nfrom queue import Queue\nfrom threading import Thread\nimport time\n\ndef do_stuff(i, queue):\n time.sleep(1)\n queue.put(i)\n\ndef thread1(i, queue):\n worker = Thread(target = do_stuff, args=(i, queue), daemon= True)\n worker.start()\n\ndef thread2(i, queue):\n worker = Thread(target = do_stuff, args=(i, queue), daemon= True)\n worker.start()\n \ndef thread3(i, queue):\n worker = Thread(target = do_stuff, args=(i, queue), daemon= True)\n worker.start()\n\nnum_threads = 10\nqueue = Queue()\n\nfor i in range(num_threads):\n worker1 = Thread(target = thread1, args=(1, queue), daemon= True)\n worker1\n\n" ]
[ 0 ]
[]
[]
[ "python", "python_multithreading" ]
stackoverflow_0074672671_python_python_multithreading.txt
Q: How to test for a reference cycle caused by saved exception? I'm talking about this problem: https://bugs.python.org/issue36820. Small summary: Saving an exception causes a cyclic reference, because the exception's data include a traceback containing the stack frame with the variable where the exception was saved. try: 1/0 except Exception as e: ee = e The code is not broken, beacuse Python will eventually free the memory with its garbage collector. But the whole sitation can be avoided: try: 1/0 except Exception as e: ee = e ... ... finally: ee = None In the linked bpo-36820 there is a demonstration with a weak reference kept alive. My question is if there exist a test that does not need to edit the function itself. Something like run the tested function check if a new cycle was created Can the gc module do that? A: Yes, using the gc module, we can check whether there are (new) exceptions that are only referred to by a traceback frame. In practice, iterating gc objects creates an additional referrer (can't use WeakSet as built-in exceptions don't support weakref), so we check that there are two referrers — the frame and the additional referrer. def get_exception_ids_with_reference_cycle(exclude_ids=None): import gc import types exclude_ids = () if exclude_ids is None else exclude_ids exceptions = [ o for o in gc.get_objects(generation=0) if isinstance(o, Exception) and id(o) not in exclude_ids ] exception_ids = [ id(e) for e in exceptions if len(gc.get_referrers(e)) == 2 and all( isinstance(r, types.FrameType) or r is exceptions for r in gc.get_referrers(e) ) ] return exception_ids Usage: exception_ids = get_exception_ids_with_reference_cycle() x() print(bool(get_exception_ids_with_reference_cycle(exclude_ids=exception_ids))) Alternative usage: @contextlib.contextmanager def make_helper(): exception_ids = get_exception_ids_with_reference_cycle() yield lambda: bool(get_exception_ids_with_reference_cycle(exclude_ids=exception_ids)) with make_helper() as get_true_if_reference_cycle_was_created: x() print(get_true_if_reference_cycle_was_created())
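For a quick, hedged illustration of the cycle itself (separate from the detection helper above, and with a made-up function name), the collector's return value can serve as a rough check, since gc.collect() reports how many unreachable objects it found:

import gc

def leaky():
    try:
        1 / 0
    except Exception as e:
        ee = e  # keeps the frame -> ee -> exception -> traceback -> frame cycle alive

gc.disable()         # keep the collector from running on its own during the check
gc.collect()         # start from a clean state
leaky()
print(gc.collect())  # non-zero: the cycle created by leaky() was only freed by the collector
gc.enable()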
How to test for a reference cycle caused by saved exception?
I'm talking about this problem: https://bugs.python.org/issue36820. Small summary: Saving an exception causes a cyclic reference, because the exception's data include a traceback containing the stack frame with the variable where the exception was saved. try: 1/0 except Exception as e: ee = e The code is not broken, because Python will eventually free the memory with its garbage collector. But the whole situation can be avoided: try: 1/0 except Exception as e: ee = e ... ... finally: ee = None In the linked bpo-36820 there is a demonstration with a weak reference kept alive. My question is whether there exists a test that does not need to edit the function itself. Something like: run the tested function, check if a new cycle was created. Can the gc module do that?
[ "Yes, using the gc module, we can check whether there are (new) exceptions that are only referred to by a traceback frame.\nIn practice, iterating gc objects creates an additional referrer (can't use WeakSet as built-in exceptions don't support weakref), so we check that there are two referrers — the frame and the additional referrer.\ndef get_exception_ids_with_reference_cycle(exclude_ids=None):\n import gc\n import types\n exclude_ids = () if exclude_ids is None else exclude_ids\n exceptions = [\n o for o in gc.get_objects(generation=0)\n if isinstance(o, Exception) and id(o) not in exclude_ids\n ]\n exception_ids = [\n id(e) for e in exceptions\n if len(gc.get_referrers(e)) == 2 and all(\n isinstance(r, types.FrameType) or r is exceptions\n for r in gc.get_referrers(e)\n )\n ]\n return exception_ids\n\nUsage:\nexception_ids = get_exception_ids_with_reference_cycle()\nx()\nprint(bool(get_exception_ids_with_reference_cycle(exclude_ids=exception_ids)))\n\nAlternative usage:\[email protected]\ndef make_helper():\n exception_ids = get_exception_ids_with_reference_cycle()\n yield lambda: bool(get_exception_ids_with_reference_cycle(exclude_ids=exception_ids))\n\n\nwith make_helper() as get_true_if_reference_cycle_was_created:\n x()\n print(get_true_if_reference_cycle_was_created())\n\n" ]
[ 2 ]
[ "I believe you can use the gc module to do something like this.\nimport gc\n\n# First, enable garbage collection\ngc.enable()\n\n# Save an exception to a variable\nexception = Exception('test exception')\n\n# Check for objects that are no longer being referenced by the program\nif gc.garbage:\n # Print the objects that are causing the cycle\n print(gc.garbage)\n\n # Use the gc.get_referrers method to find out what objects\n # are causing the cycle\n for obj in gc.garbage:\n print(gc.get_referrers(obj))\n\n # Modify your code to break the cycle\n # (This will depend on your specific code and the objects\n # involved in the cycle)\n\n" ]
[ -1 ]
[ "garbage_collection", "python" ]
stackoverflow_0067157372_garbage_collection_python.txt
Q: How can I get the source of chat without using selenium? So my issue is that, I want to get user's id info from the chat. The chat area what I'm looking for, looks like this... <div id="chat_area" class="chat_area" style="will-change: scroll-position;"> <dl class="" user_id="asdf1234"><dt class="user_m"><em class="pc"></em> :</dt><dd id="1">blah blah</dd></dl> <a href="javascript:;" user_id="asdf1234" user_nick="asdf1234" userflag="65536" is_mobile="false" grade="user">asdf1234</a> ... What I want do is to, Get the part starting with <a href='javascript:'' user_id='asdf1234' ... so that I can parse this and do some other stuffs. But this webpage is the one I'm currently using, and it can not be proxy(webdriver by selenium). How can I extract that data from the chat? A: It looks like you've got two separate problems here. I'd use both the requests and BeautifulSoup libraries to accomplish this. Use your browser's developer tools, the network tab, to refresh the page and look for the request which responds with the HTML you want. Use the requests library to emulate this request exactly. import requests headers = {"name": "value"} # Get case example. response = requests.get("some_url", headers=headers) # Post case example. data = {"key": "value"} response = requests.post("some_url", headers=headers, data=data) Web-scraping is always finicky, if this doesn't work you're most likely going to need to use a requests session. Or a one-time hacky solution is just to set your cookies from the browser. Once you have made the request you can use BeautifulSoup to scrape your user id very easily. from bs4 import BeautifulSoup # Create BS parser. soup = BeautifulSoup(response.text, 'lxml') # Find all elements with the attribute "user_id". find_results = soup.findAll("a", {"user_id" : True}) # Iterate results. Could also just index if you want the single user_id. for result in find_results: user_id = result["user_id"] A: In order to extract data from the chat area you would need to use a web scraping tool or library. Since you mentioned that you cannot use a proxy such as Selenium, you may want to consider using a library in a programming language like Python or JavaScript to scrape the data from the chat area. For example, in Python you could use BeautifulSoup to parse the HTML of the page and extract the desired information. You could then use the user_id value to do any further processing that you need to do. Alternatively, if you have access to the server-side code for the page, you could modify it to include the user_id information in a more easily accessible way, such as in a data attribute on the chat area element itself. This would allow you to easily retrieve the user_id value using JavaScript without having to scrape the page.
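To make the "requests session" idea from the first answer concrete, here is a minimal sketch; the URL, header and cookie values are placeholders you would copy from the browser's network tab for the real page:

import requests
from bs4 import BeautifulSoup

session = requests.Session()
session.headers.update({'User-Agent': 'Mozilla/5.0'})                # placeholder header
session.cookies.update({'session_id': 'value-copied-from-browser'})  # placeholder cookie

response = session.get('https://example.com/chat')                   # placeholder URL
soup = BeautifulSoup(response.text, 'html.parser')

# Collect every user_id attribute on <a> tags in the page.
user_ids = {a['user_id'] for a in soup.find_all('a', attrs={'user_id': True})}
print(user_ids)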
How can I get the source of chat without using selenium?
So my issue is that I want to get the user's id info from the chat. The chat area I'm looking for looks like this... <div id="chat_area" class="chat_area" style="will-change: scroll-position;"> <dl class="" user_id="asdf1234"><dt class="user_m"><em class="pc"></em> :</dt><dd id="1">blah blah</dd></dl> <a href="javascript:;" user_id="asdf1234" user_nick="asdf1234" userflag="65536" is_mobile="false" grade="user">asdf1234</a> ... What I want to do is get the part starting with <a href='javascript:'' user_id='asdf1234' ... so that I can parse this and do some other stuff. But this webpage is the one I'm currently using, and it cannot be a proxy (webdriver by selenium). How can I extract that data from the chat?
[ "It looks like you've got two separate problems here. I'd use both the requests and BeautifulSoup libraries to accomplish this.\nUse your browser's developer tools, the network tab, to refresh the page and look for the request which responds with the HTML you want. Use the requests library to emulate this request exactly.\nimport requests\n\nheaders = {\"name\": \"value\"}\n\n# Get case example.\nresponse = requests.get(\"some_url\", headers=headers)\n\n# Post case example.\ndata = {\"key\": \"value\"}\nresponse = requests.post(\"some_url\", headers=headers, data=data)\n\nWeb-scraping is always finicky, if this doesn't work you're most likely going to need to use a requests session. Or a one-time hacky solution is just to set your cookies from the browser.\nOnce you have made the request you can use BeautifulSoup to scrape your user id very easily.\nfrom bs4 import BeautifulSoup\n\n# Create BS parser.\nsoup = BeautifulSoup(response.text, 'lxml')\n\n# Find all elements with the attribute \"user_id\".\nfind_results = soup.findAll(\"a\", {\"user_id\" : True})\n\n# Iterate results. Could also just index if you want the single user_id.\nfor result in find_results:\n user_id = result[\"user_id\"]\n\n", "In order to extract data from the chat area you would need to use a web scraping tool or library. Since you mentioned that you cannot use a proxy such as Selenium, you may want to consider using a library in a programming language like Python or JavaScript to scrape the data from the chat area.\nFor example, in Python you could use BeautifulSoup to parse the HTML of the page and extract the desired information. You could then use the user_id value to do any further processing that you need to do.\nAlternatively, if you have access to the server-side code for the page, you could modify it to include the user_id information in a more easily accessible way, such as in a data attribute on the chat area element itself. This would allow you to easily retrieve the user_id value using JavaScript without having to scrape the page.\n" ]
[ 0, 0 ]
[]
[]
[ "html", "python", "python_requests", "selenium" ]
stackoverflow_0074672630_html_python_python_requests_selenium.txt
Q: How to use the first row as keys in excel for python selenium I am making auto login tool on selenium, i want it to use row by row to get login information. I used this code to do it but it only uses the 28th row. Is there a way for it to automatically get the data from the first row to the next to the next? Thank u all! from selenium import webdriver from selenium.webdriver.common.proxy import * from selenium.webdriver.common.by import By from time import sleep import time import openpyxl import requests def get_value_excel(filename, cellname): wb = openpyxl.load_workbook(filename) Sheet1 = wb['Sheet1'] wb.close() return Sheet1[cellname].value def update_value_excel(filename, cellname, value): wb = openpyxl.load_workbook(filename) Sheet1 = wb['Sheet1'] Sheet1[cellname].value = value wb.close() wb.save(filename) col_name_acc="A" col_name_pass="B" filename='file.xlsx' for i in range(2,29): cell_name_acc="%s%s"%(col_name_acc, i) cell_name_pass="%s%s"%(col_name_pass, i) account = get_value_excel(filename, cell_name_acc) password = get_value_excel(filename, cell_name_pass) print(i) A: Do you mean for i in range(2,29): doesn't cover the range of rows in the sheet. So you could increase 29 to max that is needed. Or you can use Sheet1.max_row to get the max row number (last row in sheet with data) except you need to create the worksheet (ws) object to get that value. From your code it looks like you are opening the excel workbook (wb) each time you call the function 'get_value_excel' or 'update_value_excel'. If these are called more than once it is inefficient, you should only open the wb/ws one time for reading and updating data. Therefore you could remove the wb and ws object creation from these functions and do that at the start. The ws object is then available to use in the range. Also you mention "first row to the next to the next" so if starting row 1 the range start param should be 1. Rough example of code change ... def get_value_excel(sheet, cellname): return sheet[cellname].value def update_value_excel(sheet, cellname, value): sheet[cellname].value = value col_name_acc = "A" col_name_pass = "B" filename = 'file.xlsx' wb = openpyxl.load_workbook(filename) Sheet1 = wb['Sheet1'] for i in range(1, Sheet1.max_row): cell_name_acc = "%s%s" % (col_name_acc, i) cell_name_pass = "%s%s" % (col_name_pass, i) account = get_value_excel(Sheet1, cell_name_acc) password = get_value_excel(Sheet1, cell_name_pass) print(i) wb.save(filename)
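As a shorter alternative sketch (the file and sheet names follow the question; whether row 1 holds a header is an assumption), openpyxl's iter_rows can walk every populated row without hard-coding the end index:

import openpyxl

wb = openpyxl.load_workbook('file.xlsx')
ws = wb['Sheet1']

# min_row=2 skips a header row; use min_row=1 if the data starts on the first row.
for account, password in ws.iter_rows(min_row=2, max_col=2, values_only=True):
    if account is None:
        continue  # skip blank trailing rows
    print(account, password)  # hand each pair to the selenium login step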
How to use the first row as keys in excel for python selenium
I am making an auto login tool with selenium, and I want it to go row by row to get the login information. I used this code to do it, but it only uses the 28th row. Is there a way for it to automatically get the data from the first row to the next, and the next? Thank you all! from selenium import webdriver from selenium.webdriver.common.proxy import * from selenium.webdriver.common.by import By from time import sleep import time import openpyxl import requests def get_value_excel(filename, cellname): wb = openpyxl.load_workbook(filename) Sheet1 = wb['Sheet1'] wb.close() return Sheet1[cellname].value def update_value_excel(filename, cellname, value): wb = openpyxl.load_workbook(filename) Sheet1 = wb['Sheet1'] Sheet1[cellname].value = value wb.close() wb.save(filename) col_name_acc="A" col_name_pass="B" filename='file.xlsx' for i in range(2,29): cell_name_acc="%s%s"%(col_name_acc, i) cell_name_pass="%s%s"%(col_name_pass, i) account = get_value_excel(filename, cell_name_acc) password = get_value_excel(filename, cell_name_pass) print(i)
[ "Do you mean\nfor i in range(2,29): \n\ndoesn't cover the range of rows in the sheet. So you could increase 29 to max that is needed. Or you can use\nSheet1.max_row\n\nto get the max row number (last row in sheet with data) except you need to create the worksheet (ws) object to get that value.\nFrom your code it looks like you are opening the excel workbook (wb) each time you call the function 'get_value_excel' or 'update_value_excel'. If these are called more than once it is inefficient, you should only open the wb/ws one time for reading and updating data.\nTherefore you could remove the wb and ws object creation from these functions and do that at the start. The ws object is then available to use in the range.\nAlso you mention \"first row to the next to the next\" so if starting row 1 the range start param should be 1.\nRough example of code change\n...\ndef get_value_excel(sheet, cellname):\n return sheet[cellname].value\n\ndef update_value_excel(sheet, cellname, value):\n sheet[cellname].value = value\n\ncol_name_acc = \"A\"\ncol_name_pass = \"B\"\nfilename = 'file.xlsx'\n\nwb = openpyxl.load_workbook(filename)\nSheet1 = wb['Sheet1']\n\nfor i in range(1, Sheet1.max_row):\n cell_name_acc = \"%s%s\" % (col_name_acc, i)\n cell_name_pass = \"%s%s\" % (col_name_pass, i)\n\n account = get_value_excel(Sheet1, cell_name_acc)\n password = get_value_excel(Sheet1, cell_name_pass)\nprint(i)\n\nwb.save(filename)\n\n" ]
[ 0 ]
[]
[]
[ "excel", "openpyxl", "python", "python_3.x", "selenium_chromedriver" ]
stackoverflow_0074668602_excel_openpyxl_python_python_3.x_selenium_chromedriver.txt
Q: 'int' and 'str' mistake enter image description here bank_account = None highest = 0 for account, amount in accounts.items(): if amount > highest: -------------< bank_account = account highest = account print(bank_acount, highest) TypeError: '>' not supported between instances of 'int' and 'str' How can I alter my code to make it work? A: Either 'account' or 'highest' is a string, you need to determine which one and adjust your code. If the string is the string form of a number, i.e. "1", you can use int("1") to get the int form. A: I believe you have a typo in the line that says: highest = account Looks like you wanted that line to say highest = amount
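Putting both answers together, a corrected sketch of the loop could look like this (the accounts dictionary is made-up sample data; the int() call only matters if the amounts were read in as strings):

accounts = {"checking": "150", "savings": "2300"}  # hypothetical sample data

bank_account = None
highest = 0
for account, amount in accounts.items():
    amount = int(amount)        # convert in case the amount is a string like "2300"
    if amount > highest:
        bank_account = account
        highest = amount        # compare against the amount, not the account name

print(bank_account, highest)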
'int' and 'str' mistake
enter image description here bank_account = None highest = 0 for account, amount in accounts.items(): if amount > highest: -------------< bank_account = account highest = account print(bank_acount, highest) TypeError: '>' not supported between instances of 'int' and 'str' How can I alter my code to make it work?
[ "Either 'account' or 'highest' is a string, you need to determine which one and adjust your code.\nIf the string is the string form of a number, i.e. \"1\", you can use int(\"1\") to get the int form.\n", "I believe you have a typo in the line that says:\n\nhighest = account\n\nLooks like you wanted that line to say\n\nhighest = amount\n\n" ]
[ 1, 1 ]
[]
[]
[ "python", "python_3.x" ]
stackoverflow_0074672798_python_python_3.x.txt
Q: How to tail a log file with timestamps and count occurrences in the last X seconds I have a log files that I read/stream into Python (it contains timestamp and data) using tail. I need a way to see if, in the last 10 seconds, how many lines were seen/observed based on a filter (e.g. line contains "error") I'll be checking every X seconds to see how many lines were present for "error" or "debug" etc... The count should only look at the last X seconds. Example: A log file which Python tails is 2022-11-15 14:00:00,000 : Error 1923 2022-11-15 14:00:01,000 : Error 1456 2022-11-15 14:00:01,400 : Error 1001 2022-11-15 14:00:03,400 : Error 1124 2022-11-15 14:00:05,400 : Normal 0011 2022-11-15 14:00:06,400 : Error 1123 When I read the file, in Python; I want to answer the question In the last X seconds, how many times have I seen Error or How many times have I seen Normal? How would I accomplish this whilst I tail a file to check the last 10 seconds or 20 seconds etc.? A: This is a quite simple solution that looks at the current timestamp (I hardcoded the timestamp to follow the timestamps from your example but you can use datetime.datetime.now() instead). Simply put, the following was done: I made a file called test.log with the exact contents of that python tails piece of text you made above, which I read in using Python Then you should be able to simply run and tweak the following code: import datetime import re with open('test.log') as f: lines = f.readlines() # Defining the interesting interval of time seconds_interval = 4 interval = datetime.timedelta(seconds=seconds_interval) # You could use now = datetime.datetime.now() but this is for this test now = datetime.datetime(2022, 11, 15, hour=14, minute=00, second=6) # This is the function that grabs the interesting lines, and is used in the # filter operator def grab_interesting_lines(line): strDate = re.search('\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}', line).group(0) date_time_obj = datetime.datetime.strptime(strDate, '%Y-%m-%d %H:%M:%S') return date_time_obj >= now - interval # Now we're actually filtering. The interesting_lines object is an iterator over # which we can loop later on interesting_lines = filter(grab_interesting_lines, lines) # Now we simply loop over the interesting lines, and count whether an error # occurred, a "normal" occured or something else happened error_counter = 0 normal_counter = 0 else_counter = 0 for line in interesting_lines: if 'Error' in line: error_counter+=1 elif 'Normal' in line: normal_counter+=1 else: else_counter+=1 # Here we print out the output. Of course you can use these counter variables # somewhere else if you want print(f"The last {seconds_interval} seconds had {error_counter} errors, {normal_counter} normals and {else_counter} elses in there") The output for seconds_interval = 4 and your data example is: The last 4 seconds had 2 errors, 1 normals and 0 elses in there Of course, this is a pretty crude approach. Everything depends on how variable your input is. For example, what if you have an Error and a Normal on the same line? I didn't add any error handling in there because the edge cases are not known. Hope this helps you! :) A: To accomplish this, you can use a combination of the time and tail modules in Python. The time module allows you to get the current time and compare it to previous times, while the tail module allows you to read and stream the log file. 
Here is an example of how you could implement this in Python: # Import the necessary modules import time from tail import tail # Set the number of seconds to look back num_seconds = 10 # Open the log file and stream the lines with open('logfile.txt') as logfile: for line in tail(logfile): # Get the current time current_time = time.time() # Check if the line contains the desired string if "error" in line: # Check if the line was seen within the last X seconds if current_time - line_time <= num_seconds: # Increment the count count += 1 This code will stream the lines from the log file, and for each line, it will check if the line contains the string "error" and if the line was seen within the last num_seconds seconds. If both of these conditions are true, it will increment the count. You can adjust the num_seconds variable to change the number of seconds to look back, and you can also change the string that is being searched for in the log file. A: Here's one way to accomplish this in Python: Store the timestamps and lines of the log file in a list. Use the time module to get the current time and subtract X seconds from it to get the time X seconds ago. Iterate over the list of timestamps and lines and count the number of lines that occurred within the last X seconds. Here's an example: import time log_lines = [ ("2022-11-15 14:00:00,000", "Error 1923"), ("2022-11-15 14:00:01,000", "Error 1456"), ("2022-11-15 14:00:01,400", "Error 1001"), ("2022-11-15 14:00:03,400", "Error 1124"), ("2022-11-15 14:00:05,400", "Normal 0011"), ("2022-11-15 14:00:06,400", "Error 1123"), ] # Get the current time and subtract 10 seconds from it time_x_seconds_ago = time.time() - 10 # Count the number of lines that occurred within the last X seconds count = 0 for timestamp, line in log_lines: if time.strptime(timestamp, "%Y-%m-%d %H:%M:%S,%f") >= time_x_seconds_ago: count += 1 print(f"Number of lines in the last 10 seconds: {count}") You can adapt this approach to work with a log file that you're tailing in Python by storing the timestamps and lines in a list and continuously updating the list as you read new lines from the file. You can then use the same approach as above to count the number of lines that occurred within the last X seconds. 
Here's an example of how you could write unit tests for the code above using the unittest module in Python: import unittest import time log_lines = [ ("2022-11-15 14:00:00,000", "Error 1923"), ("2022-11-15 14:00:01,000", "Error 1456"), ("2022-11-15 14:00:01,400", "Error 1001"), ("2022-11-15 14:00:03,400", "Error 1124"), ("2022-11-15 14:00:05,400", "Normal 0011"), ("2022-11-15 14:00:06,400", "Error 1123"), ] class TestCountLines(unittest.TestCase): def test_count_lines(self): # Test counting the number of lines in the last 10 seconds time_x_seconds_ago = time.time() - 10 count = 0 for timestamp, line in log_lines: if time.strptime(timestamp, "%Y-%m-%d %H:%M:%S,%f") >= time_x_seconds_ago: count += 1 self.assertEqual(count, 4) # Test counting the number of lines in the last 5 seconds time_x_seconds_ago = time.time() - 5 count = 0 for timestamp, line in log_lines: if time.strptime(timestamp, "%Y-%m-%d %H:%M:%S,%f") >= time_x_seconds_ago: count += 1 self.assertEqual(count, 3) # Test counting the number of lines in the last 1 second time_x_seconds_ago = time.time() - 1 count = 0 for timestamp, line in log_lines: if time.strptime(timestamp, "%Y-%m-%d %H:%M:%S,%f") >= time_x_seconds_ago: count += 1 self.assertEqual(count, 0) if __name__ == "__main__": unittest.main() In this example, the TestCountLines class contains three test cases: test_count_lines: Tests counting the number of lines in the last 10 seconds. test_count_lines: Tests counting the number of lines in the last 5 seconds. test_count_lines: Tests counting the number of lines in the last 1 second. Each test case uses the assertEqual method to compare the expected output with the actual output of the code. If the expected and actual outputs match, the test passes. If the outputs don't match, the test fails.
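One more hedged sketch of the rolling-window idea: keep only the timestamps of matching lines that fall inside the window, measured against the newest line seen. The " : " separator and timestamp format are taken from the example log lines above, and the class name is made up; feed() would be called on each newly tailed line.

from collections import deque
from datetime import datetime, timedelta

WINDOW = timedelta(seconds=10)

class RollingCounter:
    """Counts matching lines seen within the last WINDOW of log time."""
    def __init__(self, keyword):
        self.keyword = keyword
        self.timestamps = deque()

    def feed(self, line):
        # Expected format: "YYYY-mm-dd HH:MM:SS,fff : message"
        stamp, _, message = line.partition(' : ')
        ts = datetime.strptime(stamp.strip(), '%Y-%m-%d %H:%M:%S,%f')
        if self.keyword in message:
            self.timestamps.append(ts)
        # Drop anything older than WINDOW relative to the newest line.
        while self.timestamps and self.timestamps[0] < ts - WINDOW:
            self.timestamps.popleft()
        return len(self.timestamps)

errors = RollingCounter('Error')
for line in ['2022-11-15 14:00:00,000 : Error 1923',
             '2022-11-15 14:00:06,400 : Error 1123',
             '2022-11-15 14:00:15,000 : Normal 0001']:
    print(errors.feed(line))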
How to tail a log file with timestamps and count occurrences in the last X seconds
I have a log file that I read/stream into Python (it contains a timestamp and data) using tail. I need a way to see how many lines were seen/observed in the last 10 seconds, based on a filter (e.g. line contains "error"). I'll be checking every X seconds to see how many lines were present for "error" or "debug" etc... The count should only look at the last X seconds. Example: A log file which Python tails is 2022-11-15 14:00:00,000 : Error 1923 2022-11-15 14:00:01,000 : Error 1456 2022-11-15 14:00:01,400 : Error 1001 2022-11-15 14:00:03,400 : Error 1124 2022-11-15 14:00:05,400 : Normal 0011 2022-11-15 14:00:06,400 : Error 1123 When I read the file in Python, I want to answer the question: In the last X seconds, how many times have I seen Error, or how many times have I seen Normal? How would I accomplish this whilst I tail a file to check the last 10 seconds or 20 seconds etc.?
[ "This is a quite simple solution that looks at the current timestamp (I hardcoded the timestamp to follow the timestamps from your example but you can use datetime.datetime.now() instead).\nSimply put, the following was done:\n\nI made a file called test.log with the exact contents of that python tails piece of text you made above, which I read in using Python\nThen you should be able to simply run and tweak the following code:\n\nimport datetime\nimport re\n\nwith open('test.log') as f:\n lines = f.readlines()\n\n# Defining the interesting interval of time\nseconds_interval = 4\ninterval = datetime.timedelta(seconds=seconds_interval)\n# You could use now = datetime.datetime.now() but this is for this test\nnow = datetime.datetime(2022, 11, 15, hour=14, minute=00, second=6)\n\n# This is the function that grabs the interesting lines, and is used in the\n# filter operator\ndef grab_interesting_lines(line):\n strDate = re.search('\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}', line).group(0)\n date_time_obj = datetime.datetime.strptime(strDate, '%Y-%m-%d %H:%M:%S')\n return date_time_obj >= now - interval\n\n# Now we're actually filtering. The interesting_lines object is an iterator over\n# which we can loop later on\ninteresting_lines = filter(grab_interesting_lines, lines)\n\n# Now we simply loop over the interesting lines, and count whether an error\n# occurred, a \"normal\" occured or something else happened\nerror_counter = 0\nnormal_counter = 0\nelse_counter = 0\nfor line in interesting_lines:\n if 'Error' in line:\n error_counter+=1\n elif 'Normal' in line:\n normal_counter+=1\n else:\n else_counter+=1\n\n# Here we print out the output. Of course you can use these counter variables\n# somewhere else if you want\nprint(f\"The last {seconds_interval} seconds had {error_counter} errors, {normal_counter} normals and {else_counter} elses in there\")\n\nThe output for seconds_interval = 4 and your data example is:\nThe last 4 seconds had 2 errors, 1 normals and 0 elses in there\n\nOf course, this is a pretty crude approach. Everything depends on how variable your input is. For example, what if you have an Error and a Normal on the same line? I didn't add any error handling in there because the edge cases are not known.\nHope this helps you! :)\n", "To accomplish this, you can use a combination of the time and tail modules in Python. The time module allows you to get the current time and compare it to previous times, while the tail module allows you to read and stream the log file.\nHere is an example of how you could implement this in Python:\n# Import the necessary modules\nimport time\nfrom tail import tail\n\n# Set the number of seconds to look back\nnum_seconds = 10\n\n# Open the log file and stream the lines\nwith open('logfile.txt') as logfile:\n for line in tail(logfile):\n # Get the current time\n current_time = time.time()\n\n # Check if the line contains the desired string\n if \"error\" in line:\n # Check if the line was seen within the last X seconds\n if current_time - line_time <= num_seconds:\n # Increment the count\n count += 1\n\nThis code will stream the lines from the log file, and for each line, it will check if the line contains the string \"error\" and if the line was seen within the last num_seconds seconds. If both of these conditions are true, it will increment the count. 
You can adjust the num_seconds variable to change the number of seconds to look back, and you can also change the string that is being searched for in the log file.\n", "Here's one way to accomplish this in Python:\nStore the timestamps and lines of the log file in a list.\nUse the time module to get the current time and subtract X seconds from it to get the time X seconds ago.\nIterate over the list of timestamps and lines and count the number of lines that occurred within the last X seconds.\nHere's an example:\nimport time\n\nlog_lines = [\n (\"2022-11-15 14:00:00,000\", \"Error 1923\"),\n (\"2022-11-15 14:00:01,000\", \"Error 1456\"),\n (\"2022-11-15 14:00:01,400\", \"Error 1001\"),\n (\"2022-11-15 14:00:03,400\", \"Error 1124\"),\n (\"2022-11-15 14:00:05,400\", \"Normal 0011\"),\n (\"2022-11-15 14:00:06,400\", \"Error 1123\"),\n]\n\n# Get the current time and subtract 10 seconds from it\ntime_x_seconds_ago = time.time() - 10\n\n# Count the number of lines that occurred within the last X seconds\ncount = 0\nfor timestamp, line in log_lines:\n if time.strptime(timestamp, \"%Y-%m-%d %H:%M:%S,%f\") >= time_x_seconds_ago:\n count += 1\n\nprint(f\"Number of lines in the last 10 seconds: {count}\")\n\nYou can adapt this approach to work with a log file that you're tailing in Python by storing the timestamps and lines in a list and continuously updating the list as you read new lines from the file. You can then use the same approach as above to count the number of lines that occurred within the last X seconds.\nHere's an example of how you could write unit tests for the code above using the unittest module in Python:\nimport unittest\nimport time\n\nlog_lines = [\n (\"2022-11-15 14:00:00,000\", \"Error 1923\"),\n (\"2022-11-15 14:00:01,000\", \"Error 1456\"),\n (\"2022-11-15 14:00:01,400\", \"Error 1001\"),\n (\"2022-11-15 14:00:03,400\", \"Error 1124\"),\n (\"2022-11-15 14:00:05,400\", \"Normal 0011\"),\n (\"2022-11-15 14:00:06,400\", \"Error 1123\"),\n]\n\nclass TestCountLines(unittest.TestCase):\n def test_count_lines(self):\n # Test counting the number of lines in the last 10 seconds\n time_x_seconds_ago = time.time() - 10\n count = 0\n for timestamp, line in log_lines:\n if time.strptime(timestamp, \"%Y-%m-%d %H:%M:%S,%f\") >= time_x_seconds_ago:\n count += 1\n self.assertEqual(count, 4)\n\n # Test counting the number of lines in the last 5 seconds\n time_x_seconds_ago = time.time() - 5\n count = 0\n for timestamp, line in log_lines:\n if time.strptime(timestamp, \"%Y-%m-%d %H:%M:%S,%f\") >= time_x_seconds_ago:\n count += 1\n self.assertEqual(count, 3)\n\n # Test counting the number of lines in the last 1 second\n time_x_seconds_ago = time.time() - 1\n count = 0\n for timestamp, line in log_lines:\n if time.strptime(timestamp, \"%Y-%m-%d %H:%M:%S,%f\") >= time_x_seconds_ago:\n count += 1\n self.assertEqual(count, 0)\n\nif __name__ == \"__main__\":\n unittest.main()\n\nIn this example, the TestCountLines class contains three test cases:\ntest_count_lines: Tests counting the number of lines in the last 10 seconds.\ntest_count_lines: Tests counting the number of lines in the last 5 seconds.\ntest_count_lines: Tests counting the number of lines in the last 1 second.\nEach test case uses the assertEqual method to compare the expected output with the actual output of the code. If the expected and actual outputs match, the test passes. If the outputs don't match, the test fails.\n" ]
[ 1, 0, 0 ]
[]
[]
[ "python" ]
stackoverflow_0074444395_python.txt
Q: How to graph a mathematical function for "Distance and Speed over Time" in Python? I'm struggling with some Python homework. I'm really new to Python, and coding in general. I have really basic knowledge in Python, and somewhat acceptable level in JavaScript. My issue: I have to make a graph to represent these two functions: distance = (x**2/2 - np.cos(5*x) - 7) speed = (x + 5*np.sin(5*x)) Between the timestamps 3 and 6 (inclusive) I know I have to use Pandas to make a DataFrame, I know I have to use MatPlotLib to make the actual plot, and I have to use Numpy for the math to work, but I can't get the math to be recognised as mathematical functions because I simply don't know how. This is what the graph should look like: Graph for Distance and Speed over Time This is what my code looks for now: import pandas as pd import matplotlib.pyplot as plt import numpy as np x = 10 time = [3, 6] distance = (x**2/2 - np.cos(5*x) - 7) speed = (x + 5*np.sin(5*x)) values = {'Distance': distance, 'Speed': speed, 'Time': time} df = pd.DataFrame(data= values) df.plot(title='Distance and speed', xlabel='Time (hours)', ylabel='Distance (km) / Speed (km/h)', x='Time') plt.show() x = 10 I know shouldn't be included, but since I'm missing the part that makes the math work, I have to include it to make it "work" and not get an error. I have a vague idea that using Numpy is the answer to my problem, but I don't know how (for now, hopefully). How wrong am I? Can anyone help me? A: It looks like your code is almost there! You have imported all of the necessary libraries, and you have defined your distance and speed functions correctly. To make your code work, you need to specify the range of values that you want to use for the x-axis of your graph. In this case, you want to use the values between 3 and 6, inclusive. To do this, you can use the range function in Python, which will generate a sequence of numbers within a given range. Here is an example of how you can use the range function to specify the range of x-values that you want to use: x_values = np.arange(0.0, 20.0, 0.01) Once you have generated the list of x-values, you can use a for loop to iterate over each value and calculate the corresponding distance and speed values. You can then store these values in separate lists, which you can use to create your DataFrame. Here is an example of how you can use a for loop to calculate the distance and speed values for each x-value: # Initialize empty lists to store the distance and speed values distance_values = [] speed_values = [] # Iterate over each x-value for x in x_values: # Calculate the distance and speed values for the current x-value distance = (x**2/2 - np.cos(5*x) - 7) speed = (x + 5*np.sin(5*x)) # Append the calculated values to their respective lists distance_values.append(distance) speed_values.append(speed) Once you have calculated the distance and speed values for each x-value, you can use these lists to create your DataFrame. You can then use the plot method of your DataFrame to create the graph. 
Here is an example of how you can create your DataFrame and plot the graph: # Create the DataFrame using the x-values, distance values, and speed values df = pd.DataFrame({'Time': x_values, 'Distance': distance_values, 'Speed': speed_values}) # Use the plot method of the DataFrame to create the graph df.plot(title='Distance and speed', xlabel='Time (hours)', ylabel='Distance (km) / Speed (km/h)', x='Time') # Show the graph plt.show() A: Corrections to posted version Use variable t for time (rather than x) np.arange(3, 6.01, 0.01) to get time from 3 to 6 inclusive Code # time values from 3 to 6 inclusive in steps of 0.01 (use 6.01 to include 6) t = np.arange(3, 6.01, 0.01) # t for time # Use NumPy array operations to compute distance and speed at all time values (i.e. x axis) distance = (t**2/2 - np.cos(5*t) - 7) speed = (t + 5*np.sin(5*t)) values = {'Distance': distance, 'Speed': speed, 'Time': t} # x is time t df = pd.DataFrame(data= values) df.plot(title='Distance and speed', xlabel='Time (hours)', ylabel='Distance (km) / Speed (km/h)', x='Time') plt.show()
How to graph a mathematical function for "Distance and Speed over Time" in Python?
I'm struggling with some Python homework. I'm really new to Python, and coding in general. I have really basic knowledge in Python, and somewhat acceptable level in JavaScript. My issue: I have to make a graph to represent these two functions: distance = (x**2/2 - np.cos(5*x) - 7) speed = (x + 5*np.sin(5*x)) Between the timestamps 3 and 6 (inclusive) I know I have to use Pandas to make a DataFrame, I know I have to use MatPlotLib to make the actual plot, and I have to use Numpy for the math to work, but I can't get the math to be recognised as mathematical functions because I simply don't know how. This is what the graph should look like: Graph for Distance and Speed over Time This is what my code looks for now: import pandas as pd import matplotlib.pyplot as plt import numpy as np x = 10 time = [3, 6] distance = (x**2/2 - np.cos(5*x) - 7) speed = (x + 5*np.sin(5*x)) values = {'Distance': distance, 'Speed': speed, 'Time': time} df = pd.DataFrame(data= values) df.plot(title='Distance and speed', xlabel='Time (hours)', ylabel='Distance (km) / Speed (km/h)', x='Time') plt.show() x = 10 I know shouldn't be included, but since I'm missing the part that makes the math work, I have to include it to make it "work" and not get an error. I have a vague idea that using Numpy is the answer to my problem, but I don't know how (for now, hopefully). How wrong am I? Can anyone help me?
[ "It looks like your code is almost there! You have imported all of the necessary libraries, and you have defined your distance and speed functions correctly.\nTo make your code work, you need to specify the range of values that you want to use for the x-axis of your graph. In this case, you want to use the values between 3 and 6, inclusive. To do this, you can use the range function in Python, which will generate a sequence of numbers within a given range.\nHere is an example of how you can use the range function to specify the range of x-values that you want to use:\nx_values = np.arange(0.0, 20.0, 0.01) \n\nOnce you have generated the list of x-values, you can use a for loop to iterate over each value and calculate the corresponding distance and speed values. You can then store these values in separate lists, which you can use to create your DataFrame.\nHere is an example of how you can use a for loop to calculate the distance and speed values for each x-value:\n# Initialize empty lists to store the distance and speed values\ndistance_values = []\nspeed_values = []\n\n# Iterate over each x-value\nfor x in x_values:\n # Calculate the distance and speed values for the current x-value\n distance = (x**2/2 - np.cos(5*x) - 7)\n speed = (x + 5*np.sin(5*x))\n\n # Append the calculated values to their respective lists\n distance_values.append(distance)\n speed_values.append(speed)\n\nOnce you have calculated the distance and speed values for each x-value, you can use these lists to create your DataFrame. You can then use the plot method of your DataFrame to create the graph.\nHere is an example of how you can create your DataFrame and plot the graph:\n# Create the DataFrame using the x-values, distance values, and speed values\ndf = pd.DataFrame({'Time': x_values, 'Distance': distance_values, 'Speed': speed_values})\n\n# Use the plot method of the DataFrame to create the graph\ndf.plot(title='Distance and speed', xlabel='Time (hours)', ylabel='Distance (km) / Speed (km/h)', x='Time')\n\n# Show the graph\nplt.show()\n\n\n", "Corrections to posted version\n\nUse variable t for time (rather than x)\nnp.arange(3, 6.01, 0.01) to get time from 3 to 6 inclusive\n\nCode\n# time values from 3 to 6 inclusive in steps of 0.01 (use 6.01 to include 6)\nt = np.arange(3, 6.01, 0.01) # t for time\n\n# Use NumPy array operations to compute distance and speed at all time values (i.e. x axis)\ndistance = (t**2/2 - np.cos(5*t) - 7)\nspeed = (t + 5*np.sin(5*t))\nvalues = {'Distance': distance, 'Speed': speed, 'Time': t} # x is time t\n\ndf = pd.DataFrame(data= values)\ndf.plot(title='Distance and speed', xlabel='Time (hours)', ylabel='Distance (km) / Speed (km/h)', x='Time')\n\nplt.show()\n\n\n" ]
[ 0, 0 ]
[]
[]
[ "graph", "matplotlib", "numpy", "pandas", "python" ]
stackoverflow_0074671475_graph_matplotlib_numpy_pandas_python.txt
Q: What does builder do that python code doesn't? When I use builder, the program outputs information from qr codes to the lower half of the application, but it is necessary to replace the built code with an equivalent python code, immediately information about qr codes ceases to be output With builder: ` from kivy.app import App from kivy.uix.boxlayout import BoxLayout from kivy.uix.label import Label from kivy.lang import Builder from kivy_garden.zbarcam import ZBarCam DEMO_APP_KV_LANG = """ BoxLayout: orientation: 'vertical' ZBarCam: id: zbarcam Label: text: ', '.join([str(symbol.data) for symbol in zbarcam.symbols]) """ class DemoApp(App): def build(self): return Builder.load_string(DEMO_APP_KV_LANG) if __name__ == '__main__': DemoApp().run() With python code: from kivy.app import App from kivy.uix.boxlayout import BoxLayout from kivy.uix.label import Label from kivy.lang import Builder from kivy_garden.zbarcam import ZBarCam class Demo(BoxLayout): def __init__(self, **kwargs): super().__init__(**kwargs) self.orientation = 'vertical' self.zbarcam= ZBarCam() self.add_widget(self.zbarcam) self.add_widget(Label(text=', '.join([str(symbol.data) for symbol in self.zbarcam.symbols]))) class DemoApp(App): def build(self): return Demo() if __name__ == '__main__': DemoApp().run() ` A: The kivy language sets up bindings for you that the pure python does not. So your line in the kv: text: ', '.join([str(symbol.data) for symbol in zbarcam.symbols]) sets up binding to zbarcam.symbols so that the text is updated whenever zbarcam.symbols changes. And in the python code: text=', '.join([str(symbol.data) for symbol in self.zbarcam.symbols] sets the text once when that line is executed, and it is never updated. Why must you replace the the kivy language code?
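To reproduce that live update in pure Python, the binding that kv creates automatically has to be set up by hand. The following is a minimal sketch of the Demo class, reusing the imports from the question and assuming ZBarCam exposes symbols as a bindable Kivy property (which the kv rule above already relies on):

class Demo(BoxLayout):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.orientation = 'vertical'
        self.zbarcam = ZBarCam()
        self.label = Label()
        self.add_widget(self.zbarcam)
        self.add_widget(self.label)
        # Re-run the update whenever zbarcam.symbols changes,
        # which is what the kv rule did implicitly
        self.zbarcam.bind(symbols=self.update_label)

    def update_label(self, instance, symbols):
        self.label.text = ', '.join(str(symbol.data) for symbol in symbols)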
What does builder do that python code doesn't?
When I use builder, the program outputs information from qr codes to the lower half of the application, but it is necessary to replace the built code with an equivalent python code, immediately information about qr codes ceases to be output With builder: ` from kivy.app import App from kivy.uix.boxlayout import BoxLayout from kivy.uix.label import Label from kivy.lang import Builder from kivy_garden.zbarcam import ZBarCam DEMO_APP_KV_LANG = """ BoxLayout: orientation: 'vertical' ZBarCam: id: zbarcam Label: text: ', '.join([str(symbol.data) for symbol in zbarcam.symbols]) """ class DemoApp(App): def build(self): return Builder.load_string(DEMO_APP_KV_LANG) if __name__ == '__main__': DemoApp().run() With python code: from kivy.app import App from kivy.uix.boxlayout import BoxLayout from kivy.uix.label import Label from kivy.lang import Builder from kivy_garden.zbarcam import ZBarCam class Demo(BoxLayout): def __init__(self, **kwargs): super().__init__(**kwargs) self.orientation = 'vertical' self.zbarcam= ZBarCam() self.add_widget(self.zbarcam) self.add_widget(Label(text=', '.join([str(symbol.data) for symbol in self.zbarcam.symbols]))) class DemoApp(App): def build(self): return Demo() if __name__ == '__main__': DemoApp().run() `
[ "The kivy language sets up bindings for you that the pure python does not. So your line in the kv:\ntext: ', '.join([str(symbol.data) for symbol in zbarcam.symbols])\n\nsets up binding to zbarcam.symbols so that the text is updated whenever zbarcam.symbols changes.\nAnd in the python code:\ntext=', '.join([str(symbol.data) for symbol in self.zbarcam.symbols]\n\nsets the text once when that line is executed, and it is never updated.\nWhy must you replace the the kivy language code?\n" ]
[ 0 ]
[]
[]
[ "barcode", "kivy", "kivy_language", "python", "qr_code" ]
stackoverflow_0074669847_barcode_kivy_kivy_language_python_qr_code.txt
Q: Getting Element Not Interactable Exception after attempting to insert search query into YouTube input field I'm just beginning to explore Python and automation testing Wanted to create a quick script that will: Open a YouTube page Find the search input field where I will insert my search query Insert a search query into the field Press on the button to receive search results Unfortunately Ive bumped into an error: "selenium.common.exceptions.ElementNotInteractableException: Message: element not interactable" Please assist from selenium import webdriver from selenium.webdriver.common.by import By from selenium.webdriver.common.keys import Keys link = "https://www.youtube.com/" browser = webdriver.Chrome() browser.get(link) search_string = browser.find_element(By.XPATH, "/html/body/ytd-app/div[1]/div/ytd-masthead/div[3]/div[2]/ytd-searchbox/form/div[1]/div[1]/div/div[2]/input") search_string.send_keys("Test search input") button = browser.find_element(By.XPATH, '/html/body/ytd-app/div[1]/div/ytd-masthead/div[3]/div[2]/ytd-searchbox/button') button.click() A: Instead of searching the Input field using the absolute xpath you can use the id properties of the element i.e search and similarly to click on the search icon to resolve the error. Note:- We will have to specify the input tag along with the id since the page contains couple of element with id as as search Your solution would look like link = "https://www.youtube.com/" browser.get(link) browser.maximize_window() browser.find_element(By.CSS_SELECTOR, "input#search").send_keys("Test search input") button = browser.find_element(By.ID, 'search-icon-legacy') button.click()
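Beyond switching to id-based locators, an explicit wait often helps with ElementNotInteractableException, because YouTube renders the search box asynchronously. A hedged sketch using Selenium's WebDriverWait, reusing browser and By from the question (the 10-second timeout is an arbitrary choice):

from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

wait = WebDriverWait(browser, 10)
# Wait until the search input is actually clickable before typing into it
search_box = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, "input#search")))
search_box.send_keys("Test search input")
wait.until(EC.element_to_be_clickable((By.ID, "search-icon-legacy"))).click()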
Getting Element Not Interactable Exception after attempting to insert search query into YouTube input field
I'm just beginning to explore Python and automation testing Wanted to create a quick script that will: Open a YouTube page Find the search input field where I will insert my search query Insert a search query into the field Press on the button to receive search results Unfortunately Ive bumped into an error: "selenium.common.exceptions.ElementNotInteractableException: Message: element not interactable" Please assist from selenium import webdriver from selenium.webdriver.common.by import By from selenium.webdriver.common.keys import Keys link = "https://www.youtube.com/" browser = webdriver.Chrome() browser.get(link) search_string = browser.find_element(By.XPATH, "/html/body/ytd-app/div[1]/div/ytd-masthead/div[3]/div[2]/ytd-searchbox/form/div[1]/div[1]/div/div[2]/input") search_string.send_keys("Test search input") button = browser.find_element(By.XPATH, '/html/body/ytd-app/div[1]/div/ytd-masthead/div[3]/div[2]/ytd-searchbox/button') button.click()
[ "Instead of searching the Input field using the absolute xpath you can use the id properties of the element i.e search and similarly to click on the search icon to resolve the error.\nNote:- We will have to specify the input tag along with the id since the page contains couple of element with id as as search\nYour solution would look like\nlink = \"https://www.youtube.com/\"\nbrowser.get(link)\nbrowser.maximize_window()\nbrowser.find_element(By.CSS_SELECTOR, \"input#search\").send_keys(\"Test \nsearch input\")\nbutton = browser.find_element(By.ID, 'search-icon-legacy')\nbutton.click()\n\n" ]
[ 0 ]
[]
[]
[ "python", "selenium" ]
stackoverflow_0074662142_python_selenium.txt
Q: How can I remove all vowels from an inputted string This doesn't really work. To explain what I did: I set a vowel variable with a list, then I used a for loop to iterate through the list and print the letters not in the list. A: As user @user56700 noted, you probably wrote, by mistake: if not letter.lower in vowels: instead of: if not letter.lower() in vowels: The first refers to the method object itself; the second actually calls the method. P.S. Also, as user @user56700 noted, do not screenshot and paste code as an image. Just paste and format it as code; it is really simple and shows a minimum of respect for others :) A: ` st=input("Enter Any String ") vowel=['a','e','i','o','u'] #Create a list of vowels st=st.lower() #Convert the input string to lower case output="" for i in st: if i not in vowel: #If the character is not a vowel, add it to the output output+=i print(output)`
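For reference, a compact way to achieve the same result — a sketch, not the only valid approach — is a generator expression over the input string:

text = input("Enter any string: ")
# Keep every character whose lowercase form is not a vowel
no_vowels = ''.join(ch for ch in text if ch.lower() not in "aeiou")
print(no_vowels)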
How can I remove all vowels from an inputted string
This doesn't really work. To explain what I did: I set a vowel variable with a list, then I used a for loop to iterate through the list and print the letters not in the list.
[ "as user @user56700 noted: You did, probably by mistake:\nif not letter.lower in vowels:\n\ninstead:\nif not letter.lower() in vowels:\n\nfirst is method \"itself\", second is call of method.\nP.S.\nalso, as user @user56700 noted, do not screenshot and paste code as image. Just paste and format as code, it is really simple, and shows that min of respect for others :)\n", "`\nst=input(\"Enter Any String \")\nvowel=['a','e','i','o','u']\n#Create a List of Vowel\nst=st.lower()\n#Convert Vowel in Lower case\noutput=\"\"\nfor i in st:\nif i not in vowel:\n\n#Check Vowel if not then add to output\n\n output+=i\n\nprint(output)`\n" ]
[ 0, 0 ]
[ "import re\nvowel = input()\nlst = re.sub(\"[aeiouAEIOU]\",\"\",vowel)\nprint(lst)\n" ]
[ -1 ]
[ "list", "python", "string" ]
stackoverflow_0074670665_list_python_string.txt
Q: How Do I Add Subtitles to a Video in Python If I have a script.txt which contains all the things said in the video and the video itself then how do I dynamically add subtitles to the video using python. A: To add subtitles to a video using python, you can use the moviepy library. Here is an example of how you can do this: from moviepy.editor import * # Open the video video = VideoFileClip('video.mp4') # Read the script from the text file with open('script.txt', 'r') as f: script = f.read() # Add the subtitles to the video using the script video_with_subtitles = video.subclip(t_start=0, t_end=None).text_imprint(txt=script, fontsize=20, font='Arial', color='white') # Save the video with subtitles video_with_subtitles.write_videofile('video_with_subtitles.mp4') This code will open the video file, read the script from the text file, add the subtitles to the video using the script, and save the video with subtitles. You can adjust the font size, font, and color of the subtitles by modifying the fontsize, font, and color parameters in the text_imprint method.
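If the text_imprint call above is not available in your moviepy version, a commonly used alternative is to render the text with TextClip and overlay it using CompositeVideoClip. This sketch assumes the whole script should be shown for the full duration of the clip; real subtitles would normally be split into timed segments (for example with moviepy's SubtitlesClip and an .srt file), and TextClip requires ImageMagick to be installed.

from moviepy.editor import VideoFileClip, TextClip, CompositeVideoClip

video = VideoFileClip('video.mp4')

with open('script.txt', 'r') as f:
    script = f.read()

# One caption overlaid for the whole clip; split the script into timed pieces for real subtitles
caption = (TextClip(script, fontsize=24, color='white', font='Arial')
           .set_position(('center', 'bottom'))
           .set_duration(video.duration))

CompositeVideoClip([video, caption]).write_videofile('video_with_subtitles.mp4')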
How Do I Add Subtitles to a Video in Python
If I have a script.txt which contains all the things said in the video and the video itself then how do I dynamically add subtitles to the video using python.
[ "To add subtitles to a video using python, you can use the moviepy library. Here is an example of how you can do this:\nfrom moviepy.editor import *\n\n# Open the video\nvideo = VideoFileClip('video.mp4')\n\n# Read the script from the text file\nwith open('script.txt', 'r') as f:\n script = f.read()\n\n# Add the subtitles to the video using the script\nvideo_with_subtitles = video.subclip(t_start=0, \nt_end=None).text_imprint(txt=script, fontsize=20, font='Arial', \ncolor='white')\n\n# Save the video with subtitles\nvideo_with_subtitles.write_videofile('video_with_subtitles.mp4')\n\nThis code will open the video file, read the script from the text file, add the subtitles to the video using the script, and save the video with subtitles. You can adjust the font size, font, and color of the subtitles by modifying the fontsize, font, and color parameters in the text_imprint method.\n" ]
[ 2 ]
[]
[]
[ "python", "video_subtitles" ]
stackoverflow_0074672952_python_video_subtitles.txt
Q: How can we print a list of numbers taken as [1,2,3,4,5] in a column one by one such that the output should be as 1 2 3 4 5 I had written a piece of code expecting the output as 1 2 3 4 5 but I am unable to get that with my code for num in numlist: print(num) print(num,end=' ') 1 1 2 2 3 3 4 4 5 5 for num in numlist: print(num) print(num,end=' ') 1 2 3 4 5 5 Can I know why, when I execute it separately without indentation, I am getting 5 5 two times line by line? And also, what if I wanted to get the output as 1 2 3 4 5
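The difference comes from loop scope: a statement indented under the for runs once per element, while an unindented statement runs once after the loop, when num still holds the last value (hence the extra 5). A small sketch of both behaviours, assuming numlist = [1, 2, 3, 4, 5]:

numlist = [1, 2, 3, 4, 5]

# One number per line (a column)
for num in numlist:
    print(num)

# All numbers on one line, separated by spaces
for num in numlist:
    print(num, end=' ')
print()  # finish the row with a newline

# Equivalent one-liner for the horizontal output
print(' '.join(str(num) for num in numlist))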
How can we print a list of numbers taken as [1,2,3,4,5] in a column one by one such that the output should be as 1 2 3 4 5
I had written a piece of code expecting the output as 1 2 3 4 5 but I am unable to get that with my code for num in numlist: print(num) print(num,end=' ') 1 1 2 2 3 3 4 4 5 5 for num in numlist: print(num) print(num,end=' ') 1 2 3 4 5 5 Can I know why, when I execute it separately without indentation, I am getting 5 5 two times line by line? And also, what if I wanted to get the output as 1 2 3 4 5
[]
[]
[ "You can use list comprehension to get a list of strings, and then use join to get a string and print it.\nstrlist = [str(x) for x in numlist] \noutstr = \"\\n\".join(strlist) \nprint(outstr) \n\n" ]
[ -1 ]
[ "python" ]
stackoverflow_0074672938_python.txt
Q: An efficient way to search elements in a Json array (dictionary of arrays) I am writing a script that reads two Json files into dictionaries The dictionaries are more or less similar, like this { "elements":[ { "element_id":0, "thedata":{ "this": 5 } }, { "element_id":4, "thedata":{ "this": 5 } } { ... } ]} So far I had assumed that the element_id went from 0 and increased 1 by 1 Then the requirements changed and this time they went from 0 and increased 4 by 4 or something like this Anyway, I though so far that both dictionaries would have the same number of elements and the same increasing distance so when I got the elements in my script I wrote something like def process_elements(number): el1_id=thedictionary['elements'][number]['element_id'] el2_id=thedictionary2['elements'][number]['element_id'] assert(el1_id==el2_id) #here work with the data However the requirements have changed again Now the number of elements of one dictionary are not necessarily the same as the other Also it is not guaranteed that one of them start always at 0 So now I have to find the elements in both dictionaries with the same element id So my question is , in a dictionary like above (that came from a json) is there a quick way to find the element that has a particular element_id and get the element? Something like def process_elements(number): el1_id=thedictionary['elements'][number]['element_id'] n=find_i(thedictionary2,el1_id) #finds the index with the element that has id the same as el1_id el2_id=thedictionary2['elements'][n]['element_id'] assert(el1_id==el2_id) #Of course they are the same since we used find_i #here work with the data It has to be quick since I use it for an animation A: If you need to find multiple elements with a particular element_id in a dictionary, and you want to do it as efficiently as possible, you could use a dictionary to store the elements with a given element_id. Then, when you need to find an element with a particular element_id, you can just look it up in the dictionary using the element_id as the key, without having to iterate over the elements in the dictionary. Here's an example of how you could do this: # Create a dictionary to store the elements with a given element_id elements_by_id = {} # Iterate over the elements in the dictionary for element in thedictionary['elements']: # Get the element_id for the current element element_id = element['element_id'] # Check if the element_id is already a key in the elements_by_id dictionary if element_id not in elements_by_id: # If the element_id is not already a key in the dictionary, create a new key-value pair in the dictionary, # with the element_id as the key and an empty list as the value elements_by_id[element_id] = [] # Add the current element to the list of elements with the given element_id elements_by_id[element_id].append(element) # Now, when you need to find the elements with a particular element_id, you can just look it up in the dictionary # using the element_id as the key found_elements = elements_by_id[4] # Print the found elements to the console print(found_elements) This method is more efficient than iterating over the elements in the dictionary and checking each element's element_id value, because it only requires a single pass over the elements in the dictionary to create the elements_by_id dictionary, and then you can look up elements with a particular element_id in constant time. 
If you want to make the code even faster, you could use the dict.setdefault() method to create the elements_by_id dictionary in a single pass over the elements in the dictionary. This method allows you to specify a default value to use if the key you're looking for doesn't already exist in the dictionary, so you don't have to check if the key exists before adding it to the dictionary. Here's an example of how you could use the dict.setdefault() method to create the elements_by_id dictionary: # Create a dictionary to store the elements with a given element_id elements_by_id = {} # Iterate over the elements in the dictionary for element in thedictionary['elements']: # Get the element_id for the current element element_id = element['element_id'] # Use the setdefault() method to create a new key-value pair in the dictionary, # with the element_id as the key and an empty list as the value, if the element_id is not already a key in the dictionary elements_by_id.setdefault(element_id, []) # Add the current element to the list of elements with the given element_id elements_by_id[element_id].append(element) # Now, when you need to find the elements with a particular element_id, you can just look it up in the dictionary # using the element_id as the key found_elements = elements_by_id[4] # Print the found elements to the console print(found_elements) This method is faster than the previous method because it only requires a single pass over the elements in the dictionary, and it doesn't require you to check if the element_id is already a key in the dictionary before adding it. A: Using the get() method: # Create a dictionary to store the elements with a given element_id elements_by_id = {} # Iterate over the elements in the dictionary for element in thedictionary['elements']: # Get the element_id for the current element element_id = element['element_id'] # Check if the element_id is already a key in the elements_by_id dictionary if element_id not in elements_by_id: # If the element_id is not already a key in the dictionary, create a new key-value pair in the dictionary, # with the element_id as the key and an empty list as the value elements_by_id[element_id] = [] # Add the current element to the list of elements with the given element_id elements_by_id[element_id].append(element) # Now, when you need to find the elements with a particular element_id, you can use the dict.get() method # to get the list of elements with the given element_id, and specify a default value to return if the element_id # doesn't exist as a key in the dictionary found_elements = elements_by_id.get(34554, []) # Print the found elements to the console print(found_elements)
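If each element_id occurs at most once per file — which the question's examples suggest but the data may not guarantee — the bookkeeping above reduces to one dictionary comprehension per file, giving O(1) lookups inside process_elements. A sketch under that assumption:

# Build an index once per dictionary: element_id -> element
index1 = {el['element_id']: el for el in thedictionary['elements']}
index2 = {el['element_id']: el for el in thedictionary2['elements']}

def process_elements(element_id):
    el1 = index1.get(element_id)
    el2 = index2.get(element_id)
    if el1 is None or el2 is None:
        return  # the id is missing from one of the files
    # here work with el1['thedata'] and el2['thedata']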
An efficient way to search elements in a Json array (dictionary of arrays)
I am writing a script that reads two Json files into dictionaries The dictionaries are more or less similar, like this { "elements":[ { "element_id":0, "thedata":{ "this": 5 } }, { "element_id":4, "thedata":{ "this": 5 } } { ... } ]} So far I had assumed that the element_id went from 0 and increased 1 by 1 Then the requirements changed and this time they went from 0 and increased 4 by 4 or something like this Anyway, I though so far that both dictionaries would have the same number of elements and the same increasing distance so when I got the elements in my script I wrote something like def process_elements(number): el1_id=thedictionary['elements'][number]['element_id'] el2_id=thedictionary2['elements'][number]['element_id'] assert(el1_id==el2_id) #here work with the data However the requirements have changed again Now the number of elements of one dictionary are not necessarily the same as the other Also it is not guaranteed that one of them start always at 0 So now I have to find the elements in both dictionaries with the same element id So my question is , in a dictionary like above (that came from a json) is there a quick way to find the element that has a particular element_id and get the element? Something like def process_elements(number): el1_id=thedictionary['elements'][number]['element_id'] n=find_i(thedictionary2,el1_id) #finds the index with the element that has id the same as el1_id el2_id=thedictionary2['elements'][n]['element_id'] assert(el1_id==el2_id) #Of course they are the same since we used find_i #here work with the data It has to be quick since I use it for an animation
[ "If you need to find multiple elements with a particular element_id in a dictionary, and you want to do it as efficiently as possible, you could use a dictionary to store the elements with a given element_id. Then, when you need to find an element with a particular element_id, you can just look it up in the dictionary using the element_id as the key, without having to iterate over the elements in the dictionary.\nHere's an example of how you could do this:\n# Create a dictionary to store the elements with a given element_id\nelements_by_id = {}\n\n# Iterate over the elements in the dictionary\nfor element in thedictionary['elements']:\n # Get the element_id for the current element\n element_id = element['element_id']\n\n # Check if the element_id is already a key in the elements_by_id dictionary\n if element_id not in elements_by_id:\n # If the element_id is not already a key in the dictionary, create a new key-value pair in the dictionary,\n # with the element_id as the key and an empty list as the value\n elements_by_id[element_id] = []\n\n # Add the current element to the list of elements with the given element_id\n elements_by_id[element_id].append(element)\n\n# Now, when you need to find the elements with a particular element_id, you can just look it up in the dictionary\n# using the element_id as the key\nfound_elements = elements_by_id[4]\n\n# Print the found elements to the console\nprint(found_elements)\n\nThis method is more efficient than iterating over the elements in the dictionary and checking each element's element_id value, because it only requires a single pass over the elements in the dictionary to create the elements_by_id dictionary, and then you can look up elements with a particular element_id in constant time.\nIf you want to make the code even faster, you could use the dict.setdefault() method to create the elements_by_id dictionary in a single pass over the elements in the dictionary. 
This method allows you to specify a default value to use if the key you're looking for doesn't already exist in the dictionary, so you don't have to check if the key exists before adding it to the dictionary.\nHere's an example of how you could use the dict.setdefault() method to create the elements_by_id dictionary:\n# Create a dictionary to store the elements with a given element_id\nelements_by_id = {}\n\n# Iterate over the elements in the dictionary\nfor element in thedictionary['elements']:\n # Get the element_id for the current element\n element_id = element['element_id']\n\n # Use the setdefault() method to create a new key-value pair in the dictionary,\n # with the element_id as the key and an empty list as the value, if the element_id is not already a key in the dictionary\n elements_by_id.setdefault(element_id, [])\n\n # Add the current element to the list of elements with the given element_id\n elements_by_id[element_id].append(element)\n\n# Now, when you need to find the elements with a particular element_id, you can just look it up in the dictionary\n# using the element_id as the key\nfound_elements = elements_by_id[4]\n\n# Print the found elements to the console\nprint(found_elements)\n\nThis method is faster than the previous method because it only requires a single pass over the elements in the dictionary, and it doesn't require you to check if the element_id is already a key in the dictionary before adding it.\n", "Using the get() method:\n# Create a dictionary to store the elements with a given element_id\nelements_by_id = {}\n\n# Iterate over the elements in the dictionary\nfor element in thedictionary['elements']:\n # Get the element_id for the current element\n element_id = element['element_id']\n\n # Check if the element_id is already a key in the elements_by_id dictionary\n if element_id not in elements_by_id:\n # If the element_id is not already a key in the dictionary, create a new key-value pair in the dictionary,\n # with the element_id as the key and an empty list as the value\n elements_by_id[element_id] = []\n\n # Add the current element to the list of elements with the given element_id\n elements_by_id[element_id].append(element)\n\n# Now, when you need to find the elements with a particular element_id, you can use the dict.get() method\n# to get the list of elements with the given element_id, and specify a default value to return if the element_id\n# doesn't exist as a key in the dictionary\nfound_elements = elements_by_id.get(34554, [])\n\n# Print the found elements to the console\nprint(found_elements)\n\n" ]
[ 1, 1 ]
[]
[]
[ "dictionary", "json", "python" ]
stackoverflow_0074672767_dictionary_json_python.txt
Q: big-O-calculator: AttributeError: 'list' object has no attribute 'lower' I'm trying to calculate the speed of two functions that I have, this one uses quick sort method. I am using this page to download and use the big O calculator, and test the speed using this. But when I try to execute it, it throws me this error: AttributeError: 'list' object has no attribute 'lower'. I'm not sure why, otherwise the program works fine. from bigO import BigO data = "studentMockData_AS2.txt" students = [] with open(data, "r") as datafile: for line in datafile: datum = line.split() students.append(datum) size = len(students) def quicksort(array, lowest, highest): if lowest < highest: pi = partition(array, lowest, highest) quicksort(array, lowest, pi - 1) quicksort(array, pi + 1, highest) def partition(array, lowest, highest): pivot = array[highest] ptr = lowest - 1 for student in range(lowest, highest): if array[student] <= pivot: ptr += 1 (array[ptr], array[student]) = (array[student], array[ptr]) (array[ptr + 1], array[highest]) = (array[highest], array[ptr + 1]) return ptr + 1 lib=BigO() comp = lib.test(quicksort, students, 0, size-1) print(comp) A: Big0 test func parameters: def test(**args): functionName [Callable]: a function to call. array [str]: "random", "big", "sorted", "reversed", "partial", "Ksorted", "string", "almost_equal", "equal", "hole". limit [bool] = True: To break before it takes "forever" to sort an array. (ex. selectionSort) prtResult [bool] = True: Whether to print result by itself You are passing a list students to Big0.test() method where it is expecting a str. Use 'random' instead.
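Following the signature quoted in the answer above, the second argument should name a generated test array ("random", "sorted", ...) rather than pass your own data. The snippet below is only a sketch, under the assumption that BigO.test calls the supplied function with a single generated list, so the three-argument quicksort is wrapped first:

from bigO import BigO

def quicksort_runner(arr):
    # Adapt the (array, lowest, highest) signature to a one-argument callable
    quicksort(arr, 0, len(arr) - 1)
    return arr

lib = BigO()
complexity = lib.test(quicksort_runner, "random")
print(complexity)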
big-O-calculator: AttributeError: 'list' object has no attribute 'lower'
I'm trying to calculate the speed of two functions that I have, this one uses quick sort method. I am using this page to download and use the big O calculator, and test the speed using this. But when I try to execute it, it throws me this error: AttributeError: 'list' object has no attribute 'lower'. I'm not sure why, otherwise the program works fine. from bigO import BigO data = "studentMockData_AS2.txt" students = [] with open(data, "r") as datafile: for line in datafile: datum = line.split() students.append(datum) size = len(students) def quicksort(array, lowest, highest): if lowest < highest: pi = partition(array, lowest, highest) quicksort(array, lowest, pi - 1) quicksort(array, pi + 1, highest) def partition(array, lowest, highest): pivot = array[highest] ptr = lowest - 1 for student in range(lowest, highest): if array[student] <= pivot: ptr += 1 (array[ptr], array[student]) = (array[student], array[ptr]) (array[ptr + 1], array[highest]) = (array[highest], array[ptr + 1]) return ptr + 1 lib=BigO() comp = lib.test(quicksort, students, 0, size-1) print(comp)
[ "Big0 test func parameters:\ndef test(**args):\n functionName [Callable]: a function to call.\n array [str]: \"random\", \"big\", \"sorted\", \"reversed\", \"partial\", \"Ksorted\", \"string\", \"almost_equal\", \"equal\", \"hole\".\n limit [bool] = True: To break before it takes \"forever\" to sort an array. (ex. selectionSort)\n prtResult [bool] = True: Whether to print result by itself\n\nYou are passing a list students to Big0.test() method where it is expecting a str. Use 'random' instead.\n" ]
[ 0 ]
[]
[]
[ "big_o", "python", "quicksort" ]
stackoverflow_0074672974_big_o_python_quicksort.txt
Q: 320 Error after IBApi.EClient.placeOrder() in Python & Interactive Brokers I am trying to place an order through Interactive Brokers' Python API but receive the error: ERROR 1 320 Error reading request: Unable to parse data. java.lang.NumberFormatException: For input string: "1.7976931348623157e+308" Connecting and retrieving data works fine but when submitting an order, one of my parameters seems to be wrong and I simply can't figure out what it is. I was closely following IB's documentation, so it really comes as a bit of a surprise to me. The error code (320) is not really telling, unfortunately, as IB merely describes it as a "Server error". The only related question I found online, links the error to an invalid ID but I checked mine and it should be fine. The code: from ibapi.client import EClient from ibapi.wrapper import EWrapper from ibapi.contract import Contract from ibapi.order import Order import threading class IBapi(EWrapper, EClient): def __init__(self): EClient.__init__(self, self) def run_loop(): app.run() app = IBapi() app.connect('127.0.0.1', 7496, 1) api_thread = threading.Thread(target = run_loop, daemon = True) api_thread.start() ctr = Contract() ctr.symbol = 'AAPL' ctr.secType = 'STK' ctr.exchange = 'SMART' ctr.currency = 'USD' ord = Order() ord.action = 'BUY' ord.orderType = 'LMT' ord.totalQuantity = 1 ord.lmtPrice = 150 app.reqIds(-1) id = app.nextValidOrderId print(id) print(isinstance(id, int)) app.placeOrder(id, ctr, ord) returns: 1 True ERROR 1 320 Error reading request: Unable to parse data. java.lang.NumberFormatException: For input string: "1.7976931348623157e+308" My TWS version is 10.20.1d, which is the latest as of now (since this fixed a somewhat related question). Can someone help me with what I am doing wrong, please? A: Using TWS 10.20.1d and API_Version=10.20.01 I find your code works with only a minor change with nextValidOrderId. Suggest checking API version, and upgrading if not latest version.
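Independently of the version check, a common ibapi pattern is to take the order id from the nextValidId callback instead of reading nextValidOrderId right after reqIds, since that attribute may not be populated yet when placeOrder runs. This is a sketch of that pattern (reusing the imports from the question), not necessarily the fix for the 320 error itself:

import threading

class IBapi(EWrapper, EClient):
    def __init__(self):
        EClient.__init__(self, self)
        self.next_id = None
        self.id_ready = threading.Event()

    def nextValidId(self, orderId):
        # TWS calls this with the next usable order id
        self.next_id = orderId
        self.id_ready.set()

# ... connect and start the reader thread as in the question ...
# app.reqIds(-1)
# app.id_ready.wait(timeout=5)
# app.placeOrder(app.next_id, ctr, ord)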
320 Error after IBApi.EClient.placeOrder() in Python & Interactive Brokers
I am trying to place an order through Interactive Brokers' Python API but receive the error: ERROR 1 320 Error reading request: Unable to parse data. java.lang.NumberFormatException: For input string: "1.7976931348623157e+308" Connecting and retrieving data works fine but when submitting an order, one of my parameters seems to be wrong and I simply can't figure out what it is. I was closely following IB's documentation, so it really comes as a bit of a surprise to me. The error code (320) is not really telling, unfortunately, as IB merely describes it as a "Server error". The only related question I found online, links the error to an invalid ID but I checked mine and it should be fine. The code: from ibapi.client import EClient from ibapi.wrapper import EWrapper from ibapi.contract import Contract from ibapi.order import Order import threading class IBapi(EWrapper, EClient): def __init__(self): EClient.__init__(self, self) def run_loop(): app.run() app = IBapi() app.connect('127.0.0.1', 7496, 1) api_thread = threading.Thread(target = run_loop, daemon = True) api_thread.start() ctr = Contract() ctr.symbol = 'AAPL' ctr.secType = 'STK' ctr.exchange = 'SMART' ctr.currency = 'USD' ord = Order() ord.action = 'BUY' ord.orderType = 'LMT' ord.totalQuantity = 1 ord.lmtPrice = 150 app.reqIds(-1) id = app.nextValidOrderId print(id) print(isinstance(id, int)) app.placeOrder(id, ctr, ord) returns: 1 True ERROR 1 320 Error reading request: Unable to parse data. java.lang.NumberFormatException: For input string: "1.7976931348623157e+308" My TWS version is 10.20.1d, which is the latest as of now (since this fixed a somewhat related question). Can someone help me with what I am doing wrong, please?
[ "Using TWS 10.20.1d and API_Version=10.20.01 I find your code works with only a minor change with nextValidOrderId.\nSuggest checking API version, and upgrading if not latest version.\n" ]
[ 0 ]
[]
[]
[ "interactive_brokers", "java", "python" ]
stackoverflow_0074632771_interactive_brokers_java_python.txt
Q: Python Pandas - KeyError: 'username' : when username exist its showing key error when I am Trying to slice users data from csv who logged in I am a beginner in python and working with python pandas. I have created a program a demo of payment gateway system . It contains a login page and signup page . I want to display the main page when valid user logs in after that I want to extract the data of the only valid user in the form of Data Frame for a Function To display their profile Containing ( Name ,Email, Phone no.) but facing this error. This Function Executes after the login of valid user def Home(): print(1," : Profile") print(2," : Top-up") print(3," : Account Balance") print(4," : About us") print(5," : Back") print("") pref=int(input("Enter your Choice : ")) print("") if pref==1: p_csv=pd.read_csv('Data.csv') a=p_csv.loc["username"]==["user"] print(a) Output : KeyError: 'username' The Csv File Contains Certain Data and I want to extract the user logged in the system (Only a Single row of csv containing the user data) CSV File : username,password,Name,email,Phone Ramesh,Ramesh123,Ramesh Chaurasiya,[email protected],1234567890 pooja0,Pja125,Pooja Sharma,[email protected],7894561230 I was expecting it to display the Data frame containing users (name , email, phone) I also Faced multiple errors While changing this code like length not match (2,1) (1,) ... df empty[] only showing column names: username, password , name , email , phone index[] keyError: 'username' Below here is the whole code of the program ...... import pandas as pd #functions(): All Functions are here represented ...... #==================================================================================================== def Return(): return Main_Menu() #================================================================================================================= def Main_Menu(): print("") print("-------------------------------------------------------------") print(" ........... Main Menu ........... ") print("-------------------------------------------------------------") print("") print(1,": New user") print(2,": Log-In") print(3,": Exit") print("") inp=int(input("===> Enter your Choice: ")) if inp==1: Sign_Up() if inp==2: login() if inp==3: print("Thanks for visiting...") #======================================================================================== def Main_page(): while True: print("") print("--------------------------------------------------------------------") print(" .........Welcome To /\/[(O)......... ") print("--------------------------------------------------------------------") print("") print(1,": Home") print(2,": Transaction") print(3,": Account Statement") print(4,": Exit") print("") choice=int(input("Enter your preference from above options: ")) print("") if choice==1: print("") print("-------------------------------------------------------------------") print(" ............. Home ............ 
") print("-------------------------------------------------------------------") print("") Home() if choice==2: Transaction() if choice==3: Statement() if choice==4: print("Returning Back...") break #==================================================================================================== #profile #top-up #acc_balance #about us def Home(): print(1," : Profile") print(2," : Top-up") print(3," : Account Balance") print(4," : About us") print(5," : Back") print("") pref=int(input("Enter your Choice : ")) print("") if pref==1: p_csv=pd.read_csv('Data.csv') user_= "match_uname" a=p_csv.query("username == @user_") print(a) # ======> This is the place where i am facing Errors if pref==2: print("Top up") if pref==3: print("Balance") if pref==4: print("About us") if pref==5: Main_page() #=================================================================================== def Transaction(): while True: print("") else: Main_page() #==================================================================================== def Statement(): print("stateee") #==================================================================================== def Sign_Up(): _name=input("Enter your Full-Name: " ) E_mail=input("Enter your E-mail: " ) print("") print("Warning: Enter the username and password of 8 alphanumeric Digits (**) ---->") print("") n_user=input("Create Username: ") n_passwd=input("Create Password: ") mobile=int(input("Enter your mobile no.: ")) user_data={"username":[n_user],"password":[n_passwd],"Name":[_name],"email":[E_mail],"Phone":[mobile]} user_cred=pd.DataFrame(user_data) user_cred.to_csv('Data.csv',mode='a',index=True,header=0) print("") print("======Account Successfully Created======") Return() #=================================================================================================== def login(): log_cred=pd.read_csv('Data.csv') # Reading csv File.. print("") print("======= LOGIN ============") print("") user=input('username: ') passwd=input('password: ') # input data taken from user... match_uname=log_cred.loc[log_cred["username"]==user] # comparing the data given by user == True or False/... #================================================================================================ # ......All Function Execution Window........ #========================================================================================================== #login Execution.... if match_uname.empty: print("") print("Oops! Invalid Username `\(*_*)") print("") return False else: match_pass=log_cred.loc[log_cred["password"]==passwd] if match_pass.empty: print("") print("Invalid Password \(@_@)..Oops!") print("") return False else: print("") print("Valid username and password...<(~_~)>...Welcome") print("") Main_page() return True #================================================================================================== #Main menu execution .... print("") print("") a=input("Press Enter ...") if a=="": Main_Menu() else: print("Get lost") A: user = "Ramesh" a = p_csv.query("username == @user") print(a) username password Name email Phone 0 Ramesh Ramesh123 Ramesh Chaurasiya [email protected] 1234567890
Python Pandas - KeyError: 'username' : when username exist its showing key error when I am Trying to slice users data from csv who logged in
I am a beginner in python and working with python pandas. I have created a program a demo of payment gateway system . It contains a login page and signup page . I want to display the main page when valid user logs in after that I want to extract the data of the only valid user in the form of Data Frame for a Function To display their profile Containing ( Name ,Email, Phone no.) but facing this error. This Function Executes after the login of valid user def Home(): print(1," : Profile") print(2," : Top-up") print(3," : Account Balance") print(4," : About us") print(5," : Back") print("") pref=int(input("Enter your Choice : ")) print("") if pref==1: p_csv=pd.read_csv('Data.csv') a=p_csv.loc["username"]==["user"] print(a) Output : KeyError: 'username' The Csv File Contains Certain Data and I want to extract the user logged in the system (Only a Single row of csv containing the user data) CSV File : username,password,Name,email,Phone Ramesh,Ramesh123,Ramesh Chaurasiya,[email protected],1234567890 pooja0,Pja125,Pooja Sharma,[email protected],7894561230 I was expecting it to display the Data frame containing users (name , email, phone) I also Faced multiple errors While changing this code like length not match (2,1) (1,) ... df empty[] only showing column names: username, password , name , email , phone index[] keyError: 'username' Below here is the whole code of the program ...... import pandas as pd #functions(): All Functions are here represented ...... #==================================================================================================== def Return(): return Main_Menu() #================================================================================================================= def Main_Menu(): print("") print("-------------------------------------------------------------") print(" ........... Main Menu ........... ") print("-------------------------------------------------------------") print("") print(1,": New user") print(2,": Log-In") print(3,": Exit") print("") inp=int(input("===> Enter your Choice: ")) if inp==1: Sign_Up() if inp==2: login() if inp==3: print("Thanks for visiting...") #======================================================================================== def Main_page(): while True: print("") print("--------------------------------------------------------------------") print(" .........Welcome To /\/[(O)......... ") print("--------------------------------------------------------------------") print("") print(1,": Home") print(2,": Transaction") print(3,": Account Statement") print(4,": Exit") print("") choice=int(input("Enter your preference from above options: ")) print("") if choice==1: print("") print("-------------------------------------------------------------------") print(" ............. Home ............ 
") print("-------------------------------------------------------------------") print("") Home() if choice==2: Transaction() if choice==3: Statement() if choice==4: print("Returning Back...") break #==================================================================================================== #profile #top-up #acc_balance #about us def Home(): print(1," : Profile") print(2," : Top-up") print(3," : Account Balance") print(4," : About us") print(5," : Back") print("") pref=int(input("Enter your Choice : ")) print("") if pref==1: p_csv=pd.read_csv('Data.csv') user_= "match_uname" a=p_csv.query("username == @user_") print(a) # ======> This is the place where i am facing Errors if pref==2: print("Top up") if pref==3: print("Balance") if pref==4: print("About us") if pref==5: Main_page() #=================================================================================== def Transaction(): while True: print("") else: Main_page() #==================================================================================== def Statement(): print("stateee") #==================================================================================== def Sign_Up(): _name=input("Enter your Full-Name: " ) E_mail=input("Enter your E-mail: " ) print("") print("Warning: Enter the username and password of 8 alphanumeric Digits (**) ---->") print("") n_user=input("Create Username: ") n_passwd=input("Create Password: ") mobile=int(input("Enter your mobile no.: ")) user_data={"username":[n_user],"password":[n_passwd],"Name":[_name],"email":[E_mail],"Phone":[mobile]} user_cred=pd.DataFrame(user_data) user_cred.to_csv('Data.csv',mode='a',index=True,header=0) print("") print("======Account Successfully Created======") Return() #=================================================================================================== def login(): log_cred=pd.read_csv('Data.csv') # Reading csv File.. print("") print("======= LOGIN ============") print("") user=input('username: ') passwd=input('password: ') # input data taken from user... match_uname=log_cred.loc[log_cred["username"]==user] # comparing the data given by user == True or False/... #================================================================================================ # ......All Function Execution Window........ #========================================================================================================== #login Execution.... if match_uname.empty: print("") print("Oops! Invalid Username `\(*_*)") print("") return False else: match_pass=log_cred.loc[log_cred["password"]==passwd] if match_pass.empty: print("") print("Invalid Password \(@_@)..Oops!") print("") return False else: print("") print("Valid username and password...<(~_~)>...Welcome") print("") Main_page() return True #================================================================================================== #Main menu execution .... print("") print("") a=input("Press Enter ...") if a=="": Main_Menu() else: print("Get lost")
[ "user = \"Ramesh\"\na = p_csv.query(\"username == @user\")\nprint(a)\n\n username password Name email Phone\n0 Ramesh Ramesh123 Ramesh Chaurasiya [email protected] 1234567890\n\n" ]
[ 0 ]
[]
[]
[ "csv", "keyerror", "pandas", "python", "slice" ]
stackoverflow_0074672980_csv_keyerror_pandas_python_slice.txt
Q: Python Trouble Parsing a .max translated to OLE File => output unreadable in text format The following script outputs files unreadable in .txt format. Please advise. I inspired myself with: https://area.autodesk.com/m/drew.avis/tutorials/writing-and-reading-3ds-max-scene-sidecar-data-in-python This is to replicate a macho shark into a mechanical robot. import olefile # set this to your file f = r'C:\MRP\Shortfin_Mako_Shark_Rigged_scanline.max' def cleanString(data,isArray=False): # remove first 6 bytes + last byte data = data[6:] if isArray: data = data[:-1] return data with olefile.OleFileIO(f) as ole: ole.listdir() print(ole.listdir()) i = 0 for entry in ole.listdir(): i = i + 1 print(entry) if i > 2: fin = ole.openstream(entry) # myString = fin.read().decode("utf-16") # myString = cleanString(myString, isArray=True) fout = open(entry[0], "wb") print(fout) while True: s = fin.read(8192) if not s: break fout.write(s) Please advise. https://www.turbosquid.com/fr/3d-models/max-shortfin-mako-shark-rigged/991102# I also tried this: with olefile.OleFileIO(f) as ole: ole.listdir() print(ole.listdir()) i = 0 for entry in ole.listdir(): i = i + 1 print(entry) if i > 2: fin = ole.openstream(entry) #myString = fin.read().decode("utf-16") #myString = cleanString(myString, isArray=True) fout = open(entry[0], "w") print(fout) while True: s = fin.read(8192) if not s: break fout.write(cleanString(s, isArray = True).decode("utf-8")) # stream = ole.openstream('CustomFileStreamDataStorage/MyString') # myString = stream.read().decode('utf-16') # myString = cleanString(myString) # stream = ole.openstream('CustomFileStreamDataStorage/MyGeometry') # myGeometry = stream.read().decode('utf-16') # myGeometry = cleanString(myGeometry, isArray=True) # myGeometry = myGeometry.split('\x00') # stream = ole.openstream('CustomFileStreamDataStorage/MyLayers') # myLayers = stream.read().decode('utf-16') # myLayers = cleanString(myLayers, isArray=True) # myLayers = myLayers.split('\x00') # print ("My String: {}\nMy Geometry: {}\nMy Layers: {}".format (myString, myGeometry, myLayers)) What is the right encoding to decode from? Exception has occurred: UnicodeDecodeError 'utf-8' codec can't decode bytes in position 4-5: invalid continuation byte File "C:\MRP\ALG_LIN.py", line 59, in fout.write(cleanString(s, isArray = True).decode("utf-8")) Exception has occurred: UnicodeEncodeError 'charmap' codec can't encode characters in position 2-5: character maps to File "C:\MRP\ALG_LIN.py", line 59, in fout.write(cleanString(s, isArray = True).decode("utf-16")) KR, Ludo A: Try opening in binary mode instead of text mode
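In other words, the streams inside a .max compound file are arbitrary binary data, so they should be written out without any decoding. A minimal sketch of the extraction loop with the output opened in binary mode, keeping the olefile calls and the skip of the first two streams from the question:

with olefile.OleFileIO(f) as ole:
    i = 0
    for entry in ole.listdir():
        i += 1
        if i <= 2:
            continue  # skip the first two streams, as in the original script
        fin = ole.openstream(entry)
        # Join nested storage names into a flat file name for the dump
        out_name = "_".join(entry) + ".bin"
        with open(out_name, "wb") as fout:  # binary mode: no decode/encode step
            while True:
                chunk = fin.read(8192)
                if not chunk:
                    break
                fout.write(chunk)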
Python Trouble Parsing a .max translated to OLE File => output unreadable in text format
The following script outputs files unreadable in .txt format. Please advise. I inspired myself with: https://area.autodesk.com/m/drew.avis/tutorials/writing-and-reading-3ds-max-scene-sidecar-data-in-python This is to replicate a macho shark into a mechanical robot. import olefile # set this to your file f = r'C:\MRP\Shortfin_Mako_Shark_Rigged_scanline.max' def cleanString(data,isArray=False): # remove first 6 bytes + last byte data = data[6:] if isArray: data = data[:-1] return data with olefile.OleFileIO(f) as ole: ole.listdir() print(ole.listdir()) i = 0 for entry in ole.listdir(): i = i + 1 print(entry) if i > 2: fin = ole.openstream(entry) # myString = fin.read().decode("utf-16") # myString = cleanString(myString, isArray=True) fout = open(entry[0], "wb") print(fout) while True: s = fin.read(8192) if not s: break fout.write(s) Please advise. https://www.turbosquid.com/fr/3d-models/max-shortfin-mako-shark-rigged/991102# I also tried this: with olefile.OleFileIO(f) as ole: ole.listdir() print(ole.listdir()) i = 0 for entry in ole.listdir(): i = i + 1 print(entry) if i > 2: fin = ole.openstream(entry) #myString = fin.read().decode("utf-16") #myString = cleanString(myString, isArray=True) fout = open(entry[0], "w") print(fout) while True: s = fin.read(8192) if not s: break fout.write(cleanString(s, isArray = True).decode("utf-8")) # stream = ole.openstream('CustomFileStreamDataStorage/MyString') # myString = stream.read().decode('utf-16') # myString = cleanString(myString) # stream = ole.openstream('CustomFileStreamDataStorage/MyGeometry') # myGeometry = stream.read().decode('utf-16') # myGeometry = cleanString(myGeometry, isArray=True) # myGeometry = myGeometry.split('\x00') # stream = ole.openstream('CustomFileStreamDataStorage/MyLayers') # myLayers = stream.read().decode('utf-16') # myLayers = cleanString(myLayers, isArray=True) # myLayers = myLayers.split('\x00') # print ("My String: {}\nMy Geometry: {}\nMy Layers: {}".format (myString, myGeometry, myLayers)) What is the right encoding to decode from? Exception has occurred: UnicodeDecodeError 'utf-8' codec can't decode bytes in position 4-5: invalid continuation byte File "C:\MRP\ALG_LIN.py", line 59, in fout.write(cleanString(s, isArray = True).decode("utf-8")) Exception has occurred: UnicodeEncodeError 'charmap' codec can't encode characters in position 2-5: character maps to File "C:\MRP\ALG_LIN.py", line 59, in fout.write(cleanString(s, isArray = True).decode("utf-16")) KR, Ludo
[ "Try opening in binary mode instead of text mode\n" ]
[ 0 ]
[]
[]
[ "3dsmax", "python" ]
stackoverflow_0074673023_3dsmax_python.txt
Q: TypeError: listdir: path should be string, bytes, os.PathLike or None, not Namespace I am using Python 3.9, PyCharm 2022. My purpose (Ultimate goal of this question): create a command line application receive 2 parameters: Path of directory Extension of files then get size of files (Per file size, not sum of files size). import os import argparse from os import listdir from os.path import isfile, join def main(): parser = argparse.ArgumentParser() parser.add_argument("path", help="Path of directory.") parser.add_argument("ext", help="Extension of files (for example: jpg, png, exe, mp4, etc.") args1 = parser.parse_args() args2 = parser.parse_args() print(args1) arr = os.listdir(args1) print(arr) # os.path.getsize(args.path) # bytes_size = os.path.getsize(args1.path) # mb_size = int(bytes_size / 1024 / 1024) # print(mb_size, "MB") if __name__ == '__main__': main() My command and according error: (base) PS C:\Users\donhu\PycharmProjects\pythonProject4> python size.py 'D:' 'jpg' Traceback (most recent call last): File "C:\Users\donhu\PycharmProjects\pythonProject4\size.py", line 22, in <module> (base) PS C:\Users\donhu\PycharmProjects\pythonProject4> python size.py 'D:' 'jpg' Namespace(path='D:', ext='jpg') Traceback (most recent call last): File "C:\Users\donhu\PycharmProjects\pythonProject4\size.py", line 23, in <module> main() File "C:\Users\donhu\PycharmProjects\pythonProject4\size.py", line 13, in main arr = os.listdir(args1) TypeError: listdir: path should be string, bytes, os.PathLike or None, not Namespace (base) PS C:\Users\donhu\PycharmProjects\pythonProject4> How to fix? Update, I tried something import os import argparse from os import listdir from os.path import isfile, join from pathlib import * def main(): parser = argparse.ArgumentParser() parser.add_argument("path", help="Đường dẫn của thư mục") parser.add_argument("ext", help="Định dạng tập tin cần liệt kê kích thước.") args1 = parser.parse_args() args2 = parser.parse_args() foo = args1.path # arr = os.listdir('D:/') files = [x for x in foo.iterdir() if x.is_file()] print(files) # os.path.getsize(args.path) # bytes_size = os.path.getsize(args1.path) # mb_size = int(bytes_size / 1024 / 1024) # print(mb_size, "MB") if __name__ == '__main__': main() but not work. A: The os module holds the traditional interface into the file system. It closely follows the Clib interface so you'll see functions like listdir and stat. pathlib is a new object oriented "pythonic" interface to the file system. One can argue whether its better, but I use it, so its gotta be, right? It looks like you are mixing "old" and "new" ways of doing things, which gets confusing. If you want to use pathlib, try to use it for everything. Here is your script re-imagined for pathlib. You only need to parse the command line once and then build a Path object for the directory of interest. 
import argparse from pathlib import Path def main(): parser = argparse.ArgumentParser() parser.add_argument("path", help="Đường dẫn của thư mục") parser.add_argument("ext", help="Định dạng tập tin cần liệt kê kích thước.") args = parser.parse_args() foo = Path(args.path) if not foo.is_dir(): print("Error: Must be a directory") exit(1) files = [x for x in foo.iterdir() if x.is_file()] print(files) # os.path.getsize(args.path) bytes_size = sum(file.stat().st_size for file in files) print("total bytes", bytes_size) # mb_size = int(bytes_size / 1024 / 1024) # print(mb_size, "MB") if __name__ == '__main__': main() If you want to use the ext parameter, you would change from iterdir to glob. files = [x for x in foo.glob(f"*.{args.ext}") if x.is_file()] or files = [x for x in foo.glob(f"**/*.{args.ext}") if x.is_file()] depending on whether you want just the directory or its subtree. A: Argparse's parse_args() function returns a Namespace object. I believe your goal was to pass the path argument, you have to access it as an attribute. os.listdir(args1.path) A: Program import os import argparse from pathlib import Path def main(): parser = argparse.ArgumentParser() parser.add_argument("path", help="Path of directory/folder") parser.add_argument("ext", help="Extension of file what need get size.") args = parser.parse_args() foo = Path(args.path) files = [x for x in foo.glob(f"*.{args.ext}") if x.is_file()] for file in files: print(file.__str__(), os.path.getsize(file)) if __name__ == '__main__': main() # python size.py "D:\" 'jpg' # (base) PS C:\Users\donhu\PycharmProjects\pythonProject4> python size.py "D:\" 'jpg' # D:\1496231_10152440570407564_3432420_o.jpg 241439 # D:\15002366_278058419262140_505451777021235_o.jpg 598063 # D:\1958485_703442046353041_1444502_n.jpg 63839 # D:\277522952_5065319530178162_680264454398630_n.jpg 335423
TypeError: listdir: path should be string, bytes, os.PathLike or None, not Namespace
I am using Python 3.9, PyCharm 2022. My purpose (Ultimate goal of this question): create a command line application receive 2 parameters: Path of directory Extension of files then get size of files (Per file size, not sum of files size). import os import argparse from os import listdir from os.path import isfile, join def main(): parser = argparse.ArgumentParser() parser.add_argument("path", help="Path of directory.") parser.add_argument("ext", help="Extension of files (for example: jpg, png, exe, mp4, etc.") args1 = parser.parse_args() args2 = parser.parse_args() print(args1) arr = os.listdir(args1) print(arr) # os.path.getsize(args.path) # bytes_size = os.path.getsize(args1.path) # mb_size = int(bytes_size / 1024 / 1024) # print(mb_size, "MB") if __name__ == '__main__': main() My command and according error: (base) PS C:\Users\donhu\PycharmProjects\pythonProject4> python size.py 'D:' 'jpg' Traceback (most recent call last): File "C:\Users\donhu\PycharmProjects\pythonProject4\size.py", line 22, in <module> (base) PS C:\Users\donhu\PycharmProjects\pythonProject4> python size.py 'D:' 'jpg' Namespace(path='D:', ext='jpg') Traceback (most recent call last): File "C:\Users\donhu\PycharmProjects\pythonProject4\size.py", line 23, in <module> main() File "C:\Users\donhu\PycharmProjects\pythonProject4\size.py", line 13, in main arr = os.listdir(args1) TypeError: listdir: path should be string, bytes, os.PathLike or None, not Namespace (base) PS C:\Users\donhu\PycharmProjects\pythonProject4> How to fix? Update, I tried something import os import argparse from os import listdir from os.path import isfile, join from pathlib import * def main(): parser = argparse.ArgumentParser() parser.add_argument("path", help="Đường dẫn của thư mục") parser.add_argument("ext", help="Định dạng tập tin cần liệt kê kích thước.") args1 = parser.parse_args() args2 = parser.parse_args() foo = args1.path # arr = os.listdir('D:/') files = [x for x in foo.iterdir() if x.is_file()] print(files) # os.path.getsize(args.path) # bytes_size = os.path.getsize(args1.path) # mb_size = int(bytes_size / 1024 / 1024) # print(mb_size, "MB") if __name__ == '__main__': main() but not work.
[ "The os module holds the traditional interface into the file system. It closely follows the Clib interface so you'll see functions like listdir and stat. pathlib is a new object oriented \"pythonic\" interface to the file system. One can argue whether its better, but I use it, so its gotta be, right?\nIt looks like you are mixing \"old\" and \"new\" ways of doing things, which gets confusing. If you want to use pathlib, try to use it for everything.\nHere is your script re-imagined for pathlib. You only need to parse the command line once and then build a Path object for the directory of interest.\nimport argparse\nfrom pathlib import Path\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"path\", help=\"Đường dẫn của thư mục\")\n parser.add_argument(\"ext\", help=\"Định dạng tập tin cần liệt kê kích thước.\")\n args = parser.parse_args()\n foo = Path(args.path)\n if not foo.is_dir():\n print(\"Error: Must be a directory\")\n exit(1)\n files = [x for x in foo.iterdir() if x.is_file()]\n print(files)\n # os.path.getsize(args.path)\n bytes_size = sum(file.stat().st_size for file in files)\n print(\"total bytes\", bytes_size)\n # mb_size = int(bytes_size / 1024 / 1024)\n # print(mb_size, \"MB\")\n\nif __name__ == '__main__':\n main()\n\nIf you want to use the ext parameter, you would change from iterdir to glob.\nfiles = [x for x in foo.glob(f\"*.{args.ext}\") if x.is_file()]\n\nor\nfiles = [x for x in foo.glob(f\"**/*.{args.ext}\") if x.is_file()]\n\ndepending on whether you want just the directory or its subtree.\n", "Argparse's parse_args() function returns a Namespace object. I believe your goal was to pass the path argument, you have to access it as an attribute.\nos.listdir(args1.path)\n\n", "Program\nimport os\nimport argparse\nfrom pathlib import Path\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"path\", help=\"Path of directory/folder\")\n parser.add_argument(\"ext\", help=\"Extension of file what need get size.\")\n args = parser.parse_args()\n foo = Path(args.path)\n files = [x for x in foo.glob(f\"*.{args.ext}\") if x.is_file()]\n for file in files:\n print(file.__str__(), os.path.getsize(file))\n\n\nif __name__ == '__main__':\n main()\n \n# python size.py \"D:\\\" 'jpg'\n\n# (base) PS C:\\Users\\donhu\\PycharmProjects\\pythonProject4> python size.py \"D:\\\" 'jpg'\n# D:\\1496231_10152440570407564_3432420_o.jpg 241439\n# D:\\15002366_278058419262140_505451777021235_o.jpg 598063\n# D:\\1958485_703442046353041_1444502_n.jpg 63839\n# D:\\277522952_5065319530178162_680264454398630_n.jpg 335423\n\n" ]
[ 1, 0, 0 ]
[ "Your two command line arguments are being returned as a single object of the argparse.Namespace class, both stored identically in your args1 and (the superfluous) args2 variables.\nInserting the following line after your calls to parse_args() and commenting out the subsequent code would illuminate this a little more:\nprint(type(args1))\n\nTo access the values you named in your calls to add_argument(), use this syntax:\nargs1.path\nargs1.ext\n\nsuch as\narr = os.listdir(args1.path)\n\nFor further discussion, see this answer: Accessing argument values for argparse in Python\n" ]
[ -1 ]
[ "python" ]
stackoverflow_0074672824_python.txt
Q: I can't understand what's wrong - Python multiple text replace dictionary I can't understand what happen. I'm trying to make this script to replace multiple text files using a list of pairs, but only the first pair is working, the others are not processed. Did I make any mistakes in the loops? replacements = [ ('Dog', 'Cat'), ('Lazy', 'Smart'), ('Fat', 'Slim'), ] import re import sys if __name__ == "__main__": if len(sys.argv) < 2 or len(sys.argv) > 4: print("Invalid argument(s)") exit() with open(sys.argv[1], "r") as f: print(f"Reading {sys.argv[1]}") new_lines = "" for old, new in replacements: for l in f: new_lines += re.sub(old, new, l) with open(sys.argv[2] or sys.argv[1], "w") as f: print(f"Writing into '{sys.argv[2] or sys.argv[1]}'") f.write(new_lines) A: The double for loop is causing the issue. Reading the file contents only once fixes the issue. replacements = [ ('Dog', 'Cat'), ('Lazy', 'Smart'), ('Fat', 'Slim'), ] import re import sys if __name__ == "__main__": if len(sys.argv) < 2 or len(sys.argv) > 4: print("Invalid argument(s)") exit() with open(sys.argv[1], "r") as f: print(f"Reading {sys.argv[1]}") new_lines = f.read() for old, new in replacements: new_lines = re.sub(old, new, new_lines) with open(sys.argv[2] or sys.argv[1], "w") as f: print(f"Writing into '{sys.argv[2] or sys.argv[1]}'") f.write(new_lines) The file will be read only once for the first replacement pairs, then it will be exhausted as all elements of the file have already been read once. Therefore, for the next replacements, pairs file contents will not be read. That's why it's only working for Dog and Cat pair and not the rest.
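The same fix can also keep the line-by-line structure by swapping the loop order, so every line passes through all replacement pairs before the file iterator matters at all. A sketch reusing the replacements list, re, and sys from the original script:

with open(sys.argv[1]) as f:
    lines = f.readlines()

new_lines = ""
for line in lines:
    for old, new in replacements:
        line = re.sub(old, new, line)
    new_lines += line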
I can't understand what's wrong - Python multiple text replace dictionary
I can't understand what happen. I'm trying to make this script to replace multiple text files using a list of pairs, but only the first pair is working, the others are not processed. Did I make any mistakes in the loops? replacements = [ ('Dog', 'Cat'), ('Lazy', 'Smart'), ('Fat', 'Slim'), ] import re import sys if __name__ == "__main__": if len(sys.argv) < 2 or len(sys.argv) > 4: print("Invalid argument(s)") exit() with open(sys.argv[1], "r") as f: print(f"Reading {sys.argv[1]}") new_lines = "" for old, new in replacements: for l in f: new_lines += re.sub(old, new, l) with open(sys.argv[2] or sys.argv[1], "w") as f: print(f"Writing into '{sys.argv[2] or sys.argv[1]}'") f.write(new_lines)
[ "The double for loop is causing the issue. Reading the file contents only once fixes the issue.\nreplacements = [\n ('Dog', 'Cat'),\n ('Lazy', 'Smart'),\n ('Fat', 'Slim'),\n]\n\nimport re\nimport sys\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 2 or len(sys.argv) > 4:\n print(\"Invalid argument(s)\")\n exit()\n with open(sys.argv[1], \"r\") as f:\n print(f\"Reading {sys.argv[1]}\")\n new_lines = f.read()\n\n for old, new in replacements:\n new_lines = re.sub(old, new, new_lines)\n \n \n with open(sys.argv[2] or sys.argv[1], \"w\") as f:\n print(f\"Writing into '{sys.argv[2] or sys.argv[1]}'\")\n f.write(new_lines)\n\nThe file will be read only once for the first replacement pairs, then it will be exhausted as all elements of the file have already been read once.\nTherefore, for the next replacements, pairs file contents will not be read. That's why it's only working for Dog and Cat pair and not the rest.\n" ]
[ 0 ]
[]
[]
[ "list", "python", "replace", "text" ]
stackoverflow_0074672969_list_python_replace_text.txt
Q: Validate if my mini project game can be past through Test_Python.py? So basically i am creating my version of rock ,paper, scissor game as a python project and i need help running it through by testing or passing it which i forgot how to becuase i took a few days break working on my project and forgot how to test it and this is my project: import random import math def play(): user = input("What's your choice? 'g' for gun, 'i' for ice, 'k' for knife\n") user = user.lower() computer = random.choice(['g', 'i', 'k']) if user == computer: return (0, user, computer) if is_win(user, computer): return (1, user, computer) return (-1, user, computer) def is_win(player, opponent): if (player == 'g' and opponent == 'k') or (player == 'k' and opponent == 'i') or (player == 'i' and opponent == 'k'): return True return False def play_best_of(n): player_wins = 0 computer_wins = 0 wins_necessary = math.ceil(n/2) while player_wins < wins_necessary and computer_wins < wins_necessary: result, user, computer = play() if result == 0: print('It is a tie. You and the Machine have both chosen {}. \n'.format(user)) elif result == 1: player_wins += 1 print('You chose {} and the Machine chose {}. Yippy You won! ;[\n'.format(user, computer)) else: computer_wins += 1 print('You chose {} and the Machine chose {}. Darn it You lost! :[\n'.format(user, computer)) if player_wins > computer_wins: print('You have won the best of {} games Congrats! What a player :D'.format(n)) else: print('Sadly, the Machine has won the best of {} games. Better luck next time!'.format(n)) if __name__ == '__main__': play_best_of(3) (Note: This is all i came up with i just need help passing through it, how do i test it basically?) i did tr but i keep getting pops up of a few problems here and there. i do have from project import play, is_win, play_best_of def play(): def is_win(): def play_best_of(n): if __name__ == '__main__': play_best_of(3) for my test prompt but i forgot how to assert i think? PLEASE HELP! A: I believe you're talking about writing unit tests. There are two libraries commonly used for unit testing in Python, the built in unittest library and the third-party pytest. I would personally recommend that you use pytest because the syntax is much simpler. Refer to the unittest and pytest docs for further usage information. A basic passing pytest example: test_win.py from project import is_win def test_win_gk(): assert is_win('g', 'k') == True You can run this test by executing pytest in the same directory. This will execute every test in each file prefixed or suffixed with test. (test_something.py or something_test.py)
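The interactive play() function can also be covered by tests, by substituting input() and random.choice with fixed values through pytest's monkeypatch fixture. A sketch, assuming the game code is saved as project.py to match the from project import ... line in the question (test names are illustrative):

# test_project.py
import random
from project import is_win, play

def test_is_win():
    assert is_win('g', 'k') is True
    assert is_win('k', 'g') is False

def test_play_player_wins(monkeypatch):
    # fix both the player's input and the computer's random pick
    monkeypatch.setattr('builtins.input', lambda prompt: 'g')
    monkeypatch.setattr(random, 'choice', lambda options: 'k')
    assert play() == (1, 'g', 'k')

Running pytest in the same folder discovers and executes both tests.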
Validate if my mini project game can be past through Test_Python.py?
So basically i am creating my version of rock ,paper, scissor game as a python project and i need help running it through by testing or passing it which i forgot how to becuase i took a few days break working on my project and forgot how to test it and this is my project: import random import math def play(): user = input("What's your choice? 'g' for gun, 'i' for ice, 'k' for knife\n") user = user.lower() computer = random.choice(['g', 'i', 'k']) if user == computer: return (0, user, computer) if is_win(user, computer): return (1, user, computer) return (-1, user, computer) def is_win(player, opponent): if (player == 'g' and opponent == 'k') or (player == 'k' and opponent == 'i') or (player == 'i' and opponent == 'k'): return True return False def play_best_of(n): player_wins = 0 computer_wins = 0 wins_necessary = math.ceil(n/2) while player_wins < wins_necessary and computer_wins < wins_necessary: result, user, computer = play() if result == 0: print('It is a tie. You and the Machine have both chosen {}. \n'.format(user)) elif result == 1: player_wins += 1 print('You chose {} and the Machine chose {}. Yippy You won! ;[\n'.format(user, computer)) else: computer_wins += 1 print('You chose {} and the Machine chose {}. Darn it You lost! :[\n'.format(user, computer)) if player_wins > computer_wins: print('You have won the best of {} games Congrats! What a player :D'.format(n)) else: print('Sadly, the Machine has won the best of {} games. Better luck next time!'.format(n)) if __name__ == '__main__': play_best_of(3) (Note: This is all i came up with i just need help passing through it, how do i test it basically?) i did tr but i keep getting pops up of a few problems here and there. i do have from project import play, is_win, play_best_of def play(): def is_win(): def play_best_of(n): if __name__ == '__main__': play_best_of(3) for my test prompt but i forgot how to assert i think? PLEASE HELP!
[ "I believe you're talking about writing unit tests. There are two libraries commonly used for unit testing in Python, the built in unittest library and the third-party pytest.\nI would personally recommend that you use pytest because the syntax is much simpler. Refer to the unittest and pytest docs for further usage information.\nA basic passing pytest example:\ntest_win.py\nfrom project import is_win\n\ndef test_win_gk():\n assert is_win('g', 'k') == True\n\nYou can run this test by executing pytest in the same directory. This will execute every test in each file prefixed or suffixed with test. (test_something.py or something_test.py)\n" ]
[ 0 ]
[]
[]
[ "project", "python", "unit_testing" ]
stackoverflow_0074673060_project_python_unit_testing.txt
Q: How do I create a magic square matrix using python A basket is given to you in the shape of a matrix. If the size of the matrix is N x N then the range of number of eggs you can put in each slot of the basket is 1 to N2 . You task is to arrange the eggs in the basket such that the sum of each row, column and the diagonal of the matrix remain same This code is working only for odd numbers but not even numbers. here's my code that i tried but it didn't work ` def matrix(n): m = [[0 for x in range(n)] for y in range(n)] i = n / 2 j = n - 1 num = 1 while num <= (n * n): if i == -1 and j == n: j = n - 2 i = 0 else: if j == n: j = 0 if i < 0: i = n - 1 if m[int(i)][int(j)]: j = j - 2 i = i + 1 continue else: m[int(i)][int(j)] = num num = num + 1 j = j + 1 i = i - 1 print ("Sum of eggs in each row or column and diagonal ",n * (n * n + 1) / 2, "\n") for i in range(0, n): for j in range(0, n): print('%2d ' % (m[i][j]),end = '') if j == n - 1: print() n=int(input("Number of rows of matrix:")) matrix(n) ` A: def matrix(n): m = [[0 for x in range(n)] for y in range(n)] i = n / 2 j = n - 1 num = 1 while num <= (n * n): if i == -1 and j == n: j = n - 2 i = 0 else: if j == n: j = 0 if i < 0: i = n - 1 if m[int(i)][int(j)]: j = j - 2 i = i + 1 continue else: m[int(i)][int(j)] = num num = num + 1 j = j + 1 i = i - 1 print ("Sum of eggs in each row or column and diagonal ",n * (n * n + 1) / 2, "\n") for i in range(0, n): for j in range(0, n): print('%2d ' % (m[i][j]),end = '') if j == n - 1: print() n=int(input("Number of rows of matrix:")) matrix(n)
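The routine above is the Siamese method, which only fills odd orders correctly. For n divisible by 4 there is an equally short construction: write 1..n*n in reading order, then replace every entry lying on the two diagonals of each 4x4 block with its complement n*n + 1 - value. A sketch of that idea (the function name is illustrative; singly even sizes such as 6 or 10 need yet another construction, e.g. the LUX/Strachey method, which is not shown here):

def doubly_even_magic(n):
    if n % 4 != 0:
        raise ValueError("this construction needs n divisible by 4")
    m = [[i * n + j + 1 for j in range(n)] for i in range(n)]
    for i in range(n):
        for j in range(n):
            # complement the cells on the diagonals of every 4x4 block
            if i % 4 == j % 4 or (i % 4) + (j % 4) == 3:
                m[i][j] = n * n + 1 - m[i][j]
    return m

for row in doubly_even_magic(4):
    print(row)   # each row, column and diagonal sums to 34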
How do I create a magic square matrix using python
A basket is given to you in the shape of a matrix. If the size of the matrix is N x N then the range of number of eggs you can put in each slot of the basket is 1 to N2 . You task is to arrange the eggs in the basket such that the sum of each row, column and the diagonal of the matrix remain same This code is working only for odd numbers but not even numbers. here's my code that i tried but it didn't work ` def matrix(n): m = [[0 for x in range(n)] for y in range(n)] i = n / 2 j = n - 1 num = 1 while num <= (n * n): if i == -1 and j == n: j = n - 2 i = 0 else: if j == n: j = 0 if i < 0: i = n - 1 if m[int(i)][int(j)]: j = j - 2 i = i + 1 continue else: m[int(i)][int(j)] = num num = num + 1 j = j + 1 i = i - 1 print ("Sum of eggs in each row or column and diagonal ",n * (n * n + 1) / 2, "\n") for i in range(0, n): for j in range(0, n): print('%2d ' % (m[i][j]),end = '') if j == n - 1: print() n=int(input("Number of rows of matrix:")) matrix(n) `
[ "def matrix(n): \nm = [[0 for x in range(n)] \n for y in range(n)]\ni = n / 2\nj = n - 1\nnum = 1\nwhile num <= (n * n): \n if i == -1 and j == n:\n j = n - 2\n i = 0\n else:\n if j == n: \n j = 0 \n if i < 0: \n i = n - 1\n if m[int(i)][int(j)]:\n j = j - 2\n i = i + 1\n continue\n else: \n m[int(i)][int(j)] = num \n num = num + 1\n j = j + 1\n i = i - 1\nprint (\"Sum of eggs in each row or column and diagonal \",n * (n * n + 1) / 2, \"\\n\") \nfor i in range(0, n): \n for j in range(0, n): \n print('%2d ' % (m[i][j]),end = '') \n if j == n - 1: \n print()\n\nn=int(input(\"Number of rows of matrix:\"))\nmatrix(n)\n" ]
[ 0 ]
[]
[]
[ "computer_science", "magic_square", "matrix", "python", "python_3.x" ]
stackoverflow_0074384748_computer_science_magic_square_matrix_python_python_3.x.txt
Q: Get confidence interval from sklearn linear regression in python I want to get a confidence interval of the result of a linear regression. I'm working with the boston house price dataset. I've found this question: How to calculate the 99% confidence interval for the slope in a linear regression model in python? However, this doesn't quite answer my question. Here is my code: import numpy as np import matplotlib.pyplot as plt from math import pi import pandas as pd import seaborn as sns from sklearn.datasets import load_boston from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error, r2_score # import the data boston_dataset = load_boston() boston = pd.DataFrame(boston_dataset.data, columns=boston_dataset.feature_names) boston['MEDV'] = boston_dataset.target X = pd.DataFrame(np.c_[boston['LSTAT'], boston['RM']], columns=['LSTAT', 'RM']) Y = boston['MEDV'] # splits the training and test data set in 80% : 20% # assign random_state to any value.This ensures consistency. X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=5) lin_model = LinearRegression() lin_model.fit(X_train, Y_train) # model evaluation for training set y_train_predict = lin_model.predict(X_train) rmse = (np.sqrt(mean_squared_error(Y_train, y_train_predict))) r2 = r2_score(Y_train, y_train_predict) # model evaluation for testing set y_test_predict = lin_model.predict(X_test) # root mean square error of the model rmse = (np.sqrt(mean_squared_error(Y_test, y_test_predict))) # r-squared score of the model r2 = r2_score(Y_test, y_test_predict) plt.scatter(Y_test, y_test_predict) plt.show() How can I get, for instance, the 95% or 99% confidence interval from this? Is there some sort of in-built function or piece of code? A: I am not sure if there is any in-built function for this purpose, but what I do is create a loop on n no. of times and compare the accuracy of all the models and save the model with highest accuracy with pickle and use reuse it later. Here goes the code: for _ in range(30): x_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(X, y, test_size=0.1) linear = linear_model.LinearRegression() linear.fit(x_train, y_train) acc = linear.score(x_test, y_test) print("Accuracy: " + str(acc)) if acc > best: best = acc with open("confidence_interval.pickle", "wb") as f: pickle.dump(linear, f) print("The best Accuracy: ", best) You can always make changes to the given variables as I know the variables that you have provided varies greatly from mine. and if you want to predict the class possibilities you can use predict_proba. Refer to this link for difference between predict and predict_proba https://www.kaggle.com/questions-and-answers/82657 A: If you're looking to compute the confidence interval of the regression parameters, one way is to manually compute it using the results of LinearRegression from scikit-learn and numpy methods. The code below computes the 95%-confidence interval (alpha=0.05). alpha=0.01 would compute 99%-confidence interval etc. import numpy as np import pandas as pd from scipy import stats from sklearn.linear_model import LinearRegression alpha = 0.05 # for 95% confidence interval; use 0.01 for 99%-CI. 
# fit a sklearn LinearRegression model lin_model = LinearRegression().fit(X_train, Y_train) # the coefficients of the regression model coefs = np.r_[[lin_model.intercept_], lin_model.coef_] # build an auxiliary dataframe with the constant term in it X_aux = X_train.copy() X_aux.insert(0, 'const', 1) # degrees of freedom dof = -np.diff(X_aux.shape)[0] # Student's t-distribution table lookup t_val = stats.t.isf(alpha/2, dof) # MSE of the residuals mse = np.sum((Y_train - lin_model.predict(X_train)) ** 2) / dof # inverse of the variance of the parameters var_params = np.diag(np.linalg.inv(X_aux.T.dot(X_aux))) # distance between lower and upper bound of CI gap = t_val * np.sqrt(mse * var_params) conf_int = pd.DataFrame({'lower': coefs - gap, 'upper': coefs + gap}, index=X_aux.columns) Using the Boston housing dataset, the above code produces the dataframe below: If this is too much manual code, you can always resort to the statsmodels and use its conf_int method: import statsmodels.api as sm alpha = 0.05 # 95% confidence interval lr = sm.OLS(Y_train, sm.add_constant(X_train)).fit() conf_interval = lr.conf_int(alpha) Since it uses the same formula, it produces the same output as above. Stats reference
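The statsmodels route above extends naturally to intervals around the predictions themselves, which is another common reading of "confidence interval of the result". A sketch building on the same fitted model (it assumes X_test has the same columns as X_train; summary_frame also reports the wider observation-level prediction interval):

import statsmodels.api as sm

alpha = 0.05
lr = sm.OLS(Y_train, sm.add_constant(X_train)).fit()
pred = lr.get_prediction(sm.add_constant(X_test))
frame = pred.summary_frame(alpha=alpha)
print(frame[['mean', 'mean_ci_lower', 'mean_ci_upper',
             'obs_ci_lower', 'obs_ci_upper']].head())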
Get confidence interval from sklearn linear regression in python
I want to get a confidence interval of the result of a linear regression. I'm working with the boston house price dataset. I've found this question: How to calculate the 99% confidence interval for the slope in a linear regression model in python? However, this doesn't quite answer my question. Here is my code: import numpy as np import matplotlib.pyplot as plt from math import pi import pandas as pd import seaborn as sns from sklearn.datasets import load_boston from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error, r2_score # import the data boston_dataset = load_boston() boston = pd.DataFrame(boston_dataset.data, columns=boston_dataset.feature_names) boston['MEDV'] = boston_dataset.target X = pd.DataFrame(np.c_[boston['LSTAT'], boston['RM']], columns=['LSTAT', 'RM']) Y = boston['MEDV'] # splits the training and test data set in 80% : 20% # assign random_state to any value.This ensures consistency. X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=5) lin_model = LinearRegression() lin_model.fit(X_train, Y_train) # model evaluation for training set y_train_predict = lin_model.predict(X_train) rmse = (np.sqrt(mean_squared_error(Y_train, y_train_predict))) r2 = r2_score(Y_train, y_train_predict) # model evaluation for testing set y_test_predict = lin_model.predict(X_test) # root mean square error of the model rmse = (np.sqrt(mean_squared_error(Y_test, y_test_predict))) # r-squared score of the model r2 = r2_score(Y_test, y_test_predict) plt.scatter(Y_test, y_test_predict) plt.show() How can I get, for instance, the 95% or 99% confidence interval from this? Is there some sort of in-built function or piece of code?
[ "I am not sure if there is any in-built function for this purpose, but what I do is create a loop on n no. of times and compare the accuracy of all the models and save the model with highest accuracy with pickle and use reuse it later.\nHere goes the code:\nfor _ in range(30):\nx_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(X, y, test_size=0.1)\n\nlinear = linear_model.LinearRegression()\n\nlinear.fit(x_train, y_train)\nacc = linear.score(x_test, y_test)\nprint(\"Accuracy: \" + str(acc))\n\nif acc > best:\n best = acc\n with open(\"confidence_interval.pickle\", \"wb\") as f:\n pickle.dump(linear, f)\n print(\"The best Accuracy: \", best)\n\nYou can always make changes to the given variables as I know the variables that you have provided varies greatly from mine. and if you want to predict the class possibilities you can use predict_proba. Refer to this link for difference between predict and predict_proba https://www.kaggle.com/questions-and-answers/82657\n", "If you're looking to compute the confidence interval of the regression parameters, one way is to manually compute it using the results of LinearRegression from scikit-learn and numpy methods.\nThe code below computes the 95%-confidence interval (alpha=0.05). alpha=0.01 would compute 99%-confidence interval etc.\nimport numpy as np\nimport pandas as pd\nfrom scipy import stats\nfrom sklearn.linear_model import LinearRegression\n\nalpha = 0.05 # for 95% confidence interval; use 0.01 for 99%-CI.\n\n# fit a sklearn LinearRegression model\nlin_model = LinearRegression().fit(X_train, Y_train)\n\n# the coefficients of the regression model\ncoefs = np.r_[[lin_model.intercept_], lin_model.coef_]\n# build an auxiliary dataframe with the constant term in it\nX_aux = X_train.copy()\nX_aux.insert(0, 'const', 1)\n# degrees of freedom\ndof = -np.diff(X_aux.shape)[0]\n# Student's t-distribution table lookup\nt_val = stats.t.isf(alpha/2, dof)\n# MSE of the residuals\nmse = np.sum((Y_train - lin_model.predict(X_train)) ** 2) / dof\n# inverse of the variance of the parameters\nvar_params = np.diag(np.linalg.inv(X_aux.T.dot(X_aux)))\n# distance between lower and upper bound of CI\ngap = t_val * np.sqrt(mse * var_params)\n\nconf_int = pd.DataFrame({'lower': coefs - gap, 'upper': coefs + gap}, index=X_aux.columns)\n\nUsing the Boston housing dataset, the above code produces the dataframe below:\n\n\nIf this is too much manual code, you can always resort to the statsmodels and use its conf_int method:\nimport statsmodels.api as sm\nalpha = 0.05 # 95% confidence interval\nlr = sm.OLS(Y_train, sm.add_constant(X_train)).fit()\nconf_interval = lr.conf_int(alpha)\n\nSince it uses the same formula, it produces the same output as above.\nStats reference\n" ]
[ 0, 0 ]
[]
[]
[ "linear_regression", "python", "scikit_learn" ]
stackoverflow_0061292464_linear_regression_python_scikit_learn.txt
Q: cant enter password in twine pypi package upload PS D:\Python> cd ClockBlock PS D:\Python\ClockBlock> python3 -m twine upload --repository testpypi dist/* Uploading distributions to https://test.pypi.org/legacy/ Enter your username: MYNAME Enter your password: Desc for img I cant enter anything in password, can someone help, i bashed a bunch of keys and it didnt output anything
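The prompt hides keystrokes on purpose (getpass-style input), so typing the password and pressing Enter does work even though nothing is shown. If typing blind is a problem, twine also reads credentials from environment variables or a .pypirc file; a sketch for the PowerShell session above, with placeholder token text:

# PowerShell: set credentials before running twine
$env:TWINE_USERNAME = "__token__"
$env:TWINE_PASSWORD = "pypi-XXXXXXXXXXXXXXXX"   # an API token from test.pypi.org
python3 -m twine upload --repository testpypi dist/*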
cant enter password in twine pypi package upload
PS D:\Python> cd ClockBlock PS D:\Python\ClockBlock> python3 -m twine upload --repository testpypi dist/* Uploading distributions to https://test.pypi.org/legacy/ Enter your username: MYNAME Enter your password: Desc for img I cant enter anything in password, can someone help, i bashed a bunch of keys and it didnt output anything
[]
[]
[ "SO.. it just didnt show the password for privacy reasons it underastood what i was typing though\n" ]
[ -1 ]
[ "pypi", "python", "python_3.x", "python_packaging", "twine" ]
stackoverflow_0074672962_pypi_python_python_3.x_python_packaging_twine.txt
Q: I want to solve this question but I am finding some difficulty help me to find the solution Help me to find out the solution of this question Ms. Gabriel Williams is a botany professor at District College. One day, she asked her student Mickey to compute the average of all the plants with distinct heights in her greenhouse. Function Description average has the following parameters: int arr: an array of integers Returns float: the resulting float value rounded to 3 places after the decimal I want the output of the problem I am finding some difficulty in it. A: I think giving you the answer would probably defeat the purpose of the question, but I'll try to reframe the crux of the problem: The height of each plant may be repeated multiple times in the input array, we need to make sure that we only count each height once when computing the total height. There exists a data structure that is very effective at counting such elements. If you don't know of this data structure, consider using an array to store a "key" (the number that has been seen), and corresponding "value" (the count of times the number has been seen) locally. (The optimal data structure might not even need this value!)
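The answer below intentionally stops short of the code; for completeness, the data structure it hints at is a set, and a minimal sketch of the described approach is:

def average(arr):
    distinct = set(arr)                 # every height counted once
    return round(sum(distinct) / len(distinct), 3)

print(average([161, 182, 161, 154, 176, 170, 167, 171, 170, 174]))   # 169.375

The sample list and its result here are illustrative, not taken from the original exercise statement.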
I want to solve this question but I am finding some difficulty help me to find the solution
Help me to find out the solution of this question Ms. Gabriel Williams is a botany professor at District College. One day, she asked her student Mickey to compute the average of all the plants with distinct heights in her greenhouse. Function Description average has the following parameters: int arr: an array of integers Returns float: the resulting float value rounded to 3 places after the decimal I want the output of the problem I am finding some difficulty in it.
[ "I think giving you the answer would probably defeat the purpose of the question, but I'll try to reframe the crux of the problem:\nThe height of each plant may be repeated multiple times in the input array, we need to make sure that we only count each height once when computing the total height.\nThere exists a data structure that is very effective at counting such elements. If you don't know of this data structure, consider using an array to store a \"key\" (the number that has been seen), and corresponding \"value\" (the count of times the number has been seen) locally. (The optimal data structure might not even need this value!)\n" ]
[ 0 ]
[]
[]
[ "python" ]
stackoverflow_0074673111_python.txt
Q: How to split a string but keep multiple delimiters with the original chunk Say my string is st = 'Walking happened at 8 am breakfast happened at 9am baseball happened at 12 pm lunch happened at 1pm' I would like to split on 'am' or 'pm', but I want those deliminters to be a part of the original chunk. So the desired result is splitlist = ['Walking happened at 8 am', 'breakfast happened at 9am', 'baseball happened at 12 pm', 'lunch happened at 1pm'] There are many solutions for keeping the delimiter, but keeping it as a separate item in the list like this one In Python, how do I split a string and keep the separators? A: You can use a lookbehind: import re splitlist = re.split(r'(?<=[ap]m)\s+', st) Output: ['Walking happened at 8 am', 'breakfast happened at 9am', 'baseball happened at 12 pm', 'lunch happened at 1pm'] If you want to ensure having a word boundary or a digit before am/pm (i.e not splitting after words such as "program"): import re splitlist = re.split(r'(?:(?<=\d[ap]m)|(?<=\b[ap]m))\s+', st) Example: ['Walking happened at 8 am', 'breakfast happened at 9am', 'baseball happened at 12 pm', 'beginning of program happened at 1pm']
How to split a string but keep multiple delimiters with the original chunk
Say my string is st = 'Walking happened at 8 am breakfast happened at 9am baseball happened at 12 pm lunch happened at 1pm' I would like to split on 'am' or 'pm', but I want those deliminters to be a part of the original chunk. So the desired result is splitlist = ['Walking happened at 8 am', 'breakfast happened at 9am', 'baseball happened at 12 pm', 'lunch happened at 1pm'] There are many solutions for keeping the delimiter, but keeping it as a separate item in the list like this one In Python, how do I split a string and keep the separators?
[ "You can use a lookbehind:\nimport re\n\nsplitlist = re.split(r'(?<=[ap]m)\\s+', st)\n\nOutput:\n['Walking happened at 8 am',\n 'breakfast happened at 9am',\n 'baseball happened at 12 pm',\n 'lunch happened at 1pm']\n\nIf you want to ensure having a word boundary or a digit before am/pm (i.e not splitting after words such as \"program\"):\nimport re\n\nsplitlist = re.split(r'(?:(?<=\\d[ap]m)|(?<=\\b[ap]m))\\s+', st)\n\nExample:\n['Walking happened at 8 am',\n 'breakfast happened at 9am',\n 'baseball happened at 12 pm',\n 'beginning of program happened at 1pm']\n\n" ]
[ 3 ]
[]
[]
[ "python" ]
stackoverflow_0074673121_python.txt
Q: How to extract all timestamps of badminton shot sound in an audio clip using Neural Networks? I am trying to find the instances in a source audio file taken from a badminton match where a shot was hit by either of the players. For the same purpose, I have marked the timestamps with positive (hit sounds) and negative (no hit sound: commentary/crowd sound etc) labels like so: shot_timestamps = [0,6.5,8, 11, 18.5, 23, 27, 29, 32, 37, 43.5, 47.5, 52, 55.5, 63, 66, 68, 72, 75, 79, 94.5, 96, 99, 105, 122, 115, 118.5, 122, 126, 130.5, 134, 140, 144, 147, 154, 158, 164, 174.5, 183, 186, 190, 199, 238, 250, 253, 261, 267, 269, 270, 274] shot_labels = ['no', 'yes', 'yes', 'yes', 'yes', 'yes', 'no', 'no', 'no', 'no', 'yes', 'yes', 'yes', 'yes', 'yes', 'no', 'no','no','no', 'no', 'yes', 'yes', 'no', 'no', 'yes', 'yes', 'yes', 'yes', 'yes', 'yes', 'yes', 'no', 'no', 'no', 'no', 'yes', 'no', 'yes', 'no', 'no', 'no', 'yes', 'no', 'yes', 'yes', 'no', 'no', 'yes', 'yes', 'no'] I have been taking 1 second windows around these timestamps like so: rate, source = wavfile.read(source) def get_audio_snippets(shot_timestamps): shot_snippets = [] # Collection of all audio snippets in the timestamps above for timestamp in shot_timestamps: start = math.ceil(timestamp*rate) end = math.ceil((timestamp + 1)*rate) if start >= source.shape[0]: start = source.shape[0] - 1 if end >= source.shape[0]: end = source.shape[0] - 1 shot_snippets.append(source[start:end]) return shot_snippets and converting that to spectrogram images for the model. The model doesn't seem to be learning anything with an accuracy of around 50%. What can I do to improve the model? Edit: The audio file: Google Drive The timestamps labels: Google Drive Code: Github These timestamps were made recently and haven't been used in the code above as I don't exactly know what window sizes to take for labelling purposes. The annotation file above has all the timestamps of hitting the shots. PS: Also added this on Data Science Stackexchange as recommended: https://datascience.stackexchange.com/q/116629/98765 A: To improve the model, some possible solutions are: Adjust the window size for the snippets you are creating. Increase the number of data points. Augment your existing data with additional data. Try different architectures, such as convolutional neural networks and recurrent neural networks. Try different feature extraction techniques, such as Mel-Frequency Cepstral Coefficients (MFCCs). Tune the hyperparameters of the model. Try different optimizers and learning rates. Experiment with different loss functions. A: Detecting when a particular sound happens is know as Sound Event Detection. There are a wide range of approaches to this topic, as it has been actively researched for many decades. Your existing solution, using correlation in the waveform domain with some template sounds is unlikely to work well as the amount of variation between shot sounds in a match is likely to be quite high. The recommended approach would be to first collect a small dataset. Say for example to take data from 20 different matches (preferably with different), and then annotate each short from time-periods, to get at least 50 shots from each match. Then you can apply supervised machine learning to learn a detector. Some level of feature engineering should be done to the audio, for example to transform it into a spectrogram. A: There are several things you can try to improve the performance of your model. 
Here are some suggestions: You may want to try using a larger or smaller window size when extracting the audio snippets. A window size of 1 second may not be the optimal size for your model, so you could try using a different size to see if it improves the model's performance. You could also try using different parameters when generating the spectrogram images. For example, you could try changing the size of the window or the overlap between windows to see if it improves the model's performance. You may also want to try preprocessing the audio data in other ways. For example, you could try applying a filter to the data to remove background noise, or you could try using a different feature extraction method, such as mel-frequency cepstral coefficients (MFCCs). In addition, you could try using a different model architecture or training algorithm. For example, you could try using a convolutional neural network (CNN) instead of a fully-connected network, or you could try using a different optimizer or learning rate. Finally, you may want to try using more data. If you have access to a larger dataset, you could try training your model on that data to see if it improves the model's performance. Overall, the key is to experiment with different approaches and see what works best for your particular problem. A: This is a challenging task as it requires a lot of complex audio processing. The first step is to extract the audio features from the audio clip. This can be done by using various techniques such as Fast Fourier Transform (FFT), Mel-Frequency Cepstral Coefficients (MFCC), and Wavelet Transform (WT). Once the features are extracted, they need to be fed into a neural network model such as Convolutional Neural Network (CNN) or Long Short Term Memory (LSTM) to detect the timestamps of badminton shot sound. The model can be trained over a dataset of audio clips with the timestamps of badminton shot sound. After the model is trained, it can be used for identifying the timestamps of badminton shot sound in the given audio clip. A: One possible way to improve the model's performance would be to increase the length of the audio snippets that you are using to train the model. Right now, you are using 1-second long snippets around each timestamp, which may not be enough to capture the characteristics of a hit sound. Increasing the length of the snippets to, for example, 2 or 3 seconds, might allow the model to learn more from the data and improve its accuracy. A: One thing you can try to improve your model's performance is to use a different audio feature representation. Instead of using a spectrogram image, you could try using a Mel-frequency cepstral coefficient (MFCC) representation of the audio snippets. MFCCs are commonly used in speech recognition and are known to be effective in capturing the characteristics of a sound. Another thing you can try is to use a different machine learning model. Instead of using a neural network, you could try using a support vector machine (SVM) or a random forest classifier. These models are known to be effective in classifying audio data. Additionally, you may want to consider augmenting your training data. This could involve adding noise to the audio snippets or applying other transformations to them to make the model more robust. Finally, you may want to experiment with different model hyperparameters such as the learning rate, the number of hidden layers, and the number of units in each layer. 
Finding the optimal values for these hyperparameters can often improve a model's performance. A: There are a few things you could try to improve the performance of your model. Here are a few ideas: Use a longer window size when generating the spectrogram images. You are currently using a 1-second window, which may not be enough to capture the full range of sounds that are present in a shot. You could try using a longer window, such as 2 or 3 seconds, to see if that improves the model's performance. Use a different type of spectrogram. The spectrogram you are currently using is a simple power spectrogram, which only shows the power of the different frequencies present in the audio. You could try using a more advanced type of spectrogram, such as a mel-frequency cepstral coefficients (MFCC) spectrogram, which is commonly used in speech recognition applications. This may provide more information for the model to learn from. Augment your dataset. You may be able to improve the model's performance by augmenting your dataset with additional data. For example, you could try adding additional shots with different types of hits, such as backhand shots or overhead shots, to the dataset. You could also try adding different types of background noise to the shots, such as crowd noise or commentary, to make the dataset more representative of real-world situations. Try a different model architecture. The model you are currently using may not be well-suited to the task at hand. You could try using a different model architecture, such as a convolutional neural network (CNN) or a recurrent neural network (RNN), to see if that performs better on the task. You could also try tuning the hyperparameters of the model, such as the learning rate and the number of layers, to see if that improves performance. Use transfer learning. If you have access to a pre-trained model that has been trained on a similar task, you may be able to improve performance by using transfer learning. This involves using the pre-trained model as the starting point for your own model, and then fine-tuning it on your own dataset. This can help the model learn faster and improve performance, since it will already have learned some relevant features from the pre-trained model. Hope this helps!
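Several answers above recommend MFCC features without showing code; a minimal sketch of turning each labelled 1-second window into a fixed-length MFCC vector with librosa (the file name "match.wav", n_mfcc=20 and the mean/std pooling are arbitrary choices made here, while shot_timestamps and shot_labels are the lists defined in the question):

import librosa
import numpy as np

def mfcc_for_timestamp(path, timestamp, duration=1.0, n_mfcc=20):
    # load only the labelled window instead of the whole match
    y, sr = librosa.load(path, sr=None, offset=timestamp, duration=duration)
    mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=n_mfcc)
    # pool over time so every window yields a vector of the same length
    return np.concatenate([mfcc.mean(axis=1), mfcc.std(axis=1)])

X = np.stack([mfcc_for_timestamp("match.wav", t) for t in shot_timestamps])
y = np.array([1 if label == 'yes' else 0 for label in shot_labels])

These vectors can then be fed to any of the classifiers mentioned above (an SVM, a random forest, or a small neural network).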
How to extract all timestamps of badminton shot sound in an audio clip using Neural Networks?
I am trying to find the instances in a source audio file taken from a badminton match where a shot was hit by either of the players. For the same purpose, I have marked the timestamps with positive (hit sounds) and negative (no hit sound: commentary/crowd sound etc) labels like so: shot_timestamps = [0,6.5,8, 11, 18.5, 23, 27, 29, 32, 37, 43.5, 47.5, 52, 55.5, 63, 66, 68, 72, 75, 79, 94.5, 96, 99, 105, 122, 115, 118.5, 122, 126, 130.5, 134, 140, 144, 147, 154, 158, 164, 174.5, 183, 186, 190, 199, 238, 250, 253, 261, 267, 269, 270, 274] shot_labels = ['no', 'yes', 'yes', 'yes', 'yes', 'yes', 'no', 'no', 'no', 'no', 'yes', 'yes', 'yes', 'yes', 'yes', 'no', 'no','no','no', 'no', 'yes', 'yes', 'no', 'no', 'yes', 'yes', 'yes', 'yes', 'yes', 'yes', 'yes', 'no', 'no', 'no', 'no', 'yes', 'no', 'yes', 'no', 'no', 'no', 'yes', 'no', 'yes', 'yes', 'no', 'no', 'yes', 'yes', 'no'] I have been taking 1 second windows around these timestamps like so: rate, source = wavfile.read(source) def get_audio_snippets(shot_timestamps): shot_snippets = [] # Collection of all audio snippets in the timestamps above for timestamp in shot_timestamps: start = math.ceil(timestamp*rate) end = math.ceil((timestamp + 1)*rate) if start >= source.shape[0]: start = source.shape[0] - 1 if end >= source.shape[0]: end = source.shape[0] - 1 shot_snippets.append(source[start:end]) return shot_snippets and converting that to spectrogram images for the model. The model doesn't seem to be learning anything with an accuracy of around 50%. What can I do to improve the model? Edit: The audio file: Google Drive The timestamps labels: Google Drive Code: Github These timestamps were made recently and haven't been used in the code above as I don't exactly know what window sizes to take for labelling purposes. The annotation file above has all the timestamps of hitting the shots. PS: Also added this on Data Science Stackexchange as recommended: https://datascience.stackexchange.com/q/116629/98765
[ "To improve the model, some possible solutions are:\n\nAdjust the window size for the snippets you are creating.\nIncrease the number of data points.\nAugment your existing data with additional data.\nTry different architectures, such as convolutional neural networks and recurrent neural networks.\nTry different feature extraction techniques, such as Mel-Frequency Cepstral Coefficients (MFCCs).\nTune the hyperparameters of the model.\nTry different optimizers and learning rates.\nExperiment with different loss functions.\n\n", "Detecting when a particular sound happens is know as Sound Event Detection. There are a wide range of approaches to this topic, as it has been actively researched for many decades.\nYour existing solution, using correlation in the waveform domain with some template sounds is unlikely to work well as the amount of variation between shot sounds in a match is likely to be quite high.\nThe recommended approach would be to first collect a small dataset. Say for example to take data from 20 different matches (preferably with different), and then annotate each short from time-periods, to get at least 50 shots from each match. Then you can apply supervised machine learning to learn a detector. Some level of feature engineering should be done to the audio, for example to transform it into a spectrogram.\n", "There are several things you can try to improve the performance of your model. Here are some suggestions:\n\nYou may want to try using a larger or smaller window size when extracting the audio snippets. A window size of 1 second may not be the optimal size for your model, so you could try using a different size to see if it improves the model's performance.\n\nYou could also try using different parameters when generating the spectrogram images. For example, you could try changing the size of the window or the overlap between windows to see if it improves the model's performance.\n\nYou may also want to try preprocessing the audio data in other ways. For example, you could try applying a filter to the data to remove background noise, or you could try using a different feature extraction method, such as mel-frequency cepstral coefficients (MFCCs).\n\nIn addition, you could try using a different model architecture or training algorithm. For example, you could try using a convolutional neural network (CNN) instead of a fully-connected network, or you could try using a different optimizer or learning rate.\n\nFinally, you may want to try using more data. If you have access to a larger dataset, you could try training your model on that data to see if it improves the model's performance.\n\n\nOverall, the key is to experiment with different approaches and see what works best for your particular problem.\n", "This is a challenging task as it requires a lot of complex audio processing. The first step is to extract the audio features from the audio clip. This can be done by using various techniques such as Fast Fourier Transform (FFT), Mel-Frequency Cepstral Coefficients (MFCC), and Wavelet Transform (WT).\nOnce the features are extracted, they need to be fed into a neural network model such as Convolutional Neural Network (CNN) or Long Short Term Memory (LSTM) to detect the timestamps of badminton shot sound. The model can be trained over a dataset of audio clips with the timestamps of badminton shot sound. 
After the model is trained, it can be used for identifying the timestamps of badminton shot sound in the given audio clip.\n", "One possible way to improve the model's performance would be to increase the length of the audio snippets that you are using to train the model. Right now, you are using 1-second long snippets around each timestamp, which may not be enough to capture the characteristics of a hit sound. Increasing the length of the snippets to, for example, 2 or 3 seconds, might allow the model to learn more from the data and improve its accuracy.\n", "One thing you can try to improve your model's performance is to use a different audio feature representation. Instead of using a spectrogram image, you could try using a Mel-frequency cepstral coefficient (MFCC) representation of the audio snippets. MFCCs are commonly used in speech recognition and are known to be effective in capturing the characteristics of a sound.\nAnother thing you can try is to use a different machine learning model. Instead of using a neural network, you could try using a support vector machine (SVM) or a random forest classifier. These models are known to be effective in classifying audio data.\nAdditionally, you may want to consider augmenting your training data. This could involve adding noise to the audio snippets or applying other transformations to them to make the model more robust.\nFinally, you may want to experiment with different model hyperparameters such as the learning rate, the number of hidden layers, and the number of units in each layer. Finding the optimal values for these hyperparameters can often improve a model's performance.\n", "There are a few things you could try to improve the performance of your model. Here are a few ideas:\n\nUse a longer window size when generating the spectrogram images. You are currently using a 1-second window, which may not be enough to capture the full range of sounds that are present in a shot. You could try using a longer window, such as 2 or 3 seconds, to see if that improves the model's performance.\n\nUse a different type of spectrogram. The spectrogram you are currently using is a simple power spectrogram, which only shows the power of the different frequencies present in the audio. You could try using a more advanced type of spectrogram, such as a mel-frequency cepstral coefficients (MFCC) spectrogram, which is commonly used in speech recognition applications. This may provide more information for the model to learn from.\n\nAugment your dataset. You may be able to improve the model's performance by augmenting your dataset with additional data. For example, you could try adding additional shots with different types of hits, such as backhand shots or overhead shots, to the dataset. You could also try adding different types of background noise to the shots, such as crowd noise or commentary, to make the dataset more representative of real-world situations.\n\nTry a different model architecture. The model you are currently using may not be well-suited to the task at hand. You could try using a different model architecture, such as a convolutional neural network (CNN) or a recurrent neural network (RNN), to see if that performs better on the task. You could also try tuning the hyperparameters of the model, such as the learning rate and the number of layers, to see if that improves performance.\n\nUse transfer learning. 
If you have access to a pre-trained model that has been trained on a similar task, you may be able to improve performance by using transfer learning. This involves using the pre-trained model as the starting point for your own model, and then fine-tuning it on your own dataset. This can help the model learn faster and improve performance, since it will already have learned some relevant features from the pre-trained model.\n\n\nHope this helps!\n" ]
[ 3, 2, 0, 0, 0, 0, 0 ]
[]
[]
[ "audio", "deep_learning", "librosa", "machine_learning", "python" ]
stackoverflow_0074471111_audio_deep_learning_librosa_machine_learning_python.txt
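Several of the answers above suggest MFCC features computed over short windows around each annotated shot. A minimal sketch of that feature-extraction step, assuming librosa is available; the audio path and shot timestamps below are placeholders rather than values from the question:

import numpy as np
import librosa

# Assumed inputs: one match recording and a list of annotated shot times in seconds.
audio_path = "match.wav"          # placeholder path
shot_times = [12.4, 15.9, 21.3]   # placeholder annotations

y, sr = librosa.load(audio_path, sr=None)
window = 1.0   # snippet length in seconds, worth tuning as the answers note

def snippet_mfcc(center_s):
    # Cut a window centred on the timestamp and summarise it with MFCCs.
    start = max(0, int((center_s - window / 2) * sr))
    end = min(len(y), start + int(window * sr))
    clip = y[start:end]
    mfcc = librosa.feature.mfcc(y=clip, sr=sr, n_mfcc=20)
    return mfcc.mean(axis=1)   # one 20-dimensional feature vector per snippet

features = np.stack([snippet_mfcc(t) for t in shot_times])
print(features.shape)   # (number of snippets, 20)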
Q: how to avoid a type error when finding the median of a list I am working on a project to get the mean, median and mode of a list. I have it almost all down but the median function is giving me the following error: return (list[midIndex]+list[midIndex-1])/2.0 TypeError: list indices must be integers or slices, not float def median(list): if len(list) == 0: return 0 list.sort() midIndex = len(list)/2 if len(list)%2 == 0: return (list[midIndex]+list[midIndex-1])/2 else: return list[midIndex] def mean(list): if len(list) == 0: return 0 list.sort() total = 0 for number in list: total += number return total / len(list) def mode(list): numberDictionary = {} for digit in list: number = numberDictionary.get(digit, None) if number == None: numberDictionary[digit] = 1 else: numberDictionary[digit] = number+1 maxValue = max(numberDictionary.values()) modeList = [] for key in numberDictionary: if numberDictionary[key] == maxValue: modeList.append(key) return modeList def main(): lyst = [3, 1, 7, 1, 4, 10] print("List:", lyst) print("Mode", mode(lyst)) print("Median:", median(lyst)) print("Mean:", mean(lyst)) main() The answer should be 3.5. A: Try this to see that len division by two will result in a float type and can't be used as an index (python error message is clear). list = {1,2,3,4} midIndex = len(list)/2 print(f"midIndex = {midIndex}") print(type(midIndex)) Output: midIndex = 2.0 <class 'float'>
how to avoid a type error when finding the median of a list
I am working on a project to get the mean, median and mode of a list. I have it almost all down but the median function is giving me the following error: return (list[midIndex]+list[midIndex-1])/2.0 TypeError: list indices must be integers or slices, not float def median(list): if len(list) == 0: return 0 list.sort() midIndex = len(list)/2 if len(list)%2 == 0: return (list[midIndex]+list[midIndex-1])/2 else: return list[midIndex] def mean(list): if len(list) == 0: return 0 list.sort() total = 0 for number in list: total += number return total / len(list) def mode(list): numberDictionary = {} for digit in list: number = numberDictionary.get(digit, None) if number == None: numberDictionary[digit] = 1 else: numberDictionary[digit] = number+1 maxValue = max(numberDictionary.values()) modeList = [] for key in numberDictionary: if numberDictionary[key] == maxValue: modeList.append(key) return modeList def main(): lyst = [3, 1, 7, 1, 4, 10] print("List:", lyst) print("Mode", mode(lyst)) print("Median:", median(lyst)) print("Mean:", mean(lyst)) main() The answer should be 3.5.
[ "Try this to see that len division by two will result in a float type and can't be used as an index (python error message is clear).\nlist = {1,2,3,4}\nmidIndex = len(list)/2\nprint(f\"midIndex = {midIndex}\")\nprint(type(midIndex))\n\nOutput:\nmidIndex = 2.0\n<class 'float'>\n\n" ]
[ 0 ]
[]
[]
[ "python" ]
stackoverflow_0074673114_python.txt
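For reference, the underlying fix pointed at by the error message is to compute the middle index with integer (floor) division so it stays usable as a list index; a small corrected sketch:

def median(values):
    if len(values) == 0:
        return 0
    values = sorted(values)    # sort a copy instead of mutating the caller's list
    mid = len(values) // 2     # // keeps the index an int
    if len(values) % 2 == 0:
        return (values[mid] + values[mid - 1]) / 2
    return values[mid]

print(median([3, 1, 7, 1, 4, 10]))   # 3.5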
Q: How to run a function periodically with Flask and Celery? I have a flask app that roughly looks like this: app = Flask(__name__) @app.route('/',methods=['POST']) def foo(): data = json.loads(request.data) # do some stuff return "OK" Now in addition I would like to run a function every ten seconds from that script. I don't want to use sleep for that. I have the following celery script in addition: from celery import Celery from datetime import timedelta celery = Celery('__name__') CELERYBEAT_SCHEDULE = { 'add-every-30-seconds': { 'task': 'tasks.add', 'schedule': timedelta(seconds=10) }, } @celery.task(name='tasks.add') def hello(): app.logger.info('run my function') The script works fine, but the logger.info is not executed. What am I missing? A: Do you have Celery worker and Celery beat running? Scheduled tasks are handled by beat, which queues the task mentioned when appropriate. Worker then actually crunches the numbers and executes your task. celery worker --app myproject--loglevel=info celery beat --app myproject Your task however looks like it's calling the Flask app's logger. When using the worker, you probably don't have the Flask application around (since it's in another process). Try using a normal Python logger for the demo task. A: Well, celery beat can be embedded in regular celery worker as well, with -B parameter in your command. celery -A --app myproject --loglevel=info -B It is only recommended for the development environment. For production, you should run beat and celery workers separately as documentation mentions. Otherwise, your periodic task will run more than one time. A: A celery task by default will run outside of the Flask app context and thus it won't have access to Flask app instance. However it's very easy to create the Flask app context while running a task by using app_context method of the Flask app object. app = Flask(__name__) celery = Celery(app.name) @celery.task def task(): with app.app_context(): app.logger.info('running my task') This article by Miguel Grinberg is a very good place to get a primer on the basics of using Celery in a Flask application. A: First install the redis on machine and check it is running or not. install the python dependencies celery redis flask folder structure project app init.py task.py main.py write task.py from celery import Celery from celery.schedules import crontab from app import app from app.scrap import product_data from celery.utils.log import get_task_logger logger = get_task_logger(__name__) def make_celery(app): #Celery configuration app.config['CELERY_BROKER_URL'] = 'redis://127.0.0.1:6379' app.config['CELERY_RESULT_BACKEND'] = 'db+postgresql://user:[email protected]:5432/mydatabase' app.config['CELERY_RESULT_EXTENDED']=True app.config['CELERYBEAT_SCHEDULE'] = { # Executes every minute 'periodic_task-every-minute': { 'task': 'periodic_task', 'schedule': crontab(minute="*") } } celery = Celery(app.import_name, broker=app.config['CELERY_BROKER_URL']) celery.conf.update(app.config) TaskBase = celery.Task class ContextTask(TaskBase): abstract = True def __call__(self, *args, **kwargs): with app.app_context(): return TaskBase.__call__(self, *args, **kwargs) celery.Task = ContextTask return celery celery = make_celery(app) @celery.task(name="periodic_task",bind=True) def testing(self): file1 = open("../myfile.txt", "a") # writing newline character file1.write("\n") file1.write("Today") #faik print("Running") self.request.task_name = "state" logger.info("Hello! 
from periodic task") return "Done" write init.py from flask import Flask, Blueprint,request from flask_restx import Api,Resource,fields from flask_sqlalchemy import SQLAlchemy import redis from rq import Queue app = Flask(__name__) app.config['SECRET_KEY']='7c09ebc8801a0ce8fb82b3d2ec51b4db' app.config['SQLALCHEMY_DATABASE_URI']='sqlite:///site.db' db=SQLAlchemy(app) command to run celery beat and worker celery -A app.task.celery beat celery -A app.task.celery worker --loglevel=info
How to run a function periodically with Flask and Celery?
I have a flask app that roughly looks like this: app = Flask(__name__) @app.route('/',methods=['POST']) def foo(): data = json.loads(request.data) # do some stuff return "OK" Now in addition I would like to run a function every ten seconds from that script. I don't want to use sleep for that. I have the following celery script in addition: from celery import Celery from datetime import timedelta celery = Celery('__name__') CELERYBEAT_SCHEDULE = { 'add-every-30-seconds': { 'task': 'tasks.add', 'schedule': timedelta(seconds=10) }, } @celery.task(name='tasks.add') def hello(): app.logger.info('run my function') The script works fine, but the logger.info is not executed. What am I missing?
[ "Do you have Celery worker and Celery beat running? Scheduled tasks are handled by beat, which queues the task mentioned when appropriate. Worker then actually crunches the numbers and executes your task.\ncelery worker --app myproject--loglevel=info\ncelery beat --app myproject\n\nYour task however looks like it's calling the Flask app's logger. When using the worker, you probably don't have the Flask application around (since it's in another process). Try using a normal Python logger for the demo task.\n", "Well, celery beat can be embedded in regular celery worker as well, with -B parameter in your command.\ncelery -A --app myproject --loglevel=info -B \nIt is only recommended for the development environment. For production, you should run beat and celery workers separately as documentation mentions. Otherwise, your periodic task will run more than one time.\n", "A celery task by default will run outside of the Flask app context and thus it won't have access to Flask app instance. However it's very easy to create the Flask app context while running a task by using app_context method of the Flask app object.\napp = Flask(__name__)\ncelery = Celery(app.name)\n\[email protected]\ndef task():\n with app.app_context():\n app.logger.info('running my task')\n\nThis article by Miguel Grinberg is a very good place to get a primer on the basics of using Celery in a Flask application.\n", "First install the redis on machine and check it is running or not.\ninstall the python dependencies\n\ncelery\nredis\nflask\n\nfolder structure\n\nproject\n\napp\n\ninit.py\ntask.py\n\n\nmain.py\n\n\n\nwrite task.py\nfrom celery import Celery\nfrom celery.schedules import crontab\nfrom app import app\nfrom app.scrap import product_data\nfrom celery.utils.log import get_task_logger\nlogger = get_task_logger(__name__)\ndef make_celery(app):\n #Celery configuration\n app.config['CELERY_BROKER_URL'] = 'redis://127.0.0.1:6379'\n app.config['CELERY_RESULT_BACKEND'] = 'db+postgresql://user:[email protected]:5432/mydatabase'\n app.config['CELERY_RESULT_EXTENDED']=True\n app.config['CELERYBEAT_SCHEDULE'] = {\n # Executes every minute\n 'periodic_task-every-minute': {\n 'task': 'periodic_task',\n 'schedule': crontab(minute=\"*\")\n }\n }\n\n\n celery = Celery(app.import_name, broker=app.config['CELERY_BROKER_URL'])\n celery.conf.update(app.config)\n TaskBase = celery.Task\n class ContextTask(TaskBase):\n abstract = True\n def __call__(self, *args, **kwargs):\n with app.app_context():\n return TaskBase.__call__(self, *args, **kwargs)\n celery.Task = ContextTask\n return celery\ncelery = make_celery(app)\n\[email protected](name=\"periodic_task\",bind=True)\ndef testing(self):\n file1 = open(\"../myfile.txt\", \"a\")\n\n # writing newline character\n file1.write(\"\\n\")\n file1.write(\"Today\")\n #faik\n print(\"Running\")\n self.request.task_name = \"state\"\n logger.info(\"Hello! from periodic task\")\n return \"Done\"\n\nwrite init.py\nfrom flask import Flask, Blueprint,request\nfrom flask_restx import Api,Resource,fields\nfrom flask_sqlalchemy import SQLAlchemy\nimport redis\nfrom rq import Queue\n\napp = Flask(__name__)\napp.config['SECRET_KEY']='7c09ebc8801a0ce8fb82b3d2ec51b4db'\napp.config['SQLALCHEMY_DATABASE_URI']='sqlite:///site.db'\ndb=SQLAlchemy(app)\n\ncommand to run celery beat and worker\ncelery -A app.task.celery beat\ncelery -A app.task.celery worker --loglevel=info\n\n" ]
[ 11, 5, 4, 0 ]
[]
[]
[ "celery", "flask", "python" ]
stackoverflow_0028761750_celery_flask_python.txt
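Pulling the answers above together, a minimal sketch of one way to wire this up; the broker URL and module name are assumptions, and the schedule uses the modern beat_schedule setting rather than the old CELERYBEAT_SCHEDULE key:

from datetime import timedelta
from celery import Celery
from flask import Flask

app = Flask(__name__)
celery = Celery(app.name, broker="redis://localhost:6379/0")   # assumed broker
celery.conf.beat_schedule = {
    "hello-every-10s": {"task": "tasks.hello", "schedule": timedelta(seconds=10)},
}

@celery.task(name="tasks.hello")
def hello():
    with app.app_context():            # makes app.logger usable inside the worker process
        app.logger.info("run my function")

During development the beat scheduler can be embedded in the worker with celery -A yourmodule.celery worker -B --loglevel=info, where yourmodule is a placeholder for the file defining celery above; in production, run the worker and beat separately, as the answers note.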
Q: @ changing to %40 I am using sqlalchemy and creating the url using url = url.URL( drivername, host, username, password, database) this does not work url = url.URL( drivername='mysql+pymysql', host='abc.com', username='app', password=u'56f@;', database='app') logging.info(f'url : {url}') gives mysql+pymysql://app:6f%40;@abc.com/app url = create_engine(url) results = url.execute(sql_query).fetchall() Password contains an ampersand '@' which is getting converted to %40. I do not want the conversion to take place. How can I avoid it. A: To avoid encoding of special characters in the password when creating a SQLAlchemy engine URL, you can use the quote() method from the urllib.parse module to encode the password before passing it to the url.URL() method. import sqlalchemy as sa from urllib.parse import quote # Create the URL object with the encoded password engine_url = sa.engine.url.URL( drivername, host, username, quote(password), database, ) This will encode any special characters in the password, such as the ampersand &, so that they are not interpreted as URL syntax by SQLAlchemy.
@ changing to %40
I am using sqlalchemy and creating the url using url = url.URL( drivername, host, username, password, database) this does not work url = url.URL( drivername='mysql+pymysql', host='abc.com', username='app', password=u'56f@;', database='app') logging.info(f'url : {url}') gives mysql+pymysql://app:6f%40;@abc.com/app url = create_engine(url) results = url.execute(sql_query).fetchall() Password contains an ampersand '@' which is getting converted to %40. I do not want the conversion to take place. How can I avoid it.
[ "To avoid encoding of special characters in the password when creating a SQLAlchemy engine URL, you can use the quote() method from the urllib.parse module to encode the password before passing it to the url.URL() method.\nimport sqlalchemy as sa\nfrom urllib.parse import quote\n\n# Create the URL object with the encoded password\nengine_url = sa.engine.url.URL(\n drivername,\n host,\n username,\n quote(password),\n database,\n)\n\nThis will encode any special characters in the password, such as the ampersand &, so that they are not interpreted as URL syntax by SQLAlchemy.\n" ]
[ 0 ]
[]
[]
[ "database", "python", "python_unicode", "sqlalchemy", "unicode" ]
stackoverflow_0074673195_database_python_python_unicode_sqlalchemy_unicode.txt
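A related sketch, assuming SQLAlchemy 1.4 or newer: URL.create() stores the raw password, so the %40 only appears in the string rendering, and the URL object can be passed straight to create_engine; the credentials below are the placeholders from the question:

from sqlalchemy import create_engine
from sqlalchemy.engine import URL

url = URL.create(
    drivername="mysql+pymysql",
    username="app",
    password="56f@;",   # raw password containing '@'
    host="abc.com",
    database="app",
)

print(url)                   # rendered with the '@' percent-encoded as %40, for display only
print(url.password)          # '56f@;' - the raw value is kept on the object
engine = create_engine(url)  # the driver receives the real password, not the encoded text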
Q: How to implement a numpy equation in the call of a tensorflow layer for a tensorflow model (Cannot convert a symbolic tf.Tensor to a numpy array) I have this layer class in tensorflow where i want to implement a specific equation in numpy for the return in the call function. I have this following custom layer: class PhysicalLayer(keras.layers.Layer): def __init__(self, units=32): super(PhysicalLayer, self).__init__() self.units = units def build(self, input_shape): self.w = self.add_weight( shape=(input_shape[-1], self.units), initializer="random_normal", trainable=True, ) self.b = self.add_weight( shape=(self.units,), initializer="random_normal", trainable=True ) def call(self, inputs): rotationSpeedSquare = tf.math.square(rotationSpeed) maximumVibration = tf.convert_to_tensor(np.max(inputs)) stiff = rotationSpeedSquare/maximumVibration stiff.astype('float32') return tf.matmul(stiff, self.w) + self.b This layer is then implement in a model in the following way: class model(tf.keras.Model): def __init__(self, num_classes=50): super(model, self).__init__() self.dense1 = tf.keras.layers.Dense(num_classes, activation=tf.nn.relu) self.physical = PhysicalLayer() self.dense2 = tf.keras.layers.Dense(64, activation=tf.nn.relu) self.dense3 = tf.keras.layers.Dense(32, activation=tf.nn.relu) self.dense4 = tf.keras.layers.Dense(1, activation=tf.nn.relu) def call(self, inputs): x = self.dense1(inputs) x = self.physical(x) x = self.dense2(x) x = self.dense3(x) return self.dense4(x) One of my first concern is if I'm doing this model class correctly as i just learnt how to do it. By trying to fit this model with the training set (which are numpy array, dtype = float32 and size is (72367, 50)) model = model() model.compile(optimizer='adam', loss='mae', metrics=[tf.keras.metrics.RootMeanSquaredError()]) model.fit(a, b, batch_size=32, epochs=2, verbose=2) I obtain the following error: NotImplementedError: Cannot convert a symbolic tf.Tensor (model_18/dense_72/Relu:0) to a numpy array. This error may indicate that you're trying to pass a Tensor to a NumPy call, which is not supported. Thanks A: Use tf.math.reduce_max to get the maximum of a tensor: def call(self, inputs): rotationSpeedSquare = tf.math.square(rotationSpeed) maximumVibration = tf.math.reduce_max(inputs, axis=1, keepdims=True) stiff = rotationSpeedSquare / maximumVibration return tf.matmul(stiff, self.w) + self.b
How to implement a numpy equation in the call of a tensorflow layer for a tensorflow model (Cannot convert a symbolic tf.Tensor to a numpy array)
I have this layer class in tensorflow where i want to implement a specific equation in numpy for the return in the call function. I have this following custom layer: class PhysicalLayer(keras.layers.Layer): def __init__(self, units=32): super(PhysicalLayer, self).__init__() self.units = units def build(self, input_shape): self.w = self.add_weight( shape=(input_shape[-1], self.units), initializer="random_normal", trainable=True, ) self.b = self.add_weight( shape=(self.units,), initializer="random_normal", trainable=True ) def call(self, inputs): rotationSpeedSquare = tf.math.square(rotationSpeed) maximumVibration = tf.convert_to_tensor(np.max(inputs)) stiff = rotationSpeedSquare/maximumVibration stiff.astype('float32') return tf.matmul(stiff, self.w) + self.b This layer is then implement in a model in the following way: class model(tf.keras.Model): def __init__(self, num_classes=50): super(model, self).__init__() self.dense1 = tf.keras.layers.Dense(num_classes, activation=tf.nn.relu) self.physical = PhysicalLayer() self.dense2 = tf.keras.layers.Dense(64, activation=tf.nn.relu) self.dense3 = tf.keras.layers.Dense(32, activation=tf.nn.relu) self.dense4 = tf.keras.layers.Dense(1, activation=tf.nn.relu) def call(self, inputs): x = self.dense1(inputs) x = self.physical(x) x = self.dense2(x) x = self.dense3(x) return self.dense4(x) One of my first concern is if I'm doing this model class correctly as i just learnt how to do it. By trying to fit this model with the training set (which are numpy array, dtype = float32 and size is (72367, 50)) model = model() model.compile(optimizer='adam', loss='mae', metrics=[tf.keras.metrics.RootMeanSquaredError()]) model.fit(a, b, batch_size=32, epochs=2, verbose=2) I obtain the following error: NotImplementedError: Cannot convert a symbolic tf.Tensor (model_18/dense_72/Relu:0) to a numpy array. This error may indicate that you're trying to pass a Tensor to a NumPy call, which is not supported. Thanks
[ "Use tf.math.reduce_max to get the maximum of a tensor:\n def call(self, inputs):\n rotationSpeedSquare = tf.math.square(rotationSpeed)\n maximumVibration = tf.math.reduce_max(inputs, axis=1, keepdims=True)\n\n stiff = rotationSpeedSquare / maximumVibration\n return tf.matmul(stiff, self.w) + self.b\n\n" ]
[ 0 ]
[]
[]
[ "keras", "layer", "python", "tensorflow" ]
stackoverflow_0074670055_keras_layer_python_tensorflow.txt
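A quick standalone check of that replacement, outside any model, showing that tf.math.reduce_max keeps everything as tensors where np.max would fail on symbolic inputs:

import tensorflow as tf

x = tf.constant([[1.0, 5.0, 3.0],
                 [4.0, 2.0, 6.0]])

row_max = tf.math.reduce_max(x, axis=1, keepdims=True)   # shape (2, 1), stays a tensor
print(row_max.numpy())         # [[5.] [6.]]
print((x / row_max).numpy())   # each row divided by its own maximum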
Q: running a test script for multiple URLs in multiple browsers (local) in selenium python I have a test script that I want to be run for multiple URLs on multiple browsers (Chrome and Firefox) locally on my machine. Every browser has to open all the URLs for the test script. I have run the test script for multiple URLs, but I'm confused about how to do it for multiple browsers. I have checked stuff online but all of them doing it remotely. my test script is below: import time from selenium import webdriver from selenium.webdriver.chrome.options import Options Driver = webdriver.Chrome() def localitems() : local_storage = Driver.execute_script( \ "var ls = window.localStorage, items = {}; " \ "for (var i = 0, k; i < ls.length; ++i) " \ " items[k = ls.key(i)] = ls.getItem(k);"\ "return items; ") return local_storage; def sessionitems() : session_storage = Driver.execute_script( \ "var ls = window.sessionStorage, items = {}; " \ "for (var i = 0, k; i < ls.length; ++i) " \ " items[k = ls.key(i)] = ls.getItem(k);"\ "return items; ") return session_storage; sites = [ "http://www.github.com", "https://tribune.com.pk" ] for index, site in enumerate(sites) print(index,site) Driver.get(site) time.sleep(5) print('localStorage', localitems()) print('sessionStorage', sessionitems()) Driver.quit() If anyone could help me with this, would be thankful. A: Create a list with the drivers and then execute your script in the for loop: drivers = [webdriver.Chrome(), webdriver.Firefox()] for Driver in drivers: def localitems(): local_storage = Driver.execute_script( \ "var ls = window.localStorage, items = {}; " \ "for (var i = 0, k; i < ls.length; ++i) " \ " items[k = ls.key(i)] = ls.getItem(k);" \ "return items; ") return local_storage; def sessionitems(): session_storage = Driver.execute_script( \ "var ls = window.sessionStorage, items = {}; " \ "for (var i = 0, k; i < ls.length; ++i) " \ " items[k = ls.key(i)] = ls.getItem(k);" \ "return items; ") return session_storage; sites = [ "http://www.github.com", "https://tribune.com.pk" ] for index, site in enumerate(sites) print(index, site) Driver.get(site) time.sleep(5) print('localStorage', localitems()) print('sessionStorage', sessionitems()) Driver.quit() Also note that there is a convention in Python (PEP-8). According to it, variable name should be lowercase. So it's better to use driver instead of Driver
running a test script for multiple URLs in multiple browsers (local) in selenium python
I have a test script that I want to be run for multiple URLs on multiple browsers (Chrome and Firefox) locally on my machine. Every browser has to open all the URLs for the test script. I have run the test script for multiple URLs, but I'm confused about how to do it for multiple browsers. I have checked stuff online but all of them doing it remotely. my test script is below: import time from selenium import webdriver from selenium.webdriver.chrome.options import Options Driver = webdriver.Chrome() def localitems() : local_storage = Driver.execute_script( \ "var ls = window.localStorage, items = {}; " \ "for (var i = 0, k; i < ls.length; ++i) " \ " items[k = ls.key(i)] = ls.getItem(k);"\ "return items; ") return local_storage; def sessionitems() : session_storage = Driver.execute_script( \ "var ls = window.sessionStorage, items = {}; " \ "for (var i = 0, k; i < ls.length; ++i) " \ " items[k = ls.key(i)] = ls.getItem(k);"\ "return items; ") return session_storage; sites = [ "http://www.github.com", "https://tribune.com.pk" ] for index, site in enumerate(sites) print(index,site) Driver.get(site) time.sleep(5) print('localStorage', localitems()) print('sessionStorage', sessionitems()) Driver.quit() If anyone could help me with this, would be thankful.
[ "Create a list with the drivers and then execute your script in the for loop:\ndrivers = [webdriver.Chrome(), webdriver.Firefox()]\n\nfor Driver in drivers:\n def localitems():\n local_storage = Driver.execute_script( \\\n \"var ls = window.localStorage, items = {}; \" \\\n \"for (var i = 0, k; i < ls.length; ++i) \" \\\n \" items[k = ls.key(i)] = ls.getItem(k);\" \\\n \"return items; \")\n return local_storage;\n\n\n def sessionitems():\n session_storage = Driver.execute_script( \\\n \"var ls = window.sessionStorage, items = {}; \" \\\n \"for (var i = 0, k; i < ls.length; ++i) \" \\\n \" items[k = ls.key(i)] = ls.getItem(k);\" \\\n \"return items; \")\n return session_storage;\n\n\n sites = [\n \"http://www.github.com\",\n \"https://tribune.com.pk\"\n ]\n\n for index, site in enumerate(sites)\n print(index, site)\n Driver.get(site)\n time.sleep(5)\n print('localStorage', localitems())\n print('sessionStorage', sessionitems())\n Driver.quit()\n\n\nAlso note that there is a convention in Python (PEP-8). According to it, variable name should be lowercase. So it's better to use driver instead of Driver\n" ]
[ 0 ]
[]
[]
[ "browser_automation", "cross_browser", "python", "selenium", "selenium_webdriver" ]
stackoverflow_0074672023_browser_automation_cross_browser_python_selenium_selenium_webdriver.txt
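A slightly leaner variant of the accepted idea, defining the helpers once and passing the driver in instead of redefining them inside the loop; it assumes chromedriver and geckodriver are on the PATH:

import time
from selenium import webdriver

SITES = ["http://www.github.com", "https://tribune.com.pk"]

def local_items(driver):
    return driver.execute_script(
        "var ls = window.localStorage, items = {};"
        "for (var i = 0, k; i < ls.length; ++i) items[k = ls.key(i)] = ls.getItem(k);"
        "return items;")

def session_items(driver):
    return driver.execute_script(
        "var ls = window.sessionStorage, items = {};"
        "for (var i = 0, k; i < ls.length; ++i) items[k = ls.key(i)] = ls.getItem(k);"
        "return items;")

for make_driver in (webdriver.Chrome, webdriver.Firefox):
    driver = make_driver()           # one fresh browser at a time
    for index, site in enumerate(SITES):
        driver.get(site)
        time.sleep(5)
        print(index, site)
        print("localStorage", local_items(driver))
        print("sessionStorage", session_items(driver))
    driver.quit()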
Q: Lists of numbers have the same data but a different sum I have two lists of numbers. After comparing them, they are the same, but their sums are different. You can get the script here: https://mega.nz/file/dHgHEQQA#9k9s86hgGH_vWrcE8J6ixYdu3GYkfwtw0V0IBvuhd4o Am I comparing them wrong, or what is the problem? A: Check the length of the list. e appears to be having 694 elements and r appears to be having 693 elements. so, zip aggregates only 693 elements. Hence the sum are different. print(len(e), len(r), len([x for x in zip(e, r)])) # 694 693 693
Lists of numbers have the same data but a different sum
I have two lists of numbers. After comparing them, they are the same, but their sums are different. You can get the script here: https://mega.nz/file/dHgHEQQA#9k9s86hgGH_vWrcE8J6ixYdu3GYkfwtw0V0IBvuhd4o Am I comparing them wrong, or what is the problem?
[ "Check the length of the list. e appears to be having 694 elements and r appears to be having 693 elements. so, zip aggregates only 693 elements. Hence the sum are different.\nprint(len(e), len(r), len([x for x in zip(e, r)]))\n# 694 693 693\n\n" ]
[ 0 ]
[]
[]
[ "comparison", "list", "python" ]
stackoverflow_0074673224_comparison_list_python.txt
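The behaviour behind that answer is easy to reproduce in isolation: zip stops at the shorter input, while itertools.zip_longest or an explicit length check makes the mismatch visible:

from itertools import zip_longest

e = [1, 2, 3, 4]   # 694 elements in the real script; 4 here for illustration
r = [1, 2, 3]      # one element shorter

print(len(list(zip(e, r))))      # 3: zip silently drops the extra element of e
print(list(zip_longest(e, r)))   # [(1, 1), (2, 2), (3, 3), (4, None)]

if len(e) != len(r):
    print("lists differ in length:", len(e), len(r))   # fail fast instead of comparing silently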
Q: glue jupyter notebook locally instead of labs? docker run -itd -p 8888:8888 -p 4040:4040 --name glue_jupyter amazon/aws-glue-libs:glue_libs_2.0.0_image_01 /home/glue_user/jupyter/jupyter_start.sh results in i'm able to open 127.0.0.1:8888 and it redirects to jupyter labs How do i go to jupyter notebook instead? should i bash instead and then jupyter notebook from there? unsure. A: I tried the following command with glue image for 3.0, and it does takes me to the jupyter labs, with a console prompt to choose python/pyspark/etc. notebooks. docker run -it -p 8888:8888 -p 4040:4040 -e DISABLE_SSL="true" --name glue_jupyter amazon/aws-glue-libs:glue_libs_3.0.0_image_01 /home/glue_user/jupyter/jupyter_start.sh In the above command, I have removed "d", so that I can see the logs in the console as the docker runs. Choose Glue Spark Local (PySpark) under Notebook. You can start developing code in the interactive Jupyter notebook UI. A notebook with sample code showing spark versions Please refer to this AWS document (Jupyter Lab section) for reference:https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-libraries.html
glue jupyter notebook locally instead of labs?
docker run -itd -p 8888:8888 -p 4040:4040 --name glue_jupyter amazon/aws-glue-libs:glue_libs_2.0.0_image_01 /home/glue_user/jupyter/jupyter_start.sh results in i'm able to open 127.0.0.1:8888 and it redirects to jupyter labs How do i go to jupyter notebook instead? should i bash instead and then jupyter notebook from there? unsure.
[ "I tried the following command with glue image for 3.0, and it does takes me to the jupyter labs, with a console prompt to choose python/pyspark/etc. notebooks.\ndocker run -it -p 8888:8888 -p 4040:4040 -e DISABLE_SSL=\"true\" --name glue_jupyter amazon/aws-glue-libs:glue_libs_3.0.0_image_01 /home/glue_user/jupyter/jupyter_start.sh\n\nIn the above command, I have removed \"d\", so that I can see the logs in the console as the docker runs.\n\nChoose Glue Spark Local (PySpark) under Notebook. You can start developing code in the interactive Jupyter notebook UI.\nA notebook with sample code showing spark versions\n\nPlease refer to this AWS document (Jupyter Lab section) for reference:https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-libraries.html\n" ]
[ 0 ]
[]
[]
[ "aws_glue", "docker", "jupyter_notebook", "pyspark", "python" ]
stackoverflow_0074663072_aws_glue_docker_jupyter_notebook_pyspark_python.txt
Q: Understanding how the "is" operator works in Python for the result of a function For example we have this code. x = 1 y = 1 print(x is y) # TRUE print(id(x), id(y)) y = pow(10, 30, 10**30-1) # 1 print(type(y)) print(x, y, x is y) # FALSE It returns: True 140516304938720 140516304938720 <class 'int'> 1 1 False The last result is False. Please help me understand why this is happening. The result of the function is 1; isn't that a literal, which Python caches? If we change it to y = pow(1, 10) it will return "True". A: The "is" operator checks whether two items are the same object. In your example it returns False because x is not the same object as y, even though they have the same content. for example: Here the x and y variables have the same content, but they are not the same object! x = ["apple", "banana"] y = ["apple", "banana"] print(x is y) #False print(x == y) #True To understand more, I suggest you check this link. https://www.w3schools.com/python/python_operators.asp
Understanding how the "is" operator works in Python for the result of a function
For example we have this code. x = 1 y = 1 print(x is y) # TRUE print(id(x), id(y)) y = pow(10, 30, 10**30-1) # 1 print(type(y)) print(x, y, x is y) # FALSE It returns: True 140516304938720 140516304938720 <class 'int'> 1 1 False The last result is False. Please help me understand why this is happening. The result of the function is 1; isn't that a literal, which Python caches? If we change it to y = pow(1, 10) it will return "True".
[ "The \"is\" operator checks whether two items are the same object.\nIn your example it returns False because x is not the same object as y, even though they have the same content.\nfor example:\nHere the x and y variables have the same content, but they are not the same object!\nx = [\"apple\", \"banana\"]\ny = [\"apple\", \"banana\"]\n\nprint(x is y) #False\nprint(x == y) #True\n\nTo understand more, I suggest you check this link.\nhttps://www.w3schools.com/python/python_operators.asp\n" ]
[ 0 ]
[ "x1 = 5\ny1 = 5\nx2 = 'Hello'\ny2 = 'Hello'\nx3 = [1,2,3]\ny3 = [1,2,3]\nprint(x1 is not y1) # prints False\nprint(x2 is y2) # prints True\nprint(x3 is y3) # prints False\n", "x = [\"apple\", \"banana\"]\n\ny = [\"apple\", \"banana\"]\nprint(x is y) #False\nprint(x == y) #True\n" ]
[ -1, -1 ]
[ "function", "literals", "operators", "python", "syntax" ]
stackoverflow_0074509703_function_literals_operators_python_syntax.txt
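A self-contained illustration of the distinction (CPython behaviour; the caching details are an implementation detail, which is exactly why is should not be used for value comparison):

x = 256
y = 256
print(x is y)           # True in CPython: small ints (-5..256) are cached singletons

a = 1000
b = int("1000")         # equal value, but computed at runtime
print(a == b)           # True: the values are equal
print(a is b)           # typically False: two distinct int objects
print(id(a), id(b))     # different ids when the objects differ

p = [1, 2, 3]
q = [1, 2, 3]
print(p == q, p is q)   # True False: equal contents, different objects

The practical rule stays the same as in the answer: use == to compare values and reserve is for identity checks such as x is None.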
Q: Python Pandas Data frame Pivoting I have such .txt file: Field Value First 1 Second alfa First 23 Second beta First 55 Second omega I need to read and transform this file to get data like this: First Second 1 alfa 23 beta 55 omega I start with this: file = './data.txt' df = pd.read_csv(file, sep='\t',header=None, skiprows=89, skipfooter=11, engine='python') df = df.pivot(values=1, columns=0) but it looks as I need to generate some indexes otherwise my pivoted table looks not very well First Second 1 alfa 23 beta 55 omega Is any other solution hot to read that data and get the results that I need? A: The trick is you need to create common keys for the index. Using .assign create a column named CommonKeys which is the cumcount of grouping on the Fields column. Finally chain functions to pivot and clean up the df. df = ( df.assign(CommonKeys=df.groupby("Field").cumcount()) .pivot(index="CommonKeys", columns="Field", values="Value") .reset_index(drop=True) .rename_axis(None, axis=1) ) print(df) Output: First Second 0 1 alfa 1 23 beta 2 55 omega A: in order to make your code work I had to modify the way you access the .csv file, as I don't have that many rows. import pandas as pd file = './data.txt' df = pd.read_csv(file, sep='\t',header=0, engine='python') df = df.pivot(values='Value', columns='Field') # for each column on the dataframe, sort the value and ignore the index for col in df.columns: df[col] = df[col].sort_values(ignore_index=True) # drop NaN df.dropna(axis=0, how='all', inplace=True) # Show dataframe print(df) Here some more info about .sort_values: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.sort_values.html Hope it can help :)
Python Pandas Data frame Pivoting
I have such .txt file: Field Value First 1 Second alfa First 23 Second beta First 55 Second omega I need to read and transform this file to get data like this: First Second 1 alfa 23 beta 55 omega I start with this: file = './data.txt' df = pd.read_csv(file, sep='\t',header=None, skiprows=89, skipfooter=11, engine='python') df = df.pivot(values=1, columns=0) but it looks as I need to generate some indexes otherwise my pivoted table looks not very well First Second 1 alfa 23 beta 55 omega Is any other solution hot to read that data and get the results that I need?
[ "The trick is you need to create common keys for the index.\nUsing .assign create a column named CommonKeys which is the cumcount of grouping on the Fields column. Finally chain functions to pivot and clean up the df.\ndf = (\n df.assign(CommonKeys=df.groupby(\"Field\").cumcount())\n .pivot(index=\"CommonKeys\", columns=\"Field\", values=\"Value\")\n .reset_index(drop=True)\n .rename_axis(None, axis=1)\n)\n\nprint(df)\n\nOutput:\n First Second\n0 1 alfa\n1 23 beta\n2 55 omega\n\n", "in order to make your code work I had to modify the way you access the .csv file, as I don't have that many rows.\nimport pandas as pd\n\nfile = './data.txt'\ndf = pd.read_csv(file, sep='\\t',header=0, engine='python')\ndf = df.pivot(values='Value', columns='Field')\n\n# for each column on the dataframe, sort the value and ignore the index\nfor col in df.columns:\n df[col] = df[col].sort_values(ignore_index=True)\n\n# drop NaN\ndf.dropna(axis=0, how='all', inplace=True)\n\n# Show dataframe\nprint(df)\n\nHere some more info about .sort_values:\nhttps://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.sort_values.html\nHope it can help :)\n" ]
[ 1, 0 ]
[]
[]
[ "pandas", "python" ]
stackoverflow_0074671429_pandas_python.txt
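For reference, a self-contained version of the cumcount-based pivot using the sample rows from the question, with the data built inline instead of read from data.txt:

import pandas as pd

df = pd.DataFrame({
    "Field": ["First", "Second", "First", "Second", "First", "Second"],
    "Value": ["1", "alfa", "23", "beta", "55", "omega"],
})

out = (
    df.assign(key=df.groupby("Field").cumcount())   # common key per First/Second pair
      .pivot(index="key", columns="Field", values="Value")
      .reset_index(drop=True)
      .rename_axis(None, axis=1)
)
print(out)
#   First Second
# 0     1   alfa
# 1    23   beta
# 2    55  omega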
Q: How to get only the initial NaN values and leading non NaN values from a pandas dataframe? I have a dataframe where the rows contain NaN values. The df contains original columns namely Heading 1 Heading 2 and Heading 3 and extra columns called Unnamed: 1 Unnamed: 2 and Unnamed: 3 as shown: Heading 1 Heading 2 Heading 3 Unnamed: 1 Unnamed: 2 Unnamed: 3 NaN 34 24 45 NaN NaN NaN NaN 24 45 11 NaN NaN NaN NaN 45 45 33 4 NaN 24 NaN NaN NaN NaN NaN 4 NaN NaN NaN NaN 34 24 NaN NaN NaN 22 34 24 NaN NaN NaN NaN 34 NaN 45 NaN NaN I want to iterate through each row and find out the amount of initial NaN values in original columns (Heading 1 Heading 2 and Heading 3) and the amount of non NaN values in the extra columns (Unnamed: 1 Unnamed: 2 and Unnamed: 3). For each and every row this should be calculated and returned in a dictionary where the key is the index of the row and the value for that key is a list containing the amount of initial NaN values in original columns (Heading 1 Heading 2 and Heading 3) and the second element of the list would the amount of non NaN values in the extra columns (Unnamed: 1 Unnamed: 2 and Unnamed: 3). So the result for the above dataframe would be: {0 : [1, 1], 1 : [2, 2], 2 : [3, 3], 3 : [0, 0], 4 : [2, 0], 5 : [1, 0], 6 : [0, 0], 7 : [1, 1]} Notice how in row 3 and row 7 the original columns contain 1 and 2 NaN respectively but only the initial NaN's are counted and not the in between ones! UPDATE / RESULTS: Both @mozaway and @Panda Kim gave the correct solution for the current dataframe but @mozway solution does not work at all for another test dataframe. @Panda Kim gave 2 solutions but both the methods he gave (cumsum() and x.first_valid_index()) are giving slightly different results for the different dataframe. Heading 1 Heading 2 Heading 3 Unnamed: 1 Unnamed: 2 Unnamed: 3 Unnamed: 4 NaN 34 24 45 NaN NaN NaN NaN NaN 24 45 11 NaN NaN NaN NaN NaN 45 45 33 NaN 4 NaN 24 NaN NaN NaN NaN NaN NaN 4 NaN NaN NaN NaN NaN 34 24 NaN NaN NaN NaN 22 34 24 NaN NaN NaN NaN NaN 34 NaN 45 NaN NaN NaN NaN NaN NaN NaN 12 22 45 NaN NaN NaN NaN NaN 11 69 NaN NaN NaN NaN 12 NaN 45 NaN NaN NaN NaN NaN NaN 45 NaN NaN NaN NaN NaN 44 NaN For the above df here are the results: @Panda KIM (first_valid_index()) {0: [1, 1], 1: [2, 2], 2: [3, 3], 3: [0, 0], 4: [2, 0], 5: [1, 0], 6: [0, 0], 7: [1, 1], 8: [3, 3], 9: [3, 2], 10: [3, 2], 11: [3, 1], 12: [3, 1]} @Panda Kim (cumsum()) {0: [1, 1], 1: [2, 2], 2: [3, 3], 3: [0, 0], 4: [2, 0], 5: [1, 0], 6: [0, 0], 7: [1, 1], 8: [4, 3], 9: [5, 2], 10: [4, 2], 11: [6, 1], 12: [5, 1]} @mozway solution {0: [1, 1], 1: [2, 2], 2: [3, 3], 3: [0, 0], 4: [2, 0], 5: [1, 0], 6: [0, 0], 7: [1, 1], 8: [3, 0], 9: [3, 0], 10: [3, 0], 11: [3, 0], 12: [3, 0]} A: First divide dataframe (iloc or filter or and so on) df1 = df.iloc[:, :3] df2 = df.iloc[:, 3:] Second count initial NaNs in df1 and count notnull in df2 s1 = df1.apply(lambda x: (x.notnull().cumsum() == 0).sum(), axis=1) s2 = df2.notnull().sum(axis=1) Last concat and make dict pd.concat([s1, s2], axis=1).T.to_dict('list') result: {0: [1, 1], 1: [2, 2], 2: [3, 3], 3: [0, 0], 4: [2, 0], 5: [1, 0], 6: [0, 0], 7: [1, 1]} Update data = [[None, 34.0, 24.0, 45.0, None, None, None], [None, None, 24.0, 45.0, 11.0, None, None], [None, None, None, 45.0, 45.0, 33.0, None], [4.0, None, 24.0, None, None, None, None], [None, None, 4.0, None, None, None, None], [None, 34.0, 24.0, None, None, None, None], [22.0, 34.0, 24.0, None, None, None, None], [None, 34.0, None, 45.0, None, None, None], [None, None, None, None, 12.0, 
22.0, 45.0], [None, None, None, None, None, 11.0, 69.0], [None, None, None, None, 12.0, None, 45.0], [None, None, None, None, None, None, 45.0], [None, None, None, None, None, 44.0, None]] col = ['Heading 1', 'Heading 2', 'Heading 3', 'Unnamed: 1', 'Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'] df = pd.DataFrame(data, columns=col) df1 = df.iloc[:, :3] df2 = df.iloc[:, 3:] s1 = df1.apply(lambda x: (x.notnull().cumsum() == 0).sum(), axis=1) s2 = df2.notnull().sum(axis=1) pd.concat([s1, s2], axis=1).T.to_dict('list') result: {0: [1, 1], 1: [2, 2], 2: [3, 3], 3: [0, 0], 4: [2, 0], 5: [1, 0], 6: [0, 0], 7: [1, 1], 8: [3, 3], 9: [3, 2], 10: [3, 2], 11: [3, 1], 12: [3, 1]} Anyone can know that this is different from questioner's result (@Panda Kim (cumsum())). Of course, if function is not applied to df1, the result is different. Let's apply cumsum code to df instead of df1 for wrong result: df2 = df.iloc[:, 3:] s1 = df.apply(lambda x: (x.notnull().cumsum() == 0).sum(), axis=1) # apply cumsum to df instead df1 s2 = df2.notnull().sum(axis=1) pd.concat([s1, s2], axis=1).T.to_dict('list') wrong result(same to questioner's result that he think result of my code) {0: [1, 1], 1: [2, 2], 2: [3, 3], 3: [0, 0], 4: [2, 0], 5: [1, 0], 6: [0, 0], 7: [1, 1], 8: [4, 3], 9: [5, 2], 10: [4, 2], 11: [6, 1], 12: [5, 1]} It is common for the person to apply and get different results, but that should be checked by the person himself before endless question. A: You can use: m = df.columns.str.startswith('Unnamed') out = (df .groupby(m, axis=1) .apply(lambda g: (g.notna() if g.name else g.isna()) .cummin(axis=1).sum(axis=1) ) .set_axis(['named', 'unnamed'], axis=1) ) Output: named unnamed 0 1 1 1 2 2 2 3 3 3 0 0 4 2 0 5 1 0 6 0 0 7 1 1 as dictionary out.T.to_dict('list') Output: {0: [1, 1], 1: [2, 2], 2: [3, 3], 3: [0, 0], 4: [2, 0], 5: [1, 0], 6: [0, 0], 7: [1, 1]}
How to get only the initial NaN values and leading non NaN values from a pandas dataframe?
I have a dataframe where the rows contain NaN values. The df contains original columns namely Heading 1 Heading 2 and Heading 3 and extra columns called Unnamed: 1 Unnamed: 2 and Unnamed: 3 as shown: Heading 1 Heading 2 Heading 3 Unnamed: 1 Unnamed: 2 Unnamed: 3 NaN 34 24 45 NaN NaN NaN NaN 24 45 11 NaN NaN NaN NaN 45 45 33 4 NaN 24 NaN NaN NaN NaN NaN 4 NaN NaN NaN NaN 34 24 NaN NaN NaN 22 34 24 NaN NaN NaN NaN 34 NaN 45 NaN NaN I want to iterate through each row and find out the amount of initial NaN values in original columns (Heading 1 Heading 2 and Heading 3) and the amount of non NaN values in the extra columns (Unnamed: 1 Unnamed: 2 and Unnamed: 3). For each and every row this should be calculated and returned in a dictionary where the key is the index of the row and the value for that key is a list containing the amount of initial NaN values in original columns (Heading 1 Heading 2 and Heading 3) and the second element of the list would the amount of non NaN values in the extra columns (Unnamed: 1 Unnamed: 2 and Unnamed: 3). So the result for the above dataframe would be: {0 : [1, 1], 1 : [2, 2], 2 : [3, 3], 3 : [0, 0], 4 : [2, 0], 5 : [1, 0], 6 : [0, 0], 7 : [1, 1]} Notice how in row 3 and row 7 the original columns contain 1 and 2 NaN respectively but only the initial NaN's are counted and not the in between ones! UPDATE / RESULTS: Both @mozaway and @Panda Kim gave the correct solution for the current dataframe but @mozway solution does not work at all for another test dataframe. @Panda Kim gave 2 solutions but both the methods he gave (cumsum() and x.first_valid_index()) are giving slightly different results for the different dataframe. Heading 1 Heading 2 Heading 3 Unnamed: 1 Unnamed: 2 Unnamed: 3 Unnamed: 4 NaN 34 24 45 NaN NaN NaN NaN NaN 24 45 11 NaN NaN NaN NaN NaN 45 45 33 NaN 4 NaN 24 NaN NaN NaN NaN NaN NaN 4 NaN NaN NaN NaN NaN 34 24 NaN NaN NaN NaN 22 34 24 NaN NaN NaN NaN NaN 34 NaN 45 NaN NaN NaN NaN NaN NaN NaN 12 22 45 NaN NaN NaN NaN NaN 11 69 NaN NaN NaN NaN 12 NaN 45 NaN NaN NaN NaN NaN NaN 45 NaN NaN NaN NaN NaN 44 NaN For the above df here are the results: @Panda KIM (first_valid_index()) {0: [1, 1], 1: [2, 2], 2: [3, 3], 3: [0, 0], 4: [2, 0], 5: [1, 0], 6: [0, 0], 7: [1, 1], 8: [3, 3], 9: [3, 2], 10: [3, 2], 11: [3, 1], 12: [3, 1]} @Panda Kim (cumsum()) {0: [1, 1], 1: [2, 2], 2: [3, 3], 3: [0, 0], 4: [2, 0], 5: [1, 0], 6: [0, 0], 7: [1, 1], 8: [4, 3], 9: [5, 2], 10: [4, 2], 11: [6, 1], 12: [5, 1]} @mozway solution {0: [1, 1], 1: [2, 2], 2: [3, 3], 3: [0, 0], 4: [2, 0], 5: [1, 0], 6: [0, 0], 7: [1, 1], 8: [3, 0], 9: [3, 0], 10: [3, 0], 11: [3, 0], 12: [3, 0]}
[ "First\ndivide dataframe (iloc or filter or and so on)\ndf1 = df.iloc[:, :3]\ndf2 = df.iloc[:, 3:]\n\nSecond\ncount initial NaNs in df1 and count notnull in df2\ns1 = df1.apply(lambda x: (x.notnull().cumsum() == 0).sum(), axis=1)\ns2 = df2.notnull().sum(axis=1)\n\nLast\nconcat and make dict\npd.concat([s1, s2], axis=1).T.to_dict('list')\n\nresult:\n{0: [1, 1],\n 1: [2, 2],\n 2: [3, 3],\n 3: [0, 0],\n 4: [2, 0],\n 5: [1, 0],\n 6: [0, 0],\n 7: [1, 1]}\n\n\nUpdate\ndata = [[None, 34.0, 24.0, 45.0, None, None, None],\n [None, None, 24.0, 45.0, 11.0, None, None],\n [None, None, None, 45.0, 45.0, 33.0, None],\n [4.0, None, 24.0, None, None, None, None],\n [None, None, 4.0, None, None, None, None],\n [None, 34.0, 24.0, None, None, None, None],\n [22.0, 34.0, 24.0, None, None, None, None],\n [None, 34.0, None, 45.0, None, None, None],\n [None, None, None, None, 12.0, 22.0, 45.0],\n [None, None, None, None, None, 11.0, 69.0],\n [None, None, None, None, 12.0, None, 45.0],\n [None, None, None, None, None, None, 45.0],\n [None, None, None, None, None, 44.0, None]]\ncol = ['Heading 1', 'Heading 2', 'Heading 3', 'Unnamed: 1', 'Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4']\ndf = pd.DataFrame(data, columns=col)\n\n\ndf1 = df.iloc[:, :3]\ndf2 = df.iloc[:, 3:]\ns1 = df1.apply(lambda x: (x.notnull().cumsum() == 0).sum(), axis=1)\ns2 = df2.notnull().sum(axis=1)\npd.concat([s1, s2], axis=1).T.to_dict('list')\n\nresult:\n{0: [1, 1],\n 1: [2, 2],\n 2: [3, 3],\n 3: [0, 0],\n 4: [2, 0],\n 5: [1, 0],\n 6: [0, 0],\n 7: [1, 1],\n 8: [3, 3],\n 9: [3, 2],\n 10: [3, 2],\n 11: [3, 1],\n 12: [3, 1]}\n\nAnyone can know that this is different from questioner's result (@Panda Kim (cumsum())).\n\nOf course, if function is not applied to df1, the result is different.\nLet's apply cumsum code to df instead of df1 for wrong result:\ndf2 = df.iloc[:, 3:]\ns1 = df.apply(lambda x: (x.notnull().cumsum() == 0).sum(), axis=1) # apply cumsum to df instead df1\ns2 = df2.notnull().sum(axis=1)\npd.concat([s1, s2], axis=1).T.to_dict('list')\n\nwrong result(same to questioner's result that he think result of my code)\n{0: [1, 1],\n 1: [2, 2],\n 2: [3, 3],\n 3: [0, 0],\n 4: [2, 0],\n 5: [1, 0],\n 6: [0, 0],\n 7: [1, 1],\n 8: [4, 3],\n 9: [5, 2],\n 10: [4, 2],\n 11: [6, 1],\n 12: [5, 1]}\n\nIt is common for the person to apply and get different results, but that should be checked by the person himself before endless question.\n", "You can use:\nm = df.columns.str.startswith('Unnamed')\n\nout = (df\n .groupby(m, axis=1)\n .apply(lambda g: (g.notna() if g.name else g.isna())\n .cummin(axis=1).sum(axis=1)\n )\n .set_axis(['named', 'unnamed'], axis=1)\n )\n\nOutput:\n named unnamed\n0 1 1\n1 2 2\n2 3 3\n3 0 0\n4 2 0\n5 1 0\n6 0 0\n7 1 1\n\nas dictionary\nout.T.to_dict('list')\n\nOutput:\n{0: [1, 1],\n 1: [2, 2],\n 2: [3, 3],\n 3: [0, 0],\n 4: [2, 0],\n 5: [1, 0],\n 6: [0, 0],\n 7: [1, 1]}\n\n" ]
[ 1, 1 ]
[]
[]
[ "data_preprocessing", "dataframe", "nan", "pandas", "python" ]
stackoverflow_0074673249_data_preprocessing_dataframe_nan_pandas_python.txt
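As a compact alternative for counting the initial NaNs, first_valid_index can be turned into a positional count per row; this sketch assumes the same named/unnamed column split and uses a shortened version of the sample data:

import numpy as np
import pandas as pd

df = pd.DataFrame(
    [[np.nan, 34, 24, 45, np.nan, np.nan],
     [np.nan, np.nan, 24, 45, 11, np.nan],
     [4, np.nan, 24, np.nan, np.nan, np.nan]],
    columns=["Heading 1", "Heading 2", "Heading 3",
             "Unnamed: 1", "Unnamed: 2", "Unnamed: 3"],
)

named = df.loc[:, ~df.columns.str.startswith("Unnamed")]
unnamed = df.loc[:, df.columns.str.startswith("Unnamed")]

def leading_nans(row):
    first = row.first_valid_index()                 # label of the first non-NaN cell
    return len(row) if first is None else row.index.get_loc(first)

result = {i: [leading_nans(named.loc[i]), int(unnamed.loc[i].notna().sum())]
          for i in df.index}
print(result)   # {0: [1, 1], 1: [2, 2], 2: [0, 0]}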
Q: how can we find length of word in python without using len function? #No using of Len function a=len b=len(a) print(b) I want this without Len function how can we find length of word in python without using len function? A: Here is one way to find the length of a word in Python without using the len function: word = "hello" count = 0 for letter in word: count += 1 print(count) # this will print 5, the length of the word This works by iterating through each letter in the word and adding 1 to a counter variable for each letter. After the loop finishes, the counter variable will hold the length of the word. Another way to find the length of a word in Python without using the len function is to use the str.format method: word = "hello" # this will print 5, the length of the word print("{:d}".format(word.count(""))) In this example, the str.format method is used to print the number of empty strings in word. Since every character in the word is a non-empty string, the number of empty strings in word is equal to the length of the word.
how can we find length of word in python without using len function?
#No using of Len function a=len b=len(a) print(b) I want this without Len function how can we find length of word in python without using len function?
[ "Here is one way to find the length of a word in Python without using the len function:\nword = \"hello\"\ncount = 0\n\nfor letter in word:\n count += 1\n\nprint(count) # this will print 5, the length of the word\n\nThis works by iterating through each letter in the word and adding 1 to a counter variable for each letter. After the loop finishes, the counter variable will hold the length of the word.\nAnother way to find the length of a word in Python without using the len function is to use the str.format method:\nword = \"hello\"\n\n# this will print 5, the length of the word\nprint(\"{:d}\".format(word.count(\"\")))\n\nIn this example, the str.format method is used to print the number of empty strings in word. Since every character in the word is a non-empty string, the number of empty strings in word is equal to the length of the word.\n" ]
[ 0 ]
[]
[]
[ "python" ]
stackoverflow_0074673350_python.txt
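Two further length idioms that avoid len, shown with their outputs; note that counting empty substrings over-counts by one, which is why the subtraction above is needed:

word = "hello"

print(sum(1 for _ in word))          # 5: count items with a generator expression

length = 0
for i, _ in enumerate(word, start=1):
    length = i                       # ends at the position of the last character
print(length)                        # 5

print(word.count(""))                # 6: empty-string positions are always len + 1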
Q: keras save and load model, accuracy drop Link to colab https://colab.research.google.com/drive/1u_jRl3uMlxEne667aCxt5Qh8eMlhme8V?usp=sharing link to training data https://drive.google.com/file/d/1jcu7ZTnTF2obGb5OM4dD6T_GlU0sMWmL/view?usp=sharing So i train a model that have 70% and save it into drive and deleted runtime Then restart runtime and load the model from drive use the exact same code the accuracy drop to 40%-50% why? i tried save n load only the weights, or json, or .5 file, save n load using pickle etc etc. it doesnt work. after i deleted runtiime or open a new ipynb file and load the model the accuracy is always not the same A: I see your question and would like to clarify your understanding of the following: Your understanding of Model Training Your understanding of Training Accuracy and Validation Accuracy General rule-of-thumbs regarding model evaluation. When training your model, you do not want to have a "Perfect Model accuracy" (100% Accuracy during training). At the same time, you do not want your accuracies to be too low. (anything below 70%). During training, you want your training and testing accuracies to be as similar as possible. Having a large gap in accuracies can tell you that your model has 1 of 2 problems, overfitting and underfitting. # Example 1 Epoch 12/60 44/44 [==============================] - 0s 5ms/step - loss: 0.5669 - acc: 0.7429 - val_loss: 0.6224 - val_acc: 0.7133 Overfitting is like your model does not accept new and different information Underfitting is your model not understanding information/bad information used for training. Now, I refer your attention to Example 1, this random epoch I have selected in random from your training, this epoch shows a decent training dynamic, the difference between your acc and val_acc have a difference of 0.296 (2.96%) However, your last epoch: Epoch 60/60 44/44 [==============================] - 0s 6ms/step - loss: 0.0697 - acc: 1.0000 - val_loss: 0.5494 - val_acc: 0.7400 Has a acc difference of 0.2600(26%), this tells me that you have overfitted your model as your model has more or less memorized your validation dataset, thus, any new data that is passed into the model will be predicted less accurately. That is why when you are validating your dataset with a fresh new shuffle of your dataset your accuracy drops (There is no correlation between this drop and the accuracy of epoch accuracy delta). for a macro view, you can refer to your model graph: for a general rule of thumb, the best training and validation accuracies are between 70%(0.7) and 89%(0.89). This can change depending on your model requirements. Disclaimer: information in this post may not be 100% accurate
keras save and load model, accuracy drop
Link to colab https://colab.research.google.com/drive/1u_jRl3uMlxEne667aCxt5Qh8eMlhme8V?usp=sharing link to training data https://drive.google.com/file/d/1jcu7ZTnTF2obGb5OM4dD6T_GlU0sMWmL/view?usp=sharing So i train a model that have 70% and save it into drive and deleted runtime Then restart runtime and load the model from drive use the exact same code the accuracy drop to 40%-50% why? i tried save n load only the weights, or json, or .5 file, save n load using pickle etc etc. it doesnt work. after i deleted runtiime or open a new ipynb file and load the model the accuracy is always not the same
[ "I see your question and would like to clarify your understanding of the following:\n\nYour understanding of Model Training\nYour understanding of Training Accuracy and Validation Accuracy\nGeneral rule-of-thumbs regarding model evaluation.\n\n\nWhen training your model, you do not want to have a \"Perfect Model accuracy\" (100% Accuracy during training).\nAt the same time, you do not want your accuracies to be too low. (anything below 70%).\nDuring training, you want your training and testing accuracies to be as similar as possible. Having a large gap in accuracies can tell you that your model has 1 of 2 problems, overfitting and underfitting.\n# Example 1\nEpoch 12/60\n44/44 [==============================] - 0s 5ms/step - loss: 0.5669 - acc: 0.7429 - val_loss: 0.6224 - val_acc: 0.7133\n\nOverfitting is like your model does not accept new and different information\nUnderfitting is your model not understanding information/bad information used for training.\nNow, I refer your attention to Example 1, this random epoch I have selected in random from your training, this epoch shows a decent training dynamic, the difference between your acc and val_acc have a difference of 0.296 (2.96%)\nHowever, your last epoch:\nEpoch 60/60\n44/44 [==============================] - 0s 6ms/step - loss: 0.0697 - acc: 1.0000 - val_loss: 0.5494 - val_acc: 0.7400\n\nHas a acc difference of 0.2600(26%), this tells me that you have overfitted your model as your model has more or less memorized your validation dataset, thus, any new data that is passed into the model will be predicted less accurately.\nThat is why when you are validating your dataset with a fresh new shuffle of your dataset your accuracy drops (There is no correlation between this drop and the accuracy of epoch accuracy delta).\nfor a macro view, you can refer to your model graph:\n\nfor a general rule of thumb, the best training and validation accuracies are between 70%(0.7) and 89%(0.89). This can change depending on your model requirements.\nDisclaimer: information in this post may not be 100% accurate\n" ]
[ 0 ]
[]
[]
[ "artificial_intelligence", "keras", "machine_learning", "model", "python" ]
stackoverflow_0074665262_artificial_intelligence_keras_machine_learning_model_python.txt
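If the widening gap between acc and val_acc is the culprit, the usual first remedy is to stop on validation loss and keep the best checkpoint instead of always training 60 epochs. A hedged sketch; model, a and b are assumed to be the objects already defined in the question's notebook:

import tensorflow as tf

# model, a (features) and b (labels) are assumed to exist as in the question
callbacks = [
    tf.keras.callbacks.EarlyStopping(monitor="val_loss", patience=5,
                                     restore_best_weights=True),
    tf.keras.callbacks.ModelCheckpoint("best_model.h5", monitor="val_loss",
                                       save_best_only=True),
]

history = model.fit(a, b, validation_split=0.2, epochs=60,
                    batch_size=32, callbacks=callbacks, verbose=2)

# in a fresh runtime, load the checkpointed weights rather than the last epoch's
best = tf.keras.models.load_model("best_model.h5")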
Q: How do I trick the app into thinking the mouse movement is really me? I am attempting to get my script to open up a game through steam and then start a world. This all works fine up until the part where I need to navigate the game. I'm assuming that the modules that let you control the mouse just move to points rather than actually moving the mouse which the game doesn't pick up. The game only becomes responsive when I myself move the mouse around IRL. I've tried using pyautogui, mouse, pynput, and a couple of more but have had no luck. The game doesn't respond until I intervene. I'd love any help ya'll could give me. Thanks. Here's some of my code, if it helps. import pyautogui import time def openWorld(): #pyautogui.moveTo(230, 530, 1) pyautogui.click(x=230, y=530) A: It sounds like the game is not picking up the simulated mouse movements from the modules you are using. One possible solution is to try using the pyautogui module's moveRel() function to move the mouse relative to its current position, rather than using moveTo() to move it to a specific set of coordinates. This may help the game to recognize the mouse movements. Additionally, you can try using the pyautogui.PAUSE variable to add a delay between each mouse movement to allow the game to catch up and recognize the input. Here is an example of how you could use these techniques in your code: import pyautogui import time def openWorld(): pyautogui.PAUSE = 0.5 # add a delay of 0.5 seconds between each movement pyautogui.moveRel(100, 100, duration=1) # move the mouse 100 pixels to the right and 100 pixels down from its current position pyautogui.click() # click at the current mouse position You may need to adjust the parameters of the moveRel() and click() functions to fit your specific game and screen setup. Hope this helps!
How do I trick the app into thinking the mouse movement is really me?
I am attempting to get my script to open up a game through steam and then start a world. This all works fine up until the part where I need to navigate the game. I'm assuming that the modules that let you control the mouse just move to points rather than actually moving the mouse which the game doesn't pick up. The game only becomes responsive when I myself move the mouse around IRL. I've tried using pyautogui, mouse, pynput, and a couple of more but have had no luck. The game doesn't respond until I intervene. I'd love any help ya'll could give me. Thanks. Here's some of my code, if it helps. import pyautogui import time def openWorld(): #pyautogui.moveTo(230, 530, 1) pyautogui.click(x=230, y=530)
[ "It sounds like the game is not picking up the simulated mouse movements from the modules you are using. One possible solution is to try using the pyautogui module's moveRel() function to move the mouse relative to its current position, rather than using moveTo() to move it to a specific set of coordinates. This may help the game to recognize the mouse movements. Additionally, you can try using the pyautogui.PAUSE variable to add a delay between each mouse movement to allow the game to catch up and recognize the input. Here is an example of how you could use these techniques in your code:\nimport pyautogui\nimport time\n\ndef openWorld():\n pyautogui.PAUSE = 0.5 # add a delay of 0.5 seconds between each movement\n pyautogui.moveRel(100, 100, duration=1) # move the mouse 100 pixels to the right and 100 pixels down from its current position\n pyautogui.click() # click at the current mouse position\n\nYou may need to adjust the parameters of the moveRel() and click() functions to fit your specific game and screen setup. Hope this helps!\n" ]
[ 0 ]
[]
[]
[ "automation", "mouse", "pyautogui", "python" ]
stackoverflow_0074673336_automation_mouse_pyautogui_python.txt
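Many games ignore injected cursor events and only read lower-level input, so a workaround that is often suggested is the pydirectinput package, which mirrors pyautogui's interface but sends DirectInput-style events (Windows only). Treat this as a sketch to try rather than a guaranteed fix:

import time
import pydirectinput   # pip install pydirectinput

def openWorld():
    time.sleep(2)                    # give the game window focus first
    pydirectinput.moveTo(230, 530)   # same target coordinates as in the question
    pydirectinput.click()            # click at the current cursor position

openWorld()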
Q: This version of ChromeDriver only supports Chrome version 102 I'm using VS Code and Anaconda3. Currently trying to install ChromeDriver_Binary but, when I try to execute code, I get this error: selenium.common.exceptions.SessionNotCreatedException: Message: session not created: This version of ChromeDriver only supports Chrome version 102 Current browser version is 100.0.4896.127 with binary path C:\Program Files (x86)\Google\Chrome\Application\chrome.exe A: One option is to use chromedriver-autoinstaller to do it all at once: import chromedriver_autoinstaller as chromedriver chromedriver.install() Alternatively use chromedriver-binary-auto to find the required version and install the driver: pip install --upgrade --force-reinstall chromedriver-binary-auto import chromedriver_binary No restarting is required. A: I fixed it, by updating chrome to version 101, downloading chromedriver from https://chromedriver.chromium.org/downloads and rebooting. A: You need to check your current chrome version first, and then download the chrome driver following this version: https://chromedriver.chromium.org/downloads The point here is that we have to make sure both of chrome version are the same A: I had the same issue, I'm running MacOS Monterrey. My Chrome version is Version 104.0.5112.79. I was getting the same error as you; This version of ChromeDriver only supports Chrome version 102 What I did was: Download the version of chromedriver that matched the version of Chrome, in this case 104 https://chromedriver.chromium.org/downloads Opened the location of chromedriver, it's usually under this path: /usr/local/bin Open Finder. Press Command-Shift-G to open the dialogue box Input the following search: /usr/local/bin Replaced the previous chromedriver from that location with the new one I just downloaded. A: chrome browser and the chromedriver.exe(Path provided by the project) versions should match to the same version. A: please follow the below steps: 1- Delete the current chrome driver from visual studio. 2- Download the latest release of the chrome driver. 3- Add the new chrome drive to the project ( follow below steps) 3.a- Copy the chrome driver to the application path on your pc for example:C:\Users\xxx\source\repos\APPAutomation\APPAutomation 3.b- Select the project in Visual Studio and press on Add – existing item 3.c- Select the chromedriver exe file in Visual Studio. 3.d- Go to the properties of the chrome driver and change the “Copy to Output Directory” to “Copy if newer” 4- End the chrome driver task from task manager (back ground process) 5- Delete the Bin file from the project path 6- Build the project in Visual Studio . A: This version of ChromeDriver only supports Chrome version 106 Current browser version is 108.0.4896.127 with binary path C:\Program Files (x86)\Google\Chrome\Application\chrome.exe I was getting exact same error steps I followed: delete the old chromedriver.exe file. download the chromedriver.exe file which is compatible with my chrome version which is 108. so I downloaded 108 and it worked. downloadable link
This version of ChromeDriver only supports Chrome version 102
I'm using VS Code and Anaconda3. Currently trying to install ChromeDriver_Binary but, when I try to execute code, I get this error: selenium.common.exceptions.SessionNotCreatedException: Message: session not created: This version of ChromeDriver only supports Chrome version 102 Current browser version is 100.0.4896.127 with binary path C:\Program Files (x86)\Google\Chrome\Application\chrome.exe
[ "One option is to use chromedriver-autoinstaller to do it all at once:\nimport chromedriver_autoinstaller as chromedriver\nchromedriver.install()\n\nAlternatively use chromedriver-binary-auto to find the required version and install the driver:\npip install --upgrade --force-reinstall chromedriver-binary-auto\nimport chromedriver_binary\n\nNo restarting is required.\n", "I fixed it, by updating chrome to version 101, downloading chromedriver from https://chromedriver.chromium.org/downloads and rebooting.\n", "You need to check your current chrome version first, and then download the chrome driver following this version: https://chromedriver.chromium.org/downloads\nThe point here is that we have to make sure both of chrome version are the same\n\n\n", "I had the same issue, I'm running MacOS Monterrey. My Chrome version is Version 104.0.5112.79.\nI was getting the same error as you; This version of ChromeDriver only supports Chrome version 102\nWhat I did was:\n\nDownload the version of chromedriver that matched the version of Chrome, in this case 104\nhttps://chromedriver.chromium.org/downloads\n\nOpened the location of chromedriver, it's usually under this path: /usr/local/bin\n\n\n\nOpen Finder.\nPress Command-Shift-G to open the dialogue box\nInput the following search: /usr/local/bin\n\n\nReplaced the previous chromedriver from that location with the new one I just downloaded.\n\n", "chrome browser and the chromedriver.exe(Path provided by the project) versions should match to the same version.\n", "please follow the below steps:\n1- Delete the current chrome driver from visual studio.\n2- Download the latest release of the chrome driver.\n3- Add the new chrome drive to the project ( follow below steps)\n3.a- Copy the chrome driver to the application path on your pc for\nexample:C:\\Users\\xxx\\source\\repos\\APPAutomation\\APPAutomation\n3.b- Select the project in Visual Studio and press on Add – existing item\n3.c- Select the chromedriver exe file in Visual Studio.\n3.d- Go to the properties of the chrome driver and change the “Copy to Output Directory” to “Copy if newer”\n4- End the chrome driver task from task manager (back ground process)\n5- Delete the Bin file from the project path\n6- Build the project in Visual Studio .\n", "This version of ChromeDriver only supports Chrome version 106\nCurrent browser version is 108.0.4896.127 with binary path C:\\Program Files (x86)\\Google\\Chrome\\Application\\chrome.exe\nI was getting exact same error\nsteps I followed:\n\ndelete the old chromedriver.exe file.\n\ndownload the chromedriver.exe file which is compatible with my\nchrome version which is 108. so I downloaded 108 and it worked.\n\n\ndownloadable link\n\n" ]
[ 14, 4, 1, 1, 0, 0, 0 ]
[]
[]
[ "python", "selenium_chromedriver" ]
stackoverflow_0072111139_python_selenium_chromedriver.txt
Q: Can you explain me the output I was in class section of python programming and I am confused here. I have learned that super is used to call the method of parent class but here Employee is not a parent of Programmer yet it's called (showing the result of getLanguage method). What I am missing? This is the code. class Employee: company= "Google" language = "java" def showDetails(self): print("This is an employee"); def getLanguage(self): print(f"1. The language is {self.language}"); class Programmer: language= "Python" company = "Youtubeeee" def getLanguage(self): super().getLanguage(); print(f"2. The language is {self.language}") def showDetails(self): print("This is an programmer") class Programmer2(Programmer , Employee): language= "C++" def getLanguage(self): super().getLanguage(); print(f"3. The language is {self.language}") p2 = Programmer2(); p2.getLanguage(); This is the output, 1. The language is C++ 2. The language is C++ 3. The language is C++ A: You've bumped into one of the reasons why super exists. From the docs, super delegates method calls to a parent or sibling class of type. Python bases class inheritance on a dynamic Method Resolution Order (MRO). When you created a class with multiple inheritance, those two parent classes became siblings. The left most is first in MRO and the right one is next. This isn't a property of the Programmer class, Its a property of the Programmer2 class that decided to do multiple inheritance. If you use Programmer differently, as in, p3 = Programmer() p3.getLanguage() You get the error AttributeError: 'super' object has no attribute 'getLanguage' because its MRO only goes to the base object which doesn't have the method. You can view the MRO of the class with its __mro__ attribute. Programmer.__mro__: (<class '__main__.Programmer'>, <class 'object'>) Programmer2.__mro__: (<class '__main__.Programmer2'>, <class '__main__.Programmer'>, <class '__main__.Employee'>, <class 'object'>) A: Here is some more explanation about the mechanics how the code in the question works. In its general form, super() can be called with two arguments super(C, obj) where C is a class and obj is an object. The object obj determines which classes should be searched for a given attribute and the Method Resolution Order (MRO) in which these classes should be searched. The class argument is used to restrict this search to only these classes that appear in MRO after C. By PEP 3135 when super() is used without arguments inside a class definition, the class argument is automatically taken to be the class being defined, and the object argument is the object upon which super acts. In the code in the question, when you call p2.getLanguage() then inside the definition of Programmer2, the code super().getLanguage() is tacitly replaced by super(Programmer2, p2).getLanguage(). MRO of p2 is Programmer2 -> Programmer -> Employee -> object, and the search for getLanguage starts after Programmer2 i.e. with Programmer class, and it succeeds in this class. Then, in the process of executing getLanguage method of Programmer, we again encounter super().getLanguage(). This is now replaced by super(Programmer, p2).getLanguage() since we are still working with the same object, but the call to super is inside Programmer class. MRO of p2 is still Programmer2 -> Programmer -> Employee -> object, but now the search for getLanguage starts after Programmer i.e. with Employee class, and it succeeds in this class. 
In this way, even though Programmer does not inherit from Employee, the code super().getLanguage() inside Programmer succeeds, since it is applied to an object that has Employee in its MRO.
Can you explain the output to me?
I was in the class section of a Python programming course and I am confused here. I have learned that super is used to call a method of the parent class, but here Employee is not a parent of Programmer, yet its method is called (the result of getLanguage shows it). What am I missing? This is the code. class Employee: company= "Google" language = "java" def showDetails(self): print("This is an employee"); def getLanguage(self): print(f"1. The language is {self.language}"); class Programmer: language= "Python" company = "Youtubeeee" def getLanguage(self): super().getLanguage(); print(f"2. The language is {self.language}") def showDetails(self): print("This is an programmer") class Programmer2(Programmer , Employee): language= "C++" def getLanguage(self): super().getLanguage(); print(f"3. The language is {self.language}") p2 = Programmer2(); p2.getLanguage(); This is the output: 1. The language is C++ 2. The language is C++ 3. The language is C++
[ "You've bumped into one of the reasons why super exists. From the docs, super delegates method calls to a parent or sibling class of type. Python bases class inheritance on a dynamic Method Resolution Order (MRO). When you created a class with multiple inheritance, those two parent classes became siblings. The left most is first in MRO and the right one is next.\nThis isn't a property of the Programmer class, Its a property of the Programmer2 class that decided to do multiple inheritance. If you use Programmer differently, as in,\np3 = Programmer()\np3.getLanguage()\n\nYou get the error AttributeError: 'super' object has no attribute 'getLanguage' because its MRO only goes to the base object which doesn't have the method.\nYou can view the MRO of the class with its __mro__ attribute.\nProgrammer.__mro__:\n (<class '__main__.Programmer'>, <class 'object'>)\n\nProgrammer2.__mro__:\n (<class '__main__.Programmer2'>, <class '__main__.Programmer'>, \n <class '__main__.Employee'>, <class 'object'>)\n\n", "Here is some more explanation about the mechanics how the code in the question works. In its general form, super() can be called with two arguments super(C, obj) where C is a class and obj is an object. The object obj determines which classes should be searched for a given attribute and the Method Resolution Order (MRO) in which these classes should be searched. The class argument is used to restrict this search to only these classes that appear in MRO after C.\nBy PEP 3135 when super() is used without arguments inside a class definition, the class argument is automatically taken to be the class being defined, and the object argument is the object upon which super acts.\nIn the code in the question, when you call p2.getLanguage() then inside the definition of Programmer2, the code super().getLanguage() is tacitly replaced by super(Programmer2, p2).getLanguage(). MRO of p2 is Programmer2 -> Programmer -> Employee -> object, and the search for getLanguage starts after Programmer2 i.e. with Programmer class, and it succeeds in this class.\nThen, in the process of executing getLanguage method of Programmer, we again encounter super().getLanguage(). This is now replaced by super(Programmer, p2).getLanguage() since we are still working with the same object, but the call to super is inside Programmer class. MRO of p2 is still Programmer2 -> Programmer -> Employee -> object, but now the search for getLanguage starts after Programmer i.e. with Employee class, and it succeeds in this class.\nIn this way, even though Programmer does not inherit from Employee, the code super().getLanguage() inside Programmer succeeds, since it is applied to an object that has Employee in its MRO.\n" ]
[ 3, 1 ]
[]
[]
[ "python" ]
stackoverflow_0074673076_python.txt
Q: How to concatenate lists into single merged DataFrame from for loop output? I'm tring to pull data by using API, I have a list of IDs from csv,and I use for loop to pull request for each ID, the output is in the form of lists, and I tried to convert them into DataFrame, they come out into seperate DataFrames and I'm not able to merge them into one since they are inside of a for loop. The code looks like this: ================== `` # Read ios id from CSV file data = pd.read_csv('File.csv') ios = (data['ios_id']) ios_data= [] # Convert ios id into a list for i in ios: ios_data.append(i) for id in ios_data: params = { "os": "ios", "app_id": id, "country": "US", "search_term": "kid", "auth_token": AUTH_TOKEN } response = requests.get(BASE_URL, params) # print(response.status_code) raw = response.json() feedback = raw['feedback'] if feedback != []: feedback_dict = feedback[0] df = pd.DataFrame(feedback_dict) print(df) else: pass `` And Output looks like this: content version ... country tags 0 So I love tiles of hop it’s fun but I don’t th... 4.4.0 ... US Family 1 So I love tiles of hop it’s fun but I don’t th... 4.4.0 ... US Love it [2 rows x 9 columns] content ... tags 0 This game is, well, fantastic and I love how B... ... Ads 1 This game is, well, fantastic and I love how B... ... Family 2 This game is, well, fantastic and I love how B... ... Hate it 3 This game is, well, fantastic and I love how B... ... Inappropriate 4 This game is, well, fantastic and I love how B... ... Love it 5 This game is, well, fantastic and I love how B... ... Strenuousness [6 rows x 9 columns] A: You can start by making an empty list/basket, then put in the dataframes collected in every iteration/pull and finally use pandas.concat to make a whole and single dataframe right after the loop. Try this : # Read ios id from CSV file data = pd.read_csv('File.csv') ios_data= data['ios_id'].tolist() list_dfs = [] for id in ios_data: params = { "os": "ios", "app_id": id, "country": "US", "search_term": "kid", "auth_token": AUTH_TOKEN } response = requests.get(BASE_URL, params) # print(response.status_code) raw = response.json() feedback = raw['feedback'] if feedback != []: feedback_dict = feedback[0] df = pd.DataFrame(feedback_dict) list_dfs.append(df) else: pass df_all= pd.concat(list_dfs, ignore_index=True) # Output : print(df_all) content ... tags 0 So I love tiles of hop it’s fun but I don’t th... ... Family 1 So I love tiles of hop it’s fun but I don’t th... ... Love it 2 This game is, well, fantastic and I love how B... ... Ads 3 This game is, well, fantastic and I love how B... ... Family 4 This game is, well, fantastic and I love how B... ... Hate it 5 This game is, well, fantastic and I love how B... ... Inappropriate 6 This game is, well, fantastic and I love how B... ... Love it 7 This game is, well, fantastic and I love how B... ... Strenuousness
How to concatenate lists into a single merged DataFrame from for loop output?
I'm tring to pull data by using API, I have a list of IDs from csv,and I use for loop to pull request for each ID, the output is in the form of lists, and I tried to convert them into DataFrame, they come out into seperate DataFrames and I'm not able to merge them into one since they are inside of a for loop. The code looks like this: ================== `` # Read ios id from CSV file data = pd.read_csv('File.csv') ios = (data['ios_id']) ios_data= [] # Convert ios id into a list for i in ios: ios_data.append(i) for id in ios_data: params = { "os": "ios", "app_id": id, "country": "US", "search_term": "kid", "auth_token": AUTH_TOKEN } response = requests.get(BASE_URL, params) # print(response.status_code) raw = response.json() feedback = raw['feedback'] if feedback != []: feedback_dict = feedback[0] df = pd.DataFrame(feedback_dict) print(df) else: pass `` And Output looks like this: content version ... country tags 0 So I love tiles of hop it’s fun but I don’t th... 4.4.0 ... US Family 1 So I love tiles of hop it’s fun but I don’t th... 4.4.0 ... US Love it [2 rows x 9 columns] content ... tags 0 This game is, well, fantastic and I love how B... ... Ads 1 This game is, well, fantastic and I love how B... ... Family 2 This game is, well, fantastic and I love how B... ... Hate it 3 This game is, well, fantastic and I love how B... ... Inappropriate 4 This game is, well, fantastic and I love how B... ... Love it 5 This game is, well, fantastic and I love how B... ... Strenuousness [6 rows x 9 columns]
[ "You can start by making an empty list/basket, then put in the dataframes collected in every iteration/pull and finally use pandas.concat to make a whole and single dataframe right after the loop.\nTry this :\n# Read ios id from CSV file\ndata = pd.read_csv('File.csv')\n\nios_data= data['ios_id'].tolist()\n\nlist_dfs = []\n\nfor id in ios_data:\n params = {\n \"os\": \"ios\",\n \"app_id\": id,\n \"country\": \"US\",\n \"search_term\": \"kid\",\n \"auth_token\": AUTH_TOKEN\n }\n\n response = requests.get(BASE_URL, params)\n # print(response.status_code)\n raw = response.json()\n feedback = raw['feedback']\n if feedback != []:\n feedback_dict = feedback[0]\n df = pd.DataFrame(feedback_dict)\n list_dfs.append(df)\n else:\n pass\n\ndf_all= pd.concat(list_dfs, ignore_index=True)\n\n# Output :\nprint(df_all)\n content ... tags\n0 So I love tiles of hop it’s fun but I don’t th... ... Family\n1 So I love tiles of hop it’s fun but I don’t th... ... Love it\n2 This game is, well, fantastic and I love how B... ... Ads\n3 This game is, well, fantastic and I love how B... ... Family\n4 This game is, well, fantastic and I love how B... ... Hate it\n5 This game is, well, fantastic and I love how B... ... Inappropriate\n6 This game is, well, fantastic and I love how B... ... Love it\n7 This game is, well, fantastic and I love how B... ... Strenuousness\n\n" ]
[ 0 ]
[]
[]
[ "api", "dataframe", "for_loop", "pandas", "python" ]
stackoverflow_0074673399_api_dataframe_for_loop_pandas_python.txt
Q: How do I use binding to change the position of an arc? I am having trouble setting the x position of an arc called "pac_man", and then changing it using += with a function called "xChange()". I have tried multiple things, but I think using a dictionary would suffice. This is because the variable "coord" needs 4 values to assign shape and position for "pac_man." #Imports from tkinter import * #Functions def xChange(): print("Change the value of pac_man's x position here") #Object Attributes wn = Tk() wn.geometry('512x320') wn.title('Moving with Keys') cvs = Canvas(wn, bg='limegreen', height=320, width=512) coord = {'x_pos': 10, 'y_pos': 10, 'x_size': 50, 'y_size': 50} pac_man = cvs.create_arc( coord['x_pos'], coord['y_pos'], coord['x_size'], coord['y_size'], start=45, extent=270, fill='yellow', outline='black', width=4, ) cvs.bind('<Right>', xChange) cvs.pack() A: See comments in the code: #Imports from tkinter import * #Functions def move(event):#add event parameter pixels = 1 #local variable, amount of pixels to "move" direction = event.keysym #get keysym from event object if direction == 'Right':#if keysym is Left cvs.move('packman',+pixels, 0) #canvas has already a method for move, use it! #move "packman" +1 pixel on the x-axis and 0 on the y-axis #Window wn = Tk() wn.geometry('512x320') wn.title('Moving with Keys') #Canvas cvs = Canvas(wn, bg='limegreen', height=320, width=512, takefocus=True) #coord = {'x_pos': 10, 'y_pos': 10, 'x_size': 50, 'y_size': 50} #tkinter stores these values in form of a list #You can retrieve it with like this print(cvs['coords']) pac_man = cvs.create_arc( 10,10,50,50, start=45, extent=270, fill='yellow', outline='black', width=4, tags=('packman',)#add a tag to access this item #tags are tuple^!! ) #cvs.bind('<Right>', xChange) #You could bind to canvas but would've made sure #that canvas has the keyboard focus #it is easier to bind to the window wn.bind('<Left>', move) wn.bind('<Right>', move) wn.bind('<Up>', move) wn.bind('<Down>', move) cvs.pack() wn.mainloop() Additional resource, in case you wonder. event parameter
How do I use binding to change the position of an arc?
I am having trouble setting the x position of an arc called "pac_man", and then changing it using += with a function called "xChange()". I have tried multiple things, but I think using a dictionary would suffice. This is because the variable "coord" needs 4 values to assign shape and position for "pac_man." #Imports from tkinter import * #Functions def xChange(): print("Change the value of pac_man's x position here") #Object Attributes wn = Tk() wn.geometry('512x320') wn.title('Moving with Keys') cvs = Canvas(wn, bg='limegreen', height=320, width=512) coord = {'x_pos': 10, 'y_pos': 10, 'x_size': 50, 'y_size': 50} pac_man = cvs.create_arc( coord['x_pos'], coord['y_pos'], coord['x_size'], coord['y_size'], start=45, extent=270, fill='yellow', outline='black', width=4, ) cvs.bind('<Right>', xChange) cvs.pack()
[ "See comments in the code:\n#Imports\nfrom tkinter import *\n\n#Functions\ndef move(event):#add event parameter\n pixels = 1 #local variable, amount of pixels to \"move\"\n direction = event.keysym #get keysym from event object\n if direction == 'Right':#if keysym is Left\n cvs.move('packman',+pixels, 0)\n #canvas has already a method for move, use it!\n #move \"packman\" +1 pixel on the x-axis and 0 on the y-axis\n\n#Window\nwn = Tk()\nwn.geometry('512x320')\nwn.title('Moving with Keys')\n#Canvas\ncvs = Canvas(wn, bg='limegreen', height=320, width=512, takefocus=True)\n#coord = {'x_pos': 10, 'y_pos': 10, 'x_size': 50, 'y_size': 50}\n#tkinter stores these values in form of a list\n#You can retrieve it with like this print(cvs['coords'])\npac_man = cvs.create_arc(\n 10,10,50,50,\n start=45,\n extent=270,\n fill='yellow',\n outline='black',\n width=4,\n tags=('packman',)#add a tag to access this item\n #tags are tuple^!!\n )\n#cvs.bind('<Right>', xChange)\n#You could bind to canvas but would've made sure\n#that canvas has the keyboard focus\n#it is easier to bind to the window\nwn.bind('<Left>', move)\nwn.bind('<Right>', move)\nwn.bind('<Up>', move)\nwn.bind('<Down>', move)\ncvs.pack()\nwn.mainloop()\n\nAdditional resource, in case you wonder.\nevent parameter\n" ]
[ 1 ]
[]
[]
[ "python", "tkinter", "tkinter_canvas" ]
stackoverflow_0074673409_python_tkinter_tkinter_canvas.txt
Q: Name 'X' is not defined [How to fix it] I attempted to execute the code but the problem shows up as "Name 'X' is not defined" Is X not defined if not how do I define it for the code to run. ` from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42) todo_check([ (X_train.shape == (413, 29), 'X_train does not have the correct shape (413, 29)'), (X_test.shape == (104, 29), 'X_test does not have the correct shape (104, 29)'), (y_train.shape == (413,), 'y_train does not have the correct shape (413,)'), (y_test.shape == (104,), 'y_test does not have the correct shape (104,)'), (np.all(np.isclose(X_train.values[-5:, -4], np.array([17.7, 18.2, 21.8, 23.8, 20.1]),rtol=.01)), 'X_train does not contain the correct values! Make sure you used `X` when splitting!'), (np.all(np.isclose(y_test.values[-5:], np.array([1.25561604, 1.8531681 , 1.15373159, 4.01259206, 3.56558124]),rtol=.01)), 'y_test does not have the correct values! Make sure you used `y` when splitting!') ]) ` I want to the code to fully construct and find why X is not defined and how I can compile it A: You are using the train_test_split function: X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42) train_test_split ( X, y .... But at no point have you defined X (or y for that matter). You need to provide the function with data. What is X ? What is y ? Once you define these variables, your error should go away. For example: from sklearn import datasets from sklearn.model_selection import train_test_split diabetes = datasets.load_diabetes() X, y = diabetes.data, diabetes.target X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.1, random_state=13)
Name 'X' is not defined [How to fix it]
I attempted to execute the code but the problem shows up as "Name 'X' is not defined" Is X not defined if not how do I define it for the code to run. ` from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42) todo_check([ (X_train.shape == (413, 29), 'X_train does not have the correct shape (413, 29)'), (X_test.shape == (104, 29), 'X_test does not have the correct shape (104, 29)'), (y_train.shape == (413,), 'y_train does not have the correct shape (413,)'), (y_test.shape == (104,), 'y_test does not have the correct shape (104,)'), (np.all(np.isclose(X_train.values[-5:, -4], np.array([17.7, 18.2, 21.8, 23.8, 20.1]),rtol=.01)), 'X_train does not contain the correct values! Make sure you used `X` when splitting!'), (np.all(np.isclose(y_test.values[-5:], np.array([1.25561604, 1.8531681 , 1.15373159, 4.01259206, 3.56558124]),rtol=.01)), 'y_test does not have the correct values! Make sure you used `y` when splitting!') ]) ` I want to the code to fully construct and find why X is not defined and how I can compile it
[ "You are using the train_test_split function:\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42)\n\n\ntrain_test_split ( X, y ....\n\nBut at no point have you defined X (or y for that matter).\nYou need to provide the function with data.\n\nWhat is X ?\nWhat is y ?\n\nOnce you define these variables, your error should go away.\n\n\nFor example:\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\n\ndiabetes = datasets.load_diabetes()\nX, y = diabetes.data, diabetes.target\n\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.1, random_state=13)\n\n" ]
[ 0 ]
[]
[]
[ "google_colaboratory", "python" ]
stackoverflow_0074673348_google_colaboratory_python.txt
Q: How to extract values from a while loop to print in python? So I could print out the odd numbers. However, the output isn't what i want. It should look like 1+3+5+7 = 16 but I could not make it into a single line. I couldn't figure out how to extract the values from the while loop as with my method it only gives the latest odd number which is 7 while 1,3 and 5 could not be taken out num = int(input("Insert a postive integer:")) #4 oddNum = 1 total = 0 count = 1 while count <= num: odd = (str(oddNum)) print (odd) total = total + oddNum oddNum = oddNum + 2 count += 1 print (odd + "=" + str(total)) #output will be: ''' 1 3 5 7 7=16 but it should look like 1+3+5+7=16 ''' A: An alternative method would be the use of: range() method to generate the list of odd numbers .join() method to stitch the odd numbers together (eg. 1+3+5+7) f-strings to print odds together with the total = sum(odd_nums) Code: num = int(input("Insert a postive integer:")) #4 odd_nums = range(1, num * 2, 2) sum_nums = "+".join(map(str, odd_nums)) print(f"{sum_nums}={sum(odd_nums)}") Output: 1+3+5+7=16 Note: Same but using two lines of code: num = int(input("Insert a postive integer:")) #4 print(f"{'+'.join(map(str, range(1, num * 2, 2)))}={sum(range(1, num * 2, 2))}") Output: 1+3+5+7=16 A: You are not storing old oddNum values in odd. With minimal changes can be fixed like this: num = int(input("Insert a positive integer:")) oddNum = 1 total = 0 count = 1 odd = "" while count <= num: total = total + oddNum odd += f"{oddNum}" oddNum = oddNum + 2 count += 1 odd = "+".join(odd) print(odd + "=" + str(total)) A: There are a few options, you can either create a string during the loop and print that at the end, or create a list and transform that into a string at the end, or python3 has the ability to modify the default end of line with print(oddNum, end=''). Using a string: num = int(input("Insert a postive integer:")) #4 oddNum = 1 total = 0 count = 1 sequence = '' while count <= num: sequence += ("+" if sequence != "" else "") + str(oddNum) total = total + oddNum oddNum = oddNum + 2 count += 1 print (sequence + "=" + str(total)) Using print: num = int(input("Insert a postive integer:")) #4 oddNum = 1 total = 0 count = 1 while count <= num: if count != 1: print('+', end='') print (oddNum, end='') total = total + oddNum oddNum = oddNum + 2 count += 1 print ("=" + str(total)) A: Alternatively using walrus (:=), range,print, sep, and end: print(*(odd:=[*range(1,int(input('Insert a postive integer:'))*2,2)]),sep='+',end='=');print(sum(odd)) # Insert a postive integer:4 # 1+3+5+7=16
How to extract values from a while loop to print in python?
So I could print out the odd numbers. However, the output isn't what i want. It should look like 1+3+5+7 = 16 but I could not make it into a single line. I couldn't figure out how to extract the values from the while loop as with my method it only gives the latest odd number which is 7 while 1,3 and 5 could not be taken out num = int(input("Insert a postive integer:")) #4 oddNum = 1 total = 0 count = 1 while count <= num: odd = (str(oddNum)) print (odd) total = total + oddNum oddNum = oddNum + 2 count += 1 print (odd + "=" + str(total)) #output will be: ''' 1 3 5 7 7=16 but it should look like 1+3+5+7=16 '''
[ "An alternative method would be the use of:\n\nrange() method to generate the list of odd numbers\n.join() method to stitch the odd numbers together (eg. 1+3+5+7)\nf-strings to print odds together with the total = sum(odd_nums)\n\nCode:\nnum = int(input(\"Insert a postive integer:\")) #4\nodd_nums = range(1, num * 2, 2)\nsum_nums = \"+\".join(map(str, odd_nums))\nprint(f\"{sum_nums}={sum(odd_nums)}\")\n\nOutput:\n1+3+5+7=16\n\n\n\n\nNote:\nSame but using two lines of code:\nnum = int(input(\"Insert a postive integer:\")) #4\n \nprint(f\"{'+'.join(map(str, range(1, num * 2, 2)))}={sum(range(1, num * 2, 2))}\")\n\nOutput:\n1+3+5+7=16\n\n", "You are not storing old oddNum values in odd. With minimal changes can be fixed like this:\nnum = int(input(\"Insert a positive integer:\"))\noddNum = 1\ntotal = 0\ncount = 1\nodd = \"\"\nwhile count <= num:\n total = total + oddNum\n odd += f\"{oddNum}\"\n oddNum = oddNum + 2\n count += 1\nodd = \"+\".join(odd)\nprint(odd + \"=\" + str(total))\n\n", "There are a few options, you can either create a string during the loop and print that at the end, or create a list and transform that into a string at the end, or python3 has the ability to modify the default end of line with print(oddNum, end='').\nUsing a string:\nnum = int(input(\"Insert a postive integer:\")) #4\noddNum = 1\ntotal = 0\ncount = 1\nsequence = ''\nwhile count <= num:\n sequence += (\"+\" if sequence != \"\" else \"\") + str(oddNum)\n total = total + oddNum\n oddNum = oddNum + 2\n count += 1\n\nprint (sequence + \"=\" + str(total))\n\nUsing print:\nnum = int(input(\"Insert a postive integer:\")) #4\noddNum = 1\ntotal = 0\ncount = 1\nwhile count <= num:\n if count != 1:\n print('+', end='')\n print (oddNum, end='')\n total = total + oddNum\n oddNum = oddNum + 2\n count += 1\n\nprint (\"=\" + str(total)) \n\n", "Alternatively using walrus (:=), range,print, sep, and end:\nprint(*(odd:=[*range(1,int(input('Insert a postive integer:'))*2,2)]),sep='+',end='=');print(sum(odd))\n\n# Insert a postive integer:4\n# 1+3+5+7=16\n\n" ]
[ 1, 0, 0, 0 ]
[]
[]
[ "numbers", "python", "while_loop" ]
stackoverflow_0074673156_numbers_python_while_loop.txt
Q: How to pad sequences with variable length in more than 1 dimension in pytorch? Is there any clean way to create a batch of 3D sequences in pytorch? I have 3D sequences with the shape of (sequence_length_lvl1, sequence_length_lvl2, D), the sequences have different values for sequence_length_lvl1 and sequence_length_lvl2 but all of them have the same value for D, and I want to pad these sequences in the first and second dimensions and create a batch of them, but I can't use pytorch pad_sequence function, because it works only if the sequences have variable length in only one dimension. I wanted to ask if anyone knows any clean way to do this? To be more clear, I provide an example. Assume the input sequence is something like: input1 = [ [[1, 1, 1], [2, 2, 2], [3, 3, 3]], [[4, 4, 4], [5, 5, 5]] ] input2 = [ [[1, 1, 1], [2, 2, 2], [3, 3, 3]], [[6, 6, 6]], [[4, 4, 4], [5, 5, 5]] ] And I want to pad [input1, input2]. The desired output would be: output = [ [[[1, 1, 1], [2, 2, 2], [3, 3, 3]], [[4, 4, 4], [5, 5, 5], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0]]], [[[1, 1, 1], [2, 2, 2], [3, 3, 3]], [[6, 6, 6], [0, 0, 0], [0, 0, 0]], [[4, 4, 4], [5, 5, 5], [0, 0, 0]]] ] So the desired output has the shape of (2, 3, 3, 3). A: this works with your example, maybe there is a faster way. input1 = [ [[1, 1, 1], [2, 2, 2], [3, 3, 3]], [[4, 4, 4], [5, 5, 5]] ] input2 = [ [[1, 1, 1], [2, 2, 2], [3, 3, 3]], [[6, 6, 6]], [[4, 4, 4], [5, 5, 5]] ] len_max = max(len(input1), len(input2)) output_val = [[], []] no_val = [[0, 0, 0], [0, 0, 0], [0, 0, 0]] for i in range(len_max): try: a = [] a = input1[i] except Exception: a = no_val add_empty = 3 - len(a) for j in range(add_empty): a += [[0, 0, 0]] try: b = [] b = input2[i] except Exception: b = no_val add_empty = 3 - len(b) for j in range(add_empty): b += [[0, 0, 0]] output_val[0] += [a] output_val[1] += [b] print('-------------\n', output_val) A: You can use text2array library that can perform such padding no matter how deeply nested the sequences are (disclaimer: I'm the author). Install with pip install text2array, then: from text2array import Batch arr = Batch([{'x': input1}, {'x': input2}]).to_array() print(arr['x']) will print array([[[[1, 1, 1], [2, 2, 2], [3, 3, 3]], [[4, 4, 4], [5, 5, 5], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0]]], [[[1, 1, 1], [2, 2, 2], [3, 3, 3]], [[6, 6, 6], [0, 0, 0], [0, 0, 0]], [[4, 4, 4], [5, 5, 5], [0, 0, 0]]]]) as desired. The output is a NumPy array, but you can easily convert it to PyTorch tensor with torch.from_numpy. A: I am not sure about the pytorch data structure but if they are list-like data, you can use my solution. This function is to fill a missing value in every dimension (i.e., width, height, and depth) with 0 to adjust the dimension to be the same as the max one. This can be applied to any number of inputs, not just 2. At first, find the maximum width, maximum height, and maximum depth across all inputs (e.g., input1 and input2). After that, fill a missing cell with 0 for each input and then concatenate them together. This method doesn't require any additional libraries. 
def fill_missing_dimension(inputs): output = [] # find max width, height, depth among all inputs max_width = max([len(i) for i in inputs]) max_height = max([len(j) for i in inputs for j in i]) max_depth = max([len(k) for i in inputs for j in i for k in j]) print(max_width, max_height, max_depth) # fill missing dimension with 0 for all inputs for input in inputs: for i in range(len(input)): for j in range(len(input[i])): for k in range(len(input[i][j]), max_depth): input[i][j].append(0) for j in range(len(input[i]), max_height): input[i].append([0] * max_depth) for i in range(len(input), max_width): input.append([[0] * max_depth] * max_height) # concate all inputs output.append(input) return output If you think that the code above is too long, below is the shorter and cleaner (list comprehension) version (but hard to read and understand) of the function above: # comprehension version of fill_missing_dimension def fill_missing_dimension(inputs): max_width = max([len(i) for i in inputs]) max_height = max([len(j) for i in inputs for j in i]) max_depth = max([len(k) for i in inputs for j in i for k in j]) return [[[[[input[i][j][k] if k < len(input[i][j]) else 0 for k in range(max_depth)] if j < len(input[i]) else [0] * max_depth for j in range(max_height)] if i < len(input) else [[0] * max_depth] * max_height for i in range(max_width)] for input in inputs]] EXAMPLE input1 = [ [[1, 1, 1], [2, 2, 2], [3, 3, 3]], [[4, 4, 4], [5, 5, 5]] ] input2 = [ [[1, 1, 1], [2, 2, 2], [3, 3, 3]], [[6, 6, 6]], [[4, 4, 4], [5, 5, 5]] ] output = fill_missing_dimension([input1, input2]) output: > output [[[[1, 1, 1], [2, 2, 2], [3, 3, 3]], [[4, 4, 4], [5, 5, 5], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0]]], [[[1, 1, 1], [2, 2, 2], [3, 3, 3]], [[6, 6, 6], [0, 0, 0], [0, 0, 0]], [[4, 4, 4], [5, 5, 5], [0, 0, 0]]]] If you would like to use the output as a numpy array, you can use np.array() as shown below: import numpy as np # convert to numpy array output = np.array(output) print(output.shape) # (2, 3, 3, 3)
How to pad sequences with variable length in more than 1 dimension in pytorch?
Is there any clean way to create a batch of 3D sequences in pytorch? I have 3D sequences with the shape of (sequence_length_lvl1, sequence_length_lvl2, D), the sequences have different values for sequence_length_lvl1 and sequence_length_lvl2 but all of them have the same value for D, and I want to pad these sequences in the first and second dimensions and create a batch of them, but I can't use pytorch pad_sequence function, because it works only if the sequences have variable length in only one dimension. I wanted to ask if anyone knows any clean way to do this? To be more clear, I provide an example. Assume the input sequence is something like: input1 = [ [[1, 1, 1], [2, 2, 2], [3, 3, 3]], [[4, 4, 4], [5, 5, 5]] ] input2 = [ [[1, 1, 1], [2, 2, 2], [3, 3, 3]], [[6, 6, 6]], [[4, 4, 4], [5, 5, 5]] ] And I want to pad [input1, input2]. The desired output would be: output = [ [[[1, 1, 1], [2, 2, 2], [3, 3, 3]], [[4, 4, 4], [5, 5, 5], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0]]], [[[1, 1, 1], [2, 2, 2], [3, 3, 3]], [[6, 6, 6], [0, 0, 0], [0, 0, 0]], [[4, 4, 4], [5, 5, 5], [0, 0, 0]]] ] So the desired output has the shape of (2, 3, 3, 3).
[ "this works with your example, maybe there is a faster way.\ninput1 = [\n [[1, 1, 1], [2, 2, 2], [3, 3, 3]],\n [[4, 4, 4], [5, 5, 5]]\n ]\n\ninput2 = [\n [[1, 1, 1], [2, 2, 2], [3, 3, 3]],\n [[6, 6, 6]],\n [[4, 4, 4], [5, 5, 5]]\n ]\n\nlen_max = max(len(input1), len(input2))\noutput_val = [[], []]\nno_val = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\n\nfor i in range(len_max):\n try:\n a = []\n a = input1[i]\n except Exception:\n a = no_val\n\n add_empty = 3 - len(a)\n for j in range(add_empty):\n a += [[0, 0, 0]]\n\n try:\n b = []\n b = input2[i]\n except Exception:\n b = no_val\n\n add_empty = 3 - len(b)\n for j in range(add_empty):\n b += [[0, 0, 0]]\n\n output_val[0] += [a]\n output_val[1] += [b]\n\nprint('-------------\\n', output_val)\n\n", "You can use text2array library that can perform such padding no matter how deeply nested the sequences are (disclaimer: I'm the author). Install with pip install text2array, then:\nfrom text2array import Batch\n\narr = Batch([{'x': input1}, {'x': input2}]).to_array()\nprint(arr['x'])\n\nwill print\narray([[[[1, 1, 1],\n [2, 2, 2],\n [3, 3, 3]],\n\n [[4, 4, 4],\n [5, 5, 5],\n [0, 0, 0]],\n\n [[0, 0, 0],\n [0, 0, 0],\n [0, 0, 0]]],\n\n\n [[[1, 1, 1],\n [2, 2, 2],\n [3, 3, 3]],\n\n [[6, 6, 6],\n [0, 0, 0],\n [0, 0, 0]],\n\n [[4, 4, 4],\n [5, 5, 5],\n [0, 0, 0]]]])\n\nas desired. The output is a NumPy array, but you can easily convert it to PyTorch tensor with torch.from_numpy.\n", "I am not sure about the pytorch data structure but if they are list-like data, you can use my solution.\nThis function is to fill a missing value in every dimension (i.e., width, height, and depth) with 0 to adjust the dimension to be the same as the max one. This can be applied to any number of inputs, not just 2. At first, find the maximum width, maximum height, and maximum depth across all inputs (e.g., input1 and input2). 
After that, fill a missing cell with 0 for each input and then concatenate them together.\nThis method doesn't require any additional libraries.\ndef fill_missing_dimension(inputs):\n output = []\n\n # find max width, height, depth among all inputs\n max_width = max([len(i) for i in inputs])\n max_height = max([len(j) for i in inputs for j in i])\n max_depth = max([len(k) for i in inputs for j in i for k in j])\n\n print(max_width, max_height, max_depth)\n\n # fill missing dimension with 0 for all inputs\n for input in inputs:\n for i in range(len(input)):\n for j in range(len(input[i])):\n for k in range(len(input[i][j]), max_depth):\n input[i][j].append(0)\n for j in range(len(input[i]), max_height):\n input[i].append([0] * max_depth)\n for i in range(len(input), max_width):\n input.append([[0] * max_depth] * max_height)\n\n # concate all inputs\n output.append(input)\n\n return output\n\nIf you think that the code above is too long, below is the shorter and cleaner (list comprehension) version (but hard to read and understand) of the function above:\n# comprehension version of fill_missing_dimension\ndef fill_missing_dimension(inputs):\n max_width = max([len(i) for i in inputs])\n max_height = max([len(j) for i in inputs for j in i])\n max_depth = max([len(k) for i in inputs for j in i for k in j])\n return [[[[[input[i][j][k] if k < len(input[i][j]) else 0 for k in range(max_depth)] if j < len(input[i]) else [0] * max_depth for j in range(max_height)] if i < len(input) else [[0] * max_depth] * max_height for i in range(max_width)] for input in inputs]]\n\n\nEXAMPLE\ninput1 = [\n[[1, 1, 1], [2, 2, 2], [3, 3, 3]], \n[[4, 4, 4], [5, 5, 5]]\n]\n\ninput2 = [\n[[1, 1, 1], [2, 2, 2], [3, 3, 3]], \n[[6, 6, 6]],\n[[4, 4, 4], [5, 5, 5]]\n]\n\noutput = fill_missing_dimension([input1, input2])\n\noutput:\n> output\n\n[[[[1, 1, 1], [2, 2, 2], [3, 3, 3]],\n [[4, 4, 4], [5, 5, 5], [0, 0, 0]],\n [[0, 0, 0], [0, 0, 0], [0, 0, 0]]],\n [[[1, 1, 1], [2, 2, 2], [3, 3, 3]],\n [[6, 6, 6], [0, 0, 0], [0, 0, 0]],\n [[4, 4, 4], [5, 5, 5], [0, 0, 0]]]]\n\nIf you would like to use the output as a numpy array, you can use np.array() as shown below:\nimport numpy as np\n# convert to numpy array\noutput = np.array(output)\nprint(output.shape) # (2, 3, 3, 3)\n\n" ]
[ 0, 0, 0 ]
[]
[]
[ "deep_learning", "lstm", "python", "pytorch" ]
stackoverflow_0072488665_deep_learning_lstm_python_pytorch.txt
Q: How to create dynamic hierarchy(nested key value dictionary) based on sub data customer_det = [ { "Customer": "A", "country_name": "USA", "region_name": "North", "state_name": "Florida", "subregion_name": "South Atlantic", "store": "Store1" }, { "Customer": "A", "country_name": "USA", "region_name": "North", "state_name": "Albama", "subregion_name": "Carribean", "store": "Store2" }, { "Customer": "A", "country_name": "USA", "region_name": "North", "state_name": "Albama", "subregion_name": "Carribean", "store": "Store2" }, { "Customer": "A", "country_name": "India", "region_name": "South East", "state_name": "Hyderabad", "subregion_name": "South-West", "store": "Store4" } ] I have the above list of dictionary. but i wanted to create hierarchy based on sub data(like country, region, state etc. in nested key value dictionary format). using python. Like i shown in below format. I have attached the sample output of my requirement: { "A": { "USA": { "North": { "South Atlantic": { "Florida": [ "Store1" ] }, "Carribean": { "Albama": [ "Store2", "Store3" ] } } }, "India": { "South": { "South-West": { "Telangana": [ "Store4" ] } } } } }
How to create a dynamic hierarchy (nested key-value dictionary) based on sub data
customer_det = [ { "Customer": "A", "country_name": "USA", "region_name": "North", "state_name": "Florida", "subregion_name": "South Atlantic", "store": "Store1" }, { "Customer": "A", "country_name": "USA", "region_name": "North", "state_name": "Albama", "subregion_name": "Carribean", "store": "Store2" }, { "Customer": "A", "country_name": "USA", "region_name": "North", "state_name": "Albama", "subregion_name": "Carribean", "store": "Store2" }, { "Customer": "A", "country_name": "India", "region_name": "South East", "state_name": "Hyderabad", "subregion_name": "South-West", "store": "Store4" } ] I have the above list of dictionary. but i wanted to create hierarchy based on sub data(like country, region, state etc. in nested key value dictionary format). using python. Like i shown in below format. I have attached the sample output of my requirement: { "A": { "USA": { "North": { "South Atlantic": { "Florida": [ "Store1" ] }, "Carribean": { "Albama": [ "Store2", "Store3" ] } } }, "India": { "South": { "South-West": { "Telangana": [ "Store4" ] } } } } }
[]
[]
[ "The question is a bit vague. But to get data from your database, you may first need to query it and then save the results. In python, you can utilize json.dumps() for getting your desired output.\nimport json\n\n# Example data\ndata = [\n {\n 'id': 1,\n 'name': 'John Doe',\n 'age': 30\n },\n {\n 'id': 2,\n 'name': 'Jane Doe',\n 'age': 25\n }\n]\n\n# Convert the data into JSON format\njson_data = json.dumps(data)\n\n# Print the JSON data\nprint(json_data)\n\nResult\n[\n {\n \"id\": 1,\n \"name\": \"John Doe\",\n \"age\": 30\n },\n {\n \"id\": 2,\n \"name\": \"Jane Doe\",\n \"age\": 25\n }\n]\n\n" ]
[ -1 ]
[ "django", "json", "python" ]
stackoverflow_0074673511_django_json_python.txt
Q: Applying two styler functions simultanesouly to a dataframe Here is the example script I am working with. I am trying to apply two styler functions to a dataframe at the same time but as you can see, it would only call the colors2 function. What would be the best way to apply two functions at the same time? import pandas as pd df = pd.DataFrame(data=[[-100,500,400,0,222,222], [9000,124,0,-147,54,-56],[77,0,110,211,0,222], [111,11,-600,33,0,22,],[213,-124,0,-147,54,-56]]) df.columns = pd.MultiIndex.from_product([['x','y','z'], list('ab')]) def colors(i): if i > 0: return 'background: red' elif i < 0: return 'background: green' elif i == 0: return 'background: yellow' else: '' def colors2(i): if i < 0: return 'background: red' elif i > 0: return 'background: green' elif i == 0: return 'background: yellow' else: '' idx = pd.IndexSlice df.style.applymap (colors, subset=pd.IndexSlice[:, idx['x','b']]) df.style.applymap (colors2, subset=pd.IndexSlice[:, idx[:, 'b']]) A: Chain your applymap commands in the desired order (last one prevails): (df.style .applymap(colors2, subset=pd.IndexSlice[:, pd.IndexSlice[:, 'b']]) .applymap(colors, subset=pd.IndexSlice[:, pd.IndexSlice['x','b']]) ) Here pd.IndexSlice['x','b']] is more restrictive than pd.IndexSlice[:, 'b'] so we use it last. Another option could be to use a single function and to decide the color based on the labels inside it. import numpy as np def colors(s): if s.name[0] == 'x': s = s*-1 return np.sign(s).map({-1: 'background: red', 1: 'background: green', 0: 'background: yellow'}) (df.style .apply(colors, subset=pd.IndexSlice[:, pd.IndexSlice[:, 'b']]) ) Output:
Applying two styler functions simultaneously to a dataframe
Here is the example script I am working with. I am trying to apply two styler functions to a dataframe at the same time but as you can see, it would only call the colors2 function. What would be the best way to apply two functions at the same time? import pandas as pd df = pd.DataFrame(data=[[-100,500,400,0,222,222], [9000,124,0,-147,54,-56],[77,0,110,211,0,222], [111,11,-600,33,0,22,],[213,-124,0,-147,54,-56]]) df.columns = pd.MultiIndex.from_product([['x','y','z'], list('ab')]) def colors(i): if i > 0: return 'background: red' elif i < 0: return 'background: green' elif i == 0: return 'background: yellow' else: '' def colors2(i): if i < 0: return 'background: red' elif i > 0: return 'background: green' elif i == 0: return 'background: yellow' else: '' idx = pd.IndexSlice df.style.applymap (colors, subset=pd.IndexSlice[:, idx['x','b']]) df.style.applymap (colors2, subset=pd.IndexSlice[:, idx[:, 'b']])
[ "Chain your applymap commands in the desired order (last one prevails):\n(df.style\n .applymap(colors2, subset=pd.IndexSlice[:, pd.IndexSlice[:, 'b']])\n .applymap(colors, subset=pd.IndexSlice[:, pd.IndexSlice['x','b']])\n )\n\nHere pd.IndexSlice['x','b']] is more restrictive than pd.IndexSlice[:, 'b'] so we use it last.\nAnother option could be to use a single function and to decide the color based on the labels inside it.\nimport numpy as np\ndef colors(s):\n if s.name[0] == 'x':\n s = s*-1\n return np.sign(s).map({-1: 'background: red', 1: 'background: green', 0: 'background: yellow'})\n \n(df.style\n .apply(colors, subset=pd.IndexSlice[:, pd.IndexSlice[:, 'b']])\n )\n\nOutput:\n\n" ]
[ 2 ]
[]
[]
[ "dataframe", "multi_index", "pandas", "python" ]
stackoverflow_0074673515_dataframe_multi_index_pandas_python.txt
Q: In the same function separating code into y/n from the user and taking steps based on that without if/else? I made a sample below to help explain. The problem with an if/else clause is that it makes the variables local so i can't assign a returned value under if: when the user enters 'y' and use it across the bounds of else: -See the value return_from_add and the two places I want to use it. I want to obey running my code sequentially and simply have the user enter a 'n' instead of a 'y' to run a further down piece of code. I don't know how without an if/else statement maybe somebody else has figured out something similar before and then you would understand my frustration in even wording the question appropriately. Because what else comes to mind other than **if **i want to do this **else ** do this for a yes no question. def run_main_prgm_2(): while True: other = 'Invalid Response' no = 'n' yes = 'y' y_n = input('Would you like to add a new person to the address book? y/n \n - ') if y_n == 'y': return_from_add = add_person() return yes elif y_n = 'n': search_full_name1 = input("Type in a fullname for indexing\n- ") index_person(return_from_add,search_full_name1) return no else: return other If you're having trouble understanding my question look at where return_from_add is being used in two places where it's not possible because the if elif makes it local even under the same function. Which is why I included a def at the top to illustrate that this is all under the same function. Now how can i pull this off with local variables under the same function I've tried messing with while loops. Basically it would be cool in programming if there was something that said skip x number of lines and run this instead when the user enters a specific key. A: There is a syntax error in the code. In the second if statement, the 'y_n' variable is compared to the string 'y' using a single equal sign '=' instead of a double equal sign '=='. This will cause a syntax error, as the single equal sign is used to assign a value to a variable, while the double equal sign is used to compare two values.
Within the same function, how can I take different steps based on a y/n answer from the user without if/else?
I made a sample below to help explain. The problem with an if/else clause is that it makes the variables local so i can't assign a returned value under if: when the user enters 'y' and use it across the bounds of else: -See the value return_from_add and the two places I want to use it. I want to obey running my code sequentially and simply have the user enter a 'n' instead of a 'y' to run a further down piece of code. I don't know how without an if/else statement maybe somebody else has figured out something similar before and then you would understand my frustration in even wording the question appropriately. Because what else comes to mind other than **if **i want to do this **else ** do this for a yes no question. def run_main_prgm_2(): while True: other = 'Invalid Response' no = 'n' yes = 'y' y_n = input('Would you like to add a new person to the address book? y/n \n - ') if y_n == 'y': return_from_add = add_person() return yes elif y_n = 'n': search_full_name1 = input("Type in a fullname for indexing\n- ") index_person(return_from_add,search_full_name1) return no else: return other If you're having trouble understanding my question look at where return_from_add is being used in two places where it's not possible because the if elif makes it local even under the same function. Which is why I included a def at the top to illustrate that this is all under the same function. Now how can i pull this off with local variables under the same function I've tried messing with while loops. Basically it would be cool in programming if there was something that said skip x number of lines and run this instead when the user enters a specific key.
[ "There is a syntax error in the code. In the second if statement, the 'y_n' variable is compared to the string 'y' using a single equal sign '=' instead of a double equal sign '=='. This will cause a syntax error, as the single equal sign is used to assign a value to a variable, while the double equal sign is used to compare two values.\n" ]
[ 0 ]
[]
[]
[ "python", "python_3.x" ]
stackoverflow_0074673507_python_python_3.x.txt
Q: How to count len of strings in a list without built-in function? How can I create a function count_word in order to get the result like this: x = ['Hello', 'Bye'] print(count_word(x)) # Result must be [5, 3] without using len(x[index]) or any built-in function? A: Since you're not allowed to use built-in functions, you have to iterate over each string in the list and over all characters of each word as well. Also you have to memorize the current length of each word and reset the counter if the next word is taken. This is done by re-assigning the counter value to 0 (length = 0) before the next inner iteration will be started: def count_word(x): result = [] for word in x: length = 0 for char in word: length += 1 result.append(length) return result Please note that this is probably the no-brainer par excellence. However, Python offers some interesting other approaches to solve this problem. Here are some other interesting examples, which of course need to be adapted. While this should answer your questions, I would like to add some notes about performance and why it is better to use built-in functions: Generally spoken, built-in functions are doing the iteration under the hood for you or are even faster by e.g. simply getting the array’s length from the CPython list head structure (emphasis mine): How are lists implemented in CPython? CPython’s lists are really variable-length arrays, not Lisp-style linked lists. The implementation uses a contiguous array of references to other objects, and keeps a pointer to this array and the array’s length in a list head structure. This makes indexing a list a[i] an operation whose cost is independent of the size of the list or the value of the index. When items are appended or inserted, the array of references is resized. Some cleverness is applied to improve the performance of appending items repeatedly; when the array must be grown, some extra space is allocated so the next few times don’t require an actual resize. (Credits also to Ken Y-N, see How does len(array) work under the hood) Generally, it is better to use built-in functions whenever you can, because you seldom can beat the performance of the underlying implementation (e.g. for C-based Python installations): def count_word_2(x): return [len(word) for word in x] You can see that if you time the two given functions: In [1]: from timeit import timeit In [2]: statement = 'count_word(["Hello", "Bye"])' In [3]: count_word_1 = """ ...: def count_word(x): ...: result = [] ...: for word in x: ...: length = 0 ...: for char in word: ...: length += 1 ...: result.append(length) ...: return result ...: """ In [4]: count_word_2 = """ ...: def count_word(x): ...: return [len(word) for word in x] ...: """ In [5]: timeit(stmt=statement, setup=count_word_1, number=10000000) Out[5]: 4.744415309000033 In [6]: timeit(stmt=statement, setup=count_word_2, number=10000000) Out[6]: 2.7576589090022026 If also a little bit of cheating is allowed (using string dunder method __len()__ instead of built-in function len()), you can get some performance back (credits to HeapOverflow): In [7]: count_word_3 = """ ...: def count_word(x): ...: return [word.__len__() for word in x] ...: """ In [8]: timeit(stmt=statement, setup=count_word_3, number=10000000) Out[8]: 3.313732603997778 So a good rule of thumb is: Do whatever you can with built-in functions. They are more readable and faster. 
A: x = ['Hello', 'Bye'] results = [] for single_string in x: string_length = 0 for i in single_string: string_length += 1 results.append(string_length) print(results) A: x = ['Hello', 'Bye'] def count(x): i=0 for j in x:i+=1 return i print(list(map(count, x))) A: Another possibility using inline assignment (Python >= 3.8) def count(word): c = 0 return [[_ for _ in word if (c := c+1)], c][1] words = ['Hello', 'How are you', 'Bye!'] >>> [count(w) for w in words] [5, 11, 4] Or even passing c as an input argument (defaulted to 0) could be used as an accumulated result from previous counts: def count(word, c=0): return [[_ for _ in word if (c := c+1)], c][1] ctot = [0] [ctot.append(count(w, ctot[-1])) for w in words] # sort of equivalent to built-in 'reduce' >>> ctot # or ctot[1:] to ignore the first zero [0, 5, 16, 20]
How to count len of strings in a list without built-in function?
How can I create a function count_word in order to get the result like this: x = ['Hello', 'Bye'] print(count_word(x)) # Result must be [5, 3] without using len(x[index]) or any built-in function?
[ "Since you're not allowed to use built-in functions, you have to iterate over each string in the list and over all characters of each word as well. Also you have to memorize the current length of each word and reset the counter if the next word is taken. This is done by re-assigning the counter value to 0 (length = 0) before the next inner iteration will be started:\ndef count_word(x):\n result = []\n for word in x:\n length = 0\n for char in word:\n length += 1\n result.append(length)\n return result\n\nPlease note that this is probably the no-brainer par excellence. However, Python offers some interesting other approaches to solve this problem. Here are some other interesting examples, which of course need to be adapted.\n\nWhile this should answer your questions, I would like to add some notes about performance and why it is better to use built-in functions:\nGenerally spoken, built-in functions are doing the iteration under the hood for you or are even faster by e.g. simply getting the array’s length from the CPython list head structure (emphasis mine):\n\nHow are lists implemented in CPython?\nCPython’s lists are really variable-length arrays, not Lisp-style linked lists. The implementation uses a contiguous array of references to other objects, and keeps a pointer to this array and the array’s length in a list head structure.\nThis makes indexing a list a[i] an operation whose cost is independent of the size of the list or the value of the index.\nWhen items are appended or inserted, the array of references is resized. Some cleverness is applied to improve the performance of appending items repeatedly; when the array must be grown, some extra space is allocated so the next few times don’t require an actual resize.\n\n(Credits also to Ken Y-N, see How does len(array) work under the hood)\nGenerally, it is better to use built-in functions whenever you can, because you seldom can beat the performance of the underlying implementation (e.g. for C-based Python installations):\ndef count_word_2(x):\n return [len(word) for word in x]\n\nYou can see that if you time the two given functions:\nIn [1]: from timeit import timeit\n\nIn [2]: statement = 'count_word([\"Hello\", \"Bye\"])'\n\nIn [3]: count_word_1 = \"\"\"\n ...: def count_word(x):\n ...: result = []\n ...: for word in x:\n ...: length = 0\n ...: for char in word:\n ...: length += 1\n ...: result.append(length)\n ...: return result\n ...: \"\"\"\n\nIn [4]: count_word_2 = \"\"\"\n ...: def count_word(x):\n ...: return [len(word) for word in x]\n ...: \"\"\"\n\nIn [5]: timeit(stmt=statement, setup=count_word_1, number=10000000)\nOut[5]: 4.744415309000033\n\nIn [6]: timeit(stmt=statement, setup=count_word_2, number=10000000)\nOut[6]: 2.7576589090022026\n\nIf also a little bit of cheating is allowed (using string dunder method __len()__ instead of built-in function len()), you can get some performance back (credits to HeapOverflow):\nIn [7]: count_word_3 = \"\"\"\n...: def count_word(x):\n...: return [word.__len__() for word in x]\n...: \"\"\"\n\nIn [8]: timeit(stmt=statement, setup=count_word_3, number=10000000)\nOut[8]: 3.313732603997778\n\nSo a good rule of thumb is: Do whatever you can with built-in functions. 
They are more readable and faster.\n", "x = ['Hello', 'Bye']\nresults = []\nfor single_string in x:\n string_length = 0\n for i in single_string: string_length += 1\n results.append(string_length)\nprint(results)\n\n", "x = ['Hello', 'Bye']\ndef count(x):\n i=0\n for j in x:i+=1\n return i\nprint(list(map(count, x)))\n\n", "Another possibility using inline assignment (Python >= 3.8)\ndef count(word):\n c = 0\n return [[_ for _ in word if (c := c+1)], c][1]\n\nwords = ['Hello', 'How are you', 'Bye!']\n\n>>> [count(w) for w in words]\n[5, 11, 4]\n\nOr even passing c as an input argument (defaulted to 0) could be used as an accumulated result from previous counts:\ndef count(word, c=0):\n return [[_ for _ in word if (c := c+1)], c][1]\n\nctot = [0]\n[ctot.append(count(w, ctot[-1])) for w in words] # sort of equivalent to built-in 'reduce'\n\n>>> ctot # or ctot[1:] to ignore the first zero\n[0, 5, 16, 20]\n\n" ]
[ 1, 0, 0, 0 ]
[]
[]
[ "python" ]
stackoverflow_0060767809_python.txt
Q: Accessing a Pandas index like a regular column I have a Pandas DataFrame with a named index. I want to pass it off to a piece of code that takes a DataFrame, a column name, and some other stuff, and does a bunch of work involving that column. Only in this case the column I want to highlight is the index, but giving the index's label to this piece of code doesn't work because you can't extract an index like you can a regular column. For example, I can construct a DataFrame like this: import pandas as pd, numpy as np df=pd.DataFrame({'name':map(chr, range(97, 102)), 'id':range(10000,10005), 'value':np.random.randn(5)}) df.set_index('name', inplace=True) Here's the result: id value name a 10000 0.659710 b 10001 1.001821 c 10002 -0.197576 d 10003 -0.569181 e 10004 -0.882097 Now how do I go about accessing the name column? print(df.index) # No problem print(df['name']) # KeyError: u'name' I know there are workarounds like duplicating the column or changing the index to something else. But is there something cleaner, like some form of column access that treats the index the same way as everything else? A: Index has a special meaning in Pandas. It's used to optimise specific operations and can be used in various methods such as merging / joining data. Therefore, make a choice: If it's "just another column", use reset_index and treat it as another column. If it's genuinely used for indexing, keep it as an index and use df.index. We can't make this choice for you. It should be dependent on the structure of your underlying data and on how you intend to analyse your data. For more information on use of a dataframe index, see: What is the performance impact of non-unique indexes in pandas? What is the point of indexing in pandas? A: You could also use df.index.get_level_values if you need to access an (index) column by name. It also works with hierarchical indices (MultiIndex). >>> df.index.get_level_values('name') Index(['a', 'b', 'c', 'd', 'e'], dtype='object', name='name') A: Instead of using reset_index, you could just copy the index to a normal column, do some work and then drop the column, for example: df['tmp'] = df.index # do stuff based on df['tmp'] del df['tmp'] A: df.reset_index(inplace=True) print(df.head()) Try this
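As a side-by-side illustration of the two choices described in the first answer, here is a minimal sketch (the DataFrame is rebuilt with list('abcde') purely for clarity; everything else follows the answers above):

import pandas as pd
import numpy as np

# Rebuild the example frame from the question
df = pd.DataFrame({'name': list('abcde'),
                   'id': range(10000, 10005),
                   'value': np.random.randn(5)}).set_index('name')

# Option 1: treat the index as just another column
flat = df.reset_index()
print(flat['name'])                        # now a regular column, no KeyError

# Option 2: keep the index and read it directly
print(df.index)                            # Index(['a', 'b', 'c', 'd', 'e'], ...)
print(df.index.get_level_values('name'))   # also works for a MultiIndex level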
Accessing a Pandas index like a regular column
I have a Pandas DataFrame with a named index. I want to pass it off to a piece of code that takes a DataFrame, a column name, and some other stuff, and does a bunch of work involving that column. Only in this case the column I want to highlight is the index, but giving the index's label to this piece of code doesn't work because you can't extract an index like you can a regular column. For example, I can construct a DataFrame like this: import pandas as pd, numpy as np df=pd.DataFrame({'name':map(chr, range(97, 102)), 'id':range(10000,10005), 'value':np.random.randn(5)}) df.set_index('name', inplace=True) Here's the result: id value name a 10000 0.659710 b 10001 1.001821 c 10002 -0.197576 d 10003 -0.569181 e 10004 -0.882097 Now how do I go about accessing the name column? print(df.index) # No problem print(df['name']) # KeyError: u'name' I know there are workarounds like duplicating the column or changing the index to something else. But is there something cleaner, like some form of column access that treats the index the same way as everything else?
[ "Index has a special meaning in Pandas. It's used to optimise specific operations and can be used in various methods such as merging / joining data. Therefore, make a choice:\n\nIf it's \"just another column\", use reset_index and treat it as another column.\nIf it's genuinely used for indexing, keep it as an index and use df.index.\n\nWe can't make this choice for you. It should be dependent on the structure of your underlying data and on how you intend to analyse your data.\nFor more information on use of a dataframe index, see:\n\nWhat is the performance impact of non-unique indexes in pandas?\nWhat is the point of indexing in pandas?\n\n", "You could also use df.index.get_level_values if you need to access a (index) column by name. It also works with hierarchical indices (MultiIndex).\n>>> df.index.get_level_values('name')\nIndex(['a', 'b', 'c', 'd', 'e'], dtype='object', name='name')\n\n", "Instead of using reset_index, you could just copy the index to a normal column, do some work and then drop the column, for example:\ndf['tmp'] = df.index\n# do stuff based on df['tmp']\ndel df['tmp']\n\n", "df.reset_index(inplace=True)\nprint(df.head())\nTry this\n" ]
[ 23, 13, 6, 0 ]
[]
[]
[ "dataframe", "indexing", "pandas", "python", "series" ]
stackoverflow_0052139506_dataframe_indexing_pandas_python_series.txt
Q: List of integers to pairs of tuples I have a list of integers like this numbers = [1, 5, 7, 19, 22, 55] I want to have a function that takes this as input and gives me a list of paired tuples that should contain the numbers as (1,5), (5,7), (7,19) and so on. Kindly suggest. I have tried using for loops. Didn't get the expected output. A: From Python 3.10 you can use itertools.pairwise from itertools import pairwise numbers = [1, 5, 7, 19, 22, 55] list(pairwise(numbers)) # [(1, 5), (5, 7), (7, 19), (19, 22), (22, 55)] A: lst = [(numbers[i],numbers[i+1]) for i in range(0,len(numbers)-1)] This should do the trick: loop over all elements in the list numbers. You loop until the next-to-last element, since otherwise you would walk out of the array (get an index error).
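For completeness, a sketch of an equivalent idiom that the answers above don't show: zipping the list against its own tail does the same pairing, with no extra imports and on any Python 3 version:

numbers = [1, 5, 7, 19, 22, 55]

# Pair each element with its successor by zipping the list against its own tail
pairs = list(zip(numbers, numbers[1:]))
print(pairs)   # [(1, 5), (5, 7), (7, 19), (19, 22), (22, 55)]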
List of integers to pairs of tuples
I have a list of integers like this numbers = [1, 5, 7, 19, 22, 55] I want to have a function that takes this as input and gives me a list of paired tuples that should contain the numbers as (1,5), (5,7), (7,19) and so on. Kindly suggest. I have tried using for loops. Didn't get expected output.
[ "From Python 3.10 you can use itertools.pairwise\nfrom itertools import pairwise\n\nnumbers = [1, 5, 7, 19, 22, 55]\nlist(pairwise(numbers)) # [(1, 5), (5, 7), (7, 19), (19, 22), (22, 55)]\n\n", "lst = [(numbers[i],numbers[i+1]) for i in range(0,len(numbers)-1)]\n\nThis should do the trick: loop over all elements in the list numbers. You loop until the one to last element, since otherwise you would walk out of the array (get an index error).\n" ]
[ 2, 0 ]
[]
[]
[ "python" ]
stackoverflow_0074673571_python.txt
Q: I can't print Hello World in Visual Studio Code I created a folder named "Python Trial", a folder within it named test, and within that folder a file named test.py. I installed the Python extension and tried to print "Hello World!", but it keeps giving me this output: C:/Users/saram/AppData/Local/Microsoft/WindowsApps/python3.10.exe "c:/Users/saram/Documents/Python Trial/test/test.py" -bash: C:/Users/saram/AppData/Local/Microsoft/WindowsApps/python3.10.exe: No such file or directory I don't know what to do. I tried reinstalling Visual Studio Code and the Python extension, but it kept giving me the same error message. A: The error message indicates that the shell can't find the Python interpreter at the path VS Code is trying to use. This could be because the configured interpreter path is incorrect, or because Python is not actually installed on your system. The Python extension in VSCode isn't sufficient for running Python code. In the bottom right corner of VSCode, you can see the version of Python you are currently using. You can click it to change the interpreter path once you install Python.
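If Python is installed but VS Code is launching the wrong interpreter, printing the interpreter path from the script itself makes the problem visible. A minimal sketch of what test.py could contain for that check (the file name follows the question; the rest is illustrative):

# test.py - confirms which interpreter VS Code is actually launching
import sys

print(sys.executable)   # full path of the running interpreter
print(sys.version)      # its version string
print("Hello World!")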
I can't print Hello World in Visual Studio Code
I created a folder named "Python Trial", a folder within it named test, and within that folder a file named test.py. I installed the Python extension and tried to print "Hello World!", but it keeps giving me this output: C:/Users/saram/AppData/Local/Microsoft/WindowsApps/python3.10.exe "c:/Users/saram/Documents/Python Trial/test/test.py" -bash: C:/Users/saram/AppData/Local/Microsoft/WindowsApps/python3.10.exe: No such file or directory I don't know what to do. I tried reinstalling Visual Studio Code and the Python extension, but it kept giving me the same error message.
[ "The error message you're seeing indicates that the interpreter can't find the Python file that you're trying to run. This could be because the path to the interpreter is incorrect, or because the Python interpreter is not installed on your system.\nThe Python extension in VSCode isn't sufficient for running Python code. In the bottom right corner of VSCode, you can see the version of Python you are currently using. You can click it to change the interpreter path once you install Python.\n" ]
[ 1 ]
[]
[]
[ "python", "terminal", "visual_studio_code" ]
stackoverflow_0074672914_python_terminal_visual_studio_code.txt
Q: python split string on multiple delimiters without regex I have a string that I need to split on multiple characters without the use of regular expressions. For example, I would need something like the following: >>>string="hello there[my]friend" >>>string.split(' []') ['hello','there','my','friend'] Is there anything in Python like this? A: If you need multiple delimiters, re.split is the way to go. Without using a regex, it's not possible unless you write a custom function for it. Here's such a function - it might or might not do what you want (consecutive delimiters cause empty elements): >>> def multisplit(s, delims): ... pos = 0 ... for i, c in enumerate(s): ... if c in delims: ... yield s[pos:i] ... pos = i + 1 ... yield s[pos:] ... >>> list(multisplit('hello there[my]friend', ' []')) ['hello', 'there', 'my', 'friend'] A: Solution without regexp: from itertools import groupby sep = ' []' s = 'hello there[my]friend' print [''.join(g) for k, g in groupby(s, sep.__contains__) if not k] I've just posted an explanation here https://stackoverflow.com/a/19211729/2468006 A: A recursive solution without use of regex. Uses only base Python in contrast to the other answers. def split_on_multiple_chars(string_to_split, set_of_chars_as_string): # Recursive splitting # Returns a list of strings s = string_to_split chars = set_of_chars_as_string # If no more characters to split on, return input if len(chars) == 0: return([s]) # Split on the first of the delimiter characters ss = s.split(chars[0]) # Recursive call without the first splitting character bb = [] for e in ss: aa = split_on_multiple_chars(e, chars[1:]) bb.extend(aa) return(bb) Works very similarly to Python's regular string.split(...), but accepts several delimiters. Example use: print(split_on_multiple_chars('my"example_string.with:funny?delimiters', '_.:;')) Output: ['my"example', 'string', 'with', 'funny?delimiters'] A: If you're not worried about long strings, you could force all delimiters to be the same using string.replace(). The following splits a string by both - and , x.replace('-', ',').split(',') If you have many delimiters you could do the following: def split(x, delimiters): for d in delimiters: x = x.replace(d, delimiters[0]) return x.split(delimiters[0])
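As a quick check of the replace-based helper from the last answer, here is a small usage sketch applied to the string from the question (the output shown assumes no consecutive delimiters, which would otherwise produce empty strings):

def split(x, delimiters):
    # Normalise every delimiter to the first one, then split once
    for d in delimiters:
        x = x.replace(d, delimiters[0])
    return x.split(delimiters[0])

print(split("hello there[my]friend", " []"))
# ['hello', 'there', 'my', 'friend']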
python split string on multiple delimiters without regex
I have a string that I need to split on multiple characters without the use of regular expressions. For example, I would need something like the following: >>>string="hello there[my]friend" >>>string.split(' []') ['hello','there','my','friend'] Is there anything in Python like this?
[ "If you need multiple delimiters, re.split is the way to go.\nWithout using a regex, it's not possible unless you write a custom function for it.\nHere's such a function - it might or might not do what you want (consecutive delimiters cause empty elements):\n>>> def multisplit(s, delims):\n... pos = 0\n... for i, c in enumerate(s):\n... if c in delims:\n... yield s[pos:i]\n... pos = i + 1\n... yield s[pos:]\n...\n>>> list(multisplit('hello there[my]friend', ' []'))\n['hello', 'there', 'my', 'friend']\n\n", "Solution without regexp:\nfrom itertools import groupby\nsep = ' []'\ns = 'hello there[my]friend'\nprint [''.join(g) for k, g in groupby(s, sep.__contains__) if not k]\n\nI've just posted an explanation here https://stackoverflow.com/a/19211729/2468006\n", "A recursive solution without use of regex. Uses only base python in contrast to the other answers.\ndef split_on_multiple_chars(string_to_split, set_of_chars_as_string):\n # Recursive splitting\n # Returns a list of strings\n\n s = string_to_split\n chars = set_of_chars_as_string\n\n # If no more characters to split on, return input\n if len(chars) == 0:\n return([s])\n\n # Split on the first of the delimiter characters\n ss = s.split(chars[0])\n\n # Recursive call without the first splitting character\n bb = []\n for e in ss:\n aa = split_on_multiple_chars(e, chars[1:])\n bb.extend(aa)\n return(bb)\n\nWorks very similarly to pythons regular string.split(...), but accepts several delimiters.\nExample use:\nprint(split_on_multiple_chars('my\"example_string.with:funny?delimiters', '_.:;'))\n\nOutput:\n['my\"example', 'string', 'with', 'funny?delimiters']\n\n", "If you're not worried about long strings, you could force all delimiters to be the same using string.replace(). The following splits a string by both - and ,\nx.replace('-', ',').split(',')\nIf you have many delimiters you could do the following:\ndef split(x, delimiters):\n for d in delimiters:\n x = x.replace(d, delimiters[0])\n return x.split(delimiters[0])\n\n" ]
[ 8, 1, 1, 0 ]
[ "re.split is the right tool here.\n>>> string=\"hello there[my]friend\"\n>>> import re\n>>> re.split('[] []', string)\n['hello', 'there', 'my', 'friend']\n\nIn regex, [...] defines a character class. Any characters inside the brackets will match. The way I've spaced the brackets avoids needing to escape them, but the pattern [\\[\\] ] also works.\n>>> re.split('[\\[\\] ]', string)\n['hello', 'there', 'my', 'friend']\n\nThe re.DEBUG flag to re.compile is also useful, as it prints out what the pattern will match:\n>>> re.compile('[] []', re.DEBUG)\nin \n literal 93\n literal 32\n literal 91\n<_sre.SRE_Pattern object at 0x16b0850>\n\n(Where 32, 91, 93, are the ascii values assigned to , [, ])\n" ]
[ -3 ]
[ "python", "split", "string" ]
stackoverflow_0010655850_python_split_string.txt
Q: Python multiprocessing.Process can not stop when after connecting the network When I try to crawl thesis information in multiple threads, I cannot close the process after getting the information: error And when I comment the code which function is get the information from network, these processes can end normally. normal This error is trouble me and I don't have any idea, my network connect is by requests and set the response.close() so can any handsome brother or beautiful lady help this confused person? Thanks This is whole code: my python is python 3.7 from multiprocessing import Process, Queue, Pool,Manager,Value import time, random import requests import re from bs4 import BeautifulSoup headers = { 'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36,Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36", 'Connection': 'close' } ## Just get the html text def GetUrlInfo(url): response = requests.get(url=url, headers=headers) response.encoding = 'utf-8' response.close() SoupData = BeautifulSoup(response.text, 'lxml') return SoupData def GetVolumeUrlfromUrl(url:str)->str: """input is Journal's url and output is a link and a text description to each issue of the journal""" url = re.sub('http:', 'https:', url) SoupDataTemp = GetUrlInfo(url+'index.html') SoupData = SoupDataTemp.find_all('li') UrlALL = [] for i in SoupData: if i.find('a') != None: volumeUrlRule = '<a href=\"(.*?)\">(.*?)</a>' volumeUrlTemp = re.findall(volumeUrlRule,str(i),re.I) # u = i.find('a')['href'] # # print(u) for u in volumeUrlTemp: if re.findall(url, u[0]): # print(u) UrlALL.append((u[0], u[1]), ) # print(UrlALL) return UrlALL def GetPaperBaseInfoFromUrlAll(url:str)->str: """The input is the url and the output is all the paper information obtained from the web page, including, doi, title, author, and the date about this volume """ soup = GetUrlInfo(url) temp1 = soup.find_all('li',class_='entry article') temp2= soup.find_all('h2') temp2=re.sub('\\n',' ',temp2[1].text) # print(temp2) volumeYear = re.split(' ',temp2)[-1] paper = [] for i in temp1: if i.find('div',class_='head').find('a')== None: paperDoi = '' else: paperDoi = i.find('div',class_='head').find('a')['href'] title = i.find('cite').find('span',class_='title').text[:-2] paper.append([paperDoi,title]) return paper,volumeYear # test start url = 'http://dblp.uni-trier.de/db/journals/talg/' UrlALL = GetVolumeUrlfromUrl(url) UrlLen = len(UrlALL) # put the url into the query def Write(query,value,num): for count in range(num): query.put(value[count][0],True) # time.sleep(random.random()) print('write end') # from the query get the url and get the paper info with this url def Read(query,num,PaperInfo1,COUNT,i,paperNumber): while True: count = COUNT.get(True) # print("before enter" + str(i) + ' - ' + str(count)+' - '+str(num)) COUNT.put(count, True) if not query.empty(): value = query.get(True) count = COUNT.get(True) count = count + 1 COUNT.put(count,True) paper, thisYear = GetPaperBaseInfoFromUrlAll(value) # just commented print("connected " + str(i) + ' - ' + str(count) + ' - ' + str(num)) numb = paperNumber.get(True) numb = numb + len(paper) paperNumber.put(numb) # just commented # print(paper,thisYear) PaperInfo1.put((paper,thisYear),) # just commented print("the process "+str(i)+' - '+ str(count)+ ' : '+value) if not COUNT.empty(): count = COUNT.get(True) # print("after enter" + str(i) + ' - ' + str(count) + ' - ' + 
str(num)) COUNT.put(count,True) if int(count) == int(num): print("the process "+str(i)+" end ") break print('read end') # print the paper info def GetPaperInfo(PaperInfo1,paperNumber): for i in range(paperNumber.get(True)): value = PaperInfo1.get(True) print(value) if __name__=='__main__': r_num = 10 # th read process number w_num = 1 # th write process number w_cnt = UrlLen # the write counter q = Queue(UrlLen) # the volune url queue paperNumber = Queue(1) # the all paper number COUNT = Queue(1) # the end tag COUNT.put(int(0)) # first is zero paperNumber.put(int(0)) # first is zero PaperInfo1 = Queue() r_list = [Process( target=Read, args=(q,w_cnt,PaperInfo1,COUNT,i,paperNumber) ) for i in range(r_num)] w_list = [Process( target=Write, args=(q,UrlALL,w_cnt) )] time_start = time.time() [task.start() for task in w_list] [task.start() for task in r_list] [task.join() for task in w_list] [task.join() for task in r_list] time_used = time.time() - time_start GetPaperInfo(PaperInfo1, paperNumber) print('time_used:{}s'.format(time_used)) I have no idea, with debug the process finally enter the process.py -> row:297: try: self.run() and then enter the row:300: util._exit_function() and just a connected the debug but I dont know why the network can cause this error and how to solve this that's all Thank you! A: Hi,this is me again,I tried a concurrent implementation of threads,and global variables for threads are much more comfortable than process queue data sharing. By thread it does implement but my main function can't be stopped, previously with processes it was not possible to proceed to the next step when fetching concurrently, the fetching of data was implemented through threads and continued in the main function but the main function can't be stopped anymore. How interesting! I have designed three functions similar to the previous ones. GetUrlintoQueue is to write the fetched url UrlALL to the queue UrlQueue, UrlLen is the number of the url. import threading import queue count = 0 # Record the number of times a value is fetched from the queue paperNumber = 0 # Record the number of papers def GetUrlintoQueue(UrlQueue,UrlALL,UrlLen): for index in range(UrlLen): UrlQueue.put(UrlALL[index][0], True) print('Write End') UrlQueue.task_done() The other is GetPaperInfofromUrl. Get the url from the UrlQueue and write the information of the corresponding page to PaperInfo, index is the thread number. def GetPaperInfofromUrl(UrlQueue,PaperInfo,index,UrlLen): global count,paperNumber while True: if not UrlQueue.empty(): url = UrlQueue.get(True) count = count + 1 paper, thisYear = GetPaperBaseInfoFromUrlAll(url) # just commented print("connected " + str(index) + '-nd - ' + str(count) + ' - ' + str(UrlLen)) print(paper,thisYear) paperNumber = paperNumber + len(paper) PaperInfo.put((paper, thisYear), True) if count == UrlLen: print("the process " + str(index) + " end ") break UrlQueue.task_done() PaperInfo.task_done() print('the process ' + str(index) +' get paper info end') GetPaperInfo is to show the results about PaperInfo, and it don't change. def GetPaperInfo(PaperInfo,paperNumber): for i in range(paperNumber): value = PaperInfo.get(True) print(value) The main function first sets the corresponding variables, then writes directly first, then 10 threads crawl paper information, and finally shows the results, but after displaying the results still can not exit, I can not understand why. 
if __name__ == '__main__': url = 'http://dblp.uni-trier.de/db/journals/talg/' UrlALL = GetVolumeUrlfromUrl(url) UrlLen = len(UrlALL) UrlQueue = queue.Queue(UrlLen) PaperInfo = queue.Queue(1000) WriteThread = 1 ReadThread = 10 # url write GetUrlThread = [threading.Thread(target=GetUrlintoQueue, args=(UrlQueue,UrlALL,UrlLen,))] time_start = time.time() [geturl.start() for geturl in GetUrlThread] [geturl.join() for geturl in GetUrlThread] time_used = time.time() - time_start print('time_used:{}s'.format(time_used)) # url write end # paperinfo get PaperinfoGetThread = [threading.Thread(target=GetPaperInfofromUrl, args=(UrlQueue,PaperInfo,index,UrlLen,)) for index in range(ReadThread)] time_start = time.time() [getpaper.start() for getpaper in PaperinfoGetThread] [getpaper.join() for getpaper in PaperinfoGetThread] time_used = time.time() - time_start print('time_used:{}s'.format(time_used)) # paperinfo get end GetPaperInfo(PaperInfo,paperNumber) # show the results import sys # it does not work sys.exit() The debug shows: debug.gif (I dont have 10 reputation so the picture is the type of link. )
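One detail neither version above has is an explicit stop signal for the consumers, which is a common reason multiprocessing readers never finish. Below is a minimal, illustrative sketch of the usual sentinel ("poison pill") pattern, with placeholder work instead of the real page fetching; it is not the asker's code, only a shape the Write/Read processes could follow so that every Process can be joined:

from multiprocessing import Process, Queue

SENTINEL = None  # stop signal; one copy per reader is queued at the end

def writer(q, urls, n_readers):
    for u in urls:
        q.put(u)
    for _ in range(n_readers):
        q.put(SENTINEL)          # tell every reader it can stop

def reader(q, out):
    while True:
        item = q.get()           # blocks until an item is available
        if item is SENTINEL:
            break                # loop ends, so the process can be joined
        out.put(len(item))       # placeholder for the real page-fetching work

if __name__ == '__main__':
    q, out = Queue(), Queue()
    urls = ['url-%d' % i for i in range(20)]
    readers = [Process(target=reader, args=(q, out)) for _ in range(4)]
    w = Process(target=writer, args=(q, urls, len(readers)))
    w.start()
    for r in readers:
        r.start()
    results = [out.get() for _ in range(len(urls))]   # drain results before joining
    w.join()
    for r in readers:
        r.join()
    print(results)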
Python multiprocessing.Process can not stop when after connecting the network
When I try to crawl thesis information in multiple threads, I cannot close the process after getting the information: error And when I comment the code which function is get the information from network, these processes can end normally. normal This error is trouble me and I don't have any idea, my network connect is by requests and set the response.close() so can any handsome brother or beautiful lady help this confused person? Thanks This is whole code: my python is python 3.7 from multiprocessing import Process, Queue, Pool,Manager,Value import time, random import requests import re from bs4 import BeautifulSoup headers = { 'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36,Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36", 'Connection': 'close' } ## Just get the html text def GetUrlInfo(url): response = requests.get(url=url, headers=headers) response.encoding = 'utf-8' response.close() SoupData = BeautifulSoup(response.text, 'lxml') return SoupData def GetVolumeUrlfromUrl(url:str)->str: """input is Journal's url and output is a link and a text description to each issue of the journal""" url = re.sub('http:', 'https:', url) SoupDataTemp = GetUrlInfo(url+'index.html') SoupData = SoupDataTemp.find_all('li') UrlALL = [] for i in SoupData: if i.find('a') != None: volumeUrlRule = '<a href=\"(.*?)\">(.*?)</a>' volumeUrlTemp = re.findall(volumeUrlRule,str(i),re.I) # u = i.find('a')['href'] # # print(u) for u in volumeUrlTemp: if re.findall(url, u[0]): # print(u) UrlALL.append((u[0], u[1]), ) # print(UrlALL) return UrlALL def GetPaperBaseInfoFromUrlAll(url:str)->str: """The input is the url and the output is all the paper information obtained from the web page, including, doi, title, author, and the date about this volume """ soup = GetUrlInfo(url) temp1 = soup.find_all('li',class_='entry article') temp2= soup.find_all('h2') temp2=re.sub('\\n',' ',temp2[1].text) # print(temp2) volumeYear = re.split(' ',temp2)[-1] paper = [] for i in temp1: if i.find('div',class_='head').find('a')== None: paperDoi = '' else: paperDoi = i.find('div',class_='head').find('a')['href'] title = i.find('cite').find('span',class_='title').text[:-2] paper.append([paperDoi,title]) return paper,volumeYear # test start url = 'http://dblp.uni-trier.de/db/journals/talg/' UrlALL = GetVolumeUrlfromUrl(url) UrlLen = len(UrlALL) # put the url into the query def Write(query,value,num): for count in range(num): query.put(value[count][0],True) # time.sleep(random.random()) print('write end') # from the query get the url and get the paper info with this url def Read(query,num,PaperInfo1,COUNT,i,paperNumber): while True: count = COUNT.get(True) # print("before enter" + str(i) + ' - ' + str(count)+' - '+str(num)) COUNT.put(count, True) if not query.empty(): value = query.get(True) count = COUNT.get(True) count = count + 1 COUNT.put(count,True) paper, thisYear = GetPaperBaseInfoFromUrlAll(value) # just commented print("connected " + str(i) + ' - ' + str(count) + ' - ' + str(num)) numb = paperNumber.get(True) numb = numb + len(paper) paperNumber.put(numb) # just commented # print(paper,thisYear) PaperInfo1.put((paper,thisYear),) # just commented print("the process "+str(i)+' - '+ str(count)+ ' : '+value) if not COUNT.empty(): count = COUNT.get(True) # print("after enter" + str(i) + ' - ' + str(count) + ' - ' + str(num)) COUNT.put(count,True) if int(count) == int(num): print("the process 
"+str(i)+" end ") break print('read end') # print the paper info def GetPaperInfo(PaperInfo1,paperNumber): for i in range(paperNumber.get(True)): value = PaperInfo1.get(True) print(value) if __name__=='__main__': r_num = 10 # th read process number w_num = 1 # th write process number w_cnt = UrlLen # the write counter q = Queue(UrlLen) # the volune url queue paperNumber = Queue(1) # the all paper number COUNT = Queue(1) # the end tag COUNT.put(int(0)) # first is zero paperNumber.put(int(0)) # first is zero PaperInfo1 = Queue() r_list = [Process( target=Read, args=(q,w_cnt,PaperInfo1,COUNT,i,paperNumber) ) for i in range(r_num)] w_list = [Process( target=Write, args=(q,UrlALL,w_cnt) )] time_start = time.time() [task.start() for task in w_list] [task.start() for task in r_list] [task.join() for task in w_list] [task.join() for task in r_list] time_used = time.time() - time_start GetPaperInfo(PaperInfo1, paperNumber) print('time_used:{}s'.format(time_used)) I have no idea, with debug the process finally enter the process.py -> row:297: try: self.run() and then enter the row:300: util._exit_function() and just a connected the debug but I dont know why the network can cause this error and how to solve this that's all Thank you!
[ "Hi,this is me again,I tried a concurrent implementation of threads,and global variables for threads are much more comfortable than process queue data sharing. By thread it does implement but my main function can't be stopped, previously with processes it was not possible to proceed to the next step when fetching concurrently, the fetching of data was implemented through threads and continued in the main function but the main function can't be stopped anymore. How interesting!\nI have designed three functions similar to the previous ones.\nGetUrlintoQueue is to write the fetched url UrlALL to the queue UrlQueue, UrlLen is the number of the url.\nimport threading\nimport queue\n\ncount = 0 # Record the number of times a value is fetched from the queue\npaperNumber = 0 # Record the number of papers\n\ndef GetUrlintoQueue(UrlQueue,UrlALL,UrlLen):\n for index in range(UrlLen):\n UrlQueue.put(UrlALL[index][0], True)\n print('Write End')\n UrlQueue.task_done()\n\nThe other is GetPaperInfofromUrl. Get the url from the UrlQueue and write the information of the corresponding page to PaperInfo, index is the thread number.\ndef GetPaperInfofromUrl(UrlQueue,PaperInfo,index,UrlLen):\n global count,paperNumber\n while True:\n if not UrlQueue.empty():\n url = UrlQueue.get(True)\n count = count + 1\n paper, thisYear = GetPaperBaseInfoFromUrlAll(url) # just commented\n print(\"connected \" + str(index) + '-nd - ' + str(count) + ' - ' + str(UrlLen))\n print(paper,thisYear)\n paperNumber = paperNumber + len(paper)\n PaperInfo.put((paper, thisYear), True)\n if count == UrlLen:\n print(\"the process \" + str(index) + \" end \")\n break\n UrlQueue.task_done()\n PaperInfo.task_done()\n print('the process ' + str(index) +' get paper info end')\n\nGetPaperInfo is to show the results about PaperInfo, and it don't change.\ndef GetPaperInfo(PaperInfo,paperNumber):\n for i in range(paperNumber):\n value = PaperInfo.get(True)\n print(value)\n\nThe main function first sets the corresponding variables, then writes directly first, then 10 threads crawl paper information, and finally shows the results, but after displaying the results still can not exit, I can not understand why.\nif __name__ == '__main__':\n url = 'http://dblp.uni-trier.de/db/journals/talg/'\n UrlALL = GetVolumeUrlfromUrl(url)\n UrlLen = len(UrlALL)\n UrlQueue = queue.Queue(UrlLen)\n PaperInfo = queue.Queue(1000)\n WriteThread = 1\n ReadThread = 10\n\n # url write\n GetUrlThread = [threading.Thread(target=GetUrlintoQueue, args=(UrlQueue,UrlALL,UrlLen,))]\n time_start = time.time()\n [geturl.start() for geturl in GetUrlThread]\n [geturl.join() for geturl in GetUrlThread]\n time_used = time.time() - time_start\n print('time_used:{}s'.format(time_used))\n # url write end\n\n # paperinfo get\n PaperinfoGetThread = [threading.Thread(target=GetPaperInfofromUrl, args=(UrlQueue,PaperInfo,index,UrlLen,)) for index in range(ReadThread)]\n time_start = time.time()\n [getpaper.start() for getpaper in PaperinfoGetThread]\n [getpaper.join() for getpaper in PaperinfoGetThread]\n time_used = time.time() - time_start\n print('time_used:{}s'.format(time_used))\n # paperinfo get end\n \n GetPaperInfo(PaperInfo,paperNumber) # show the results\n import sys # it does not work \n sys.exit()\n\nThe debug shows: debug.gif\n(I dont have 10 reputation so the picture is the type of link. )\n" ]
[ 0 ]
[]
[]
[ "multiprocessing", "python", "python_requests", "queue", "web_crawler" ]
stackoverflow_0074668048_multiprocessing_python_python_requests_queue_web_crawler.txt