Columns:
id: string (length 5 to 27)
question: string (length 19 to 69.9k)
title: string (length 1 to 150)
tags: string (length 1 to 118)
accepted_answer: string (length 4 to 29.9k)
_unix.316164
Now, if some hardware device does not work, I have to Google what kernel options I should turn on to compile a kernel module for this hardware. I hope there is a better way to do it. Somehow, when I boot from a live CD, my hardware magically works out of the box. I guess the live CD contains lots of modules for various hardware, and the kernel somehow knows which module to load and use. Can I do something similar without using a live CD? Say I run lspci, find out all the available information about the hardware, and use this information to find out what module to compile?
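For illustration, a minimal sketch of that lspci idea in Python; it assumes a Linux sysfs layout where each PCI device exposes a modalias file and that kmod's modprobe (with its --resolve-alias option) is available, which are assumptions about the target system rather than facts from the question:

# Print, for each PCI device, the kernel module name(s) its modalias resolves to.
import glob
import subprocess

for path in glob.glob("/sys/bus/pci/devices/*/modalias"):
    with open(path) as f:
        alias = f.read().strip()
    # Ask modprobe which module name(s) match this alias.
    result = subprocess.run(
        ["modprobe", "--resolve-alias", alias],
        capture_output=True, text=True,
    )
    modules = result.stdout.split() or ["<no module found>"]
    print(path.split("/")[-2], "->", ", ".join(modules))

The printed module names are what you would then look up in the kernel configuration.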
How does the kernel decide which module is suitable for a given hardware device?
kernel;kernel modules
null
_unix.308740
WiFi disconnects from my router's AP, and when I try to reconnect I get this error: "(2) Active connection removed before it was initialized". It works normally again after I reboot. What could be the problem? I am running i3 on Xubuntu.
Networking or WiFi goes down randomly; what could be the problem?
networking;wifi;networkmanager;xubuntu
null
_codereview.129790
I've created a simple hangman solver at solver.lukesalamone.com, and although I'd consider myself proficient in webdev I am by no means an expert. The trick with this is that since HTTP is stateless, I can't really think of a better way to look words up than to start from scratch each time (and lookup is performed with ajax EVERY time the input changes). So any special data structures would need to be created from the input each time, an overhead which isn't really worth it.I'm especially interested in any ways to optimize my backend lookup code:/* lookup.php */$length = $_POST['length'];$proto = $_POST['proto'];$dead = $_POST['dead'];$live = str_replace(*, , $proto);// check that $length is number// load appropriate dictionary into $dict$dict = file(dicts/ . $length . _letters.txt, FILE_IGNORE_NEW_LINES);$words = array();// loop through each word in dictionaryforeach($dict as $word){ $flag = true; // check that word matches proto for($i=0; $i<strlen($proto); $i++){ if($proto[$i] == *){ // wildcard for($j=0; $j<strlen($live); $j++){ if($word[$i] == $live[$j]){ // disallow a*** matching abba $flag = false; continue 3; } } continue; } if($proto[$i] == $word[$i]){ // correct, go to next letter //error_log($proto[$i] . = . $word[$i]); continue; } else{ // incorrect //error_log($proto[$i] . != . $word[$i]); $flag = false; continue 2; } } if($flag){ array_push($words, $word); }}// end foreach loopunset($word);$deadWords = array();//remove words with dead lettersforeach ($words as $word){ for ($i=0; $i < strlen($dead); $i++) { if(strpos($word, $dead[$i]) !== false){ error_log(adding . $word . to deadWords); array_push($deadWords, $word); continue 2; } }}unset($word);$words = array_values(array_diff($words, $deadWords));// probably limit returned words to 500 or so$alphabet = abcdefghijklmnopqrstuvwxyz;$letters = array_fill_keys(str_split($alphabet), 0);// count occurrances of each letter by wordforeach($words as $word){ // check for each letter in each word for($i=0; $i<26; $i++){ if(strpos($word,$alphabet[$i]) !== false){ $letters[ $alphabet[$i] ]++; } }}// end foreach loopunset($word);arsort($letters, SORT_NUMERIC);// normalize numbers to size of words$num_words = count($words);foreach($letters as $l => $v){ $letters[$l] = $letters[$l] / $num_words;}//remove letters with zero frequencyforeach($letters as $l=>$v){ if($v == 0 || $v == 1){ $letters = array_diff($letters, array($l=>$v)); }}unset($l);$ret = array(words=>$words, letters=>$letters);echo json_encode($ret);Here is the JS on my front endvar length;var response;var blocking = false;$(document).ready(function(){ setProto(); var height = Math.ceil($(window).height() / 40); var width = Math.ceil($(window).width() / 40); document.getElementById(length).oninput = function(){ if( !isNaN(parseInt( $(#length).val() )) ){ length = parseInt($(#length).val()); } setProto(); evaluate(); } document.getElementById(proto).oninput = evaluate; document.getElementById(dead).oninput = evaluate; $(.headings h2).click(function(e){ dataSelect(e.target); }); $(.reset button).click(reset);});var evaluate = function(){ if(blocking) return; if( $(#length).val() == ) return; if( !checkProto() ) return; $(.bottom-row).removeClass(hidden); $(#loading).removeClass(hidden); for(var i=0; i<4; i++){ $(.column + i).html(); } blocking = true; // send ajax request $.ajax({ url: 'lookup.php', type: 'post', data: { 'length':length, 'proto':getProto(), 'dead':$(#dead).val()}, success: function(data, status) { data = JSON.parse(data); response = data; var j=0; var norm; for(var 
key in response.letters){ if(norm == null){ norm = 0.95 / response.letters[key]; } var percent = (response.letters[key] * 100).toFixed(2); var str = key + : + percent + %; var entry = document.createElement(div); var label = document.createElement(span); var bar = document.createElement(div); $(entry).addClass(letter-entry); $(label).html(str); $(bar).attr(style, width: + percent*norm + %); $(entry).append( $(label) ); $(entry).append( $(bar) ); $(.column + j%2).append( $(entry) ); j++; } for(var i=0; i<data.words.length; i++){ var word = document.createElement(a); var lbrk = document.createElement(br); $(word).html(data.words[i]); $(word).attr(href, http://dictionary.com/browse/ + data.words[i]); $(word).attr(target, _blank); $(.column + (i%2 + 2) ).append( $(word) ); $(.column + (i%2 + 2) ).append( $(lbrk) ); } $(#possible).html(data.words.length + Possible Words); }, error: function(xhr, desc, err) { console.log(xhr); console.log(Details: + desc + \nError: + err); }, complete: function(){ $(#loading).addClass(hidden); blocking = false; } }); // end ajax call};function setProto(){ if(length == 0) length = 4; for(var i=0; i<8; i++){ if(i<length){ $($(#proto).children()[i]).css(display, block); }else{ $($(#proto).children()[i]).css(display, none); } }}function getProto(){ var ret = ; for(var i=0; i<length; i++){ if($($(#proto).children()[i]).val() == ){ ret += *; } else{ ret += $($(#proto).children()[i]).val(); } } return ret.toLowerCase();}// return true if proto is not all asterisksfunction checkProto(){ var proto = getProto(); for(var i=0; i<proto.length; i++){ if(proto.charAt(i) != *){ return true; } } return false;}function dataSelect(target){ if($(target).hasClass(selected)) return; $(#likely).toggleClass(selected); $(#possible).toggleClass(selected); if(target.id == likely){ $(#lettergraph).removeClass(hidden); $(#wordlist).addClass(hidden); }else{ $(#lettergraph).addClass(hidden); $(#wordlist).removeClass(hidden); }}var reset = function(){ length = 4; $(#length).val(4); for(var i=0; i<length; i++){ $($(#proto).children()[i]).val(); } $(.bottom-row).addClass(hidden); $(#dead).val(); setProto();}
My online hangman solver
javascript;php;algorithm;jquery;hangman
$length = $_POST['length'];$proto = $_POST['proto'];$dead = $_POST['dead'];Consider adding more normalization to this. For example, trim will strip off leading and trailing whitespace. foreach($dict as $word){ $flag = true; // check that word matches proto for($i=0; $i<strlen($proto); $i++){ if($proto[$i] == *){ // wildcard for($j=0; $j<strlen($live); $j++){ if($word[$i] == $live[$j]){ // disallow a*** matching abba $flag = false; continue 3; } } continue; } if($proto[$i] == $word[$i]){ // correct, go to next letter //error_log($proto[$i] . = . $word[$i]); continue; } else{ // incorrect //error_log($proto[$i] . != . $word[$i]); $flag = false; continue 2; } } if($flag){ array_push($words, $word); }}// end foreach loopYou don't need $flag here with the way that you are using continue. $protoLength = strlen($proto);$deadLength = strlen($dead);foreach ($dict as $word) { for ($i = 0; $i < $protoLength; $i++) { if ($proto[$i] == *) { // wildcard // disallow a*** matching abba if (strpos($live, $word[$i]) !== false) { continue 2; } } else if ($proto[$i] != $word[$i]) { continue 2; } } for ($i=0; $i < $deadLength; $i++) { if (strpos($word, $dead[$i]) !== false){ error_log(adding . $word . to deadWords); continue 2; } } array_push($words, $word);}This will do the same thing without all the $flag setting since you skip to the next iteration every time you set $flag to false. Adding an else saves having to do a continue in the first if. And you never needed the second if. You're really just checking the false case. I wouldn't put strlen in a loop check, as I'm not convinced that PHP will optimize it out rather than calling it on each iteration. Doing it this way ensures that it will only get checked once. A strpos call is going to be more efficient than a for loop. I moved the dead letter check into the same loop. This way we never add a dead word to the list rather than removing them later.
_softwareengineering.302734
As a part of my school CS class, I am reading Python Programming by John Zelle. In the book, Zelle talks about surrounding the invocation of a main method with if <condition>: main(), where the condition is something like __name__ == '__main__'. My computer science teacher said that the necessity of this guard depends on whether the program is importable and whether it is run directly. Because there are 2 variables, there are only four combinations of importable and run-directly. I know from the book and my CS teacher that if a program is importable and is run directly, then main must be conditional. However, I do not know the other cases, which is what I am trying to figure out:
If not importable and not run-directly
If importable and not run-directly
If not importable and run-directly
I think, but am not sure, that these are the correct answers:
If not importable and not run-directly, then it must be in a conditional.
If importable and not run-directly, then it doesn't have to be in a conditional.
If not importable and run-directly, then it doesn't have to be in a conditional.
Are the statements above correct? That is what I am trying to figure out. I have tried finding this information in the book, but it is unfortunately unavailable.
Enclosing main invocation of a function in an if
design patterns;python
You're very close, but are missing one aspect.
If a program is importable and is run directly, then main must be conditional.
If not importable and not run directly, then main is irrelevant. Actually, the entire contents of the file are irrelevant.
If importable and not run directly, then the unconditional call to main should not exist.
If not importable and run directly, then main does not have to be in a conditional. It's still a good idea, though, as it helps you avoid accidentally omitting the guard where it is needed, and it causes no harm.
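For illustration, a minimal sketch of the guard being discussed; the file name mymodule.py is just an assumption for the sake of the demo:

# mymodule.py - usable both as an importable module and as a script.

def main():
    print("running as a script")

# The guard: main() runs only when this file is executed directly.
# When the file is imported, __name__ is "mymodule", so main() is not called.
if __name__ == '__main__':
    main()

Running python mymodule.py executes main(), while import mymodule merely defines it.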
_codereview.148570
I have written a small program to query the Stack Exchange API, specifically for badges. The main idea of the program is to return the new list of Tumbleweed posts on Stack Overflow, every 10 minutes. The motivation for this bot was the Weed Eater hat. The program queries the API every 10 mins. If there are new tumbleweed posts, Then it'll print the number of new posts with a link to the tumbleweed page. If there are no new ones, It waits for another 10 minutes. The code for the same isimport com.google.gson.JsonArray;import com.google.gson.JsonObject;import com.google.gson.JsonParser;import org.jsoup.Connection;import org.jsoup.Jsoup;import java.io.IOException;import java.time.Instant;public class RunBadger { private static final String apiKey = my api key; public static Instant previousBadgeTimestamp = Instant.now(); public static void main(String[] args) { try { while (true) { JsonArray weeds = getBadges().get(items).getAsJsonArray(); int numberOfWeeds = weeds.size(); if(numberOfWeeds!=0) { System.out.println([ [Badger](https://www.youtube.com/watch?v=dQw4w9WgXcQ) ] + numberOfWeeds + new [Tumbleweed posts](//stackoverflow.com/help/badges/63/tumbleweed)); } previousBadgeTimestamp = Instant.now(); Thread.sleep(600000); } } catch (Exception e){ e.printStackTrace(); } } public static JsonObject getBadges() throws IOException{ String badgeIdUrl = https://api.stackexchange.com/2.2/badges/63/recipients; JsonObject badgeJson = get(badgeIdUrl,site,stackoverflow,pagesize,100,fromdate,String.valueOf(previousBadgeTimestamp.minusSeconds(1).getEpochSecond()),key,apiKey); return badgeJson; } public static JsonObject get(String url, String... data) throws IOException { Connection.Response response = Jsoup.connect(url).data(data).method(Connection.Method.GET).ignoreContentType(true).ignoreHttpErrors(true).execute(); String json = response.body(); if (response.statusCode() != 200) { throw new IOException(HTTP + response.statusCode() + fetching URL + (url) + . Body is: + response.body()); } JsonObject root = new JsonParser().parse(json).getAsJsonObject(); return root; }}I'd like to have an improved version of it. Particularly regarding the while(true) code block.
Badger - The tumbleweed detector
java;stackexchange
Conceptually, when you want to perform an action at a fixed rate, you want to use an ScheduledExecutorService. This class is designed to handle the use-case of running code at a fixed rate, possibly with an initial delay. The advantage is that you can schedule more than one task, and it also lets you handle all the tasks, eventually cancelling some of them. It's not so much that the while(true) is an issue per se, but doing all those operations with that approach would be a lot more complicated. Furthermore, it wouldn't really let you perform other unrelated actions at the same time as the code getting the badges. Additionally, don't use the Timer class, and always prefer a ScheduleExecutorService, when you can use one.Rewriting the code to use the service would actually make it more clear. First of all, you want a method that represents the action to perform:private static void printBadges() { JsonArray weeds; try { weeds = getBadges().get(items).getAsJsonArray(); } catch (IOException e) { e.printStackTrace(); // this should be replaced with a real logger return; } int numberOfWeeds = weeds.size(); if(numberOfWeeds!=0) { System.out.println([ [Badger](https://www.youtube.com/watch?v=dQw4w9WgXcQ) ] + numberOfWeeds + new [Tumbleweed posts](//stackoverflow.com/help/badges/63/tumbleweed)); } previousBadgeTimestamp = Instant.now();}With that method in place, scheduling the task is as easy asScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor();executorService.scheduleAtFixedRate(RunBadger::printBadges, 0, 10, TimeUnit.MINUTES);This documents clearly what is happening, mainly that something scheduled at a fixed rate, and the delay is obvious: 10 minutes. The second parameter is a potential initial delay before performing the action the first time, which I set to 0 here.A note about the exception handling. The current code catches the exception outside of the while loop, which means that any exception will stop the program from running; generally, this isn't want you want. An exception from getting the badges could occurs for multiple reasons and it shouldn't stop the rest of the code from running (although in this case, there is no other part of the code, but imagine this in a more complex scenario, with other tasks running concurrently). The scheduledAtFixedRate method will suppress the execution of future tasks, thereby cancelling the schedule, if there is an exception thrown by the task: this means that if an exception is thrown, nothing will get executed anymore. Catching and handling the exceptions inside the task, like done in the example above, lets you manage this.The question of what should happen after scheduling the task remains. If you just want to run it for fun until you kill the process manually, you can just keep this in the main method:public static void main(String[] args) { ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor(); executorService.scheduleAtFixedRate(Main::printBadges, 0, 1, TimeUnit.SECONDS);}The executor service won't be garbage collected and the action will run every 10 minutes, until you kill the JVM yourself. A cleaner approach would be to invoke executorService.shutdown(); when you want the process to stop, as it will correctly stop the executor, and let the main thread finish properly. (Note that it won't block until the tasks are finished; awaitTermination can be used for that). You just need to find the stopping condition, like a specific exception thrown in the task. 
It could be added as a runtime hook as well, but note that those hooks don't run when killing with SIGKILL.Other thoughts:JsonObject root = new JsonParser().parse(json).getAsJsonObject();return root;You don't need to store this in a temporary variable just before returning. Returning directly is just as clear, and doesn't introduce a variable.return new JsonParser().parse(json).getAsJsonObject();public static JsonObject getBadges() throws IOExceptionThis may tie a bit the code fetching the badges to the code printing the result. In fact, you're only interested in the number of new items, so make that method return the count directly:public static int getBadgesCount() throws IOExceptionThis way, the rest of the code is hidden from the internal JSON representation of what the API returns.public static Instant previousBadgeTimestampThat's very good use of Instant: it represents a point in the timeline in UTC and isn't tied to any specific timezone. When you want to model at what time a system event took place, Instant is the perfect class.private static final String apiKey = my api key;If the API key deserves its constant, why not all the other hard-coded Strings like badgeIdUrl or the rick-rolling very interesting link badger links to? You could even make all of those properties dynamic, by reading them from a file.if(numberOfWeeds!=0) {Since this number will never be negative, it is clearer to writeif(numberOfWeeds > 0) {as it doesn't let the reader wonder whether accepting a negative value was intended or not.Final point: keep in mind that the StackExchange API has a rate limit in place, and if the API returns a backoff, you have to wait that amount of seconds before making another call.
_unix.166774
There are two hard disks on my machine, but after installing the operating system, it uses only one hard disk to build the RAID0 array. After installing MegaCLI, I used:
MegaCli64 -pdlist -aALL
to display the RAID configuration. I found one disk's Firmware state is online, but the other's is unconfigured. Does that mean I need to add the unconfigured disk to the RAID array? How do I add it? Before yesterday, I had no idea about RAID. Can someone advise me?
Use MegaCLI to add a new hard disk to existing RAID0 in Linux
hard disk;raid0
The problem is solved now, and Edouard Fazenda's method is right, but there is one step before adding a new virtual drive. In my case, when I used the command
storcli64 show all
or
MegaCli64 -CfgForeign -Scan -a0
I found that the other hard disk's DG (DriveGroup) was foreign. I think that is why I couldn't use that hard disk to add a new virtual drive. So the first task is clearing it with the following command:
MegaCli64 -CfgForeign -Clear -a0
After that, we can use the hard disk to add a new virtual drive with the following command:
MegaCli64 -CfgLdAdd r0[EnclosureID:SlotID] -a0
or
storcli64 /c0 add vd type=r0 drives=EnclosureID:SlotID
Now the sd[a|b|c...] device nodes appear in /dev, and we can use the hard disk normally. There is also some information about this problem at https://serverfault.com/questions/331807/megacli-is-killing-me-any-help-appreciated. In the end, I want to thank @Warren Young and @Edouard Fazenda for giving me so many clues.
_webapps.15599
We have a new eBay account and imported some old TLB databases with old article data. But when we upload, we get errors like "out of weekly limit" (this may be a bad translation). The point is that the error message suggests the eBay account believes it is the old account, which it isn't; we use the name and the password of the new account. And this only happens with the imported articles. We tried import as well as compact and repair with the old databases, with no success. Is it possible that the articles still somehow have the old account information within them? And if so, how do we fix it? We had several phone calls with eBay tech support; they have no idea, they checked our account and say everything should be fine.
Turbolister uses wrong account data internally
ebay
The solution is to add the first auction manually in the web interface.
_unix.147426
I get this error message when trying to install CentOS 7 from a USB device: /dev/root does not exist in CentOS 7. How can I solve this problem?
/dev/root does not exist in CentOS 7
boot;system installation;centos
Use Win32 Disk Imager on Windows, or dd to write the ISO to the USB stick on Linux/OSX:
dd if=CentOS-7.0-1406-x86_64-NetInstall.iso of=/dev/sdb bs=8m
I've recently used the first and it booted fine after doing that.
_scicomp.20117
I have a 3-D structured grid that I am trying to convert over to an unstructured grid. Here is a picture of the 2-D structured grid with the nodes shown. It is axisymmetric about the x axis (y = 0). I will rotate this to make it 3-D. My main concern in this conversion is that I need to output the volume of every cell from the structured grid. But as you can see, my cells are irregular hexahedra/pentahedra with non-parallel opposing faces (correct me if I'm wrong, and I really hope I'm wrong, but I don't think I am). Does anyone have any suggestions on how to compute the volume of each cell? I have some ideas, but am open to others. A colleague of mine mentioned that I could break up the volumes into tetrahedra and compute the volume that way. That is my #1 option so far. My other ideas are not so simple, and the algorithms can get pretty involved. Does anyone have a suggestion on how to compute the volume?
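For illustration, a minimal sketch of the colleague's tetrahedron idea in Python; it assumes each cell is given as eight corner points in a bottom-face-then-top-face ordering, and the particular 5-tetrahedron split used below is just one valid choice, not something specified in the question. For cells with warped (non-planar) faces the result is an approximation that depends on how each quad face is triangulated.

# Approximate the volume of a hexahedral cell by splitting it into five tetrahedra.
# Corner ordering assumed: p[0..3] = bottom face, p[4..7] = top face,
# with p[4] above p[0], p[5] above p[1], and so on.
import numpy as np

def tet_volume(a, b, c, d):
    # Volume of a tetrahedron from its four corner points.
    return abs(np.linalg.det(np.array([b - a, c - a, d - a]))) / 6.0

# One valid 5-tetrahedron split of a hexahedron (indices into the 8 corners).
HEX_TO_TETS = [(0, 1, 3, 4), (1, 2, 3, 6), (1, 4, 5, 6), (3, 4, 6, 7), (1, 3, 4, 6)]

def hex_volume(corners):
    p = np.asarray(corners, dtype=float)
    return sum(tet_volume(p[i], p[j], p[k], p[l]) for i, j, k, l in HEX_TO_TETS)

# Sanity check on a unit cube: should print 1.0.
cube = [(0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 0),
        (0, 0, 1), (1, 0, 1), (1, 1, 1), (0, 1, 1)]
print(hex_volume(cube))

Prism (pentahedral) cells on the axis can be handled the same way with a three-tetrahedron split.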
structured to unstructured grid - calculating volume of cell
mesh generation
null
_webapps.16251
How do I set Facebook to show only a profile owner's posts on his/her wall? Is there a Facebook feature or a browser extension (Opera/Firefox/Chrome) that could do this? (Strange: I remember being able to do this around a month ago, either as a Facebook feature or through the Better Facebook Opera/Firefox/Chrome extension, but I can't seem to find where to toggle that feature now.)
How do I set Facebook to show only a profile owner's posts in his/her wall?
facebook;facebook timeline
null
_webmaster.90984
I am trying to link my domain, registered at exabytes.com.my, with my server at GoDaddy. exabytes.com.my only allowed me to modify the nameservers, so I set the nameservers to:
ns1.secureserver.net
ns2.secureserver.net
I added my domain and put the same nameservers in the GoDaddy server, and it's still unlinked! My domain is http://atrax.com.my/. Why? And if it does get linked, what if someone else just added my domain name and put in nameservers that they found with a DNS checker; would that get linked? I am confused right now about this nameserver thing. I can't add my server IP at exabytes.com.my, so how will I link the domain?
Is it important to point the domain to server IP to get them linked?
domains;dns;nameserver;dns servers
"exabytes.com.my allowed me to modify nameservers only" - That is exactly what you need to do, and it is the only thing you need to do. To find out what NAMESERVERS you need to specify at your registrar, check your account at GoDaddy. Reference (possibly UK specific?): https://uk.godaddy.com/help/find-your-nameservers-for-domains-registered-at-other-companies-12318
"I added my domain and put the same nameservers in GoDaddy server and its still unlinked!" - When you create your account at GoDaddy you will be prompted to enter your domain. You should not change any NAMESERVERS at GoDaddy (your host). The NAMESERVERS are where your master DNS records are stored. Note that changing NAMESERVERS can take up to 48 hours, as the change needs to propagate around the internet.
"what if someone else just added my domain name and put nameservers that they can check from any DNS checker, will that be linked?" - It's technically possible that someone else created an account on the same server for the same domain at GoDaddy before you did, but this is very unlikely.
_unix.52193
Is it possible to make the Xfce window manager open certain applications fullscreen by default? For example, I want Emacs in Xfce to always run fullscreen on start. I know how to launch Emacs itself fullscreen, so I want an Xfce solution. I tried searching through the Xfce wiki, the Xfce page on the Arch wiki, and Unix.SE along with AskUbuntu, with no solution yet.
Starting certain applications fullscreen in Xfce
xfce;window manager
Use gdevilspie to match it; that actually works across DEs.
_webmaster.71252
I moved a site to WP Engine. I changed the CNAME in the zone file to point to the WP Engine domain. So now I can access www.mysite.com, but I cannot access http://mysite.com (it takes me to GoDaddy's "no site" page). What am I doing wrong? I attached a screenshot below. The A record points to GoDaddy's own IP. WP Engine does not provide an IP to point to; they ask you to use the CNAME. I added a wildcard CNAME (*) to see if that would work, but no luck. I just want to be able to access the website whether I type the www or not.
CNAME works for www.domain.com but not for http://domain.com
cname
null
_unix.117674
I am setting up video streaming.In local network, there are two devices streaming video over UDP multicast. One is regular computer with Linux (streaming to 239.220.221.10, port 9200), second one is special DVB-S streamer (streaming to 239.220.220.32, port 9200). There are several IPTV set-top boxes in the network, which are able to play streams from either source. There are also some Cisco switches for multicast filtering, but I am not able to check their configuration.On different computer (running Ubuntu 12.04) I would like to capture the stream, preferably using VLC or FFMPEG. From the computer streamer (239.220.221.10) it works, but from special streamer (239.220.220.32) I get no stream.What I triedI tried to narrow down the problem and tried to capture raw UDP datagrams with socat and tcpdump. If I run following command, I get valid video in video.dump file:> socat UDP4-RECVFROM:9200,ip-add-membership=239.220.221.10:0.0.0.0 - > video.dumpWhen I simultaneously run tcpdump, I see incoming datagrams:> sudo tcpdump -i eth1 18:00:39.059824 IP 10.1.2.202.41852 > 239.220.221.10.9200: UDP, length 131618:00:39.060789 IP 10.1.2.202.41852 > 239.220.221.10.9200: UDP, length 1316...When I try to run same commands for the special streamer (just change the IP membership address for socat to 239.220.220.32), tcpdump shows incoming datagrams from 239.220.220.32, but video.dump file is empty.What can be the reason that socat doesn't see the datagrams that are clearly coming?Update on 4 March 2014:I just found out that there are different IP ranges in the network:Computer streamer: 10.1.2.202 / 255.0.0.0Special streamer: 192.168.85.5 / 255.255.255.0Computer on which I am trying to grab stream: 10.1.2.203 / 255.0.0.0When I change IP adress of the last to 192.168.85.x / 255.255.255.0, I can catch streams from the Special streamer, but not from the computer streamer.So the question changes to: Is it possible to set socat, VLC or FFMPEG to accept also multicast streams that have source address outside the range of ethernet interface?
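For illustration, a minimal Python sketch of joining the multicast group directly, as a way to test reception independent of socat, VLC or FFMPEG. The group, port and dump filename are taken from the question; everything else is a plain SOCK_DGRAM receiver. If datagrams from the off-subnet source still never arrive, the interface's reverse-path filter (rp_filter) is a plausible suspect to check, though that is an assumption rather than a confirmed diagnosis.

# Join 239.220.220.32:9200 on any interface and dump raw datagrams to a file.
import socket
import struct

GROUP, PORT = "239.220.220.32", 9200

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(("", PORT))  # bind the port on all interfaces

# Ask the kernel to join the group on the default interface (INADDR_ANY).
mreq = struct.pack("4s4s", socket.inet_aton(GROUP), socket.inet_aton("0.0.0.0"))
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)

with open("video.dump", "wb") as out:
    while True:
        data, src = sock.recvfrom(2048)
        out.write(data)  # src shows the sender address, regardless of its subnet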
Dumping multicast UDP stream with socat
tcpdump;udp;multicast
null
_codereview.105479
I've written this English checkers game. Meanwhile, it's a human vs. human game, although it is easily extensible to a game between other players (such as a computer one).Play English Checkers.see https://en.wikipedia.org/wiki/English_draughtsfrom collections import namedtuplefrom itertools import cycle# square - a number between 1 and 35, that isn't divisible by 9:# . 35 . 34 . 33 . 32# 31 . 30 . 29 . 28 .# . 26 . 25 . 24 . 23# 22 . 21 . 20 . 19 .# . 17 . 16 . 15 . 14# 13 . 12 . 11 . 10 .# . 8 . 7 . 6 . 5# 4 . 3 . 2 . 1 .# Pay attention that in this representation, the numbers that are# divisible by 9 are skipped - thus, two squares are adjacents if and# only if their difference is 4 or 5. Also, this representation may# be different from the representation that is used in the program's# interface (for example, in `UserPlayer`).# move - a tuple of squares - the first square is the piece we want to# move, and the others are the sequence of moves. Usually that tuple# is 2 squares long - it's only longer if the player made a multiple# jump.# In some places of the program, the word edges will mean pairs of# squares that have only one reachable square between them (in a# diagonal line), so you can jump between them if they are both empty# and there is an opponent's piece in the middle.# The square in the middle of the two edges will just be called# the middle.SQUARES = [s for s in xrange(1, 36) if s%9 != 0]# a jump means both single and multiple jumps.class MovingErrors: A namespace for error constants for illegal moves. NotASquare = The move included a number that is not a square. TooShort = (The move should start at one square and finish at another one.) NotYourPiece = The player should move his own piece. MoveToPiece = The player should move to an empty square. LongSimpleMove = A simple move must include exactly 2 squares in it. NotKing = Only a king can move backwards. JumpAvaliable = (If a player can jump, he must do it; And if a player can make multiple jumps, he must make all available jumps in the sequence he chose.) JumpThroughKingRow = (If a man jumps to the king's row, the move terminates immediately.) SometimesJumps = (If a move starts with a jump, ALL of it should be composed of jumps.) WeirdCapturing = (You have to capture your opponent's piece - not empty pieces and not your own ones) JustInvalid = (What. A simple move should move a piece to an adjacent square, and a jump should jump above opponents. Is that hard?)### Checkers Stuff ###class State(namedtuple('State', 'turn reds whites kings')): A state in the English Checkers game. The board is always represented in the red's point of view, so the 1-4 rank is the closest rank to the red's side, and the 32-35 rank is the closest rank to the white's side. Attributes: turn - the player that should play now - either State.RED or State.WHITE. reds, whites - frozensets of squares, where there are red and white pieces accordingly. kings - the squares where there are kings (red and white). These 4 attributes can also be like elements of a tuple - so you can unpack them: >>> turn, reds, whites, kings = state and you can access them also by doing state[n]. This is useful, because State.RED is 1 and State.WHITE is 2 - thus state[state.turn] will return all of the squares that belong to the current player. Other Attributes: opponent - the player that shouldn't play now, the opposite of self.turn. These attributes can't be accessed like in a tuple. 
Main Methods: state.move(move) - return a new state, that describes the game after the current player has made the given move. state.did_end() - True if the game ended, False otherwise. Other Methods: simple_move_avaliable(pieces) - return True if any of the given pieces can make a simple move. (The returned value may be incorrect if any piece can make a jump) jump_avaliable(pieces) - return True if any of the given pieces can make a jump. farther(s1, s2) - True if s1 is farther from the current player than the second square, False otherwise. pieces_after_simple_move(move) - Return a tuple of (red pieces, white pieces, kings), that describes the board's pieces after the given (not necessarily legal) simple move. pieces_after_jump(move) - Return a tuple of (red pieces, white pieces, kings), that describes the board's pieces after the given (not necessarily legal) jump. RED, WHITE = 1, 2 # pay attention that state[RED] == state.reds and # state[WHITE] == state.whites KINGS_ROW = {RED: frozenset(range(32, 36)), WHITE: frozenset(range(1, 5))} def __new__(cls, turn, reds, whites, kings): # now you can create a new state by passing any kind of iterable # as pieces pieces = [frozenset(filter(is_square, xs)) for xs in (reds, whites, kings)] self = super(State, cls).__new__(cls, turn, *pieces) self.opponent = cls.WHITE if turn == cls.RED else cls.RED return self def move(self, move): If the given move is legal, make it and return the new state, after the move. If it is illegal, raise ValueError with an appropriate error message from MovingErrors. self.stupid_errors(move) if are_adjacents(*move[0:2]): # Simple move if len(move) > 2: raise ValueError(MovingErrors.LongSimpleMove) if self.jump_avaliable(self[self.turn]): raise ValueError(MovingErrors.JumpAvaliable) return State(self.opponent, *self.pieces_after_simple_move(move)) elif are_edges(*move[0:2]): # jump if not is_jump(move[2:]): raise ValueError(MovingErrors.SometimesJumps) if any(s in self.KINGS_ROW[self.turn] and s not in self.kings for s in move[1:-1]): raise ValueError(MovingErrors.JumpThroughKingRow) if any(middle(*pair) not in self[self.opponent] for pair in pairs(move)): raise ValueError(MovingErrors.WeirdCapturing) # If a man jumps to the king's row, he can't make more jumps. # Otherwise, if he can make more jumps the player must do them. new_board = self.pieces_after_jump(move) if (move[-1] in self.KINGS_ROW[self.turn] and move[0] not in self.kings): return State(self.opponent, *new_board) temp_state = State(self.turn, *new_board) if temp_state.jump_avaliable([move[-1]]): raise ValueError(MovingErrors.JumpAvaliable) return State(self.opponent, *new_board) # Not a simple move, and not a jump raise ValueError(MovingErrors.JustInvalid) # Phew. def stupid_errors(self, move): If the move has an stupid error (explained later), raise ValueError with that error from MovingErrors. Otherwise, do nothing. Stupid error - TooShort, NotASquare, NotYourPiece, MoveToPiece, NotKing. if len(move) <= 1: raise ValueError(MovingErrors.TooShort) if not all(is_square(k) for k in move): raise ValueError(MovingErrors.NotASquare) if move[0] not in self[self.turn]: raise ValueError(MovingErrors.NotYourPiece) if any(s in self.reds|self.whites for s in move[1:]): raise ValueError(MovingErrors.MoveToPiece) if move[0] not in self.kings and not self.farther(move[1], move[0]): raise ValueError(MovingErrors.NotKing) def did_end(self): Return True if the game has ended, and False if the player can do a move. 
return (not self.simple_move_avaliable(self[self.turn]) and not self.jump_avaliable(self[self.turn])) def simple_move_avaliable(self, pieces): Return True if any piece from the given iterable of pieces can make a simple move, False otherwise. It doesn't check if all of the given pieces exist. Also, if a jump is avaliable it won't return False because of that, so the returned value would be incorrect in that case. assert all(piece in self[self.turn] for piece in pieces) for piece in pieces: for adj in adjacents(piece): if adj not in self.reds | self.whites: return True return False def jump_avaliable(self, pieces): Return True if any piece from the given iterable of pieces can do a jump, False otherwise. It doesn't check if all of the given pieces exist. assert all(piece in self[self.turn] for piece in pieces) for piece in pieces: # Every jump starts with a single jump. for edge, mid in edges_middles(piece): if (edge not in self[self.turn] | self[self.opponent] and mid in self[self.opponent] and (piece in self.kings or self.farther(edge, piece))): return True return False def farther(self, s1, s2): Return True if the first square is farther than the second one (so the second square is closer to the current player's side), False otherwise. return s1 > s2 if self.turn == self.RED else s1 < s2 def pieces_after_simple_move(self, move): Return a tuple of (red pieces, white pieces, kings), that describes the board's pieces after the given simple move. This method doesn't check that the given move is simple, or even legal, and won't necessarily raise an exception. assert (move[0] in self[self.turn] and move[1] not in self.reds | self.whites and len(move) == 2) player = self[self.turn] - {move[0]} | {move[1]} if move[0] in self.kings: kings = self.kings - {move[0]} | {move[1]} else: kings = self.kings | ({move[1]} & self.KINGS_ROW[self.turn]) return ((player, self[self.opponent], kings) if self.turn == self.RED else (self[self.opponent], player, kings)) def pieces_after_jump(self, move): Return a tuple of (red pieces, white pieces, kings), that describes the board's pieces after the given jump. This method doesn't check that the given move is a jump, or even legal, and won't necessarily raise an exception. assert is_jump(move) single_jumps = pairs(move) captured = {middle(*p) for p in single_jumps} player = self[self.turn] - {move[0]} | {move[-1]} opponent = self[self.opponent] - captured if move[0] in self.kings: kings = self.kings - {move[0]} | {move[-1]} else: kings = self.kings | ({move[-1]} & self.KINGS_ROW[self.turn]) kings = kings - captured return ((player, opponent, kings) if self.turn == self.RED else (opponent, player, kings))### Square Stuff ###def are_adjacents(s1, s2): Return True if the two given squares are diagonally adjacent, False otherwise. return abs(s1-s2) in (4, 5)def are_edges(s1, s2): Return True if two given squares are edges, False otherwise. return abs(s1-s2) in (8, 10)def middle(edge1, edge2): Return the middle of the two given edges. assert are_edges(edge1, edge2) return (edge1 + edge2) / 2def edges_middles(s): Return a list of all (edge, middle) tuples, where `edge` is another square that is an edge with the given square, and middle is the middle square of `s` and `edge`. edges = [s + n for n in (8, 10, -8, -10)] middles = [middle(s, edge) for edge in edges] tuples = zip(edges, middles) return [t for t in tuples if is_square(t[0]) and is_square(t[1])]def adjacents(s): Return a list of all of the adjacent squares to the given square. 
return [s+n for n in (4, 5, -4, -5) if is_square(s+n)]def is_square(n): Return True if the given number represents a square, False if it doesn't. return 1 <= n <= 35 and n % 9 != 0def is_jump(move): Return True if each pair in the given sequence of squares is a pair of edges. False otherwise. return all(are_edges(a, b) for a, b in pairs(move))def rank(s): Return the rank of the given squares. Counting starts from zero. return ((s-s//9)-1) // 4def human_square_to_normal(human_s): Convert the given square from human representation (where squares are identified by numbers 1-32 and squares that are divisible by 9 aren't skipped) to the normal program's representation. Raise KeyError if the square doesn't exist. return SQUARES[human_s-1]### Playing Stuff #### starting position of checkersSTART = State(State.RED, xrange(1, 14), xrange(23, 36), [])def checkers(red, white): Play English Checkers between the two given players - red makes the first move. After each turn, yield the move. A player is a function that has two parameters - a state, and an optional parameter of an error. The state is an instance of the State class, that describes the current game, and the player should return its move, given that state. If the player gets the `error` parameter, it means that in the previous time it was called, it returned an illegal move - so it is called again, with the same state, and with an error from MovingErrors. state = START yield None, state for player in cycle((red, white)): if state.did_end(): return move = player(state) while True: try: state = state.move(move) except ValueError, err: move = player(state, str(err)) else: break yield move, statedef display_checkers(game, upper_color=State.RED): Display each state in the given game, from the first one to the last. The game is an iterable of (move, state) pairs (the state is the state of the game after the move), for example the one that is returned by the function `checkers`. `upper_color` is the color that its player's side appears at the top of the displayed boards. It can get one of two values: State.RED or State.WHITE. for _, state in game: print_board(state, upper_color)def play_display_checkers(red, white, upper_color=State.RED): Play a game of checkers with the given players `red` and `white`, and display every new board. `upper_color` is the color that appears at the top of the displayed boards. (color = either State.RED or State.WHITE) See the docstring of `checkers` for more information about players. display_checkers(checkers(red, white), upper_color)def UserPlayer(dummy_state, error=None): A player function that uses the protocol of the `checkers` function. It doesn't display the board to the user, but if there is an error, it will print it. It asks the user for a move in a human notation (where the squares are identified by numbers 1-32, instead of 1-35, and squares that are divisible by 9 aren't skipped). It returns the move in the program's notation. if error is not None: print error inp = raw_input(What's your move? Seperate the squares by dashes (-). ) while True: try: human_squares = map(int, inp.split('-')) move = map(human_square_to_normal, human_squares) except ValueError: inp = raw_input('Invalid input. Try again: ') except KeyError: # Because of human_square_to_normal print MovingErrors.NotASquare inp = raw_input('Try again: ') else: break return tuple(move)### Utilities ###def pairs(seq): Return a list of all of the consecutive pairs in the sequence. 
Each element (except the first and the last ones) appears in exactly two pairs: one where it is the first element, and another one where it is the second one. return [(seq[i], seq[i+1]) for i in xrange(len(seq)-1)]def print_board(state, upper_color=State.RED): Print the given state to the user as a board. line = [] # the first squares should be the upper ones. squares = SQUARES if upper_color == State.RED else SQUARES[::-1] # zip(*[iterator]*n) clusters the iterator elements into n-length groups. rows = zip(*[iter(squares)]*4) for row in rows: for square in row: player_ch = ('x' if square in state.reds else 'y' if square in state.whites else '.') char = player_ch.upper() if square in state.kings else player_ch # == is used as an XNOR operator here if (rank(square) % 2 == 1) == (upper_color == State.WHITE): line.append(' {}'.format(char)) else: line.append(' {} '.format(char)) print ''.join(line) line = []###############if __name__ == '__main__': play_display_checkers(UserPlayer, UserPlayer, upper_color=State.WHITE)
English Checkers game
python;python 2.7;checkers draughts
null
_webapps.23703
As we all know, in a normal Facebook account you get a small Birthday events area in the upper-right corner of the screen. I was wondering: is it possible to have the same feature for a fan page I created, so that the birthdays of the people who liked the fan page appear in my admin interface, just like in a normal FB account?
Birthday events area in Facebook Fan Page?
facebook;facebook pages
No. A date of birth (when listed with a name) is personally identifiable information and is thus protected by privacy agreements.
_unix.181901
Hi, I'm trying to find a repository on the web for oddjob-mkhomedir. I'm then going to try to add it to Linux so I can install the package with these instructions: https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/6/html/Deployment_Guide/sec-Managing_Yum_Repositories.html Can someone help me find this repository? I've been looking for almost 2 hours.
How to find a yum repository
linux;repository
null
_unix.231164
I am planning to divide the largest partition on the hard drive of a CentOS 7 server into four smaller partitions using parted. Three of the new partitions will be 300 GB each, and the fourth partition will take the remainder. I have shown the parted print report for the drive below, followed by the syntax of the commands that I imagine using. How do I change the code below in order to properly partition the hard drive? Here is the result of the print report in parted: [root@localhost ~]# partedGNU Parted 3.1Using /dev/sdaWelcome to GNU Parted! Type 'help' to view a list of commands.(parted) print Model: ATA WDC WD20EZRX-22D (scsi)Disk /dev/sda: 2000GBSector size (logical/physical): 512B/4096BPartition Table: gptDisk Flags: Number Start End Size File system Name Flags 1 1049kB 211MB 210MB fat16 EFI System Partition boot 2 211MB 735MB 524MB ext4 3 735MB 1938GB 1938GB ext4 4 1938GB 1992GB 53.7GB ext4 5 1992GB 2000GB 8187MB linux-swap(v1)So I imagine that the terminal commands are as simple as: rm 3mkpart ext4 735MB 300GBmkpart ext4 300GB 600GB mkpart ext4 600GB 900GB mkpart ext4 900GB 1938GB How do I change the code above to correctly create the partitions? For reference, fdisk -l gives a slightly different picture of the drive as follows: [root@localhost ~]# fdisk -lWARNING: fdisk GPT support is currently new, and therefore in an experimental phase. Use at your own discretion.Disk /dev/sda: 2000.4 GB, 2000398934016 bytes, 3907029168 sectorsUnits = sectors of 1 * 512 = 512 bytesSector size (logical/physical): 512 bytes / 4096 bytesI/O size (minimum/optimal): 4096 bytes / 4096 bytesDisk label type: gpt# Start End Size Type Name 1 2048 411647 200M EFI System EFI System Partition 2 411648 1435647 500M Microsoft basic 3 1435648 3786041343 1.8T Microsoft basic 4 3786041344 3890898943 50G Microsoft basic 5 3890898944 3906889727 7.6G Linux swap EDIT: I have been trying to use @Anthon's suggestions, but am getting different results. The fdisk -l and parted ... print environment is slightly different now after re-installing CentOS, but the same basic concepts apply because this should be reproduceable. When I type select /dev/dm-1 all the existing partitions seem to become invisible to parted, but when I skip select /dev/dm-1, I get an error. Here is the actual terminal record: [root@localhost ~]# fdisk -lWARNING: fdisk GPT support is currently new, and therefore in an experimental phase. 
Use at your own discretion.Disk /dev/sda: 2000.4 GB, 2000398934016 bytes, 3907029168 sectorsUnits = sectors of 1 * 512 = 512 bytesSector size (logical/physical): 512 bytes / 4096 bytesI/O size (minimum/optimal): 4096 bytes / 4096 bytesDisk label type: gpt# Start End Size Type Name 1 2048 411647 200M EFI System EFI System Partition 2 411648 1435647 500M Microsoft basic 3 1435648 105850879 49.8G Microsoft basic 4 105850880 121841663 7.6G Linux swap Disk /dev/mapper/luks-fb2eda94-fcff-4624-8b04-a9786845504a: 53.5 GB, 53458501632 bytes, 104411136 sectorsUnits = sectors of 1 * 512 = 512 bytesSector size (logical/physical): 512 bytes / 4096 bytesI/O size (minimum/optimal): 4096 bytes / 4096 bytesDisk /dev/mapper/luks-08f8bdb9-85bf-45ef-9519-3f38906c489a: 1938.0 GB, 1938005426176 bytes, 3785166848 sectorsUnits = sectors of 1 * 512 = 512 bytesSector size (logical/physical): 512 bytes / 4096 bytesI/O size (minimum/optimal): 4096 bytes / 4096 bytes[root@localhost ~]# df -T -hFilesystem Type Size Used Avail Use% Mounted on/dev/dm-0 ext4 49G 1.1G 46G 3% /devtmpfs devtmpfs 3.8G 0 3.8G 0% /devtmpfs tmpfs 3.8G 0 3.8G 0% /dev/shmtmpfs tmpfs 3.8G 8.7M 3.8G 1% /runtmpfs tmpfs 3.8G 0 3.8G 0% /sys/fs/cgroup/dev/sda2 ext4 477M 110M 338M 25% /boot/dev/sda1 vfat 200M 9.8M 191M 5% /boot/efi/dev/dm-1 ext4 1.8T 77M 1.7T 1% /home[root@localhost ~]# partedGNU Parted 3.1Using /dev/sdaWelcome to GNU Parted! Type 'help' to view a list of commands.(parted) print Model: ATA WDC WD20EZRX-00D (scsi)Disk /dev/sda: 2000GBSector size (logical/physical): 512B/4096BPartition Table: gptDisk Flags: Number Start End Size File system Name Flags 1 1049kB 211MB 210MB fat16 EFI System Partition boot 2 211MB 735MB 524MB ext4 3 735MB 54.2GB 53.5GB 4 54.2GB 62.4GB 8187MB linux-swap(v1) 5 62.4GB 2000GB 1938GB(parted) unit compact(parted) select /dev/dmError: Could not stat device /dev/dm - No such file or directory.Retry/Cancel? cancel (parted) select /dev/dm-1 Using /dev/dm-1(parted) rm 5Error: Partition doesn't exist.(parted) print Model: Linux device-mapper (crypt) (dm)Disk /dev/dm-1: 1938GBSector size (logical/physical): 512B/4096BPartition Table: loopDisk Flags: Number Start End Size File system Flags 1 0.00B 1938GB 1938GB ext4(parted) quit [root@localhost ~]# partedGNU Parted 3.1Using /dev/sdaWelcome to GNU Parted! Type 'help' to view a list of commands.(parted) print Model: ATA WDC WD20EZRX-00D (scsi)Disk /dev/sda: 2000GBSector size (logical/physical): 512B/4096BPartition Table: gptDisk Flags: Number Start End Size File system Name Flags 1 1049kB 211MB 210MB fat16 EFI System Partition boot 2 211MB 735MB 524MB ext4 3 735MB 54.2GB 53.5GB 4 54.2GB 62.4GB 8187MB linux-swap(v1) 5 62.4GB 2000GB 1938GB(parted) unit compact (parted) rm 5 Error: Partition(s) 5 on /dev/sda have been written, but we have been unable to inform the kernel of the change, probably because it/they are in use. As a result, the old partition(s) will remain in use. You should reboot now before making further changes.Ignore/Cancel? ancel (parted) quitInformation: You may need to update /etc/fstab.So how do I actually partition this drive?
how do I partition a CentOS 7 server using parted?
centos;terminal;partition;parted
null
_webmaster.17449
I have a website with DNS pointing to my hosting account at ns1.a.com and ns2.a.com. I want to switch hosting accounts (to ns1.b.com and ns2.b.com). However, if I simply change the DNS, the site will be inaccessible until the change is complete. Is there any way to change hosts for a website without suffering downtime?
Is there any way to change hosts for a website without suffering downtime?
web hosting
null
_unix.116250
I'm trying to make all HTTP versions of my pages redirect to the HTTPS version; however, I am having some problems with the redirection. I want http://stackexchange.com/path/ to go to https://stackexchange.com/path/ but instead it is redirecting to https://stackexchange.compath
Here is my configuration for the virtual hosts:
<VirtualHost *:80>
ServerName stackexchange.com
ServerAlias www.stackexchange.com
Redirect / https://stackexchange.com/
</VirtualHost>
<VirtualHost *:443>
ServerName stackexchange.com
ServerAlias www.stackexchange.com
SSLEngine on
...
unexpected apache redirection
apache httpd;configuration
null
_unix.23369
How do I stop a bunch of processes on Unix quickly? I can use ps/top and kill. If the number of processes is very large, I use a shell script. Are there other ways; can one or two Unix commands do it well?
How to stop a bunch of processes on unix fast?
linux;shell;process;kill
null
_softwareengineering.140033
I'm writing a document describing process improvement recommendations. I have some best practices I've learned so far, and I use those practices most of the time. But I suppose there are other ways to approach the task of writing a formal process description document. So, how do you usually structure your documentation? As you may have already gathered, I'm especially interested in structuring formal process description documents, which are usually created by QA and sometimes by project managers. What sections should such a document contain? My documents usually contain the following sections: Content, Goal, Scope, Terms and abbreviations, Inputs, Process description, Artifacts, Outputs. What else should there be? Did I forget something? Are there other approaches for structuring formal documents? Links to best practice descriptions are greatly appreciated.
How to structure process description documentation?
development process;documentation;qa;technical manuals
null
_webmaster.47018
Do we necessarily need a host for domain.com? Unfortunately, the host for domain.com is currently down, so sub.domain.com cannot be redirected to where it should go. Is there a way to at least activate sub.domain.com using the DirectI control panel that I have?
Pointing a sub domain to another server using directi control panel while domain.com is not hosted
domains;redirects;dns;301 redirect;subdomain
null
_softwareengineering.203438
So I am working on an ASP.NET MVC web application which centers around lots of data and data manipulation.
PROBLEM OVERVIEW: We have an advanced search with 25 different filter criteria. I am using a stored procedure for this search. The stored procedure takes in parameters, filters for specific objects, and calculates return data from those objects. It queries large tables (14 million records in some tables); filtering and temp tables helped alleviate some of the bottlenecks for those queries.
ISSUE: The stored procedure used to take 1 minute to run, which caused a timeout and returned 0 results to the browser. I rewrote the procedure and got it down to 21 seconds, so the timeout does not occur. It is only this slow the FIRST time the search is run; after that it takes about 5 seconds. I am wondering whether I should take a different approach to this problem. Should I worry about this type of performance issue if it does not time out?
Advanced Search Stored procedure
sql;sql server;asp.net mvc 3;stored procedures
No, you don't need to worry about a problem that isn't occurring. If your query isn't timing out or running unacceptably slow, you can allocate your time elsewhere.If you are allowing very complex queries, the question of why you are not allowing slightly sanitized SQL arises. And if you allow arbitrary select statements, then how you handle timeouts and errors and cancellations becomes more important than the performance of your gui-sourced sql.I've seen and worked with web apps that don't allow direct sql even for competent users, and the results have always been disappointing. Empowerment lets you handle the edge cases from a design perspective, and focus your attention on where it can get the most benefit instead of doing a user's work for them all the time.
_softwareengineering.236713
Someone told me that Session or HttpContext.Current.User will return a different value when we are working with an AJAX postback, in the case where many users are live on the site. For example, suppose there are 1000 people online on the site, and 100 of them call an AJAX method/web method; we might then get a different value of Session or HttpContext.Current.User, since every user's Session or HttpContext.Current.User value is different. But my understanding says it will return the value of the specific person who made the request, and the server will know whose value needs to be returned. What do you all say?
Session states in ajax callback
c#;asp.net;session
The session ID is stored in a cookie (or in the query string if so configured). It's unique per browser session.ASP.NET provides session-state management to enable you to store information associated with a unique browser session across multiple requests. For each request, the session ID cookie is referenced and the session for that unique ID is populated. Additional information can be found on the HttpSessionState Class Page under the Remarks section.
_cogsci.10373
We observe behaviors (either through behavioral experiments or by observing brain activation in neuroscience experiments) based on the task GIVEN to the participants, and we try to understand the characteristics of the observed behavior. I wonder (i) whether there is any limitation in our ability to speculate, through hypothesis and theory, to understand what is happening in the brain, (ii) whether there is any limitation in manipulating the third person to test the hypothesis, and (iii) whether there is a pattern in our 'ability' to come up with a hypothesis or theory to understand the brain; for example, we tend to make analogies, extrapolations, etc.
Is there any limitation in our ability to come up with a theory or hypothesis to understand the brain?
experimental psychology;consciousness;unconscious
null
_codereview.3686
I have a method to parse strings. It can be used to parse a float using a specific culture and then get a new string with a specific out-culture. The same applies to ints, doubles and decimals. The code that I've written is quite repetitive for each of the different parse methods which makes it hard to maintain (especially as I am just about to make the method a lot more complex).Is it possible to make this code less repetitive? if (mapping.ParseDecimal){ decimal i; if (decimal.TryParse(value, numberStyle, inCulture, out i)) { return i.ToString(outCulture); } else { if (logger.IsDebugEnabled) logger.DebugFormat(Could not parse value \{0\ to a decimal using the culture \{1}\., value, inCulture); }}else if (mapping.ParseDouble){ double i; if (double.TryParse(value, numberStyle, inCulture, out i)) { return i.ToString(outCulture); } else { if (logger.IsDebugEnabled) logger.DebugFormat(Could not parse value \{0\ to a double using the culture \{1}\., value, inCulture); }}else if (mapping.ParseFloat){ float i; if (float.TryParse(value, numberStyle, inCulture, out i)) { return i.ToString(outCulture); } else { if (logger.IsDebugEnabled) logger.DebugFormat(Could not parse value \{0\ to a float using the culture \{1}\., value, inCulture); }}else if (mapping.ParseInt){ int i; if (int.TryParse(value, numberStyle, inCulture, out i)) { return i.ToString(outCulture); } else { if (logger.IsDebugEnabled) logger.DebugFormat(Could not parse value \{0\ to a int using the culture \{1}\., value, inCulture); }}
Same code to parse int, float, decimal?
c#;parsing
If repetition is your primary concern, you could try doing something like this: public delegate string ParserMethod(string value, NumberStyles numberStyle, CultureInfo inCulture, CultureInfo outCulture);public static class NumericParser{ public static readonly ParserMethod ParseInt = Create<int>(int.TryParse); public static readonly ParserMethod ParseFloat = Create<float>(float.TryParse); public static readonly ParserMethod ParseDouble = Create<double>(double.TryParse); public static readonly ParserMethod ParseDecimal = Create<decimal>(decimal.TryParse); public static Logger Logger { get; set; } delegate bool TryParseMethod<T>(string s, NumberStyles style, IFormatProvider provider, out T result); static ParserMethod Create<T>(TryParseMethod<T> tryParse) where T : IFormattable { return (value, numberStyle, inCulture, outCulture) => { T result; if (tryParse(value, numberStyle, inCulture, out result)) { return result.ToString(null, outCulture); } else { if (Logger != null && Logger.IsDebugEnabled) Logger.DebugFormat("Could not parse value \"{0}\" to a {1} using the culture \"{2}\".", value, typeof(T).Name, inCulture); return ""; } }; }}This way, you only have to pass around the appropriate ParserMethod you want to use. In your case, you could map your different mapping values to the appropriate ParserMethod. And call it when needed.
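For example, a hypothetical call site, assuming mapping keeps the same boolean flags as in the question and falling back to ParseInt purely for illustration, could look like this:

ParserMethod parser =
    mapping.ParseDecimal ? NumericParser.ParseDecimal :
    mapping.ParseDouble  ? NumericParser.ParseDouble  :
    mapping.ParseFloat   ? NumericParser.ParseFloat   :
                           NumericParser.ParseInt;

return parser(value, numberStyle, inCulture, outCulture);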
_webmaster.38617
Possible Duplicate: How do I rename a domain and preserve PageRank? I made my addon domain the main domain, but Google search still shows my new main domain as an addon domain. What do I need to do to fix that? Do I need to use Webmaster Tools to delete my old main domain, or something else? I really need help.
Why does my main domain still show up as an addon domain?
seo;google search console
null
_unix.60462
I just ordered a new SSD, and so I am planning to reinstall my Debian system. I have an Intel Core 2 Quad CPU (Q9450). Does it make sense to install the 64bit version (I have just 4GB of RAM, plus 1GB on the graphics card if it makes any difference, but I am considering increasing this amount). Which architecture should I choose? Also I am using the proprietary nvidia drivers, do they work at all and are they stable on the 64 bit systems?
Debian on 64 bit Intel Core 2 Quad
debian;64bit
There's no reason to stay 32-bit with this machine: it's fully capable of 64-bit. The nVidia drivers work, too.
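If you want to double-check the CPU yourself before reinstalling, one quick test from any running Linux on the box is to look for the lm (long mode) flag in /proc/cpuinfo; if a flags line is printed, the processor is 64-bit capable (the Q9450 is):

grep -m1 -w lm /proc/cpuinfo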
_codereview.14936
While preparing an automated process to manipulate files, I've made the following function to check for the base directory and sub-directories presence, as to allow or deny the remaining code to be executed:/* Check Directories * * Confirm that the necessary directories exist, and if not, * tries to create them. * * @_param string $whereAmI -> Script location * @_param string $backup_path -> target directory relative path * @_param array $subDirArr -> sub-directories array * * @return * success -> Boolean * failure -> string with failure path */function check_directories($whereAmI, $backup_path, $subDirArr) { // initialize return variable $status = FALSE; // check for the merchant base directory if (!is_dir($whereAmI./.$backup_path)) { // trying to create the directory with permissions exec(mkdir .$backup_path. -m 777 2>&1, $output, $return_value); // error returned, stop and return status $status = (is_array($output) && count($output)>=1) ? $backup_path : TRUE; } else { $status = TRUE; } // base directory exists, continue if ($status===TRUE) { // run by each sub-directory foreach ($subDirArr as $subDir) { /* keep checking for status value, if changed, * one directory has failed and we can't proceed */ if ($status===TRUE) { // check for the sub-directory presence if (!is_dir($whereAmI./.$subDir)) { // trying to create the directory with permissions exec(mkdir .$subDir. -m 777 2>&1, $output, $return_value); // error returned, stop and update status $status = (is_array($output) && count($output)>=1) ? $subDir : TRUE; } } else { return $status; } } } return $status;}Considerations:Directory permissions when in production aren't 777.Server is running PHP Version 5.3.10.Can this be simplified, thus reducing the amount of code, and perhaps in such reduction, have it improved?
Confirming the presence/create directories
php;performance;security
Quite a few things wrong with your PHPDoc. I'm actually learning quite a bit myself. Doccomments require two asterisks to begin, not one, no matter what language you are using. One asterisk just defines a multi-line comment. Your IDE will skip over this, essentially making these useless as doccomments. The underscore before param, the yields operator ->, and your return syntax all cause my IDE to ignore this comment because it is in the wrong syntax. The multi-return sequences are best done using the bitwise OR operator separating the types, but you can also use a mixed type to accomplish the same thing./** Check Directories * etc... * @param string $whereAmI Script location * @return bool|string String on success, FALSE on failure//OR * @return mixed String on success, FALSE on failure */Be careful with clever variables. For a few seconds there I was quite confused about your $whereAmI variable, thinking that last letter was an L and not an I. I'm a bit slow sometimes and I was sitting there scratching my head trying to figure out what an aml was (in case you're curious, my best guess was animal). I would stick with simple variable names, such as $location or $position. Additionally, pick a style and be consistent. I see both camelCase AND under_score variables. One or the other, not both. You can do one for functions and the other for variables, if you wish. I've seen that done frequently enough that I wouldn't comment on it, but switching between the styles on the same datatype is just confusing.Internal comments, especially ones that explain what is already in the doccomments, are just noise. Anyone reading this function will know that $status is important, and once they reach the end of the function they will know its also returned. If, like me, you skip to the bottom of the function to see what it returns and then work backwards from there, then you will already know that it is returned. Same for the rest of these comments. The only time I use internal comments is for explaining confusing lines, such as REGEX, compact(), extract(), or, in your case, that exec().Alright, lets move on to actual code.Why did you bother setting an initial value to $status, your very next line of code overwrites that value no matter what. Let the if/else statements assign it for you. Or lose the else and use that as the default value.Avoid using not ! unless you have to. In this instance you have an if/else statement. Switch them around and remove the not. In the above scenario you would keep the not.if( is_dir( $whereAmI . '/' . $backup_path ) ) { $status TRUE;} else {Explaining Not:If you are doing a single if statement, then the not is fine and necessary. Though this could be argued that the empty() function should be used, either is fine, but I find the not syntax preferrable in this case.if( ! $var ) {//fine//ORif( empty( $var ) ) {//also fine, though less preferrable IMOIf you are using an if/else statement, as I demonstrated in initial answer, the not is implied by else and should be removed for clarity as well as efficiency.if( $var ) {} else {//not implied here}This helps with clarity because the not operator ! is not always immediately noticeable, depending on how you space your statements (which is one of the reasons my statements are so spacious). 
This helps with efficiency because typically your not clause holds the least amount of coding and might even contain an early escape, resulting in the else syntax not even being necessary which meaning less indentation and increased clarity.if( $var ) { return FALSE;}//contents of elseThe only time to compare a value explicitly to a boolean is when doing a type comparison. Such as an integer 0 to a boolean FALSE, or string FALSE to FALSE. And the above rules hold true for this as well.if( $var !== FALSE ) {//ORif( $var === FALSE ) {} else {}//ORif( $var === FALSE ) { return FALSE;}//contents of elseEnd of ExplanationYou mentioned security; You should be careful of functions such as exec(). Functions, by definition, are public. So, this script is public, therefore you can not guarantee the validity of those parameters being passed to it, or rather, you aren't validating them. This is a potential security issue, but only if you plan on using this in a public domain (internet). That and the 777 permissions. Directories should not be executable. I can't remember off the top of my head what the proper permissions to use are, but I DO know that 777 will get you in trouble. Though, admittedly I do this in my server as well, but mine is a small intranet and security isn't one of the things I'm particularly worried about.Split up your function. It is concerned with entirely too much. check_directories() should only be concerned with checking the directories, but then you start adding on subdirectories. This is a different concern altogether. Take a look at the Single Responsibility Principle. Additionally, look at Don't Repeat Yourself (DRY) Principle. Your if/else statement that I mentioned above is repeated, therefore it can be made into a function to avoid this. This will be the biggest contributor to reducing the size of this program. That and removing those unnecessary comments. Besides that, I don't think there are any other ways to do this.
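If you do decide to move away from exec(), though, a rough sketch using PHP's built-in mkdir() avoids shelling out entirely; the function below is illustrative rather than a drop-in replacement for yours, and 0755 is just a common, less permissive default:

// Sketch: create a directory (and any missing parents) without exec().
function ensure_directory($path) {
    if (is_dir($path)) {
        return true;
    }
    // The third argument makes mkdir() create intermediate directories recursively.
    return mkdir($path, 0755, true);
}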
_unix.39423
I would like to try to set up a computer so that it has multiple Linux installs all in the same filesystem. For example, the filesytem would have 3 folders: /Ubuntu_Precise, /Ubuntu_Oneiric, and /Ubuntu_Natty. (I know you can do this with BTRFS and subvolumes, but I would like to use EXT4 for speed).I once set up multiple installs of different distros using BTRFS, and from getting that working, I know Grub does just fine with booting the vmlinuz and initrd image from 'nonstandard' paths. But when I was doing the BTRFS thing, there was the rootflags=subvol=@<subvolume_name> that told the kernel to mount that subvolume as / in the filesystem. Is there any argument that you could pass the kernel that would make it bind mount a subfolder in a partition as / and then boot?I think for the other parts, I'm fairly close. I know how to specific a bind mount in /etc/fstab. Also, from when I set up my system with multiple linux installs in BTRFS subvolumes, I'm used to installing a distro in a VM and then migrating it using rsync, so I'm not too worried about what I would need to do to get the right configuration, I'm just trying to find out what the right configuration would be. Once I know that, I should be able to do the migration into the subfolders and file editing easily enough.I already know about virtualization and partitions, but that's not what I'm looking for. The target computer does not have enough power to do virtualization, and partitions do not share free space. I'm looking to set up a system that dual/triple/quad/etc boots linux distros, but that does it with one filesystem, so that there is no case of I have free space, but it's in the wrong partition!'If anyone has suggestions how to edit my question or its title to be clearer, I'm all ears.
Boot Linux system from a subdirectory on a partition?
linux;filesystems;boot;chroot
null
_codereview.169129
Taking a problem from the website, Project Euler #1, I have created my first C# program. The problem I have used is as follows:If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23.Find the sum of all the multiples of 3 or 5 below 1000.The program I have made:using System;namespace _5_multiples{ public class MainClass { public static void Main() { int currentNumber = 1000; int allSum = 0; currentNumber = currentNumber - 1; while (currentNumber > 0) { if (currentNumber % 3 == 0) { allSum = allSum + currentNumber; } else { if (currentNumber % 5 == 0) { allSum = allSum + currentNumber; } } currentNumber = currentNumber - 1; } Console.WriteLine(allSum); } }}I understand there is probably a lot to improve - this is my first C# program after all. I did run into a few problems such as referencing non static members from a static context and no suitable entry method. Rather than finding a definitive fix, the solution I implemented was more of a workaround; hence why I feel there may be a lot to improve.namespace _5_multiples - this doesn't look right? Name of the program is actually 3_5_multiples - perhaps a badly chosen name?Anyway, please let me know on anything that could be improved.
Project Euler #1 - Multiples of 3 and 5 - First Program
c#;beginner;programming challenge
Here's what I would change:I'd use a for loop instead of a while loop here: for (int currentNumber = 999; currentNumber > 0; currentNumber--). This keeps all loop-related things grouped together. Loop bodies can be skipped or broken out of, so if code that must be executed every cycle is put at the end of a loop body it's easily broken when the loop body grows more complex and someone adds a continue or break in there.Actually, there is no need to count down - the result will be the same either way. Counting up is often a little easier to understand: for (int currentNumber = 0; currentNumber < 1000; currentNumber++). Note that using < here means you don't need to decrement currentNumber by one before the loop starts.You can write a += b instead of a = a + b (the same goes for various other operators).else { if { .. } } can be simplified to else if { ... }.Though in this case, a single if statement is sufficient: if (currentNumber % 3 == 0 || currentNumber % 5 == 0) { ... }. The 'or' operator (||) short-circuits if the first expression is true, so if a number can be divided by 3 it won't also try to divide it by 5.C# naming conventions typically write class, method and namespace names in PascalCase.Regarding your questions:Static things belong to a class, non-static things belong to instances of that class. Classes are to instances what blueprints are to actual buildings. Trying to access a non-static (instance) members from a static context is like trying to open a door on a blueprint. The blueprint doesn't contain any real doors, only actual buildings do.Programs need a place to start executing, and in C# the starting point is a static Main method. Putting everything in there is fine for a small program like this.Identifier names in C# cannot start with a number, so 3_5_multiples isn't a valid name. I'd probably use EulerProblem1 or something like that here.Namespaces are useful for organizing larger programs (namespaces to classes are a bit like folders to files). The root namespace of a project is usually the name of that project itself, and child namespaces often correspond to folders.EDIT: Part of the fun on Project Euler is coming up with more efficient solutions. Can you find a way to solve this without checking every number below 1000?
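For reference, here is a minimal sketch that simply folds the loop-level suggestions above back into the original program; it still checks every number below 1000, so the more efficient approach hinted at in the EDIT is deliberately left as the exercise:

int sum = 0;
for (int currentNumber = 0; currentNumber < 1000; currentNumber++)
{
    if (currentNumber % 3 == 0 || currentNumber % 5 == 0)
    {
        sum += currentNumber;
    }
}
Console.WriteLine(sum);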
_codereview.70674
Okay, so I just started learning Haskell around a week ago and this is my first real program that I worked on all of yesterday with a lot of help from IRC. I know that using indicies and arrays is not very Haskellish but I found constantly manipulating lists and traversing them was extremely slow and sometimes the program took over 10 minutes to execute while this version is instant.Afterwards I found you can do it with some Zipper package in 10 lines of code trivially but I didn't want to use anything too fancy.I find that when I'm writing Haskell because there is no state I find myself simulating state with function parameters (saving variables and mutating them in a recursive call). I'm pretty sure I took it too far because most of these functions take four parameters and rarely change them but I'm not sure about many alternatives.I haven't gotten to monads, functors, or applicatives yet, so while I'm sure they could solve this quite elegantly, they're still just magic to me. Unless the explanation is quite simple, I'd prefer if replies didn't mention them. I'm mostly looking for refactorings, style-advice, and better ways of implementing some things.import qualified Data.Sequence as Simport Data.Char (chr, ord)import Data.Array-- Current Index, Indentation Depth, Program Array -> Bracket IndexprevBracketIndex :: Int -> Int -> Array Int Char -> IntprevBracketIndex i depth cs | cs ! i == '[' = if (depth - 1) == 0 then i else prevBracketIndex (i - 1) (depth - 1) cs | cs ! i == ']' = prevBracketIndex (i - 1) (depth + 1) cs | otherwise = prevBracketIndex (i - 1) depth csnextBracketIndex :: Int -> Int -> Array Int Char -> IntnextBracketIndex i depth cs | cs ! i == '[' = nextBracketIndex (i + 1) (depth + 1) cs | cs ! i == ']' = if (depth - 1) == 0 then i else nextBracketIndex (i + 1) (depth - 1) cs | otherwise = nextBracketIndex (i + 1) depth csexecCode :: Int -> S.Seq Int -> Int -> Array Int Char -> IO ()execCode tapePos ts codePos cs | codePos == (snd . bounds $ cs) = return () | cmd == '+' = execCode tapePos (S.update tapePos (value + 1) ts) nextPos cs | cmd == '-' = execCode tapePos (S.update tapePos (value - 1) ts) nextPos cs | cmd == '>' = execCode (tapePos + 1) ts nextPos cs | cmd == '<' = execCode (tapePos - 1) ts nextPos cs | cmd == '[' && value == 0 = execCode tapePos ts (nextBracketIndex codePos 0 cs + 1) cs | cmd == ']' && value /= 0 = execCode tapePos ts (prevBracketIndex codePos 0 cs + 1) cs | cmd == '.' = putStr [chr $ S.index ts tapePos] >> execCode tapePos ts nextPos cs | cmd == ',' = do { c <- getChar; let newTape = S.update tapePos (ord c) ts in execCode tapePos newTape nextPos cs } | otherwise = execCode tapePos ts nextPos cs where value = S.index ts tapePos cmd = cs ! codePos nextPos = codePos + 1tape = S.fromList $ replicate 30000 0main = do file <- readFile example.bf execCode 0 tape 0 (listArray (0, length file - 1) file)
Brainfuck interpreter in Haskell
beginner;haskell;recursion;interpreter;brainfuck
Use case instead of == and guards everywhere:prevBracketIndex :: Int -> Int -> Array Int Char -> IntprevBracketIndex i depth cs = case cs ! i of '[' -> if (depth - 1) == 0 then i else prevBracketIndex (i - 1) (depth - 1) cs ']' -> prevBracketIndex (i - 1) (depth + 1) cs _ -> prevBracketIndex (i - 1) depth csUse State and lens to carry state around instead of manual threading.Use monad-loops to spin the loop instead of manual tail calls.prevBracketIndex should avoid recursion too by using lists of indices.Initial depth is always 0 in prevBracketIndex so it should be made local to improve readability. Also, cs is not changed across recursive calls so there is no need to pass it across. Applying both ideas:prevBracketIndex :: Int -> Array Int Char -> IntprevBracketIndex i cs = pbi i 0 where pbi i depth = case cs ! i of '[' -> if (depth - 1) == 0 then i else pbi (i - 1) (depth - 1) ']' -> pbi (i - 1) (depth + 1) _ -> pbi (i - 1) depthFor execCode we can do the same transformation: cs is invariant across loops, and initial positions are always 0.Note also that prevBracketIndex can be completely precalculated (replaced by a single array lookup), as cs doesn't change.Applying everything above but case we get:import qualified Data.Sequence as Simport Data.Char (chr, ord)import Data.Arrayimport Data.ListcachePrev cs = listArray (bounds cs) $ snd $ mapAccumL f [] $ assocs cs where f l (i, c) = case c of '[' -> (i : l, Nothing) ']' -> (tail l, Just $ head l) _ -> (l, Nothing)cacheNext cs = listArray (bounds cs) $ snd $ mapAccumR f [] $ assocs cs where f l (i, c) = case c of ']' -> (i : l, Nothing) '[' -> (tail l, Just $ head l) _ -> (l, Just i)cache arr i = case arr ! i of Nothing -> error oops! Just idx -> idxexecCode' :: S.Seq Int -> Array Int Char -> IO ()execCode' ts cs = execCode 0 ts 0 where prev = cachePrev cs next = cacheNext cs execCode tapePos ts codePos | codePos == (snd . bounds $ cs) = return () | cmd == '+' = execCode tapePos (S.update tapePos (value + 1) ts) nextPos | cmd == '-' = execCode tapePos (S.update tapePos (value - 1) ts) nextPos | cmd == '>' = execCode (tapePos + 1) ts nextPos | cmd == '<' = execCode (tapePos - 1) ts nextPos | cmd == '[' && value == 0 = execCode tapePos ts (cache next codePos + 1) | cmd == ']' && value /= 0 = execCode tapePos ts (cache prev codePos + 1) | cmd == '.' = putStr [chr $ S.index ts tapePos] >> execCode tapePos ts nextPos | cmd == ',' = do { c <- getChar; let newTape = S.update tapePos (ord c) ts in execCode tapePos newTape nextPos } | otherwise = execCode tapePos ts nextPos where value = S.index ts tapePos cmd = cs ! codePos nextPos = codePos + 1For tests I found csFromString to be a convenient helper:csFromString file = listArray (0, length file - 1) fileAnd it helps to write main in a more compact way:main = readFile example.bf >>= execCode' tape . csFromStringNow let's apply the case proposal:execCode _ _ codePos | codePos == (snd . bounds $ cs) = return ()execCode tapePos ts codePos = case cs ! codePos of '+' -> execCode tapePos (S.update tapePos (value + 1) ts) nextPos '-' -> execCode tapePos (S.update tapePos (value - 1) ts) nextPos '>' -> execCode (tapePos + 1) ts nextPos '<' -> execCode (tapePos - 1) ts nextPos '[' -> if value == 0 then execCode tapePos ts (cache next codePos + 1) else execNext ']' -> if value /= 0 then execCode tapePos ts (cache prev codePos + 1) else execNext '.' 
-> putStr [chr $ S.index ts tapePos] >> execNext ',' -> do { c <- getChar; let newTape = S.update tapePos (ord c) ts in execCode tapePos newTape nextPos } _ -> execNext where value = S.index ts tapePos nextPos = codePos + 1 execNext = execCode tapePos ts nextPosNow cmd is not needed anymore, and [ and ] required some additional plumbing.Now let's remove duplication in 3 symmetrical pairs of instructions - updatePos, updateVal and branch:execCode _ _ codePos | codePos == (snd . bounds $ cs) = return ()execCode tapePos ts codePos = case cs ! codePos of '+' -> updatePos succ '-' -> updatePos pred '>' -> updateVal succ '<' -> updateVal pred '[' -> branch (== 0) next ']' -> branch (/= 0) prev '.' -> putStr [chr $ S.index ts tapePos] >> execNext ',' -> do { c <- getChar; let newTape = S.update tapePos (ord c) ts in execCode tapePos newTape nextPos } _ -> execNext where value = S.index ts tapePos nextPos = codePos + 1 execNext = execCode tapePos ts nextPos updatePos f = execCode tapePos (S.update tapePos (f value) ts) nextPos updateVal f = execCode (f tapePos) ts nextPos branch cond dir = if cond value then execCode tapePos ts (cache dir codePos + 1) else execNextNow it's time to remove duplication between cachePrev and cacheNext:mkCache cs mapAccumX bracketPush bracketPop = listArray (bounds cs) $ snd $ mapAccumX f [] $ assocs cs where f l (i, c) | c == bracketPush = (i : l, Nothing) | c == bracketPop = (tail l, Just $ head l) | otherwise = (l, Nothing)cache arr i = case arr ! i of Nothing -> error oops! Just idx -> idxexecCode' :: S.Seq Int -> Array Int Char -> IO ()execCode' ts cs = execCode 0 ts 0 where prev = mkCache cs mapAccumL '[' ']' next = mkCache cs mapAccumR ']' '[' execCode _ _ codePos | codePos == (snd . bounds $ cs) = return () execCode tapePos ts codePos = case cs ! codePos of '+' -> updatePos succ '-' -> updatePos pred '>' -> updateVal succ '<' -> updateVal pred '[' -> branch (== 0) next ']' -> branch (/= 0) prev '.' -> putStr [chr $ S.index ts tapePos] >> execNext ',' -> do { c <- getChar; let newTape = S.update tapePos (ord c) ts in execCode tapePos newTape nextPos } _ -> execNext where value = S.index ts tapePos nextPos = codePos + 1 execNext = execCode tapePos ts nextPos updatePos f = execCode tapePos (S.update tapePos (f value) ts) nextPos updateVal f = execCode (f tapePos) ts nextPos branch cond dir = if cond value then execCode tapePos ts (cache dir codePos + 1) else execNextHere is complete final source:import qualified Data.Sequence as Simport Data.Char (chr, ord)import Data.Arrayimport Data.ListmkCache cs mapAccumX bracketPush bracketPop = listArray (bounds cs) $ snd $ mapAccumX f [] $ assocs cs where f l (i, c) | c == bracketPush = (i : l, Nothing) | c == bracketPop = (tail l, Just $ head l) | otherwise = (l, Nothing)cache arr i = case arr ! i of Nothing -> error oops! Just idx -> idxexecCode' :: S.Seq Int -> Array Int Char -> IO ()execCode' ts cs = execCode 0 ts 0 where prev = mkCache cs mapAccumL '[' ']' next = mkCache cs mapAccumR ']' '[' execCode _ _ codePos | codePos == (snd . bounds $ cs) = return () execCode tapePos ts codePos = case cs ! codePos of '+' -> updatePos succ '-' -> updatePos pred '>' -> updateVal succ '<' -> updateVal pred '[' -> branch (== 0) next ']' -> branch (/= 0) prev '.' 
-> putStr [chr $ S.index ts tapePos] >> execNext ',' -> do { c <- getChar; let newTape = S.update tapePos (ord c) ts in execCode tapePos newTape nextPos } _ -> execNext where value = S.index ts tapePos nextPos = codePos + 1 execNext = execCode tapePos ts nextPos updatePos f = execCode tapePos (S.update tapePos (f value) ts) nextPos updateVal f = execCode (f tapePos) ts nextPos branch cond dir = if cond value then execCode tapePos ts (cache dir codePos + 1) else execNexttape = S.fromList $ replicate 30000 0csFromString file = listArray (0, length file - 1) filemain = readFile example.bf >>= execCode' tape . csFromStringNote that my suggestions about monad-loops, lens and State are still to be applied.The first step is to declare a datatype for our future state. data M = M { _tapePos :: Int , _tape :: S.Seq Int , _codePos :: Int }and make inner execCode accept a single parameter. execCode :: M -> IO () execCode (M _ _ codePos) | codePos == (snd . bounds $ cs) = return () execCode (M tapePos ts codePos) = case cs ! codePos of '+' -> updatePos succ '-' -> updatePos pred '>' -> updateVal succ '<' -> updateVal pred '[' -> branch (== 0) next ']' -> branch (/= 0) prev '.' -> putStr [chr $ S.index ts tapePos] >> execNext ',' -> do { c <- getChar; let newTape = S.update tapePos (ord c) ts in execCode (M tapePos newTape nextPos) } _ -> execNext where value = S.index ts tapePos nextPos = codePos + 1 execNext = execCode (M tapePos ts nextPos) updatePos f = execCode (M tapePos (S.update tapePos (f value) ts) nextPos) updateVal f = execCode (M (f tapePos) ts nextPos) branch cond dir = if cond value then execCode (M tapePos ts (cache dir codePos + 1)) else execNextM stands for machine state :) and underscores are signals to Data.Lens.TH template Haskell code we will start using a bit later.We will need a state monad transformer ran on top of IO monad. Let's declare the type of our monad stack:type ExecT a = StateT M IO aOur future execCode'' will be of type ExecT () instead of current M -> IO (). To execute it and discard the state (as we do now) we'll use evalStateT:execCode' :: S.Seq Int -> Array Int Char -> IO ()execCode' ts cs = evalStateT execCode'' (M 0 ts 0) where prev = mkCache cs mapAccumL '[' ']' next = mkCache cs mapAccumR ']' '[' execCode'' :: ExecT () execCode'' = do s <- get lift (execCode s) execCode :: M -> IO () execCode = ... -- unchangedNow execCode'' is just a wrapper around our old inner execCode. And we fix execCode so it can be called directly. Note that we should do the following:lift all IOReplace recursive calls execCode (M ...) with put (M ...) >> execCodemove where statements around so M tapePos ts codePos is in scopereplace old termination guard with whenHere is the result:execCode' :: S.Seq Int -> Array Int Char -> IO ()execCode' ts cs = evalStateT execCode (M 0 ts 0) where prev = mkCache cs mapAccumL '[' ']' next = mkCache cs mapAccumR ']' '[' execCode :: ExecT () execCode = do M tapePos ts codePos <- get let value = S.index ts tapePos let nextPos = codePos + 1 let execNext = put (M tapePos ts nextPos) >> execCode let updatePos f = put (M tapePos (S.update tapePos (f value) ts) nextPos) >> execCode let updateVal f = put (M (f tapePos) ts nextPos) >> execCode let branch cond dir = if cond value then put (M tapePos ts (cache dir codePos + 1)) >> execCode else execNext when (codePos /= (snd . bounds $ cs)) $ case cs ! codePos of '+' -> updatePos succ '-' -> updatePos pred '>' -> updateVal succ '<' -> updateVal pred '[' -> branch (== 0) next ']' -> branch (/= 0) prev '.' 
-> lift (putStr [chr $ S.index ts tapePos]) >> execNext ',' -> do { c <- lift getChar; let newTape = S.update tapePos (ord c) ts in put (M tapePos newTape nextPos) >> execCode } _ -> execNextNow note that all branches of case end in >> execCode. So it can be moved around to form a nice while control structure:execCode :: ExecT ()execCode = do M tapePos ts codePos <- get let value = S.index ts tapePos let nextPos = codePos + 1 let execNext = put (M tapePos ts nextPos) let updatePos f = put (M tapePos (S.update tapePos (f value) ts) nextPos) let updateVal f = put (M (f tapePos) ts nextPos) let branch cond dir = if cond value then put (M tapePos ts (cache dir codePos + 1)) else execNext when (codePos /= (snd . bounds $ cs)) $ do case cs ! codePos of '+' -> updatePos succ '-' -> updatePos pred '>' -> updateVal succ '<' -> updateVal pred '[' -> branch (== 0) next ']' -> branch (/= 0) prev '.' -> lift (putStr [chr $ S.index ts tapePos]) >> execNext ',' -> do { c <- lift getChar; let newTape = S.update tapePos (ord c) ts in put (M tapePos newTape nextPos) } _ -> execNext execCodeNow it's finally time for lens to shine.{-# LANGUAGE TemplateHaskell #-}import Control.Lens.THimport Control.Lensimport Control.Applicative...data M = M { _mTapePos :: Int , _mTape :: S.Seq Int , _mCodePos :: Int }$(makeLenses ''M)type ExecT a = StateT M IO aexecCode' :: S.Seq Int -> Array Int Char -> IO ()execCode' ts cs = evalStateT execCode (M 0 ts 0) where prev = mkCache cs mapAccumL '[' ']' next = mkCache cs mapAccumR ']' '[' execCode :: ExecT () execCode = do M tapePos ts codePos <- get let value = S.index ts tapePos let nextPos = codePos + 1 let execNext = mCodePos += 1 let updatePos f = mTape %= S.update tapePos (f value) >> execNext let updateVal f = mTapePos %= f >> execNext let branch cond dir = if cond value then mCodePos %= succ . cache dir else execNext when (codePos /= (snd . bounds $ cs)) $ do case cs ! codePos of '+' -> updatePos succ '-' -> updatePos pred '>' -> updateVal succ '<' -> updateVal pred '[' -> branch (== 0) next ']' -> branch (/= 0) prev '.' -> lift (putStr [chr $ S.index ts tapePos]) >> execNext ',' -> do { c <- lift getChar; mTape %= S.update tapePos (ord c) >> execNext } _ -> execNext execCodeI did the following:Added imports and TemplateHaskell pragmaAdded a template Haskell call to generate definitions for mTapePos from _mTapePosreplaced all put calls with one or many lens-based state modifiers, joined by >>. E.g. if 2 fields of M are modified I chain 2 modifiers. If just 1 - no need to chain. Basically it's just >> execNext instead of nextPos.removed unused nextPosNow it turns out that branch has its own hidden execNext (note succ . in the code above):let branch cond dir = if cond value then mCodePos %= cache dir >> execNext else execNextSo we can transform it to when easily:let branch cond dir = when (cond value) (mCodePos %= cache dir) >> execNextAnd now it turns out that >> execNext is everywhere. We can move it after case and inline:execCode' :: S.Seq Int -> Array Int Char -> IO ()execCode' ts cs = evalStateT execCode (M 0 ts 0) where prev = mkCache cs mapAccumL '[' ']' next = mkCache cs mapAccumR ']' '[' execCode :: ExecT () execCode = do M tapePos ts codePos <- get let value = S.index ts tapePos let updatePos f = mTape %= S.update tapePos (f value) let updateVal f = mTapePos %= f let branch cond dir = when (cond value) (mCodePos %= cache dir) when (codePos /= (snd . bounds $ cs)) $ do case cs ! 
codePos of '+' -> updatePos succ '-' -> updatePos pred '>' -> updateVal succ '<' -> updateVal pred '[' -> branch (== 0) next ']' -> branch (/= 0) prev '.' -> lift (putStr [chr $ S.index ts tapePos]) ',' -> do { c <- lift getChar; mTape %= S.update tapePos (ord c) } _ -> return () mCodePos += 1 execCodeAfter some more cleanup we get:{-# LANGUAGE TemplateHaskell #-}import Control.Lens.THimport Control.Lensimport Control.Applicativeimport qualified Data.Sequence as Simport Data.Char (chr, ord)import Data.Arrayimport Data.Listimport Control.Monad.StatemkCache cs mapAccumX bracketPush bracketPop = listArray (bounds cs) $ snd $ mapAccumX f [] $ assocs cs where f l (i, c) | c == bracketPush = (i : l, Nothing) | c == bracketPop = (tail l, Just $ head l) | otherwise = (l, Nothing)cache arr i = case arr ! i of Nothing -> error oops! Just idx -> idxdata M = M { _mTapePos :: Int , _mTape :: S.Seq Int , _mCodePos :: Int }$(makeLenses ''M)type ExecT a = StateT M IO aexecCode' :: S.Seq Int -> Array Int Char -> IO ()execCode' ts cs = evalStateT execCode (M 0 ts 0) where prev = mkCache cs mapAccumL '[' ']' next = mkCache cs mapAccumR ']' '[' execCode :: ExecT () execCode = do M tapePos ts codePos <- get let value = S.index ts tapePos let tapeAtPos = mTape . ix tapePos let branch cond dir = when (cond value) (mCodePos %= cache dir) when (codePos /= (snd . bounds $ cs)) $ do case cs ! codePos of '+' -> tapeAtPos += 1 '-' -> tapeAtPos -= 1 '>' -> mTapePos += 1 '<' -> mTapePos -= 1 '[' -> branch (== 0) next ']' -> branch (/= 0) prev '.' -> lift (putChar $ chr value) ',' -> do { c <- lift getChar; mTape %= S.update tapePos (ord c) } _ -> return () mCodePos += 1 execCodetape = S.fromList $ replicate 30000 0csFromString file = listArray (0, length file - 1) filemain = readFile example.bf >>= execCode' tape . csFromStringOne more iteration of tuning for execCode' to make code more uniform which is good for readability and maintenance:tapeAtPos is made a self-contained compound lens, without reliance on tapePos and tapeAtPos is moved out of execState' body to global scope, and renamed to mTapeAtPos uniformly with other lensesvalue is renamed tapeAtPos as it corresponds to mTapeAtPosmTapeAtPos is used uniformly for both getting and updating the value everywhere including the getChar branchunsafeUse helper is used to get tapeAtPos uniformly with codePos. It is called unsafe because tapeAtPos may fail if mTapePos is out of range!lens API is used to read code uniformly with reading tapeextra parenthesis/$ are removed from when conditionM ... <- get is removed as it is not used anymoreThe code:mTapeAtPos f m = (mTape . ix (m ^. mTapePos)) f munsafeUse traversal = (^?! traversal) <$> getexecCode' :: S.Seq Int -> Array Int Char -> IO ()execCode' ts cs = evalStateT execCode (M 0 ts 0) where prev = mkCache cs mapAccumL '[' ']' next = mkCache cs mapAccumR ']' '[' execCode :: ExecT () execCode = do tapeAtPos <- unsafeUse mTapeAtPos codePos <- use mCodePos let branch cond dir = when (cond tapeAtPos) (mCodePos %= cache dir) when (codePos /= snd (bounds cs)) $ do case cs ^?! ix codePos of '+' -> mTapeAtPos += 1 '-' -> mTapeAtPos -= 1 '>' -> mTapePos += 1 '<' -> mTapePos -= 1 '[' -> branch (== 0) next ']' -> branch (/= 0) prev '.' 
-> lift (putChar $ chr tapeAtPos) ',' -> do { c <- lift getChar; mTapeAtPos .= ord c } _ -> return () mCodePos += 1 execCodeIt can be made even more uniform:at this point tapePos is used only in branch it can be moved into branch branch then can be moved outside of execCode as it doesn't a closure any moreit seems inner execCode is better named loop, and ExecT is LoopStateTThe code:branch cond dir = do tapeAtPos <- unsafeUse mTapeAtPos when (cond tapeAtPos) (mCodePos %= cache dir)execCode :: S.Seq Int -> Array Int Char -> IO ()execCode ts cs = evalStateT loop (M 0 ts 0) where prev = mkCache cs mapAccumL '[' ']' next = mkCache cs mapAccumR ']' '[' loop :: LoopStateT () loop = do codePos <- use mCodePos when (codePos /= snd (bounds cs)) $ do case cs ^?! ix codePos of '+' -> mTapeAtPos += 1 '-' -> mTapeAtPos -= 1 '>' -> mTapePos += 1 '<' -> mTapePos -= 1 '[' -> branch (== 0) next ']' -> branch (/= 0) prev '.' -> unsafeUse mTapePos >>= lift . putChar . chr ',' -> lift (ord <$> getChar) >>= (mTapeAtPos .=) _ -> return () mCodePos += 1 loopAt this point it seems branch and ./, symmetry aren't worth it, so I inlined branch, reverted getChar branch to use do-notation and moved tapeAtPos <- to the top of loop: loop :: LoopStateT () loop = do tapeAtPos <- unsafeUse mTapeAtPos codePos <- use mCodePos when (codePos /= snd (bounds cs)) $ do case cs ^?! ix codePos of '+' -> mTapeAtPos += 1 '-' -> mTapeAtPos -= 1 '>' -> mTapePos += 1 '<' -> mTapePos -= 1 '[' -> when (tapeAtPos == 0) (mCodePos %= cache next) ']' -> when (tapeAtPos /= 0) (mCodePos %= cache prev) '.' -> lift $ putChar (chr tapeAtPos) ',' -> do { c <- lift getChar; mTapeAtPos .= ord c } _ -> return () mCodePos += 1 loopAnother improvement is better diagnostics of match failures in cache:cache arr i = case arr ! i of Nothing -> error $ No matching bracket at offset ++ show i Just idx -> idxSo we get:{-# LANGUAGE TemplateHaskell #-}import Control.Lens.THimport Control.Lensimport Control.Applicativeimport qualified Data.Sequence as Simport Data.Char (chr, ord)import Data.Arrayimport Data.List (mapAccumR, mapAccumL)import Control.Monad.State (lift, get, evalStateT, when, StateT(..))mkCache cs mapAccumX bracketPush bracketPop = listArray (bounds cs) $ snd $ mapAccumX f [] $ assocs cs where f l (i, c) | c == bracketPush = (i : l, Nothing) | c == bracketPop = (tail l, Just $ head l) | otherwise = (l, Nothing)cache arr i = case arr ! i of Nothing -> error $ No matching bracket at offset ++ show i Just idx -> idxdata M = M { _mTapePos :: Int , _mTape :: S.Seq Int , _mCodePos :: Int }$(makeLenses ''M)type LoopStateT a = StateT M IO amTapeAtPos f m = (mTape . ix (m ^. mTapePos)) f munsafeUse traversal = (^?! traversal) <$> getexecCode :: S.Seq Int -> Array Int Char -> IO ()execCode ts cs = evalStateT loop (M 0 ts 0) where prev = mkCache cs mapAccumL '[' ']' next = mkCache cs mapAccumR ']' '[' loop :: LoopStateT () loop = do tapeAtPos <- unsafeUse mTapeAtPos codePos <- use mCodePos when (codePos /= snd (bounds cs)) $ do case cs ^?! ix codePos of '+' -> mTapeAtPos += 1 '-' -> mTapeAtPos -= 1 '>' -> mTapePos += 1 '<' -> mTapePos -= 1 '[' -> when (tapeAtPos == 0) (mCodePos %= cache next) ']' -> when (tapeAtPos /= 0) (mCodePos %= cache prev) '.' -> lift $ putChar $ chr tapeAtPos ',' -> do { c <- lift getChar; mTapeAtPos .= ord c } _ -> return () mCodePos += 1 looptape = S.fromList $ replicate 30000 0csFromString file = listArray (0, length file - 1) filemain = readFile example.bf >>= execCode tape . csFromString
_webapps.84737
I used to be able to see how long ago someone was on Facebook. She uses the desktop website, not a smartphone app. For the last couple of weeks I haven't been able to see that. Do you know why? She didn't unfriend me. I can send her messages and find her in the search bar, so I don't think she blocked me. So why can't I see when she is on Facebook, or how long ago she was last on?
Why can't I see how long ago someone was on Facebook?
facebook;facebook chat
null
_unix.353939
Can Apache run without the htdocs directives? I don't need them. I'm using Apache with a middleware application, so in order to reduce risk I wanted to remove the htdocs directory along with both of these directives: DocumentRoot "/var/www/htdocs" <Directory "/var/www/htdocs"> ... </Directory> When I remove the above lines, it seems like Apache falls back to its default installation directory and tries to open the htdocs there.
Apache HTTP without the htdocs directive
apache httpd;configuration;reverse proxy
null
_codereview.106611
This is a follow-up of Wikipedia Trie pseudocode in PythonCode quality improvementsfind has been fixed and a regression test (bana vs banana) has been added so that it will never again be broken that way.Used ordinary instance methods when sensible (previous code suffered from staticmethods overuse).Significant simplification from the use of Python defaultdictOperator overloading to provide easy membership test and printing (__repr__, __contains__)Removed the value argument as I saw no use for it.Minor doc-style language adjustments.The printing is not used in testing anymore, as it should not be, because it is arbitrary (as Python's dicts are)Functionality improvementsThe printing is now as it should be, not reversed.There is no more a weird inspect method, you can simply print the trie.Example outputA trie from the words ('banning', 'banned', 'banana', 'bad', 'cooking', 'cought', 'count') is printed as:c o o k i n g # u g h t # n t # b a n a n a # n i n g # e d # d ## signals the end of a word.The codeimport collectionsimport doctestclass Trie: Implements a Trie (also known as 'digital tree', 'radix tree' or 'prefix tree'). Where common starting letters of many words are stored just once. def __init__(self): self.child = collections.defaultdict(Trie) def insert(self, string): Add a given string to the trie, modifying it **in place**. >>> t = Trie() >>> t.insert('hello') >>> t.insert('hi') >>> list(sorted(t.child.keys())) ['h'] >>> first_node = t.child['h'] >>> list(sorted(first_node.child.keys())) ['e', 'i'] As you can see, the same `h` was written only once, and it is connected with both `i` and `e`. node = self for char in string: node = node.child[char] node = node.child[None] def __contains__(self, word): >>> t = Trie() >>> t.insert('example') >>> 'example' in t True >>> 'exemplum' in t False >>> t.insert('bana') >>> 'banana' in t False >>> t.insert('banning') >>> t.insert('banned') trie = self for char in word: if char in trie.child: trie = trie.child[char] else: return False return True def __str__(self, depth = 0): Shows a nicely formatted and indented Trie. Cannot be tested as equivalent representations are arbitrarly chosen from (`dict`s are not ordered). s = [] for i in self.child: s.append( '{}{} {}'.format( ' ' * depth, i or '#', '\n' + self.child[i].__str__(depth + 1))) return ''.join(s)if __name__ == '__main__': doctest.testmod() trie = Trie() for word in ('banning', 'banned', 'banana', 'bad', 'cooking', 'cought', 'count'): trie.insert(word) print(trie)
Implementing a Trie in Python - follow-up
python;python 3.x;trie
null
_webapps.18765
I was thinking about setting up Paypal. One issue I have is tracking. If I accept money today and want to refund it a year or two from now, do I have the proper tracking to know who sent me $$ and how much?
Paypal and tracking
paypal
PayPal provides you with pretty much all the details from their History tab. You can search on specific dates. Clicking on Details will give you the full details, and within the date range you can drill down to specific events.
_softwareengineering.211688
If a controller gets too fat and model instantiation starts to add up, a service layer could be used. If I just wrap the logic inside a service class, I will get a bunch of Services with one/two methods. This feels like a code smell. Any best practices regarding this? Can a service instantiate models? If a service instantiates models, the services can't be unit tested. They can only be covered by integration tests?
Use a service layer with MVC
mvc;services
null
_datascience.694
I'm using Neural Networks to solve different Machine learning problems. I'm using Python and pybrain but this library is almost discontinued. Are there other good alternatives in Python?
Best python library for neural networks
machine learning;python;neural network
UPDATE: the landscape has changed quite a bit since I answered this question in July '14, and some new players have entered the space. In particular, I would recommend checking out:TensorFlowBlocksLasagneKerasDeepyNolearnNeuPyThey each have their strengths and weaknesses, so give them all a go and see which best suits your use case. Although I would have recommended using PyLearn2 a year ago, the community is no longer active so I would recommend looking elsewhere. My original response to the answer is included below but is largely irrelevant at this point.PyLearn2 is generally considered the library of choice for neural networks and deep learning in python. It's designed for easy scientific experimentation rather than ease of use, so the learning curve is rather steep, but if you take your time and follow the tutorials I think you'll be happy with the functionality it provides. Everything from standard Multilayer Perceptrons to Restricted Boltzmann Machines to Convolutional Nets to Autoencoders is provided. There's great GPU support and everything is built on top of Theano, so performance is typically quite good. The source for PyLearn2 is available on github.Be aware that PyLearn2 has the opposite problem of PyBrain at the moment -- rather than being abandoned, PyLearn2 is under active development and is subject to frequent changes.
_softwareengineering.59184
How does a non-programmer formulate web-programming requirements for a website that will be designed by a third party: how to ask, and what to ask? Example: I want to hire someone to design or implement an existing photo voting contest with some extra features. How do I formulate my needs so that a web programmer understands me? Are there any templates? Books?
How can a non-programmer formulate web-programming requirements?
web development;web applications;freelancing;communication
null
_softwareengineering.177880
I am creating an application that maintains a database of files of a certain type in a given folder (and all subfolders). Initially the program will recurse through the folders and add any file it finds of that type to the database. I want the application to have the ability to re-scan the folder and add any files that were not there the last time the folders were scanned. It can't use the date-created property of the file, because there is a high chance of a file being added to the folders that isn't a new file. I am wondering what the most efficient way of doing this is, and if there is a way that doesn't involve checking whether each file is in the database already (which, if there are 5000 files, would mean 5000 queries of a list 5000 items in size, or 25 million 'checks' for the SQL engine to perform). I suppose a more specific question to achieve the same goal would be: is there a property of a file (in Microsoft Windows) that will reliably tell you when that file arrived in that folder? Edit: The app would not be running all the time, so monitoring the folder for change events is not an option. A typical scenario might be: run the app, get new files, close the app. A week later (after normal computer usage and files being added to the folder) run the app again and look for changes since the app was last used.
Efficient way to check for changes to the contents of folders
file handling
null
_webapps.73390
I have a Google Plus account and was able to successfully create a brand page associated with that account. But I created that account with the sole purpose of having the brand page, so I will not post anything or interact with people on the account, just on the brand page. The account is the default, though, so I've already mistakenly posted things on the account thinking I was on the brand page. So I don't want to have the account, just the brand page. Another reason is that I got the right to choose a Google Plus URL, but unfortunately it has to have the account in the address, like this: google.com/MyAccountName/MyBrandName But I wanted it to be just google.com/MyBrandName Any clue on how to get rid of the account and keep just the brand page on Google Plus?
How to create a Google Plus Brand Page not associated with a Google Plus account
google plus;google plus pages
null
_scicomp.26739
I need to generate a discontinuous plot (piecewise in each triangle) in matlab, something like:This plot is from http://www.alecjacobson.com/weblog/?p=3616, but I don't understand how generate it.I have two matrices corresponding to a triangular mesh of a domain (typical of finite elements method, that is, no overlaped triangles, the union of all triangles is equal to the domain...):coord=[x1,y1;x2,y2;...;xnod,ynod] % vertices of the meshele =[n1,n2,n3;n1,n2,n3;...;n1,n2,n3] % mesh conectivityFor example:coord(1,:)=[x1,y1] are the coordinates of vertex 1 of the mesh (vertex of some triangle)coord(2,:)=[x2,y2] are the coordinates of vertex 2 of the mesh (vertex of some triangle)etc,ele(1,:)=[n1,n2,n3] is the number of the vertices of triangle 1ele(2,:)=[n1,n2,n3] is the number of the vertices of triangle 2etc.This way, for example,coord(ele(4,:),:)=[x1,y1;x2,y2;x3,y3]are the coordinates (x1,y1), (x2,y2) and (x3,y3) of the vertices of triangle 4.The solution that I need to plot is a vector u which size is the number of triangles of the mesh (=number of rows of elem), because this solution is one constant over each triangle.How can I plot this discontinuous solution?
Plot 2D piecewise constant in matlab in a finite elements mesh
finite element;matlab;discontinuous galerkin;octave
I use typically the following approach. The idea is to make a new mesh where every vertex is duplicated so that each triangle has its own copy. Then you can use standard trisurf command to the resulting mesh structure.p=coord';t=ele';x=p(1,:);y=p(2,:);P=[x(t(:));y(t(:))];T=reshape(1:size(P,2),[3 size(P,2)/3]);% create random u for testingu=rand(size(P,2)/3,1);tmp=[u';u';u'];trisurf(T',P(1,:),P(2,:),tmp(:))If you have PDE toolbox there is also a built in command for this called pdesurf.
_vi.7139
I want to SSH into a Linux server from my MacBook, write Matlab code, execute it, and see the figures that Matlab produces, all through one single SSH channel. Note that I don't want to use X11 forwarding; I prefer to see everything inside the SSH channel. Can I have this setup with vim, or should I go for Emacs?
Vim remotely over SSH channel and see the MATLAB figures
matlab;ssh
null
_webmaster.21845
Possible Duplicate: What are the best ways to increase your site's position in Google? I searched the web and found a lot of techniques for getting onto the first page of results on Google or Yahoo. Is it possible to pay for that?
How to be on google first page results?
seo;google;yahoo
The short answer is no.The only real way to get yourself higher in search rankings is to actually have the best/most relevant content for whatever the search is. There are services that will offer ways to do this, which often involve things like them placing links to your site in various directories at best, in spam farms at worst. The search engines actively work against this sort of thing and while you might see a temporary benefit, it almost certainly won't last, and might even end up harming you. It's also worth noting that with the degree of customization and geotargeting that Google in particular does now, there's almost no such thing as a single first page for you to be aiming for. Make your site good and the rest will take care of itself.You can, of course, buy your way into the ads that appear at the top of some search results(with the yellow background), but I don't think that's what you're asking about.
_unix.342760
I have Debian Stretch and I would like to have a custom grub entry in order to run the system without a graphical desktop. I thought that would be as easy as running a different runlevel, but reading about that, I was aware that, in systemd everything is different.After reading this question about Red Hat and also this other for Debian Jessie, I learnt about systemd targets, and I think that what I want to do is running in multi-user.target.I've found this fedora link, this archlinux kernel link and this other one. All them explain that there is one option systemd.unit that can be appended in the linux line in the grub menu entry. So, I searched for links to explain how to create a custom menu entry: this one. But, looking my own automatic grub entries with the key 'e' in the grub screen, they are more complex than the one in the link. The problem is that I don't know if I must copy all that stuff in the custom menu entry. setparams 'Debian gnu/linux, con linux 4.8.0-2-amd64' load_video insmod gzio if [ x$grub_platform = xxen ]; then insmod xzio; insmod lzopio; fi insmod part-msdos insmod ext2 set root='hd0,msdos5' if [ x$feature_platform_search_hint = xy ]; then search --no-floppy --fs-uuid --set=root --hint-bios=hd0,msdos5 --hint-efi=hd0,msdos5 --hint-baremetal=ahci0,msdos5 3202c741-ef05-40e4-9368-8617e7b1fb3celse search --no-floppy --fs-uuid --set=root 3202c741-ef05-40e4-9368-8617e7b1fb3cfiecho 'Cargando Linux 4.8...'linux /vmlinuz-4.8.0-2-amd64 root=UUID=17f74892-fe09-46ec-91ca-2dca457565a1 ro quietecho 'Cargando imagen de memoria inicial...'initrd /initrd.img-4.8.0-2-amd64This is my automatically created entry for my last kernel. Can I simply copy all this in a custom menu entry and change only thelinux /vmlinuz-4.8.0-2-amd64 root=UUID=17f74892-fe09-46ec-91ca-2dca457565a1 ro quietline to belinux /vmlinuz-4.8.0-2-amd64 root=UUID=17f74892-fe09-46ec-91ca-2dca457565a1 ro quiet systemd.unit=multi-user.target?
Create debian grub custom entry for running systemd multiuser.target
debian;systemd;grub2;systemd boot
After some days of researching, I have 3 approaches to the problem of creating custom entries for running a Systemd Debian without graphical desktop from the Grub. I think that the best approach is 1.1. Creating a new /etc/grub.d/* configuration fileTo do that, i copied /etc/grub.d/10_linux file as a template:sudo cp /etc/grub.d/10_linux /etc/grub.d/11_multiuserThe original file creates the root entry for the latest kernel and also the Advanced options submenu. So, I edited my 11_multiuser file a little bit, just to create a new submenu for the multiuser options, and create inside a new option for each kernel, for the multiuser mode. Here I'll add a patch with the modified lines:--- /etc/grub.d/10_linux+++ /etc/grub.d/11_multiuser@@ -118,6 +118,8 @@ case $type in recovery) title=$(gettext_printf %s, with Linux %s (%s) ${os} ${version} $(gettext ${GRUB_RECOVERY_TITLE})) ;;+ multiuser)+ title=$(gettext_printf %s, with Linux %s (multiuser) ${os} ${version}) ;; init-*) title=$(gettext_printf %s, with Linux %s (%s) ${os} ${version} ${type#init-}) ;; *)@@ -227,57 +229,18 @@ boot_device_id= title_correction_code=-cat << 'EOF'-function gfxmode {- set gfxpayload=${1}-EOF-if [ $vt_handoff = 1 ]; then- cat << 'EOF'- if [ ${1} = keep ]; then- set vt_handoff=vt.handoff=7- else- set vt_handoff=- fi-EOF-fi-cat << EOF-}-EOF--# Use ELILO's generic efifb when it's known to be available.-# FIXME: We need an interface to select vesafb in case efifb can't be used.-if [ x$GRUB_GFXPAYLOAD_LINUX != x ] || [ $gfxpayload_dynamic = 0 ]; then- echo set linux_gfx_mode=$GRUB_GFXPAYLOAD_LINUX-else- cat << EOF-if [ \${recordfail} != 1 ]; then- if [ -e \${prefix}/gfxblacklist.txt ]; then- if hwmatch \${prefix}/gfxblacklist.txt 3; then- if [ \${match} = 0 ]; then- set linux_gfx_mode=keep- else- set linux_gfx_mode=text- fi- else- set linux_gfx_mode=text- fi- else- set linux_gfx_mode=keep- fi-else- set linux_gfx_mode=text-fi-EOF-fi-cat << EOF-export linux_gfx_mode-EOF- # Extra indentation to add to menu entries in a submenu. We're not in a submenu # yet, so it's empty. In a submenu it will be equal to '\t' (one tab). 
submenu_indentation=-is_top_level=true+# para el menu de multiuser+submenu_indentation=$grub_tab+if [ -z $boot_device_id ]; then+ boot_device_id=$(grub_get_device_id ${GRUB_DEVICE})+fi+gettext_printf Agregando entradas multiuser...\n >&2+echo submenu '$(gettext_printf Advanced options for %s ${OS} | grub_quote) (MultiUser)' \$menuentry_id_option 'gnulinux-advanced-$boot_device_id' {+is_top_level=false while [ x$list != x ] ; do linux=`version_find_latest $list` case $linux in@@ -331,34 +294,9 @@ linux_root_device_thisversion=${GRUB_DEVICE} fi- if [ x$is_top_level = xtrue ] && [ x${GRUB_DISABLE_SUBMENU} != xy ]; then- linux_entry ${OS} ${version} simple \- ${GRUB_CMDLINE_LINUX} ${GRUB_CMDLINE_LINUX_DEFAULT}-- submenu_indentation=$grub_tab- - if [ -z $boot_device_id ]; then- boot_device_id=$(grub_get_device_id ${GRUB_DEVICE})- fi- # TRANSLATORS: %s is replaced with an OS name- echo submenu '$(gettext_printf Advanced options for %s ${OS} | grub_quote)' \$menuentry_id_option 'gnulinux-advanced-$boot_device_id' {- is_top_level=false- fi-- linux_entry ${OS} ${version} advanced \- ${GRUB_CMDLINE_LINUX} ${GRUB_CMDLINE_LINUX_DEFAULT}-- for supported_init in ${SUPPORTED_INITS}; do- init_path=${supported_init#*:}- if [ -x ${init_path} ] && [ $(readlink -f /sbin/init) != ${init_path} ]; then- linux_entry ${OS} ${version} init-${supported_init%%:*} \- ${GRUB_CMDLINE_LINUX} ${GRUB_CMDLINE_LINUX_DEFAULT} init=${init_path}- fi- done- if [ x${GRUB_DISABLE_RECOVERY} != xtrue ]; then- linux_entry ${OS} ${version} recovery \- ${GRUB_CMDLINE_LINUX_RECOVERY} ${GRUB_CMDLINE_LINUX}- fi+ linux_entry ${OS} ${version} multiuser \+ ${GRUB_CMDLINE_LINUX} ${GRUB_CMDLINE_LINUX_DEFAULT} systemd.unit=multi-user.target+ list=`echo $list | tr ' ' '\n' | fgrep -vx $linux | tr '\n' ' '` doneWith this solution, if I add/remove kernels, or perform some action that involves any reconfiguration of the grub menu, my desired multiuser entries will be automatically added for each kernel. Also, I think (but not completely sure) that, if i update the grub, my new configuration file 11_multiuser won't be removed, given that it's not part of the Grub's predefined configuration files.2. Modifying /etc/grub.d/10_linux fileThis is another approach, but i think this is worse than the first one. This way, you are modifying the official file, so you could break the Grub's configuration and the whole system startup. Also, if any update leads to the file replacement, you could loose your configuration. There is only one advantage on doing this: you could insert your multiuser entries in the Advanced options submenu.The patch added for the first approach is partially valid for this. Anyway, I totally disagree this approach.3. Modifying /etc/grub.d/40_custom fileThis file is intended to insert specific entries. You could copy the entry from /boot/grub/grub.cfg and paste it in this file adding the systemd.. It is perfectly ok, but the problem is that you must do it for every kernel you want. Also, when removing/adding new kernels to the system, you must maintain this file manually. Plus, these entries appears at the end of the grub menu, and if you have other operating systems like Windows, then your custom entries will be separated from the first Linux entries.
_softwareengineering.39353
After some googling I was unable to find out the hierarchy of different titles in career development. In particular, the relationship between Senior Engineer and Software Specialist is unclear to me. Which one is higher? Most of all I would like to see links pointing to some career development paths from trainee to the highest boss, and what kinds of different routes there are, including technical and managerial paths.
Work advancement titles
organization
The Short Version:There is no industry standard for these things, they're specific to each company and in some cases won't even be consistent within a single company. The skills and abilities that make someone a developer in one company might mean that they're a senior developer in another company, and a technical architect somewhere else.The Longer Version:Broadly speaking large numbers of different job titles are an indicator of a large company, frequently one which either doesn't specialise in programming, started out in an area other than programming, or in some cases started it's life some time ago in a period when companies were far more hierarchical than they are now. Smaller and more modern companies on the other hand can frequently encompass hundreds or even thousands of employees with maybe four or five titles (something along the lines of developer, senior developer, development manager, technical architect, and chief technical officer).But you shouldn't confuse your job title with your career development. Here there is a common thread to the way many people will develop though there are still plenty of exceptions.Typically you'll start out as a developer (though you maybe called a trainee, or a junior developer or a programmer or whatever), making small changes, bug fixes and doing support. Over a year or two you'll move on to more substantial work, though still very much hands on, picking up new skills and increasing your competence and experience.Somewhere around five years in (maybe a little more, maybe a little less), people will often start to wonder what next and there are a few options. First they can stay technical and start moving towards being a senior programmer which will involve programming but also mentoring and looking at how the team can do things better. In many cases this is where people will stay, happy in the role and not wanting to become more hands off or get involved in the politics of management. In other cases they may stay hands on but start taking on a few managerial tasks as a team leader.Alternatives are moving into some sort of business analyst or project management role. I won't go into these here as it's outside the realms of this site but these are fairly common options and things where a few years programming experience can be a good kick start to a career.Assuming that the person chooses to remain a programmer and wants to move on from senior developer, the options tend to be technical architect (that is remaining technical but looking at defining the solutions at a higher level, getting involved in requirements and technical platform selection and so on) or becoming a development manager (so actually managing a team of developers which basically involves doing whatever is necessary to have them deliver the work being assigned to the team).And from there a small number of people will move to more senior management roles (in larger companies) and potentially into a CTO position and / or beyond. Once you've shown you've got the ability to operate at senior levels lateral movement (that is across disciplines) is far more common and you will often see people moving out of roles which are anything to do with technology.But a few things worth noting:There is no single route. I've seen people mess around as a programmer for decades and then just jump massively when they committed themselves and I've seen people get to CTO and then drop back down to be a programmer again. 
It's whatever works for you at whatever time of life you're at.A lot of the opportunities to progress are around luck and the ability to identify opportunities and take them. You'll get luckier the harder you work and the more talented you are but just sticking around and plugging away is no guarantee of progress.Seniority != happiness and in many cases seniority != more money. Programmers who like programming will frequently find themselves very unhappy in management roles, even where they do find themselves competent and capable in that position. You're going to be working for 40 or 50 years, happiness is a big factor so don't just assume that you want to climb the greasy pole for the sake of climbing it.
_unix.289270
I am unable to connect to Laravel Homestead via MySQL Workbench. Here is how I connected: Homestead Connection http://simple.harry.zone/linux/homestead_connect.png but I am facing the issue below: Homestead Issue http://simple.harry.zone/linux/homestead_issue.png Please help!
Edit: Here are the details as requested by Gerath in the comments below.
1) The server is running on localhost, 127.0.0.1 only.
2) The Homestead server uses port 33060 virtually (3306 as guest, and 33060 as host).
3) I am not sure.
4) The username and password for the databases are homestead / secret, which is the default in Laravel Homestead.
Here are the details for points 1 and 2: Homestead Port http://simple.harry.zone/linux/homestead_port.png
Couldn't connect to Laravel Homestead database with MySQL Workbench
ubuntu;mysql;database;php7
null
_codereview.71081
I am making an application to save a layout of user control into an .xps document. But, the export doesn't seem completed if the data is too many. I've tried to encapsulate each part with DispatcherPriority.Loaded but it seems the data binding is horrifyingly take too long to be finished (I honestly don't know what is the problem to be exact).How can I optimize the below code properly to wait for each process to be finished before moving on to the next one, so saving to XPS document will be properly loaded?P.S. This doesn't happen if the data is not too large.public class MyClass{ public static void CreatePortableFile(List<MyViewModelVM> myViewModels, string path) { List<MyViewV> views = new List<MyViewV>(); List<FixedPage> fixedPages = new List<FixedPage>(); List<PageContent> pageContents = new List<PageContent>(); FixedDocument fixedDoc = new FixedDocument(); Dispatcher.CurrentDispatcher.Invoke(new Action(() => { foreach (MyViewModelVM item in myViewModels) { views.Add(new MyViewV() { DataContext = item }); } Console.WriteLine(Setting datacontext + DateTime.Now.TimeOfDay); }), DispatcherPriority.Loaded); Dispatcher.CurrentDispatcher.Invoke(new Action(() => { foreach (MyViewV item in views) { FixedPage newFixedPage = new FixedPage(); newFixedPage.Children.Add(item); fixedPages.Add(newFixedPage); } Console.WriteLine(Setting fixedpage + DateTime.Now.TimeOfDay); }), DispatcherPriority.Loaded); Dispatcher.CurrentDispatcher.Invoke(new Action(() => { foreach (FixedPage item in fixedPages) { PageContent newPageContent = new PageContent(); ((System.Windows.Markup.IAddChild)newPageContent).AddChild(item); pageContents.Add(newPageContent); } Console.WriteLine(Setting pagecontent + DateTime.Now.TimeOfDay); }), DispatcherPriority.Loaded); Dispatcher.CurrentDispatcher.Invoke(new Action(() => { foreach (PageContent item in pageContents) { fixedDoc.Pages.Add(item); } Console.WriteLine(Setting fixeddoc + DateTime.Now.TimeOfDay); }), DispatcherPriority.Loaded); Dispatcher.CurrentDispatcher.Invoke(new Action(() => WriteToXps(path, fixedDoc)), DispatcherPriority.Loaded); } private static void WriteToXps(string path, FixedDocument fixedDoc) { XpsDocument xpsDoc = new XpsDocument(path, FileAccess.Write); XpsDocumentWriter xWriter = XpsDocument.CreateXpsDocumentWriter(xpsDoc); xWriter.Write(fixedDoc); xpsDoc.Close(); Console.WriteLine(Setting write + DateTime.Now.TimeOfDay); }}Sample call:MyClass.CreatePortableFile(listOfMyViewModels, path);Console.Writeline result:Setting datacontext 17:51:37.8885505Setting fixedpage 17:51:37.8915518Setting pagecontent 17:51:37.8935523Setting fixeddoc 17:51:37.8945527Setting write 17:51:40.1266807
Saving a collection of WPF user control layout into an XPS document
c#;multithreading;wpf;mvvm
I don't really see the sense of splitting this into different sections. Also some of them only add overhead which isn't needed at all. For each MyViewModelVM in List<MyViewModelVM> you are creating a MyViewV with it as DataContext which is in the next loop added as a child to a FixedPage which then is added as a child to a PageContent which is then added to the Pages of the FixedDocument. This can be simplified to public static void CreatePortableFile(List<MyViewModelVM> myViewModels, string path){ Dispatcher.CurrentDispatcher.Invoke(new Action(() => { FixedDocument fixedDoc = new FixedDocument(); foreach (MyViewModelVM item in myViewModels) { MyViewV view = new MyViewV() { DataContext = item }; FixedPage newFixedPage = new FixedPage(); newFixedPage.Children.Add(view); PageContent newPageContent = new PageContent(); ((System.Windows.Markup.IAddChild)newPageContent).AddChild(newFixedPage); pageContents.Add(newPageContent); fixedDoc.Pages.Add(newPageContent); } WriteToXps(path, fixedDoc) }), DispatcherPriority.Loaded);} EDIT After reading the comment, I digged a little bit more into this. As this is similiar to printing with datacontext this should fix the issue: public static void CreatePortableFile(List<MyViewModelVM> myViewModels, string path){ FixedDocument fixedDoc = new FixedDocument(); foreach (MyViewModelVM item in myViewModels) { MyViewV view = new MyViewV() { DataContext = item }; FixedPage newFixedPage = new FixedPage(); newFixedPage.Children.Add(view); PageContent newPageContent = new PageContent(); ((System.Windows.Markup.IAddChild)newPageContent).AddChild(newFixedPage); pageContents.Add(newPageContent); fixedDoc.Pages.Add(newPageContent); } Dispatcher.CurrentDispatcher.Invoke (new Action (delegate { }), DispatcherPriority.ApplicationIdle, null); WriteToXps(path, fixedDoc)} A detailed explaination can be found here
_webapps.4406
As a computer programmer I have no problem understanding labels/tags. However, I find it very hard to make people understand that an email can have more than one label. I also find it hard to explain that you can still get to an email if it is not in your inbox and does not have a label. (Moving an email to a folder seems to be how most people think.) Is Gmail just designed by computer programmers for computer programmers? (Hence why I like it so much.)
How should I explain to a normal person about gmail labels?
gmail;gmail labels
Just tell them that every e-mail is like a movie. The same movie can belong to more than one category/genre, like comedy, action, adventure, etc.
_webmaster.63449
We have recently built a new website with a new domain to replace an old website, and on the advice of our IT guys and web dev team we have pointed both oldaddress.com's and newaddress.com's A records to the new website. Now they both share the same Google Analytics code (UA-12345-1), and as such we have two entries in the Google Analytics dashboard. The problem is I'm still fairly new to GA, and as the reports seem VERY similar (~25k pageviews for each domain), are these figures exclusive to each domain? For example: oldaddress.com 25,400 pageviews; newaddress.com 25,600 pageviews. Does this mean that in total for this website I have 50,000 pageviews? Hope this is clear enough, but let me know if anything needs clarifying. Thanks.
Are my Google Analytics ( 2 domains 1 site) duplicated or unique?
google analytics;domains
null
_softwareengineering.255386
I've been spending some time on 're-tuning' some of my OOP understanding, and I've come up against a concept that is confusing me.Lets say I have two objects. A user object and an account object. Back to basics here, but each object has state, behaviour and identity (often referred to as an entity object).The user object manages behaviour purely associated with a user, for example we could have a login(credentials) method that returns if successfully logged in or throws exception if not.The account object manages behaviour purely associated with a users account. For example we could have a method checkActive() that checks if the account is active. The account object checks if the account has an up-to-date subscription, checks if there are any admin flags added which would make it inactive. It returns if checks pass, or throws exception if not.Now here lies my problem. There is clearly a relationship between user and account, but I feel that there are actually two TYPES of association to consider. One that is data driven (exists only in the data/state of the objects and the database) and one that is behaviour driven (represents an object call to methods of the associated object).Data Driven AssociationIn the example I have presented, there is clearly a data association between user and account. In a database schema we could have the following table:----------------- USER_ACCOUNTS----------------- id user_id ----------------When we instantiate the account and load the database data into it, there will be a class variable containing user_id. In essence, the account object holds an integer representation of user through user_idBehaviour Driven AssociationBehaviour driven associations are really the dependencies of an object. If object A calls methods on object B there is an association going from A to B. A holds an object representation of B.In my example case, neither the user object nor the account object depend on each other to perform their tasks i.e. neither object calls methods on the other object. There is therefore no behaviour driven association between the two and neither object holds an object reference to the other.QuestionIs the case I presented purely a case of entity representation? The association between user and account is always present, but its being represented in different ways?ie. the user entity has an identity that can be represented in different forms. It can be represented as an object (the instantiated user object) or as a unique integer from the users table in the databases.Is this a formalised way of recognising different implementations of associations or have I completely lost my mind?One thing that bugs me is how would I describe the differences in UML or similar? Or is it just an implementation detail?
Are there two type of associations between objects or are there just different representations?
object oriented;database;uml;dependencies;entity
It's a bit hard to fully understand exactly where the problem lies :). I think you are mixing a few concepts.
Entity representation
First of all, every model we implement is just a simplification of some real-world objects and their environment. Your conceptual entity "user" corresponds to some real user and contains the attributes we are interested in, according to the application's needs. So there are several models we have to implement.
Thus the user defined by the DB schema, the user defined by a class, and probably a user defined by some presentation logic all normally represent the same real-world user. It is purely a matter of implementation: we have to store, view and manipulate the user.
On the other hand, consider the Single Responsibility Principle. It tells us to use every class or program unit for one well-defined purpose. Using a single "user" class or program unit for both storage and presentation needs violates this principle.
"the user entity has an identity that can be represented in different forms. It can be represented as an object (the instantiated user object) or as a unique integer from the users table in the databases."
So, the answer is yes.
Associations and dependencies
I think this is more about terminology than about the nature of the problem. But in general you are right: there are different types of object relations (I will deliberately use other terms to list them), for example referencing, creating, using, coordinating, storing, inheriting (!) and so on. In these terms, a user instance references an account instance, and an A instance uses a B instance.
For most cases it is good enough to distinguish just referencing and using, which is what you have described. That is common and abstract enough to be understood by another person when talking about the domain. Sometimes, to emphasize certain aspects, you may want to describe relations in a form like "A dispatches an ensemble of Bs" or "R stores X in the database"; that is more applicable to specification and modelling.
"Is this a formalised way of recognising different implementations of associations or have I completely lost my mind?"
For something formalized, I suggest looking at UML.
UML and other modelling instruments
"One thing that bugs me is how would I describe the differences in UML or similar? Or is it just an implementation detail?"
There are a lot of UML models (diagrams). Let's take the best known one, the Class (and Object) Diagram.
Interestingly, UML allows you to present all types of object relations, and moreover lets you decide whether something is just an implementation detail.
Martin Fowler describes 3 levels (or points of view) for understanding a Class Diagram:
Conceptual. The diagram is treated as a high-level domain model, independent of the implementation.
Specification. The diagram is treated as a high-level realization model consisting of interfaces.
Implementation. The diagram is treated as a low-level technical document containing interfaces, classes, references and other types of relations.
"Is this a formalised way of recognising different implementations of associations or have I completely lost my mind?"
Yes: you have to fix a point of view and choose the appropriate set of relations.
For example, let's look at the Class Diagram and consider it from an implementation point of view. 
UML defines 3 types of relations (and proposes corresponding notation for each):
Association. An association corresponds to referencing between instances.
Dependency. A dependency covers all the other kinds of relations, such as using, creating, storing, etc.
Inheritance. Inheritance, as a fundamental OOP instrument, is presented by UML in a distinct way. It is more about classes than about instances, but one can also say that an A instance inherits the attributes of a B instance, so that is fine.
The first and second points of view on the Class Diagram, as I remember, use only one type of relation, unifying associations and dependencies, and it is drawn like an association (no inheritance there, of course).
UML also offers the Object Diagram, which is similar to the Class Diagram but fits runtime modelling needs better.
Finally, the choice of which set of relations to take into consideration depends on the context and the point of view; UML provides several.
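To make the referencing vs. using distinction concrete, here is a minimal sketch in plain Python (the class and method names are invented for this example and are not tied to your schema): the Account keeps only the user's identity as data, while a separate service merely uses an Account passed to it without storing any reference to it.

class Account:
    def __init__(self, account_id, user_id, active=True):
        self.account_id = account_id
        self.user_id = user_id   # data-driven association: an identity, not an object reference
        self.active = active

    def check_active(self):
        if not self.active:
            raise ValueError("account is inactive")

class BillingService:
    def charge(self, account, amount):
        # behaviour-driven relation ("using"): this method depends on Account's interface
        # but keeps no reference to the account after the call returns
        account.check_active()
        return ("charged", account.account_id, amount)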
_unix.206479
I can't figure out what the icon between my settings and lock-screen icon in Gnome3 is supposed to do.It has two states, locked and unlocked, but it's not the lock screen. When I click it, it toggles its status.
What is the icon between settings and lock-screen in Gnome3?
gnome3;gui;gnome shell
That button locks/unlocks screen orientation on devices that can detect which way they are held - usually touchscreen devices - see the Screen orientation section on the GNOME wiki touchscreen page: "Easy ability to temporarily disable and enable auto-rotation (rotation lock)". It was introduced a couple of years ago: "Add an orientation lock action button". The icon used is rotation-locked-symbolic and its counterpart is rotation-allowed-symbolic. They're under /usr/share/icons/Adwaita/scalable/status and they're both part of the symbolic icons set. On some systems with hard disk accelerometers that button appears even if your device doesn't support screen orientation detection. If that's the case you can disable the gnome-settings-daemon plugin via gsettings:
gsettings set org.gnome.settings-daemon.plugins.orientation active false
_codereview.43744
My code works, but the aesthetic of the code seems to need some real work. The logic is simple enough: Create three lists of words that will be used throughout the conversionCreate three functions: One for sub-1000 conversionAnother for 1000-and-up conversionAnother function for splitting and storing into a list of numbers above 1000.First the code:# Create the lists of word-equivalents from 1-19, then one for the tens group.# Finally, a list of the (for lack of a better word) zero-groups.ByOne = [zero,one,two,three,four,five,six,seven,eight,nine,ten,eleven,twelve,thirteen,fourteen,fifteen,sixteen,seventeen,eighteen,nineteen]ByTen = [zero,ten,twenty,thirty,forty,fifty,sixty,seventy,eighty,ninety]zGroup = [,thousand,million,billion,trillion,quadrillion,quintillion,sextillion,septillion,octillion,nonillion,decillion,undecillion,duodecillion,tredecillion,quattuordecillion,sexdecillion,septendecillion,octodecillion,novemdecillion,vigintillion]strNum = raw_input(Please enter an integer:\n>> )# A recursive function to get the word equivalent for numbers under 1000.def subThousand(inputNum): num = int(inputNum) if 0 <= num <= 19: return ByOne[num] elif 20 <= num <= 99: if inputNum[-1] == 0: return ByTen[int(inputNum[0])] else: return ByTen[int(inputNum[0])] + - + ByOne[int(inputNum[1])] elif 100 <= num <= 999: rem = num % 100 dig = num / 100 if rem == 0: return ByOne[dig] + hundred else: return ByOne[dig] + hundred and + subThousand(str(rem))# A looping function to get the word equivalent for numbers above 1000# by splitting a number by the thousands, storing them in a list, and # calling subThousand on each of them, while appending the correct# zero-group.def thousandUp(inputNum): num = int(inputNum) arrZero = splitByThousands(num) lenArr = len(arrZero) - 1 resArr = [] for z in arrZero[::-1]: wrd = subThousand(str(z)) + zap = zGroup[lenArr] + , if wrd == : break elif wrd == zero : wrd, zap = , resArr.append(wrd + zap) lenArr -= 1 res = .join(resArr).strip() if res[-1] == ,: res = res[:-1] return res# Function to return a list created from splitting a number above 1000.def splitByThousands(inputNum): num = int(inputNum) arrThousands = [] while num != 0: arrThousands.append(num % 1000) num /= 1000 return arrThousands### Last part is pretty much just the output.intNum = int(strNum)if intNum < 0: print Minus, intNum *= -1 strNum = strNum[1:]if intNum < 1000: print subThousand(strNum)else: print thousandUp(strNum)Sample run:>>> Please enter an integer:>> 95505896639631893ninety-five quadrillion, five hundred and five trillion, eight hundred and ninety-six billion, six hundred and thirty-nine million, six hundred and thirty-one thousand, eight hundred and ninety-three >>>Issues:Basically, the peeves I'm having are as follows:My first two functions seems to be taking up too many lines. The first one, subThousand, seems to be this way because of the check made against the last digit of a number from 20 to 99. The logic I applied was, if it ends in 0, use the ByTen list. If it doesn't combined ByTen and ByOne. While effective, I feel like it could use some real work since I think it qualifies as a DRY-violation. Even the final part checking for numbers from 100 to 999 seems to follow the same pattern. Alas, I've tried paring it down but I've honestly hit a roadblock on this one insofar as trying to come up with a creative and clean solution.My second function, thousandUp is quite the disaster. 
I tried coming up with a looping function that creates a list of one to three-digit numbers, so that I can call subThousand on each of them from the front to the back (hence the arrZero[::-1]. At the same time, after converting each element in the list to a word, I concatenate it with the appropriate equivalent in the zGroup list, or the list of the zero-groups. However, I personally can't find a safer and more precise way of landing on the correct spot in the zGroup list to start concatenating.To get around this, I took the length of the splitByThousands array, adjusted for 0-index, and used it to get the appropriate zero-append (zap). Before the loop ends, I subtract one from its current value so that it's adjusted accordingly.In addition, as I'm attempting to make the output as clean as possible, I add a to the wrd variable so it doesn't concatenate poorly with the zero-append, as well as add a , to the zero-append to separate it from the next zero-group. However, there will be instances that there are no values for some zero-groups. To avoid showing stuff like zero million, I added a check inside. This is the part that makes me die a little inside:for z in arrZero[::-1]: wrd = subThousand(str(z)) + zap = zGroup[lenArr] + , if wrd == : break elif wrd == zero : wrd, zap = , resArr.append(wrd + zap) lenArr -= 1It works, but it just doesn't look good. Is it possible to do this in list-comprehension form or a better for-loop without turning it into more confusing mush?I must admit as well that the last part of the code sucks a little. I've done a lot of str-->int conversions inside the functions, then I did one more outside of them. On top of that, my approach to negative numbers is hackish at best (print Minus).
Number-to-word converter
python;converting;numbers to words
Try to extract the different pieces of logic in functions to make things clearer. In your case, you have defined little functions but the place where you are using them is a bit in the middle of nowhere.Here's what you could do :def get_number_as_words(strNum): intNum = int(strNum) if intNum < 0: print Minus, # I didn't see this at the beginning but I'll fix it at the end intNum *= -1 strNum = strNum[1:] if intNum < 1000: return subThousand(strNum) else: return thousandUp(strNum)Also, all these functions should be properly documented using docstrings.If your code can potentially be used as a module, it is advised to put the part which actually does stuff in a function and guard the call to this function behind a if __name__ == __main__: condition.In your case, here's what I have :def main(): strNum = '95505896639631893' #raw_input(Please enter an integer:\n>> ) expected='ninety-five quadrillion, five hundred and five trillion, eight hundred and ninety-six billion, six hundred and thirty-nine million, six hundred and thirty-one thousand, eight hundred and ninety-three ' print(get_number_as_words(strNum)) print(expected) assert(get_number_as_words(strNum)==expected)if __name__ == __main__: main()As I am messing around with your code, I use assert to detect quickly if I break the test case you have provided. Also, it shows something that we might want to fix on the long run : the trailing whitespace.subThousand: You can use divmod to divide integers and get the quotient and the remainder.subThousand: In successive if, else if, else if, etc, you do not need to check the same condition multiple times. Also, you can use assert to check that your function is used on proper values (described in the documentation).subThousand: This function (and it might be the case for the others as well but I have to progress one function at a time) should probably be fed an integer and not a string. Operation on numbers should be enough for pretty much everything.subThousand: You can use implicit evaluation of integers as boolean : 0 is False, anything else is True. subThousand: As explained in Janne Karila's comment, it might be worth returning an empty string when the input is 0.After taking into account the comments (except the last), the function looks like :def subThousand(n): assert(isinstance(n,(int, long))) assert(0 <= n <= 999) if n <= 19: return ByOne[n] elif n <= 99: q, r = divmod(n, 10) return ByTen[q] + (- + subThousand(r) if r else ) else: q, r = divmod(n, 100) return ByOne[q] + hundred + ( and + subThousand(r) if r else )I am a big fan of the ternary operator but it's purely personal. Also I used a recursive call to make things more consistent.splitByThousands: By taking into account previous comments (types, divmode, etc), you can already get something more concise :def splitByThousands(n): assert(isinstance(n,(int, long))) assert(0 <= n) res = [] while n: n, r = divmod(n, 1000) res.append(r) return resthousandUp: The same comments are still relevant.thousandUp: We maintain one list resArr and a number lenArr through the function but we can see that the property lenArr + len(resArr) + 1 == len(arrZero) is (almost) always true. One can easily get convinced of this or add assert(lenArr + len(resArr) + 1 == len(arrZero)) everywhere. 
Thus lenArr = len(arrZero) - len(resArr) - 1.thousandUp: The block wrd = subThousand(z) + zap = zGroup[len(arrZero) - len(resArr) - 1] + , if wrd == : break elif wrd == zero : wrd, zap = , resArr.append(wrd + zap)can be simplified a lot :there is not point in adding a whitespace to wrd at this stage.you do not need to use zGroup in all cases.as subThousand(X) is zero only if X==0, wrd == zero becomes z == 0as subThousand(X) is never an empty string, this test has no reason to be there. (If you ever perform the change described above, subThousand(X) will be empty when X==0 which is a case already handled).Thus the block becomes : if z: wrd = subThousand(z) zap = zGroup[len(arrZero) - len(resArr) - 1] + , else: wrd, zap = , resArr.append(wrd + + zap)which can then be rewritten : if z: wrd_zap = subThousand(z) + + zGroup[len(arrZero) - len(resArr) - 1] + , else: wrd_zap = resArr.append(wrd_zap)Then, because there is not point in adding spaces because all elements of the array already end with a space, we can do : if z: wrd_zap = subThousand(z) + + zGroup[len(arrZero) - len(resArr) - 1] + , else: wrd_zap = resArr.append(wrd_zap)thousandUp: Once the code is simplified, we realise that as described in Janne Karila's comment, we could use , to call join(), killing many birds with one stone :the hack to remove the trailing comma is not required anymorethe trailing whitespace also disappears (this is a nice side-effect as the strip() now performs what is was supposed to do in the first place but was then prevented by the commas)everything becomes easierWe now have :for z in arrZero[::-1]: resArr.append(subThousand(z) + + zGroup[len(arrZero) - len(resArr) - 1] if z else )return , .join(resArr).strip()thousandUp: Something that wasn't obvious at first but starts to be : for each element in arrZero, we add an element in resArr so that len(arrZero) - len(resArr) - 1 goes from len(arrZero)-1 to 0. It looks like we could achieve this with enumerate(). We could do it in a complicated way but let's keep things simple instead : let's iterate in the obvious order and call reversed at the very end when joining.resArr = []for i,z in enumerate(arrZero): resArr.append(subThousand(z) + + zGroup[i] if z else )return , .join(reversed(resArr))But he, what do we have here ?! A typical list comprehensiondef thousandUp(n): assert(isinstance(n,(int, long))) return , .join(reversed([subThousand(z) + + zGroup[i] if z else for i,z in enumerate(splitByThousands(n))]))Oops, I might have gone too far . Actually, we can go even further to handle the trailing whitespaces in a smart way :def thousandUp(n): assert(isinstance(n,(int, long))) assert(0 <= n) return , .join(reversed([subThousand(z) + ( + zGroup[i] if i else ) if z else for i,z in enumerate(splitByThousands(n))]))I have kept this mistake for too long to fix the whole answer but :def thousandUp(n): assert(isinstance(n,(int, long))) assert(0 <= n) return , .join(reversed([subThousand(z) + ( + zGroup[i] if i else ) for i,z in enumerate(splitByThousands(n)) if z]))is probably a better version (we don't get repetitions of the separator).get_number_as_words: Finally, this should also take an integer as a parameter. Also, it is probably the right place to handle 0 as a special value. 
Morehover, thousandUp() handles properly inputs smaller than 1000.At the end, here's what my code is like :def subThousand(n): assert(isinstance(n,(int, long))) assert(0 <= n <= 999) if n <= 19: return ByOne[n] elif n <= 99: q, r = divmod(n, 10) return ByTen[q] + (- + subThousand(r) if r else ) else: q, r = divmod(n, 100) return ByOne[q] + hundred + ( and + subThousand(r) if r else )def thousandUp(n): assert(isinstance(n,(int, long))) assert(0 <= n) return , .join(reversed([subThousand(z) + ( + zGroup[i] if i else ) if z else for i,z in enumerate(splitByThousands(n))]))def splitByThousands(n): assert(isinstance(n,(int, long))) assert(0 <= n) res = [] while n: n, r = divmod(n, 1000) res.append(r) return resdef get_number_as_words(n): assert(isinstance(n,(int, long))) if n==0: return Zero return (Minus if n < 0 else ) + thousandUp(abs(n))def main(): n = 95505896639631893 # int(raw_input(Please enter an integer:\n>> )) expected='ninety-five quadrillion, five hundred and five trillion, eight hundred and ninety-six billion, six hundred and thirty-nine million, six hundred and thirty-one thousand, eight hundred and ninety-three' print(get_number_as_words(n)) print(expected) assert(get_number_as_words(n)==expected)if __name__ == __main__: main()I hope you will find it more pleasing and that my comments will help you. Also, despite the big number of comments, your initial code was pretty good.Disclaimer : I have done everything only based on your example (which was enough to spot mistakes as I was writing them). I might have missed a few things or done a few things wrong so I have tried to detail the different steps as much as possible just in case.Edit I just thought that you could change splitByThousands to make it yield values instead of returning a list...
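A possible sketch of that generator idea, keeping the thread's Python 2 style (this is my guess at what the edit hints at, not code from the answer): splitByThousands can yield the three-digit groups lazily, and the comprehension in thousandUp still works unchanged because enumerate() accepts any iterable.

def splitByThousands(n):
    assert isinstance(n, (int, long))
    assert 0 <= n
    while n:
        n, r = divmod(n, 1000)
        yield r   # hand out each 3-digit group as it is computed instead of building a list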
_computerscience.421
In his classic paper Ray Tracing with Cones, John Amanatides describes a variation on classical ray tracing. By extending the concept of a ray by an aperture angle, making it a cone, aliasing effects (including those originating from too few Monte Carlo samples) can be reduced.During cone-triangle intersection, a scalar coverage value is calculated. This value represents the fraction of the cone that is covered by the triangle. If it is less than $1$, it means that the triangle doesn't fully cover the cone. Further tests are required. Without the usage of more advanced techniques however, we only know how much of the cone is covered, but not which parts.Amanatides states:Since at present only the fractional coverage value is used in mixing the contributions from the various objects, overlapping surfaces will be calculated correctly but abutting surfaces will not.This does not make sense to me. From my point of view it is the other way around. Let's take an example: We have two abutting triangles, a green and a blue one, each of which covers exactly 50% of our cone. They are at the same distance from the viewer.The green triangle is tested first. It has a coverage value of 0.5, so the blue triangle is tested next. With the blue one's coverage value of 0.5 our cone is fully covered, so we're done and end up with a 50:50 green-blue mixture. Great!Now imagine that we kill the blue triangle and add a red one some distance behind the green one - overlapping. Greeny gives us a coverage value of 0.5 again. Since we don't have the blue one to test anymore we look further down the cone and soon find the red one. This too returns some coverage value greater than 0, which it shouldn't because it is behind the green one.So, from this I conclude that abutting triangles work fine, while overlapping triangles would need some more magic like coverage masks to be correct. This is the opposite of what Amanatides says. Did I misunderstand something or is this a slip in the paper?
Ray Tracing with Cones: coverage, overlapping and abutting triangles
raytracing
I did implement a ray tracer based on Amanatides' work but, as that was years ago, my memory of the paper is a little rusty. However, ignoring this particular case, in general when it comes to working with fractional coverage, e.g. alpha compositing (see "A over B"), my understanding is that the usual assumption is that the items being composited are uncorrelated. Thus if A with X% coverage is on top of B with Y% coverage and C is in the background, then it's assumed that one will see
X%*A + (100-X%)*Y%*B + (100-X%)*(100-Y%)*C
Does that make sense? Obviously this will give leaks in the case where A and B are strongly correlated. I think I might have put a small bit mask on the rays to avoid these problems, but it was a very long time ago.
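A small sketch of that mixing rule, with coverage as fractions in [0, 1] (the function and variable names are mine, and colours are plain numbers just to keep it short):

def mix_uncorrelated(a_cov, a_col, b_cov, b_col, c_col):
    # assumes A's and B's coverage of the cone are statistically independent
    return (a_cov * a_col
            + (1.0 - a_cov) * b_cov * b_col
            + (1.0 - a_cov) * (1.0 - b_cov) * c_col)

# With the question's two abutting triangles (each covering 50% and not overlapping),
# this rule credits the second one with only (1 - 0.5) * 0.5 = 25% of the cone and
# leaks the rest to the background, which is exactly the error that correlated
# (here: perfectly complementary) surfaces introduce.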
_unix.41773
I want to share my VPN connection with other devices. I just got a D-Link router, and I'm a little bit lost about how to deal with it. I connect to internet using the VPN of my university (PPTP). The LAN has a DHCP server, giving to each computer an IP address according to their MAC address (if the mac address is unknown by the DHCP server, then it gives an IP address under a subnetwork that cannot connect to the VPN). The connection to internet can be successful only if the combination IP:VPN_login:VPN_password is correct.I already connected to the router, using telnet on port 23. I know how to change MAC address using ifconfig ethx down && ifconfig ethx up hw ether <mac address>. So changing the mac address of the router to mine (which is recognized by the university), the router will get my usual IP address.But now? To connect to vpn, I usually use network-manager on ubuntu, and the toolkit nmcli
share VPN connection using router
vpn;router;connection sharing;pptp
null
_cs.4721
My homework is implementing the BFS, DFS, depth-limited and IDS algorithms for a map represented as a 2D grid with 8 directions of movement. I read that the IDS algorithm is optimal, but in my case it does not produce optimal paths. Is the IDS algorithm optimal for graphs (e.g. a map as a grid), or does optimality only apply to searching trees?
IDS algorithm optimality for grid?
graph theory;graph traversal
null
_codereview.67364
Ruby's Fixnum and Bignum can hold very large integers and automatically handle overflow. I decided to implement BitArray using an integer as storage. Please let me know what you think about code and functionality of a BitArray.class BitArray include Enumerable def initialize size @size = size @field = 2**size end def set positions bits = positions.kind_of?(Integer) ? [positions] : positions bits.each { |position| @field |= 1 << (@size - position) } self end def clear positions bits = positions.kind_of?(Integer) ? [positions] : positions bits.each { |position| @field ^= 1 << (@size - position) } self end def get position (@field >> (@size - position) ).to_s(2)[-1].to_i end def each(&block) @field.to_s(2)[1..-1].split().each { |bit_string| yield(bit_string.to_i) } end def to_s @field.to_s(2)[1..-1] end def count @field.to_s(2)[1..-1].split().inject(0) {|sum,bit| sum + bit.to_i} endendAnd the Testrequire 'minitest/autorun'require_relative 'bit_array'class BitArrayTest < MiniTest::Unit::TestCase def test_equal assert_equal 00000, BitArray.new(5).to_s end def test_set assert_equal 00100, BitArray.new(5).set(3).to_s assert_equal 00010, BitArray.new(5).set(4).to_s assert_equal 11100, BitArray.new(5).set([1,2,3]).to_s assert_equal 100000000101, BitArray.new(12).set([1,10,12]).to_s end def test_clear assert_equal 01000, BitArray.new(5).set([1,2]).clear(1).to_s end def test_get assert_equal 0, BitArray.new(5).set([1,2]).get(3) assert_equal 0, BitArray.new(12).set([12]).get(11) assert_equal 0, BitArray.new(12).set([12]).get(1) end def test_count assert_equal 2, BitArray.new(5).set([1,2]).count end def test_iterate assert_equal [0, 0, 0, 0, 0], BitArray.new(5).each {|n| puts n } end end
Bit Array in Ruby using integer as a storage
ruby;array
My 2nd question on this site concerned bitmasks, actually. As @tokland correctly pointed out back then, bitmasks are really a pretty low-level technique that doesn't quite belong in a high-level language. Yes, it has its uses, but a hash or an array of symbols would solve the same kinds of problems in a more high-level, more expressive, and more readable manner.In fact, trying to do this in a high-level language like Ruby presents some oddities, since you're using strings and integers to work with bits. And strings and integers are objects in Ruby, so you've got all these really high-level concepts and constructs involved in trying to do something with the lowest-level aspect of computing. It works, but it's like packing something tiny in a giant padded box.Still, code's code, so:Your #test_iterate doesn't quite test what you think. You're just printing to stdout. What the assertion is actually testing is the value of @field.to_s(2)[1..-1].split() - it doesn't really test that anything is being iterated. This implementation would also pass your test, even though it never iterates anything:def each(&block) # block argument ignored, no iteration; passes tests anyway @field.to_s(2)[1..-1].split()endWith your current code, I'd write the test like:expected = [1, 1, 0]BitArray.new(3).set([1,2]).each do |bit| assert_equal expected.shift, bitendassert expected.empty? # check that we looped the expected number of timesYou test for #get is also kinda strange. In all cases you check for 0 - how about a test where you expect to get 1 back? There really are only two possible return values, so you should probably check both.Why on Earth is your class using a 1-based indexing scheme? It's an array of a sort, so why not use a zero-based index like, well, like an array would do? I know this confused me greatly.Your #get method could perhaps just return a boolean. That's the closest you'll get to returning a raw bit.Update: I wasn't aware of this, but Fixnum actually has subscripting access to bits already. So #get can be written as simply:def get position @field[position]endYour #set and #clear methods should probably just use splats - it'd let you skip the array-boxing if just a single integer's passed in:def set *positions # use a splat positions.each { |position| @field |= 1 << (@size - position) } selfendunset might be a better name than clear - I'd expect clear to reset the entire array.You're going out of your way to always calculating @size - (some position) to make sure that you fill your array from the left. I.e. position 1 is the left-most bit, rather than the (more natural) right-most bit. However, there's really no need. Inside your class, you can do whatever you want. Your current tests mostly check the string representation, so you can just flip that, rather than flip everything else. Filling from the right would also let you skip the size argument entirely; as you say, Ruby will catch overflows anyway, but you never actually check (or test) trying to get or set a bit outside the size. It's also just simpler to find and flip bits if you go from the right, and use a zero-based index, since any bit is given as simply 2**position. For instance:def set *positions positions.each { |position| @field |= 2**position } selfendYou're including Enumerable, so use it. Your #count method could be written as justdef count inject(0, :+)endsince it'll use each, and each will yield either 1 or 0.Again, this is all academic, as you're no doubt better off just using regular arrays or hashes. 
Still, here's a refactored version, because why not? Slightly different output for #to_s since I'm not using a size argument. And I'm using 0-based indices. But otherwise it's the same.class BitArray include Enumerable def initialize @bits = 0 end def set *positions positions.each { |position| @bits |= 2**position } self end def unset *positions positions.each { |position| @bits ^= 2**position } self end def get position @bits[position] end def each(&block) bits = @bits until bits == 0 yield bits & 1 bits = bits >> 1 end end def to_s @bits.to_s(2).reverse end def count inject(0, :+) endendrequire 'minitest/autorun'class BitArrayTest < MiniTest::Test def test_equal assert_equal 0, BitArray.new.to_s end def test_set assert_equal 0001, BitArray.new.set(3).to_s assert_equal 1011, BitArray.new.set(0, 2, 3).to_s assert_equal 0100000000101, BitArray.new.set(1, 10, 12).to_s end def test_unset assert_equal 101, BitArray.new.set(0, 1, 2).unset(1).to_s end def test_get assert_equal 1, BitArray.new.set(1, 2).get(2) assert_equal 0, BitArray.new.set(12).get(11) assert_equal 1, BitArray.new.set(12).get(12) end def test_count assert_equal 3, BitArray.new.set(1, 2, 5).count end def test_iterate expected = [1, 1, 0, 1] BitArray.new.set(0, 1, 3).each do |bit| assert_equal expected.shift, bit end assert expected.empty? endendNote that this version and its tests still don't deal with the issue of passing, say, negative indices or non-numeric indices.
_cs.28971
I was reading about decision trees and this is what I understood:We build decision trees by choosing an attribute and building subtrees (which are also decision trees) as children of the node representing that attribute. This means that as we go down, in building a decision tree, the no. of training instances that we use to build a subtree decreases. This is because when we build a subtree, we only have to train it using a subset of the original data consisting of instances which satisfy the conditions on the path towards the root node of the subtree.Now here's the question: We usually believe that if training data is not enough, the model built is incorrect. This would mean that the subtrees down in a decision tree may not be good models (as they were trained using much less data than its ancestors in the tree) and if they are not, how can they be recursively combined to give a full tree which may give a good model? Where is the fault in my reasoning?
How can we combine badly trained decision trees to a good one?
machine learning
null
_softwareengineering.104919
Here is an example: early in the morning of the second day of the Sprint (during the stand up) I go to the board and see that the story I worked the previous day (first day of Sprint) contains a big 1 IDEAL DAY written on it (estimate). Right now (early in the second day of Sprint) the story is not completed and I guesstimate it will take half a REAL DAY to complete it. Question: so to track progress and update the burndown right now, shouldn't I update that 1 IDEAL DAY on the card with something else (recap.: original estimate in 1 ideal day, remaining work in 1/2 real day)? What would be that something else in this particular example?
Daily Scrum Meeting (Burndown chart)
scrum;estimation;user story;burndown
When I am talking personal finances with my wife as we go over the checking account, I don't start responding to her questions with numbers in octal or hexadecimal format. She started spouting figures in decimal, so I communicate decimal back to her. If you start your Scrum in IDEAL days then all discussions should revolve around IDEAL days. Don't even talk REAL days, because nobody will ever really know how to relate those to IDEAL days. You THINK it will take you 1/2 a REAL day to finish the story, but then about 10am your breakfast doesn't agree with you and you can't leave the bathroom for 6 hours. Or some important client meeting comes up that you need to attend. This is why you chose IDEAL days in the first place.
_webmaster.18995
The company I'm working at handles the hosting ourselves. We have www.domain.com up now and we want to make domain.com work as well. We use IIS7. I have seen some articles and found one with a URL rewrite setting that can be added to IIS7. What's the best way to go? We don't want to cut any corners; we want to do it properly. This is the article that I mentioned above: http://weblogs.asp.net/owscott/archive/2009/11/27/iis-url-rewrite-rewriting-non-www-to-www.aspx
How to setup site for no www
dns;iis7;no www;configuration
null
_codereview.61183
I wish to iterate through a Lua table, but want to get one particular element first always (its key is known beforehand).Since the pairs function does not guarantee the order of a table's elements, I wrote a custom iterator function (I can insert every element into a indexed table and use the table.sort function, but I wanted to try writing a custom iterator).local function pairsWithPriority(t,priorityKey) local i = 0 local function closure(_,lastReturnedKey) local k,v if i == 0 then --the first element should always be t[prioritykey] k,v = priorityKey,t[priorityKey] elseif i == 1 then --since we have returned the first element we reset the next pointer k,v = next(t, nil) else k,v = next(t,lastReturnedKey) end if i > 0 then if k == priorityKey then --the first element is encountered AFTER it has been manually returned, so discard k,v = next(t, k) end end i = i + 1 return k,v end return closure end When I use this iterator function:local t = {a=1,b=2,c=6,d=4,e=4}for i,v in pairsWithPriority(t,c) do print(i,v)endt[c] is always returned first.This iterator works, but I'd like to know if there is a better way of doing this in a cleaner and more efficient way
Return one particular element first, when iterating a lua table
lua;iterator;lua table
null
_codereview.88765
I decided to write a sieving prime-finding programme and make it fast. It works correctly but counting the primes below 10**9 takes 36 times as primesieve downloaded from http://primesieve.org/.Now I understand that they have a World Record so they better be fast, but we all know that a compiler is much better than a human at making optimization, so I fear I missed a massive optimization in my implementation.Some timings:time ./sieve-c 100000000Up to 100000000 there are 5761455 primes.real 0m1.914suser 0m1.876ssys 0m0.032stime ./primesieve 100000000Sieve size = 32 kilobytesThreads = 4100%Prime numbers : 5761455Time elapsed : 0.0568619 secreal 0m0.206suser 0m0.192ssys 0m0.004stime ./sieve-c 1000000000Up to 1000000000 there are 50847534 primes.real 0m21.408suser 0m21.128ssys 0m0.252stime ./primesieve 1000000000Sieve size = 32 kilobytesThreads = 4100%Prime numbers : 50847534Time elapsed : 0.582213 secreal 0m0.607suser 0m2.220ssys 0m0.000sWhere should I go next for better performance (wheel factorization, memoization, segmentation, parallelism)?Also, my code is not able to calculate 10**10 or more because that would take up too much space and my computer crashes. Maybe I should try segmentation?Please note that R-Python is a subset of Python, so you can run my programme with a regular Python too (but it will be much slower).import doctestimport mathimport sysimport doctestdef sieve(limit): Returns the primes below the `limit` >>> sieve(9) [False, False, True, True, False, True, False, True, False] >>> sieve(100000).count(True) 9592 sieve = [True]*limit sieve[0],sieve[1] = False, False sqrt_limit = math.sqrt(limit) for number, prime in enumerate(sieve): if prime: for multiple in xrange(number*2, limit, number): sieve[multiple] = False if number > sqrt_limit: break return sievedoctest.testmod()def _sum(lst): s = 0 for i in lst: s += i return sdef main(argv): print(Up to + argv[1] + there are +str(_sum(sieve(int(argv[1])+1))) + primes.) return 0def target(*args): return main, Noneif __name__ == '__main__': import sys main(sys.argv)
R-Python sieve, 36 times slower than World Record
python;performance;primes;sieve of eratosthenes;rpython
null
_unix.312360
Is there a way to start instances of xterm using different configuration files? E.g.: xterm -load .Xresources-1, xterm -load .Xresources-2. Using xrdb -load ~/.Xdefaults changes the configs globally, which I try to avoid.
Start xterm instance with different configurations
x11;configuration;xterm;x resources
That's usually done by changing the instance name, which by default is the name of the program which is run, but can be overridden using the -name option. (If you make a symbolic link to a program and run that link, that's a quick way of renaming a program as well).If you have a resource file with settings likexterm*font: fixedthe instance is the xterm at the beginning of the line.You can also change the class name (which you commonly see as XTerm, also at the beginning of the resource lines). The uxterm script uses the -class option to override this to change settings to make xterm work consistently in UTF-8 mode.If you have different class names, then you can use the app-defaults search mechanism to support different resource files. I set the environment variable XAPPLRESDIR to my own directory, and have locally-customized resource files (each named for a class). That is documented in X(7):application-specific files Directories named by the environment variable XUSERFILESEARCHPATH or the environment variable XAPPLRESDIR (which names a single directory and should end with a '/' on POSIX systems), plus directories in a standard place (usually under /tmp/Xorg-KEM/lib/X11/, but this can be overridden with the XFILESEARCHPATH environment variable) are searched for for application-specific resources. For example, application default resources are usually kept in /tmp/Xorg-KEM/lib/X11/app-defaults/. See the X Toolkit Intrinsics - C Language Interface manual for details.
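For example, something along these lines (the instance names and resource values here are purely illustrative): put instance-specific settings in a resource file merged with xrdb, then start each flavour with its own -name.

! merged with: xrdb -merge ~/.Xresources
term1*background: black
term1*foreground: grey90
term2*background: white
term2*foreground: black

xterm -name term1 &
xterm -name term2 &

Each xterm then picks up only the resources whose instance name matches the -name it was started with.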
_softwareengineering.131332
Possible Duplicate: When to use abstract classes instead of interfaces and extension methods in C#? What other reasons are there to write interfaces rather than abstract classes? This question sounded a bit trivial to me as well, till I gave it serious thought myself. What is the point of a Java interface? Is it really Java's answer to multiple inheritance? Despite using interfaces for a while, I never got around to thinking about the point of all this. I know what an interface is, but I am still pondering the why.
What is the point of an interface?
java;object oriented;interfaces
null
_unix.146645
What is the proper way to distribute shell scripts, if the behaviour of shells can be modified by set and is thus unpredictable? For example, rm *.txt wouldn't be executed as expected in environments in which set -f has been run. How should I make sure that rm *.txt removes all text files in the current directory in any environment? More generally, how should I make sure that shell scripts will run as expected before distributing them?
Proper way to distribute shell scripts
bash;shell;shell script
Shell scripts are normally treated the same as any other kind of executable file, such as binaries, Python scripts, Perl scripts, or any other kind of script. They have a shebang at the top that directs the kernel to execute them through the shell, and they are expected to be invoked the same way as any other command. As such, a new shell is started every time the script is invoked, and any settings such as set -f present in the invoking shell or in any other shell instance on the system are irrelevant.
Of course it's possible for users to source your script instead of running it, for instance like this:
. /path/to/your/script
or to execute it in a shell that has non-default settings, like this:
sh -f /path/to/your/script
but those are not considered normal ways of invoking your script, and users who do that should expect whatever they get as a result.
Note that there are some scripts that are intended to be sourced, not executed, because their purpose is to do things such as changing the cwd or setting environment variables that must be reflected in the environment of the sourcing shell, but these are in the minority and it's usually done as part of an agreed protocol. These files can be considered more like plugins to whatever system they expect to be sourced by, not so much independent scripts. For example, files in /etc/rc*.d with names that end in .sh are sourced by the startup script subsystem, not executed; it is documented that this is what will happen if you place a file with such a name in /etc/rc*.d, and when it's done it's done on purpose. The convention of naming files intended to be sourced instead of executed in this way is followed elsewhere too, but not universally.
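A tiny, hypothetical illustration of that point (the file name and contents are made up): a script run through its shebang gets a fresh shell with default options, so a caller's set -f does not affect its globbing, whereas sourcing it runs in the caller's shell and inherits its settings.

#!/bin/sh
# hello.sh - expands the glob in its own, freshly started shell
echo *.txt

$ chmod +x hello.sh
$ set -f            # disable globbing in the interactive shell
$ ./hello.sh        # still lists the .txt files: the script got a new shell
$ . ./hello.sh      # sourced: runs in the current shell, so the glob stays literal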
_unix.40142
I have been trying to install a gedit plugin called RunC to compile C programs using the text editor. Although the instructions tell me to use Ubuntu 9 or 10 for this, I'm currently running Fedora 16. I thought that I would not have many problems, but after running the shell script with the sh command to install the plugin, gedit showed no signs of the plugin being installed. I've made sure that gedit is updated to the current release. Is there something I'm missing from this installation? Additionally, does the file structure of various distributions differ greatly from one another? Will scripts that work for one distro be incompatible with another? Thanks for indulging me with my silly questions. Attached below is the RunC plugin for gedit: http://plg1.uwaterloo.ca/~gvcormac/RunC/
Installing a gedit plugin on Fedora
shell;fedora;gedit
I didn't manage to install it because RunC wasn't compatible with other distros. I confirmed this by installing Ubuntu 10, and it worked perfectly.
_softwareengineering.350032
I read bytes from a socket. The data consists of only two types of structures:

struct A {
    unsigned char type_id;     // 0x01
    unsigned char sequence_id; // Incremented in successive packets.
    // Some other fields
    unsigned char checksum;
};

struct B {
    unsigned char type_id;     // 0x02
    unsigned char sequence_id; // Incremented in successive packets. This sequence is independent from A's sequence_ids.
    // Some different fields
    unsigned char checksum;
};

These structures are written on the wire with no particular pattern to how they occur. They are the only data that I receive, and I can read the data.
I am trying to figure out how best to identify these packets in the unsigned char* array I read. Right now I iterate until I find the byte 0x01 or 0x02 and then, based on the size of the relevant struct, try to get the next struct. Once I establish that I've found a packet boundary, I just keep offsetting by sizeof(A) or sizeof(B) accordingly and hope I'm fine.
My problem is that if a field inside a struct itself happens to contain the value 0x01 or 0x02 while I'm checking for the beginning of a struct, this method returns invalid structures.
Unfortunately I cannot touch the server side. It's quite an old system.
One other point is that I join as a client mid-way, that is, the server side always keeps sending data - this means that the first byte I read is most of the time not the beginning of a packet.
What can I do to identify structures robustly?
What is the best method to identify objects in a byte array?
c++
Any valid type_id is a possible packet start, but you have two additional fields that can test whether this is the case:

- For all except the first packet, you can compare the sequence number. If it doesn't match, you got it wrong and should continue to search for the next possible packet start. This would require you to buffer until at least the second packet while establishing a client connection.
- Once you've read a packet, you can compare the checksum. If it doesn't match, discard the packet and look for the next possible start.

Of course this becomes less useful the more type ids there are, because the start symbols might be very frequent. This doesn't make the above approach impossible, just more expensive, as you have to test for a packet start more often.
Depending on the size of the data and on the checksum algorithm, an 8-bit checksum is also extremely slim. E.g. if the bytes are just XORed with each other, I'd expect too many false positives for this to be feasible.
If you are indeed using sockets as in TCP sockets, the data is guaranteed to arrive uncorrupted, which means the checksum can never be invalid and can be relied on for detecting packet boundaries. But in that case, it would also be impossible to receive half a packet.
If you are actually listening to a broadcast over a public medium (e.g. radio), there are usually protocols in force that allow synchronization, e.g. a carrier frequency. For data transmission, a start symbol (syncword, sync character, preamble) that cannot occur in the message itself can also be used. For example, Ethernet has a Start Frame Delimiter. In general, taking a look at data link layer protocols might be enlightening to see how these synchronization problems are solved in practice, though many protocols like Ethernet must also handle the problem of multiple senders, which doesn't apply in your case.
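To make the resynchronization idea concrete, here is a rough C++ sketch of the scanning loop. It assumes, purely for illustration, that the checksum is a simple XOR over the preceding bytes and that the payload sizes are known constants; neither of those details is stated in the question, so adjust both to the real protocol.

#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical sizes; in the real protocol use sizeof(A) and sizeof(B).
constexpr std::size_t kSizeA = 8;
constexpr std::size_t kSizeB = 12;

// Assumed checksum: XOR of all bytes before the checksum byte (an assumption,
// the question does not say which algorithm is actually used).
static bool checksumOk(const std::uint8_t* p, std::size_t len) {
    std::uint8_t x = 0;
    for (std::size_t i = 0; i + 1 < len; ++i) x ^= p[i];
    return x == p[len - 1];
}

// Returns the offset of the first position that looks like a valid packet
// start, or buf.size() if none is found yet.
std::size_t findPacketStart(const std::vector<std::uint8_t>& buf) {
    for (std::size_t i = 0; i < buf.size(); ++i) {
        std::size_t len = (buf[i] == 0x01) ? kSizeA
                        : (buf[i] == 0x02) ? kSizeB : 0;
        if (len == 0 || i + len > buf.size()) continue;  // not a candidate, or need more data
        if (checksumOk(&buf[i], len)) return i;          // checksum confirms the candidate
    }
    return buf.size();
}

A fuller version would also remember the last sequence_id seen for each type and reject candidates whose sequence number doesn't follow on, as described above.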
_cogsci.9985
I am trying to build up a timeline of the development of operant conditioning. So far I have the following:

$1898$ - Thorndike performs experiments using his puzzle box, on which Skinner based his work.
$1930$ - Skinner invents a similar box, called the Skinner box, while studying as a graduate student.
$1938$ - Skinner coins the term operant conditioning.
$1947$ - Skinner performs experiments on pigeons.
$1948$ - Skinner performs experiments on rats.

However, this timeline leaves me skeptical for two reasons. The first is that the timeline is very stretched out. Skinner invented the box and then it took him 17-18 years to publish his famous studies. What experiments was he performing with his box in all those years that weren't as groundbreaking, when his groundbreaking results feature a very basic setup?
The second is that Skinner apparently swapped from rats to pigeons in his experiments because they lived longer, learnt faster and were easier to handle, yet according to my timeline the rat experiments took place after the pigeon ones (or perhaps not, and he just published them in that order).
I suspect my timeline is not quite right (after all, my references are random internet links). My question is: what are the (correct) key dates in the development of operant conditioning? Key dates being the events I have highlighted above and any others you think I have missed. I had more references, but unfortunately I can only post 2 links due to my reputation.
What are the key dates for the development of operant conditioning?
learning;conditioning;history of psychology
null
_unix.143813
At home, I have setup a Raspberry Pi with the latest version of Raspbian and with ssh enabled.I also use a Mac Book Pro with OS X 10.9.3. I use ssh [email protected] to do some work on the Raspberry Pi, so far no problems.Then I configured my router, so that the ssh Port of the Raspberry Pi is also available on the Internet.In my office, I tried to access the Raspberry Pi with ssh over the Internet by ssh [email protected] and I get as a result:ssh_exchange_identification: Connection closed by remote host So, I took another machine in my office with Putty on it and I could login with the same command without problem. I tried even an ssh client on my iPad and even that one could connect. Only the Mac Book is not working, from the outside.At home, I attached my Macbook to the LAN and I could access the Raspberry Pi.So I edited /etc/hosts.allow on the Raspberry Pi by placing SSHD: ALL Than I set the sshd LogLevel to debug and I checked if my MacBook shows up in /etc/hosts.deny, but there was nothing.Why does the Mac Book, as the only device, receive the error message:ssh_exchange_identification: Connection closed by remote host while trying to access from remote.ssh -Y -p 80 -i ~/bin/keys/pie.rsa -vvv [email protected]:OpenSSH_6.2p2, OSSLShim 0.9.8r 8 Dec 2011debug1: Reading configuration data /Users/piuser/.ssh/configdebug1: /Users/piuser/.ssh/config line 2: Applying options for *debug1: Reading configuration data /etc/ssh_configdebug1: /etc/ssh_config line 20: Applying options for *debug1: /etc/ssh_config line 53: Applying options for *debug2: ssh_connect: needpriv 0debug1: Connecting to myhomepie.at [xx.x.xxx.xxx] port 80.debug1: Connection established.debug3: Incorrect RSA1 identifierdebug3: Could not load /Users/piuser/bin/keys/pie.rsa as a RSA1 public keydebug1: identity file /Users/piuser/bin/keys/pie.rsa type -1debug1: identity file /Users/piuser/bin/keys/pie.rsa-cert type -1debug1: Enabling compatibility mode for protocol 2.0debug1: Local version string SSH-2.0-OpenSSH_6.2ssh_exchange_identification: Connection closed by remote hostSome Update:I installed a SLES on the OS X in a VM wit NAT and if I try to connect, I get the same Error.After changed to Bridged, I can connect with the SLES Machine from the VM.Soooooo, as I see that there are only two possible ways.1.) Somehow my Mac is blocking the access to the Raspberry Pi from the outside orrrrrr2.) Somewhere on the Raspberry Pi there is the Mac Address of my WLAN or my Eth0 blocked :( but because there is no Firewall right now on and there is nothing within /etc/hosts.deny sooooooo, is there a place within Raspbian where my Mac could be blocked?So, some Update, I changed the Hosname of my OS X, still no progress.Than, I decided to spoof my Mac Address, which I use to connect from my OS X to the local Router, odd thing I get this, the first time I try to connect by ssh:ssh: Could not resolve hostname mypi.dyndns.org: nodename nor servname provided, or not knownThe Second time I try I get again:ssh_exchange_identification: Connection closed by remote hostSo, right now I grep -r the mac address and the hostname on the Raspberry Pi, I will also look for the local IP my box uses within my Intranet.
Can't figure out why ssh_exchange_identification: Connection closed by remote host appear
ssh;osx;openssh;raspbian
null
_unix.198048
I'm running a chat application called foo on my machine - how do I view all connection attempts (failed or successful) for foo?
I have attempted this with netstat, but I am unsure how to associate these connections with a specific application.
How can I monitor network connections for an app
linux;networking;process
null
_codereview.97513
I'm pretty sure my code is mostly correct. I think I'm having formatting errors more than anything. I keep receiving warnings about double to float and int to float,possible loss of data. Here is what I am trying to accomplish exactly. Write a function that accepts a pointer to a C- string as an argument and calculates the number of words contained in the string as well as the number of letters in the string. Communicate (or send) both of these values back to the main function, but DO NOT use global variables (variables defined outside of a function).Write another function that accepts the number of letters and the number of words and sends the average number of letters per word (or average word size) back to the main function.Demonstrate the functions in a program that asks the user to input a string. First, store the input in a large array. The program should dynamically allocate just enough memory to store the contents of that array. Copy the contents of the large array into the dynamically allocated memory. Then the program should pass that new, dynamically allocated array to the first function. Both the number of words and the average word size should be displayed on the screen. Round the average word size to 2 decimal places.For instance, if the string argument is Four score and seven years ago the first function (word count) should calculate and send back a word count of 6 and a letter count of 25. The second function (average word size) should send back 4.17, or 25 / 6.Extra challenge: See if you can prevent the program from counting punctuation (such as quotes or periods) as part of the sentence. Also, see if you can prevent the program from counting extra spaces as new words. For in stance, 2 spaces will often follow a colon, such as the sentence: There are 3 primary colors : red, blue, and green. In this example, the word count should be 9 (the number 3 does count as a 1-letter word), and the letter count should be 37, for an average of 4.11#include<iostream>#include<string>using namespace std;int CountWordsAndLetters(char* str, int& words, int& letters){ words = 0; unsigned int i = 0; letters = 0; while (str[i] == ' ') i++; for (; str[i]; i++) { if (((str[i] >= 'a') && (str[i] <= 'z')) || ((str[i] >= 'A') && (str[i] <= 'Z'))) letters++; if (str[i] == ' ') { words++; while (1) if (str[i] == ' ') i++; else { i--; break; } } } words = words + 1; return (words);}float avg(float words, float letters){ float a = (double)(letters / words); return a;}int main(){ char array[1000000]; int words = 0; int letters = 0; cout << enter the string\n\n; gets(array); int size; for (size = 0; array[size]; size++); char* str = new char[size]; strcpy(str, array); CountWordsAndLetters(str, words, letters); cout << \nword count= << words << endl; cout << \n letter count= << letters << endl; cout << \naverage number of letters per word= << avg(words, letters); return 1;}
Counting words, letters, average word length, and letter frequency
c++;strings;integer;statistics
It's gonna be a small review, but do you know strtok? As said on the link I provided: A sequence of calls to this function split str into tokens, which are sequences of contiguous characters separated by any of the characters that are part of delimiters. So if you want to split this into words, just set the delimiters to space and punctuation, and you already have most of the job done.
Another thing:

float avg(float words, float letters)
{
    float a = (double)(letters / words);
    return a;
}

If you want a float, why do you explicitly cast it to a double? That's why you have warnings.
The same goes for the typing of words and letters: can they take a negative value? If not, why does your type allow it, instead of using an unsigned type?
And last, I think you are re-inventing strlen with this piece of code: for (size = 0; array[size]; size++); Of course you should not; just use the function instead.
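For illustration, a minimal sketch of the strtok approach could look like this; the delimiter set is just an example (extend it with whatever punctuation you want to ignore), and the sample sentence is the one from your assignment text:

#include <cstring>
#include <cctype>
#include <iostream>

int main()
{
    char text[] = "There are 3 primary colors:  red, blue, and green.";
    const char* delims = " \t\n.,:;!?\"'";   // split on spaces and punctuation

    unsigned int words = 0, letters = 0;
    for (char* tok = std::strtok(text, delims); tok != nullptr; tok = std::strtok(nullptr, delims))
    {
        ++words;                              // each token is one word
        for (std::size_t i = 0; tok[i]; ++i)
            if (std::isalpha(static_cast<unsigned char>(tok[i])))
                ++letters;                    // count only letters, not digits
    }

    std::cout << words << " words, " << letters << " letters\n";
    if (words > 0)
        std::cout << "average word length: " << static_cast<float>(letters) / words << '\n';
    return 0;
}

Note that strtok modifies the buffer it is given, which is why it is applied to a mutable array here, and that this sketch counts a token like 3 as a word but not as a letter, which may or may not match the assignment's exact counting rules.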
_unix.189867
Every time I suspend my machine (at least the options say it is a suspend), the machine freezes within one minute of resuming. It seems to work fine for about a minute. After running for a bit, the mouse will still move, but I can't click anything. After another short time, the mouse freezes as well, and I can't SSH in or toggle caps lock. If I hard reboot and read /var/log/kern.log, it has several lines about a hibernation error:

Mar 11 22:41:02 vera kernel: [ 1.367003] PM: Hibernation image partition 8:5 present
Mar 11 22:41:02 vera kernel: [ 1.367004] PM: Looking for hibernation image.
Mar 11 22:41:02 vera kernel: [ 1.367149] PM: Image not found (code -22)
Mar 11 22:41:02 vera kernel: [ 1.367151] PM: Hibernation image not present or could not be loaded.

I'm not sure exactly what logs to attach, but here is my /var/log/kern.log: http://pastebin.com/raw.php?i=Cf3vudny
Any ideas on how to debug this? I've tried this solution, and it still freezes: How to hibernate in Debian Jessie
--EDIT--
I think it's a driver issue. I ran a Mint live disk, and it would suspend just fine. If I try to run lspci, it also freezes like the suspend issue. I'm guessing drivers at this point, but it is hard to debug driver problems without lspci. Any ideas?
How to debug Debian Jessie freezing on resume?
debian;freeze;hibernate
null
_unix.97653
A friend of mine wanted the libboost libraries installed on our shared computer so after installing libboost-all-dev 1.49.0.1 (a Debian wheezy machine), I get this error when using the pydoc modules command on the commandline. It spits out the following error --root@debian:/usr/include/c++/4.7# pydoc modulesPlease wait a moment while I gather a list of all available modules...**[debian:49065] [[INVALID],INVALID] ORTE_ERROR_LOG: A system-required executable either could not be found or was not executable by this user in file ../../../../../../orte/mca/ess/singleton/ess_singleton_module.c at line 357[debian:49065] [[INVALID],INVALID] ORTE_ERROR_LOG: A system-required executable either could not be found or was not executable by this user in file ../../../../../../orte/mca/ess/singleton/ess_singleton_module.c at line 230[debian:49065] [[INVALID],INVALID] ORTE_ERROR_LOG: A system-required executable either could not be found or was not executable by this user in file ../../../orte/runtime/orte_init.c at line 132--------------------------------------------------------------------------It looks like orte_init failed for some reason; your parallel process islikely to abort. There are many reasons that a parallel process canfail during orte_init; some of which are due to configuration orenvironment problems. This failure appears to be an internal failure;here's some additional information (which may only be relevant to anOpen MPI developer): orte_ess_set_name failed --> Returned value A system-required executable either could not be found or was not executable by this user (-127) instead of ORTE_SUCCESS----------------------------------------------------------------------------------------------------------------------------------------------------It looks like MPI_INIT failed for some reason; your parallel process islikely to abort. There are many reasons that a parallel process canfail during MPI_INIT; some of which are due to configuration or environmentproblems. This failure appears to be an internal failure; here's someadditional information (which may only be relevant to an Open MPIdeveloper): ompi_mpi_init: orte_init failed --> Returned A system-required executable either could not be found or was not executable by this user (-127) instead of Success (0)--------------------------------------------------------------------------*** The MPI_Init() function was called before MPI_INIT was invoked.*** This is disallowed by the MPI standard.*** Your MPI job will now abort.[debian:49065] Abort before MPI_INIT completed successfully; not able to guarantee that all other processes were killed!**root@debian:/usr/include/c++/4.7# I tried looking into the problem and ended up uninstalling the following to get it to work again.openmpi common all 1.4.5-1libibverbs-dev amd64 1.1.6-1 libopenmpi-dev amd64 1.4.5-1 mpi-default-dev amd64 1.0.1libboost-mpi-python1.49.0Although pydoc works again, I'm assuming the packages I removed are going to hurt something else down the track? As you guessed I'm not a C/C++ programmer. So I guess my question is, will this hurt something later? Is there a way to install those packages without hurting Python?
Python error after installing libboost-all-dev on debian
linux;debian;python;c++
null
_unix.93341
I have an ARM Chromebook. There is a project called Crouton that is able to install Ubuntu to the internal harddrive pretty easily (I've done it).But most packages in the repositories are of course only for X86 and X64.Is it even possible technically to take an arbitrary apt package and cross-compile it to run on my ARM Chromebook?If it is possible, how can I do so relatively painlessly? I have access to a very fast 24-core system for the compilation if I can figure out a way to automate the process of making ARM packages.
Cross-compile any aptitude package?
arm;packaging;chrome book
null
_codereview.158781
JavaScript has some inaccurate rounding behavior so I've been looking for a reliable solution without success. There are many answers in this SO post but none cover all the edge cases as far as I can tell. I wrote the following which handles all the edge cases presented. Will it be reliable with edge cases I haven't tested?If this is a viable solution, any enhancements to make it more efficient would be appreciated. It's not fast (time to run function 1000000 times: 778ms) but doesn't seem to be terrible either. If there is a better solution, please post.The edge cases that seemed to give the most problem were the first two:console.log(round(1.005, 2)); // 1.01console.log(round(1234.00000254495, 10)); //1234.000002545console.log(round(1835.665, 2)); // 1835.67))console.log(round(-1835.665, 2)); // -1835.67))console.log(round(10.8034, 2)); // 10.8console.log(round(1.275, 2)); // 1.28console.log(round(1.27499, 2)); // 1.27console.log(round(1.2345678e+2, 2)); // 123.46console.log(round(1234.5678, -1)); // 1230console.log(round(1235.5678, -1)); // 1240console.log(round(1234.5678, -2)); // 1200console.log(round(1254.5678, -2)); // 1300console.log(round(1254, 2)); // 1254console.log(round(123.45)); // 123console.log(round(123.55)); // 124function round(number, precision) { precision = precision ? precision : 0; var sNumber = + number; var a = sNumber.split(.); if (a.length == 1 || precision < 0) { // from MDN https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/round var factor = Math.pow(10, precision); var tempNumber = number * factor; var roundedTempNumber = Math.round(tempNumber); return roundedTempNumber / factor; } // use one decimal place beyond the precision var factor = Math.pow(10, precision + 1); // separate out decimals and trim or pad as necessary var sDec = a[1].substr(0, precision + 1); if (sDec.length < precision + 1) { for (var i = 0; i < (precision - sDec.length); i++) sDec = sDec.concat(0); } // put the number back together var sNumber = a[0] + . + sDec; var number = parseFloat(sNumber); // test the last digit var last = sDec.substr(sDec.length - 1); if (last >= 5) { // round up by correcting float error // UPDATED - for negative numbers will round away from 0 // e.g. round(-2.5, 0) == -3 number += 1/(factor) * (+number < 0 ? -1 : 1); } number = +number.toFixed(precision); return number;};
Rounding JavaScript decimals
javascript;floating point
Here is my solution that I came up with by starting off from your own code, which as I was tracing the logic I realized was a bit bloated, through quite efficient. What's more, when I was adding a few more test cases I discovered 2 bugs in it. I also did benchmark of your, @mseifert and mine code.But first, few comments about your code:You have quite numerous unnecessary (in my opinion) variables,The loop and surrounding conditional statement is completely unnecessary as well,Variables' names could be more descriptive, especially a's,sDec += '0' is shorter and faster than sDec = sDec.concat(0),Why to assign +number.toFixed(precision) to variable number, if in the very next line you just return number? Go with return +number.toFixed(precision) instead.Test casesJust one note: this is definitely not the elegant way of writing tests, but test cases themselves weren't in the scope of this question, so I allowed myself to do it this way.Two lines with comments next to them are those which revealed bugs in your original code to me.console.clear();console.log(round(1.0e-5, 5) === 0.00001);console.log(round(1.0e-20, 20) === 1e-20);console.log(round(1.0e20, 2) === 100000000000000000000);console.log(round(1.005, 2) === 1.01);console.log(round(1234.00000254495, 10) === 1234.000002545);console.log(round(1835.665, 2) === 1835.67);console.log(round(-1835.665, 2) === -1835.67);console.log(round(1.27499, 2) === 1.27);console.log(round(1.2345678e+2, 2) === 123.46);console.log(round(1234.5678, -1) === 1230);console.log(round(1234.5678, -2) === 1200);console.log(round(1254.5678, -2) === 1300);console.log(round(1254, 2) === 1254);console.log(round(1254) === 1254);console.log(round('1254') === 1254);console.log(round('123.55') === 124);console.log(round('123.55', 1) === 123.6);console.log(round('123.55', '1') === 123.6);console.log(round(123.55, '1') === 123.6);console.log(round('-1835.665', 2) === -1835.67);console.log(round('-1835.665', '2') === -1835.67); // Made me turn precision into +precisionconsole.log(round(-1835.665, '2') === -1835.67);console.log(round('1.0e-5', 5) === 0.00001); // Made me add number = +number;console.log(round('1.0e-5', '5') === 0.00001);console.log(round(1.0e-5, '5') === 0.00001);console.log(round('1.0e-20', 20) === 1e-20);console.log(round('1.0e-20', '20') === 1e-20);console.log(round(1.0e-20, '20') === 1e-20);console.log(round('1.0e20', 2) === 100000000000000000000);console.log(round('1.0e20', '2') === 100000000000000000000);console.log(round(1.0e20, '2') === 100000000000000000000);BenchmarkNotice that my code is not much faster than yours.(made with JSBench.Me and using Chrome 57)Complete codeYou can change the only var keyword to const from ES6 if you want to use it, since the variables that this keyword applies to are indeed constant.function round(number, precision) { 'use strict'; precision = precision ? +precision : 0; var sNumber = number + '', periodIndex = sNumber.indexOf('.'), factor = Math.pow(10, precision); if (periodIndex === -1 || precision < 0) { return Math.round(number * factor) / factor; } number = +number; // sNumber[periodIndex + precision + 1] is the last digit if (sNumber[periodIndex + precision + 1] >= 5) { // Correcting float error // factor * 10 to use one decimal place beyond the precision number += (number < 0 ? -1 : 1) / (factor * 10); } return +number.toFixed(precision);}
_codereview.109817
I wrote a tool to send emails with data saved in excel spreadsheets (.xlsx).It uses the openpyxl module to interact with the spreadsheets and smtplib to send the mails.It is only tested with (and should only run for) xlsx files generated by google spreadsheets.It should work with everything openpyxl is supporting, but it doesn't really matter for now.The tool shall be used to send emails for an secret santa event (i suggested using a website but the organizer want to use mails).So my main concern is loosing data or sending this private data to the wrong recipient.Also there is a send limit implemented as the mails accounts that are going to be used are accounts from free hoster (gmail in this case) with a limit of maximum sends per day.If there would be an error in that part of the code that would be critical, too, as it could lead to a (temporary) banned account.The complete code follows, but the project here is also containing an README.from openpyxl import load_workbookfrom openpyxl.utils.exceptions import CellCoordinatesException, InvalidFileExceptionimport smtplibfrom email.mime.text import MIMEText import jsonimport loggingimport argparseimport socketfrom exceptions import ValueErrorfrom time import sleep, time as nowfrom math import ceilimport oslog_debug, log_info, log_warn, log_error = logging.debug, logging.info, logging.warn, logging.errordef excel_data_iterator(workbook, sheet_index=0, selected_rows=tuple([0, 5]), row_offset=1): workbook.active = sheet_index sheet = workbook.active for row_number, row in enumerate(sheet.iter_rows()): if row_number < row_offset: continue yield (row_number,) + tuple([row[i].value for i in selected_rows])class Mail_sender(): def __init__(self, server_config, quota_persist=None, nosend=False): self.host = server_config['host'] self.port = server_config['port'] self.username = server_config['username'] self.password = server_config['password'] self.sender = server_config['sender_addr'] self.use_ssl = server_config['user_ssl'] self.config = server_config self.nosend = nosend self.connected = False self.quota_persist = quota_persist self.next_send = 0 self._update_quotas(mail_send=False) def _update_quotas(self, mail_send=True): curtime = now() if curtime >= self.config['timeframe_end']: self.config['timeframe_end'] = now() + self.config['timeframe'] self.config['remaining_requests'] = self.config['allowed_requests'] self.next_send = now() + self.config['timeframe'] log_debug('New timeframe for %s@%s:%i. %i Requests till %s' % (self.username, self.host, self.port, self.config['remaining_requests'], self.config['timeframe_end'])) # TODO: check if send was allowed ? if mail_send: self.config['remaining_requests'] -= 1 if self.config['update_config']: self.quota_persist() if self.config['use_fixed_delay']: if mail_send: self.next_send = curtime + ceil(self.config['timeframe'] / self.config['allowed_requests']) else: self.next_send = curtime else: if self.config['remaining_requests'] > 0: mail_delay = ceil( (self.config['timeframe_end'] - curtime) / self.config['remaining_requests'] ) log_debug('Can send 1 mail every %i second for %s@%s:%i. %i requests remaining in frame.' % (mail_delay, self.username, self.host, self.port, self.config['remaining_requests'])) self.next_send = curtime + (mail_delay if mail_send else 0) else: self.next_send = self.config['timeframe_end'] def connect_to_server(self): log_debug(Connecting to server %s@%s:%i. 
% (self.username, self.host, self.port)) if self.use_ssl: self.server = smtplib.SMTP_SSL(self.host, self.port) self.server.ehlo() # optional, called by login() else: self.server = smtplib.SMTP(self.host, self.port) self.server.ehlo() self.server.starttls() self.server.login(self.username, self.password) log_info('Logged in to %s:%i as %s' % (self.host, self.port, self.username)) self.connected = True # TODO: unused atm. remove ? def can_send(self): return now() >= self.next_send def _send_mail(self, mail_to, mail_subject, mail_body): if not self.connected: self.connect_to_server() if isinstance(mail_to, (str, unicode)): mail_to = [mail_to] msg = MIMEText(mail_body) msg['Subject'] = mail_subject msg['From'] = self.sender msg['To'] = ,.join(mail_to) if not self.nosend: self.server.sendmail(self.sender, mail_to, msg.as_string()) log_info('Send mail to %s over %s@%s:%i' % (mail_to, self.username, self.host, self.port)) self._update_quotas(mail_send=True) def send_mail(self, mail_to, mail_subject, mail_body, retries=3, sleep_time=5): Tries to send the mail trying to resolve some errors (up to `retries` times). If this function returns false, the connection is bugged and should be closed. May throw Exceptions: - smtplib.SMTPRecipientsRefused Wrong recipient. Valid recipients might work. for i in xrange(retries): try: self._send_mail(mail_to, mail_subject, mail_body) log_info('Successful send mail from %s to %s (Subject: %s) at %i try.' % (self.sender, mail_to, mail_subject, i+1)) return True except smtplib.SMTPRecipientsRefused as e: # Avoid this being catched by the SMPTException catcher further down raise e except smtplib.SMTPSenderRefused as e: log_error('The server refused the sender (%s) %s@%s:%i' % (self.sender, self.username, self.host, self.port)) return False except smtplib.SMTPAuthenticationError as e: log_error('SMTP authentication problem for connection %s@%s:%i' % (self.username, self.host, self.port)) return False except smtplib.SMTPHeloError as e: log_error('The server didn\'t reply properly to the HELO greeting.') return False except smtplib.SMTPDataError as e: log_warn('The server replied with an unexpected error code.') if i < retries: log_info('Trying to reconnect to %s@%s:%i ...' % (self.username, self.host, self.port)) self.close() sleep(sleep_time) self.connected = False except socket.error as e: log_warn('Socket error: %s .' % e) if i < retries: log_info('Trying to reconnect to %s@%s:%i ...' % (self.username, self.host, self.port)) self.close() sleep(sleep_time) self.connected = False except smtplib.SMTPConnectError as e: log_warn('Connection error for %s@%s:%i.' % (self.username, self.host, self.port)) if i < retries: log_info('Trying to reconnect to %s@%s:%i ...' % (self.username, self.host, self.port)) self.close() sleep(sleep_time) self.connected = False except smtplib.SMTPException as e: log_error('Smtplib error: %s .' % e) return False log_error('To many retries sending to %s:%i via %s. Giving up.' 
% (self.host, self.port, self.username)) return False def close(self): try: # DealWithIt.mov self.server.quit() except: pass self.connected = Falseclass Excel_Mail_Sender(): def __init__(self, config): self.config = config; self.server_config = None self.server_connections = None self.wb = None self.sheet = None def init(self): try: log_debug(Open, self.config['excel_file']) self.wb = load_workbook(self.config['excel_file'], data_only=True, keep_vba=True) except CellCoordinatesException, InvalidFileException: log_error('Failed to load spreadsheet file %s' % self.config['excel_file']) return False if self.config['cleancomments']: log_info(Clean comments from spreadsheet to avoid comment bug.) for i in xrange(len(self.wb.get_sheet_names())): self.wb.active = i for row in self.wb.active.rows: for cell in row: cell.comment = None self.wb.active = self.config['sheetindex'] self.sheet = self.wb.active log_info('Loaded excel file %s' % self.config['excel_file']) try: self.server_config = json.load(self.config['config']) if not 'mail_user' in self.server_config: raise ValueError() log_debug('Parsed config file %s.' % self.config['config'].name) except ValueError: log_error('Failed to parse config %s.' % self.config['config'].name) return False connections = list() for connection in self.server_config['mail_user']: sender = Mail_sender(connection, self._persist_config, nosend=self.config['nosend']) # TODO: should we try to connect with every user once here ? log_info('Loaded data for connection %s@%s:%i' % (sender.username, sender.host, sender.port)) connections.append(sender) self.server_connections = connections return True def _persist_config(self): self.config['config'].seek(0) json.dump(self.server_config, self.config['config'], indent=True, sort_keys=True) self.config['config'].truncate() self.config['config'].flush() # Do we need this here ? def _find_free_server(self): a = sorted(((x.next_send, x) for x in self.server_connections)) return a[0] def close(self): self.wb.save(self.config['excel_file']) def run(self): row_select = (self.config['colmail'], self.config['colsubject'], self.config['colbody'], self.config['colsend']) for row in excel_data_iterator(self.wb, self.config['sheetindex'], row_select): row = list(row) if 'staticsubject' in self.config: row[2] = self.config['staticsubject'] if row[4] and row[4] >= 1: log_debug('Skip row %i, because it was already send.' % row[0]) continue next_send, connection = self._find_free_server() if now() < next_send: _next = next_send - now() log_debug('Next mail send in %i seconds' % _next) sleep(_next) failed = False if not row[1] or not row[2] or not row[3]: log_error('Invalid data in row %i. Skipping row.' % (row[0] + 1)) failed = True else: try: status = connection.send_mail(row[1], row[2], row[3], retries=3, sleep_time=5) except smtplib.SMTPRecipientsRefused as e: log_error('Recipients refused (%s) for connection %s@%s:%i' % (row[1], connection.username, connection.host, connection.port)) failed = True if failed: self.sheet.cell(row=row[0] + 1, column=self.config['colsend'] + 1).value = 2 self.wb.save(self.config['excel_file']) continue if status: self.sheet.cell(row=row[0] + 1, column=self.config['colsend'] + 1).value = 1 self.wb.save(self.config['excel_file']) else: # We do NOT set the error or done flag when there is an critical error # as it is probably some network or configuration error- log_error('Critical error. 
Going down.') return Falsedef test_mail(config): server_config = json.load(config['config']) for connection in server_config['mail_user']: log_info('Try to connect to %s@%s:%i ...' % (connection['username'], connection['host'], connection['port'])) sender = Mail_sender(connection, None) try: sender.connect_to_server() log_info('Connection valid.') sender.close() except smtplib.SMTPHeloError: log_error('The server didn\'t reply properly to the HELO greeting.') except smtplib.SMTPAuthenticationError: log_error('The server didn\'t accept the username/password combination.') except smtplib.SMTPException: log_error('No suitable authentication method was found.') except socket.error as e: log_error('Network error: %s' % e)def test_spreadsheet_file(config, cleancomments): from tempfile import mkstemp error = False log_info('Trying to load spreadsheet file %s ...' % config['excel_file']) try: book = load_workbook(config['excel_file'], use_iterators=False, keep_vba=True) except InvalidFileException: log_error('Invalid file. Can\'t open') return True if cleancomments: log_info('Cleaning comments for temporary file') for i in xrange(len(book.get_sheet_names())): book.active = i for row in book.active.rows: for cell in row: cell.comment = None suffix = '.' + config['excel_file'].split('.')[-1] book.active = config['sheetindex'] # Sometimes max_row isn't correct, so we shouldn't rely on it. TODO: it would be great if we could log_info('Loading succesfully. %i rows found.' % book.active.max_row) # https://bitbucket.org/openpyxl/openpyxl/issues/536/cant-save-and-reopen-xlsx-file-with log_debug('Checking for comment bug ...') tmp_file, tmp_name = mkstemp(suffix=suffix) os.close(tmp_file) book.save(tmp_name) fd = open(tmp_name, 'rb') try: book = load_workbook(filename=fd, use_iterators=False, keep_vba=True) book.active = config['sheetindex'] log_info('Comment bug not detected. Rows %i' % book.active.max_row) except TypeError as e: log_error('Failed reloading file. Try again with added --cleancomments parameter.') error = True fd.close() os.remove(tmp_name) return error if __name__ == '__main__': # TODO: CSV ? The default lib has unicode problems import sys parser = argparse.ArgumentParser(description='Sends emails with data supplied by excel files.') parser.add_argument('--config', '-c', required=True, type=argparse.FileType('r+b'), default='./config.json', help='Choose the configuration file.') parser.add_argument('--loglvl', '-l', help='Set the log level.', default='INFO', choices=('DEBUG', 'INFO', 'WARN', 'ERROR')) parser.add_argument('--logfile', '-f', type=argparse.FileType('w'), help='Also write log to file.') parser.add_argument('--colmail', '-m', type=int, default=0, help='The column containing the email address.') parser.add_argument('--colsubject', '-s', type=int, default=1, help='The column containing the email subjects.') parser.add_argument('--colbody', '-b', type=int, default=2, help='The column containing the email message.') parser.add_argument('--colsend', '-o', type=int, default=3, help='The column used to mark if the mail was send.') parser.add_argument('--staticsubject', '-x', help='Can be used to use a static subject.') parser.add_argument('--sheetindex', '-i', type=int, default=0, help='The sheet to use.') parser.add_argument('--cleancomments', action='store_true', help='Remove comments from file. 
Openpyxl has/hadd a bug leading to corrupt files otherwise.') parser.add_argument('excel_file', type=argparse.FileType('r+b'), help='The excel file to get data from.') parser.add_argument('--test', action='store_true', help='Only test all mail accounts and the spreadsheet file. Then exit.') parser.add_argument('--nosend', action='store_true', help='Do NOT send the mails. Used for testing.') parser.add_argument('--notest', action='store_true', help='Do NOT test for comment bug on startup.') params = vars(parser.parse_args(sys.argv[1:])) # Get filename of the excel file (used to check perms) file_name = params['excel_file'].name params['excel_file'].close() params['excel_file'] = file_name # Set up logging if params['loglvl'] == 'DEBUG': log_lvl = logging.DEBUG elif params['loglvl'] == 'INFO': log_lvl = logging.INFO elif params['loglvl'] == 'WARN': log_lvl = logging.WARN elif params['loglvl'] == 'ERROR': log_lvl = logging.ERROR if 'logfile' in params: logging.basicConfig(filename=params['logfile'], format='%(asctime)s %(levelname)s: %(message)s', datefmt='%m/%d %I:%M %p', level=log_lvl) else: logging.basicConfig(level=log_lvl, format='%(asctime)s %(levelname)s: %(message)s', datefmt='%m/%d %I:%M %p') if params['test']: test_mail(params) test_spreadsheet_file(params, params['cleancomments']) exit() if not params['notest']: log_info('Testing file integrity and comment bug...') if test_spreadsheet_file(params, params['cleancomments']): exit() sender = Excel_Mail_Sender(params) if sender.init(): try: sender.run() except KeyboardInterrupt: sender.close()I hope the code isn't to large and in scope of this site. It is my first time posting on codereview.
Send emails with data from spreadsheet files
python;python 2.7
In general long code is ok here, though it's easier for reviewers if there's a compact piece of code to review. As it is, I'll give some notes I thought of from reading through your code.You double up on some code in _update_quotasif mail_send: self.next_send = curtime + ceil(self.config['timeframe'] / self.config['allowed_requests'])else: self.next_send = curtimeIn either case you will need self.next_send = curtime, so why not initialise that first and then add the extra value if mail_send.self.next_send = curtimeif mail_send: self.next_send += ceil(self.config['timeframe'] / self.config['allowed_requests'])If you intend for it to be self.next_send = (curtime + ceil(self.config['timeframe']) / self.config['allowed_requests'])then I don't believe that's how your code interprets. It's definitely not how the order of operations on mine works:>>> 10 + 10 / 1011>>> (10 + 10) / 102You're using the old string formatting with %, you should use str.format instead as it's easier. It coerces data to strings and offers syntax for additional formatting so it's good to be used to. It's not a big change in your case.log_debug('Can send 1 mail every {} second for {}@{}:{}. {} requests remaining in frame.'.format(mail_delay, self.username, self.host, self.port, self.config['remaining_requests']))Every {} substitutes in one the values passed to it. You'll notice that I don't need to indicate datatype any more.The other note about this line is that it's far too long! You should try to keep to 79 characters per line as the style guide dictates. One way of doing that is with implicit string concatenation. If you have multiple string literals in sequence with nothing between them, they'll be concatenated:>>> concat enation'concatenation'>>> (concat enation)'concatenation'As you can see this works over multiple lines, and even works with str.format:>>> ({} concat enation.format(This is ))'This is concatenation'So you could split the line up like this:log_debug('Can send 1 mail every {} second for {}@{}:{}.' '{} requests remaining in frame.' .format(mail_delay, self.username, self.host, self.port, self.config['remaining_requests']))Note that I'm matching the indentation to where the brackets open, to make it clear where each parentheses open.Bare excepts are always bad, and they're worse when you're trying to close a connection. What if the user hits a keyboard interrupt? You're just going to let that pass without safely closing anything? Instead, figure out what exceptions could occur and only catch those to handle or log, you did a good job with the exceptions in send_mail, do similar here. At the very least you should log the exception somehow so you'll be able to refer back and see what you've been letting pass.Instead of escaping quotes with backslash, you can just wrap the text in double quotes when you need single quotes and vice versalog_error(The server didn't accept the username/password combination.)Instead of using a chain of string comparisons, you can use a dictionary to set your log levels:LOG_LEVELS = { 'DEBUG': logging.DEBUG, 'INFO': logging.INFO, 'WARN': logging.WARN, 'ERROR': logging.ERROR, }log_lvl = LOG_LEVELS[params['loglvl']]
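To make the point about the bare except concrete, a sketch of close() that only swallows the errors it expects and still records them might look like this (the exception choice is a guess; adjust it to whatever quit() can actually raise in your setup):

def close(self):
    try:
        self.server.quit()
    except (smtplib.SMTPException, socket.error) as e:
        # Still shut down, but leave a trace instead of hiding the failure.
        log_warn('Error while closing connection %s@%s:%i: %s'
                 % (self.username, self.host, self.port, e))
    self.connected = False

A KeyboardInterrupt, by contrast, now propagates instead of being silently discarded.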
_unix.293339
According to the documentation:

To prevent the shell from sending the SIGHUP signal to a particular job, it should be removed from the jobs table with the disown builtin or marked to not receive SIGHUP using disown -h.

https://www.gnu.org/software/bash/manual/html_node/Signals.html

Note the OR in the quote.
I can confirm that, simply using disown without -h and then logging out and back in, the process did not exit:

#!/bin/bash
( sleep 10s; echo 1 > b ) &
disown

So it seems that the -h option is not necessary? If it works without it, then what is its purpose?
Clarification for Bash documentation on disown builtin option -h
bash;signals;job control;disown
Without -h the job is removed from the table of active jobs, with -h it is not.
Everything is in the manual:

disown [-ar] [-h] [jobspec ...]
    (...) If the -h option is given, each jobspec is not removed from the table, but is marked so that SIGHUP is not sent to the job if the shell receives a SIGHUP.

To see the difference run jobs after disowning the job with and without -h.
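A quick interactive illustration (the job number and PID are just examples):

$ sleep 1000 &
[1] 12345
$ disown -h %1      # still listed, but protected from SIGHUP
$ jobs
[1]+  Running                 sleep 1000 &
$ disown %1         # removed from the table entirely
$ jobs
$

In both cases the sleep survives the shell receiving SIGHUP; the difference is only whether the job keeps appearing in the jobs table.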
_codereview.11496
I created this as part of an interview process of graduate programming job. Submitted the code to them, and I was called for an interview. This is the assignment brief: http://knight-path.sourceforge.net/puzzle.html, however I was told I can accept the input and produce the output in whatever form I think is most effective, and I have to display an OOP understanding as well.I am a beginner in Java and haven't code in it for a while (2 years since my last Java programming subject in uni), thus was curious on areas of improvements. I would like to be evaluated on my OOP, design pattern, and overall code quality.import java.util.Scanner;import java.util.LinkedList;public class Board { private final int BOARD_WIDTH = 8; private final int BOARD_HEIGHT = 8; private LinkedList<Square> squares = new LinkedList(); public Board() { this.build(); } private void build() { this.squares.clear(); for (int y = 1; y <= BOARD_HEIGHT; y++) { for (int x = 1; x <= BOARD_WIDTH; x++) { this.squares.add(new Square(x, y, new Blank())); } } } public void play() { this.printWelcome(); while (true) { // build or re-build board from previous play this.build(); // prompt and validates move input, then creates a move object Move move = null; while (move == null) { try { move = new Move(this.readMove(), this); } catch (IllegalArgumentException e) { System.out.println(ERROR: + e.getMessage() + Please try again.); } } // set our source square with a knight piece, solve move, and print our boards + solutions move.getSource().setPiece(new Knight()); LinkedList<LinkedList<Square>> solutions = getKnightTravailsSolutions(move); this.print(solutions); } } private String readMove() { Scanner scanner = new Scanner(System.in); System.out.println(); System.out.println(); System.out.println(=======================================================================================); System.out.print(MOVES: ); return scanner.nextLine(); } public Square getSquare(int x, int y) { for (Square square: squares) { if (square.matches(x, y)) return square; } return null; } // ------------------------------------------------------------------------------------------------ // KNIGHT TRAVAILS FUNCTIONS // ------------------------------------------------------------------------------------------------ public LinkedList<LinkedList<Square>> getKnightTravailsSolutions(Move move) { // breadth-first search implementation // return a list of shortest path solution, each solution contains a list of squares it travels LinkedList<LinkedList<Square>> solutions = new LinkedList(); LinkedList<LinkedList<Square>> queue = new LinkedList(); LinkedList<Square> visited = new LinkedList(); boolean solutionFound = false; // initialize first path, our very first path is the source square of the move // this is technically a path, imagine in a case where our source and destination is the same square LinkedList<Square> firstPath = new LinkedList(); firstPath.add(move.getSource()); queue.add(firstPath); while (!queue.isEmpty()) { LinkedList<Square> currentPath = queue.removeFirst(); Square currentSquare = currentPath.getLast(); if (currentSquare == move.getDestination()) { solutions.add(currentPath); solutionFound = true; } if (!solutionFound) { for (Square nextLegalSquare: this.getKnightNextLegalMoves(currentSquare)) { if (!visited.contains(nextLegalSquare)) { LinkedList<Square> nextPath = new LinkedList(); nextPath.addAll(currentPath); nextPath.add(nextLegalSquare); queue.addLast(nextPath); } } } if (!visited.contains(currentSquare)) visited.add(currentSquare); } return 
solutions; } private LinkedList<Square> getKnightNextLegalMoves(Square source) { // brute force search to find next legal squares to all squares on board // returns a list of possible next move squares given a source square LinkedList<Square> nextMoves = new LinkedList(); for (Square destination: squares) { int xMoveDistance = destination.getX() - source.getX(); int yMoveDistance = destination.getY() - source.getY(); // since a knight hops over other pieces, we just need to ensure no piece exist within our destination square // and, whether we moved 2 then 1 space or 1 then 2 spaces if ((destination.getPiece().isBlank()) && (Math.abs(xMoveDistance * yMoveDistance) == 2)) { nextMoves.add(destination); } } return nextMoves; } // ------------------------------------------------------------------------------------------------ // PRINTING FUNCTIONS // ------------------------------------------------------------------------------------------------ private void printWelcome() { System.out.println(); System.out.println(); System.out.println(=======================================================================================); System.out.println(The Knight's Travails Challenge); System.out.println(=======================================================================================); System.out.println(); System.out.println(Welcome! :) I accept two squares identified by algebraic chess notation.); System.out.println(The first square is the starting position and the second square is the ending position.); System.out.println(I will then find the shortest sequence of valid moves to take a Knight piece from the); System.out.println(starting position to the ending solution.); System.out.println(); System.out.println(Example input would be: A8 B7); } private void print(LinkedList<LinkedList<Square>> solutions) { if (solutions.isEmpty()) { System.out.println( SOLUTION #1: No solution exists); } else { for (LinkedList<Square> solution: solutions) { System.out.println(this.getBoardLine(solution)); System.out.println( SOLUTION # + ((int)solutions.indexOf(solution) + 1) + : + this.getSolutionLine(solution)); } } } public String getSolutionLine(LinkedList<Square> solution) { String line = ; if (solution.getFirst() == solution.getLast()) { line += No travel required; } else { for (Square square: solution) { if (square != solution.getFirst()) line += square.toChessNotation() + ; } } return line; } private String getBoardLine(LinkedList<Square> solution) { String line = \n; line += this.getBoardTopLine() + \n; line += this.getBoardMiddleLine() + \n; for (int y = 1; y <= this.BOARD_HEIGHT; y++) { for (int x = 1; x <= this.BOARD_WIDTH; x++) { Square square = this.getSquare(x, y); if (!square.getPiece().isBlank()) { line += | + square.getPiece().toChessNotation(); } else if (solution.contains(square)) { line += | + solution.indexOf(square); } else { line += | + square.getPiece().toChessNotation(); } } line += | + y + \n; line += this.getBoardMiddleLine() + \n; } return line; } private String getBoardMiddleLine() { String line = ; for (int i = 0; i < this.BOARD_WIDTH; i++) { line += +---; } line += ; return line; } private String getBoardTopLine() { String line = ; char startChar = 'a'; for (int i = 0; i < this.BOARD_WIDTH; i++) { line += + startChar + ; startChar++; } return line; }}Other classes:public class Move { private Square source, destination; public Move(String moveInput, Board board) { if (moveInput == null || moveInput.length() != 5 || moveInput.charAt(2) != ' ') { throw new 
IllegalArgumentException(Invalid input.); } int fromX = (int)moveInput.toUpperCase().charAt(0) - '@'; int fromY = (int)moveInput.toUpperCase().charAt(1) - '0'; int toX = (int)moveInput.toUpperCase().charAt(3) - '@'; int toY = (int)moveInput.toUpperCase().charAt(4) - '0'; // check whether the squares are actually exists // e.g given input a9 a1, square a9 might not be exist in 8x8 board if ((board.getSquare(fromX, fromY) == null) || (board.getSquare(toX, toY)) == null) { throw new IllegalArgumentException(Invalid input.); } this.source = board.getSquare(fromX, fromY); this.destination = board.getSquare(toX, toY); } public Square getSource() { return this.source; } public Square getDestination() { return this.destination; } public String toChessNotation() { String line = this.getSource().toString() + + this.getDestination().toString(); return line; }}public class Square { private int x, y; private Piece piece; public Square(int x, int y, Piece piece) { this.x = x; this.y = y; this.piece = piece; } public int getX() { return this.x; } public int getY() { return this.y; } public Piece getPiece() { return this.piece; } public void setPiece(Piece piece) { this.piece = piece; } public String toChessNotation() { return (char)(this.x + 64) + + (this.y); } public boolean matches(int x, int y) { return (this.x == x && this.y == y); }}public abstract class Piece { public Piece() {} public abstract PieceType getPieceType(); public abstract String toChessNotation(); public boolean isKnight() { return getPieceType() == PieceType.KNIGHT; } public boolean isBlank() { return getPieceType() == PieceType.BLANK; }}public enum PieceType { KNIGHT, BLANK;}public class Blank extends Piece { public PieceType getPieceType() { return PieceType.BLANK; } public String toChessNotation() { return ; }}public class Knight extends Piece { public PieceType getPieceType() { return PieceType.KNIGHT; } public String toChessNotation() { return N; }}public class Main { public static void main(String[] args) { Board board = new Board(); board.play(); }}
Knight's Travails solution
java;design patterns;object oriented
I personally wouldn't do things like this.build(); but just build();... although that's a matter of taste. IMO this doesn't clarify anything but just adds noise to the code.
I don't see any reason why your squares is a linked list. A matrix of Piece would suffice, and it represents the purpose of squares better in my opinion. It would also simplify it all, since getSquare() would be a matter of querying the matrix indices instead of performing a search. It would also be more robust, since you can currently have several squares with the same x and y. You're forcing it when you build the loop, but still...
I would not remove Square completely, but I would keep it as a little bean for getKnightTravailsSolutions, Move, etc., ONLY representing the actual square (not the piece in it).
I would also move play() (and related methods like printWelcome(), readMove(), etc.) out of Board. It has too many responsibilities: it not only is a board, but it also plays itself and carries the main program logic. Why? It fits better in your class Main (which I would call something like KnightsTravails to better fit its new purpose). It's clear you amassed too much functionality there, because you had to separate it with things like this:

// ---...
// KNIGHT TRAVAILS FUNCTIONS
// ---...

Your public enum PieceType belongs to Piece, as a nested Piece.Type.
I see no reason why your notations are strings. Characters would be enough, and that better represents the notion of one piece = one character in chess notation.
I also see no reason to extend concrete pieces such as Blank and Knight, since they're not encapsulating very much and just disseminate code all over the place. IMO I would just instance Pieces (that is, make Piece non-abstract), specifying WHICH piece in the constructor. To implement notation I would do it like this:

private enum Type {
    BLANK (' '),
    ...
    KNIGHT ('N');

    private final char notation;

    Type(char notation) {
        this.notation = notation;
    }

    private char notation() {
        return notation;
    }
}

It would make sense to extend them in their own classes if you were actually adding specific logic to each piece (like movement types and so on), but you're not doing so. Some would argue your approach offers better extensibility and reusability, so your approach might be valid.
Not sure if Java lets you do this (I think so, but can't check it): moveInput.charAt(2) could be written more succinctly as moveInput[2]. The same applies for many of your strings.
What's going on in getKnightNextLegalMoves? Why would you brute-force all over your board? If you have a source, you just have to check [1,2]; [1,-2]; [-1,2]; [-1,-2]; [2,1]; [2,-1]; [-2,1]; [-2,-1].
Regarding getKnightTravailsSolutions:
Although you must instance your queue as a LinkedList (or any other class implementing Queue), I would prefer to specify its type as the interface, i.e.: Queue<LinkedList<Square>> queue = new LinkedList<LinkedList<Square>>(); It is more clear that it's a queue and will protect you from doing weird non-queue things. Use the queue interface (i.e. poll() and offer()) instead of removeFirst() and addLast().
It might make more sense for visited to be a Set instead (since you're enforcing that anyway when checking contains(currentSquare)).
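As an illustration of the offset-based lookup suggested above, a rough sketch that keeps your existing Square, getSquare and getPiece methods could be:

// Sketch only: relies on getSquare(x, y) returning null for off-board
// coordinates, as it already does in the posted Board class.
private static final int[][] KNIGHT_OFFSETS = {
    {1, 2}, {1, -2}, {-1, 2}, {-1, -2}, {2, 1}, {2, -1}, {-2, 1}, {-2, -1}
};

private LinkedList<Square> getKnightNextLegalMoves(Square source) {
    LinkedList<Square> nextMoves = new LinkedList<Square>();
    for (int[] offset : KNIGHT_OFFSETS) {
        Square destination = getSquare(source.getX() + offset[0], source.getY() + offset[1]);
        if (destination != null && destination.getPiece().isBlank()) {
            nextMoves.add(destination);
        }
    }
    return nextMoves;
}

That visits at most eight squares instead of all sixty-four, and the rest of the search code can stay as it is.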
_webapps.14513
I'm a big fan of Google Docs for SMEs, having used it for two years at my current company, set it up for a startup project, and recommended it to folks starting a business. While it takes away much of the pain and cost of providing these services locally, there is the nagging issue of not having a comprehensive 3rd-party disaster recovery system in place. This was highlighted to me by a recent blog post from e1lven.com titled Why no company that values their data should EVER Go Google.
Google has a couple of stop-gap, partial measures (like automatic forwarding of emails to another address), but there does not seem to be any native Google approach to backing up Google Apps data. Are there any automated periodic backup services for Google Apps data?
Automated periodic backup of Google Apps (email, docs, calendar)?
google drive;backup
I use Backupify for online backups of Gmail, Flickr, Twitter etc. There also appears to be a version for Google Apps (a business version, so you'll probably pay for it), but it's a great service and I've restored from it before.
_webapps.69111
I'm trying to find a link I saved to Delicious. I cannot remember how I tagged it but I am pretty sure that I set it to private. Is there a way to list one's private bookmarks in Delicious?
Can I list my private bookmarks on Delicious?
delicious
null
_unix.375860
Is there something similar to Vim's Command Line Window for Bash where I can see/edit/execute items from the history?
In Vim when I press : and then Ctrl-F it opens the window that shows the entire command history:

7. Command-line window          *cmdline-window* *cmdwin* *command-line-window*

In the command-line window the command line can be edited just like editing
text in any window. It is a special kind of window, because you cannot leave
it in a normal way.

OPEN                                            *c_CTRL-F* *q:* *q/* *q?*
[..]
When the window opens it is filled with the command-line history. The last
line contains the command as typed so far. The left column will show a
character that indicates the type of command-line being edited, see
|cmdwin-char|.
When you press Enter the current line is executed.

(I know that I can search the history with Ctrl-R, / (vi-mode), etc.)
Is there a Vim like Command Line Window for Bash?
bash;vim;vi mode
null
_softwareengineering.219427
I need an interface that assures me a certain method, with a specific signature, is available. So far this is what I have:

public interface Mappable<M> {
    M mapTo(M mappableEntity);
}

The problem arises when a class should be mappable to multiple other entities. The ideal case would be this (not valid Java):

public class Something implements Mappable<A>, Mappable<B> {
    public A mapTo(A someObject) {...}
    public B mapTo(B someOtherObject) {...}
}

What would be the best way to achieve this while remaining as generic as possible?
Implementing multiple generic Interfaces in java
java;generics
This is, of course, not something you can do due to Type Erasure. At runtime, you would have two methods public Object mapTo(Object), which obviously cannot coexist.

Unfortunately, what you are trying to do is simply beyond Java's type system. Assuming your generic type is always a first-class type, and not itself generic, you could achieve similar outward-facing behaviour by having the method mapTo(Object, Class), which would allow you to do runtime inspection of the given class and decide which behaviour to use. Obviously this is pretty inelegant--and will require manual casting of the return value--but I think it's the best you can do. If your generic types are themselves generic, then their generic parameters will be erased too and their Classes will be equal, so this method won't work.

However, I would point towards @Joachim's answer as well; this may be a case where you can split the behaviour out into separate components and sidestep the whole issue.
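A rough sketch of what that could look like, just to make it concrete (the mapping bodies are placeholders, and A/B stand for whatever your target types are):

public interface Mappable {
    // The Class token selects which mapping to perform.
    Object mapTo(Object target, Class<?> targetType);
}

public class Something implements Mappable {

    @Override
    public Object mapTo(Object target, Class<?> targetType) {
        if (targetType == A.class) {
            return mapToA((A) target);
        }
        if (targetType == B.class) {
            return mapToB((B) target);
        }
        throw new IllegalArgumentException("Unsupported target type: " + targetType);
    }

    private A mapToA(A target) { /* mapping logic for A */ return target; }
    private B mapToB(B target) { /* mapping logic for B */ return target; }
}

A caller would then do A mapped = (A) something.mapTo(someA, A.class), which is the manual casting of the return value I mentioned.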
_unix.10168
In recent weeks I've gone from a fairly 'hands-on' approach to .flac --> .mp3 transcoding, to one that's far more 'set & forget'.

The first step was to stop using a GUI front end (Audacity with a LAME plug-in) and instead use the method I outlined here.

The second step was to find a bash shell script that would tell that command loop to work recursively, allowing directories with many subdirectories containing .flac files to be transcoded in one simple step. That answer was provided by a user at askubuntu.com.

Now I wish to learn how to further refine things so that ID3 tag information is preserved. The methods linked to above strip ID3 tag data, leaving the bare minimum (i.e. only the title field remains).

Can anyone teach me how to write such a shell script?

The shell script has been updated thus:

#!/bin/bash
file="$1"
flac -cd "$file" | lame --preset fast extreme - "${file%.flac}.mp3"
id3cp "$file" "${file%.flac}.mp3"

Doing find . -name '*.flac' -exec ~/bin/flac2mp3 '{}' \; in ~/Desktop/stack gives the following output:

01 - Amon Tobin - Chomp Samba.flac: done
LAME 3.98.4 64bits (http://www.mp3dev.org/)
Using polyphase lowpass filter, transition band: 19383 Hz - 19916 Hz
Encoding <stdin> to ./01 - Amon Tobin - Chomp Samba.mp3
Encoding as 44.1 kHz j-stereo MPEG-1 Layer III VBR(q=0)
Parsing ./01 - Amon Tobin - Chomp Samba.flac: done.
Copying to ./01 - Amon Tobin - Chomp Samba.mp3: done

id3info for the original .flac and resultant .mp3 gives, respectively:

*** Tag information for 01 - Amon Tobin - Chomp Samba.flac

(i.e. nothing);

*** Tag information for 01 - Amon Tobin - Chomp Samba.mp3
*** mp3 info
MPEG1/layer III
Bitrate: 128KBps
Frequency: 44KHz

The .flac definitely has tag information. I can verify this by opening up EasyTAG. EasyTAG refers to this as 'FLAC Vorbis Tag' but 'ID3 Tag' for the .mp3. Is this the problem?
Mass .flac --> .mp3 transcoding: How to write a shell script that preserves ID3 tag information?
shell script;mp3;id3;tagging;flac
#!/bin/sh
file="$1"
outfile="${file%.flac}.mp3"

# Pull the FLAC tags into shell variables (TITLE, ARTIST, ALBUM, ...)
eval "$(metaflac --export-tags-to - "$file" | sed "s/=\(.*\)/='\1'/")"

# Transcode, writing the tags into an ID3v2 header
flac -cd "$file" | lame --preset fast extreme \
    --add-id3v2 --tt "$TITLE" --ta "$ARTIST" --tl "$ALBUM" \
    --ty "$DATE" --tn "$TRACKNUMBER" --tg "$GENRE" \
    - "$outfile"
_computerscience.5429
What is the best way to save video from the faces of a cube? The renderer can produce 6 squares (90x90) of 1024x1024 resolution. The faces are oriented along the axes and only the position changes continuously. I want to save them into a video stream and then play them back in a 360 panorama viewer by means of projection onto a cube or using a cubemap (OpenGL ES).

What codec is most suitable and what is the best way to save video for the above purposes? Is it better to use six separate synchronized video streams or, instead, to enclose all six faces in one?

If the cube is unfolded into six square bitmaps composed into a 3x2 (or 2x3) rectangle, then it is possible to get artifacts on the glued inner edges and on the outer edge of the resulting rectangle. I am sure this may depend on the codec used to compress/decompress the resulting video.

In the case of 6 separate video streams it is possible to render only 5 of the 6 faces (or 3 of 6 if only pitch and yaw, but not roll, rotations are possible), i.e. only the visible ones. But on the other hand it may be a waste of resources to support six separate streams, due to the necessity of handling the attendant metainformation.

What are the expected performance issues inherent to the different approaches?

The relevant platform is an average home desktop PC.
A way to save 360 panorama video
opengl;rendering;projections;video
null
_webmaster.107868
I have built a desktop site on AngularJS. The problem I am facing is that I am not able to open it in UC Browser on mobile. Is there any way to debug this issue?
Desktop angularjs website not opening on uc browser
javascript;browsers;angular.js
null
_webmaster.25282
On my website, I track when users click on a search result that brings them to a details page for an item. I then save what search query they used before they clicked.

My current solution is this:

Each link in search results is in the form /goto/<item_id>/?search_id=...
/goto/<item_id>/ saves that a user used a given search id to get to a given item, and then returns a 302 redirect to /details/<item_id>/
/details/<item_id>/ displays the details page and does not do any tracking.

For the users, everything works fine, but when I check Google search results for my page, the direct links to the details pages say /goto/<item_id>/?search_id=... with some old search_id, instead of /details/<item_id>/.

I feel like I'm missing an obvious solution :) The only thing I came up with so far is using /details/<item_id>/ links in the HTML and using JavaScript to replace them all with /goto/<item_id>/?search_id=..., but that seems like overkill.

Any better ideas?
How to track click sources in a Google-compatible way?
seo;redirects;tracking
null
_codereview.163081
I created a simple function that generates the values of a series representing a repeating sequence of steps in time. The user can define:

step values
width of the steps
how many times the step sequence should repeat
or the size of the returned series

If the size is not defined, the size of the returned data should be determined by the number of repeats.

So the call steps(2, [1, 2, 3], repeat=2) should return

[1 1 2 2 3 3 1 1 2 2 3 3]

The function follows:

def steps(step_width, values, repeat=1, size=None):
    """
    This function generates steps from given values.

    **Args:**

    * `step_width` - desired width of every step (int)
    * `values` - values for steps (1d array)

    **Kwargs:**

    * `repeat` - number of step sequence repetions (int), this value is used, if the `size` is not defined
    * `size` - size of output data in samples (int), if the `size` is used, the `repeat` is ignored.

    **Returns:**

    * array of values representing desired steps (1d array)
    """
    try:
        step_width = int(step_width)
    except:
        raise ValueError('Step width must be an int.')
    try:
        repeat = int(repeat)
    except:
        raise ValueError('Repeat arg must be an int.')
    try:
        values = np.array(values)
    except:
        raise ValueError('Values must be a numpy array or similar.')

    # generate steps
    x = np.repeat(values, step_width)

    if size is None:
        # repeat according to the desired repetions
        x_full = np.tile(x, repeat)
    else:
        try:
            repeat = int(repeat)
        except:
            raise ValueError('Repeat arg must be an int.')
        # repeat till size is reached and crop the data to match size
        repeat = int(np.ceil(size / float(len(x))))
        x_full = np.tile(x, repeat)[:size]

    return x_full

I would appreciate any feedback. I am especially unsure about the effectiveness of the error raising as it is implemented right now.
Function that generates steps time series from user given values
python;error handling;numpy
Some comments related to your code, in addition to the comments made in Jaime's answer:

Look out for code repetition

Whenever you catch yourself copy-pasting code, or typing the same thing over and over, you should consider writing a loop or a method. This applies especially to the try...except pattern. You could either use the option provided by Jaime, or a basic extraction based upon your code:

def validate_int(value, error_msg):
    try:
        return int(value)
    except:
        raise ValueError(error_msg)

Repeated conversion of repeat

Why do you repeat the conversion of repeat when size is not None? You've already done this, no need to do it again. Is it a typo, and you should have validated the size number?

Write good comments and docstrings

If I read only the code, I wouldn't understand what your method actually does. Your docstring is also rather large and consumes a little too much space. For an alternate version, see the code example below. It is less than half the size of your docstring, and I've also added some examples to help understand the inner workings of the method. Regarding the comments, I would focus on what we gain from the next code statement. For an example, see the code below.

What do you gain from raising ValueError in this method? In general, when a method is called you have control over where the numbers come from. If you have calculated them, you know they are proper values already, and steps shouldn't need to verify them. If they are read from the command line, from keyboard input, or possibly through a web request, I would perform the validation closer to the actual retrieval of the value. For example, if the value is read from the keyboard and you don't validate until this method is called, how would you handle it? If you validate when it was typed in, you can enforce the type there, and keep asking for new input until your type requirements are satisfied.

Another benefit of assuming the validation happened earlier (or closer to the retrieval) is that it would greatly simplify your code. Using the slice trick and repeat calculation presented by Jaime, your code would then look like this:

def steps(step_width, values, repeat=1, size=None):
    """Generate 'step_width' copies of each of the elements of 'values',
    and either repeat this step sequence 'repeat' times, or repeat it
    until the entire sequence has 'size' elements. Two examples:

        steps(2, [5, 6], repeat=3) -> [5, 5, 6, 6, 5, 5, 6, 6, 5, 5, 6, 6]
        steps(3, [7, 8], size=5)   -> [7, 7, 7, 8, 8]
    """
    if size is not None:
        # Calculate how many 'repeat's we need to get
        # at least 'size' elements
        repeat = int(np.ceil(size / float(len(values) * step_width)))

    return np.tile(np.repeat(values, step_width), repeat)[:size]
_unix.271004
I followed this wiki but I didn't get it to work at boot automatically; every time I boot I have to run this command:

setxkbmap -model pc104 -layout fr,ara -variant ,azerty -option grp:alt_shift_toggle

I'm using the Deepin DE and this is /etc/X11/xorg.conf.d/00-keyboard.conf:

Section "InputClass"
    Identifier "system-keyboard"
    MatchIsKeyboard "on"
    Option "XkbLayout" "fr,ara"
    Option "XkbModel" "pc104"
    Option "XkbVariant" ",azerty"
    Option "XkbOptions" "grp:alt_shift_toggle"
EndSection

~/.xinitrc:

#!/bin/sh

userresources=$HOME/.Xresources
usermodmap=$HOME/.Xmodmap
sysresources=/etc/X11/xinit/.Xresources
sysmodmap=/etc/X11/xinit/.Xmodmap

# merge in defaults and keymaps

if [ -f $sysresources ]; then
    xrdb -merge $sysresources
fi

if [ -f $sysmodmap ]; then
    xmodmap $sysmodmap
fi

if [ -f $userresources ]; then
    xrdb -merge $userresources
fi

if [ -f $usermodmap ]; then
    xmodmap $usermodmap
fi

# start some nice programs

if [ -d /etc/X11/xinit/xinitrc.d ] ; then
    for f in /etc/X11/xinit/xinitrc.d/?*.sh ; do
        [ -x "$f" ] && . "$f"
    done
    unset f
fi

twm &
xclock -geometry 50x50-1+1 &
xterm -geometry 80x50+494+51 &
xterm -geometry 80x20+494-0 &
exec startdde
Arch: cannot make setxkbmap persistant
arch linux;keyboard layout
null
_reverseengineering.3949
How do I tell radare2 that what I'm disassembling is a DOS MZ executable, when I already know that it is? It does not auto-detect this for me.
Disassembling an unknown DOS MZ executable using radare2
disassembly;radare2
You can disassemble a DOS 16-bit executable using radare2 as follows.

Example using TEXTINST.EXE from the FreeDOS 1.1 CDROM image:

bash$ radare2 TEXTINST.EXE
SS : 214f
SP : 4c00
IP : 0
CS : 0
NRELOCS: 1
RELOC : 1c
CHKSUM : 0
[0000:0020]> aa
[0000:0020]> pd 10
;      [0] va=0x00000020 pa=0x00000020 sz=67272 vsz=67272 rwx=-rwx .text
;-- section..text:
0000:0020    b93b03     mov cx, 0x33b
0000:0023    be7406     mov si, 0x674
0000:0026    89f7       mov di, si
0000:0028    1e         push ds
0000:0029    a9b580     test ax, 0x80b5
|  0000:002c    8cc8       mov ax, cs
|  0000:002e    050510     add ax, 0x1005
|  0000:0031    8ed8       mov ds, ax
|  0000:0033    05f010     add ax, 0x10f0
|  0000:0036    8ec0       mov es, ax
(etc)

If required, you can force the disassembler, etc. to assume DOS as follows:

e asm.arch=x86
e asm.os=dos

If you think you have a DOS executable and the above doesn't work, there may be something more subtle going on, and you should post a question showing specifically what you tried, what you expect and what is not working. Note: showing what you tried, etc. is more likely to elicit answers on the SE sites...
_webmaster.83527
We're running into an issue where, when we do an internal search on our website, a particular page (http://education.illinois.edu/faculty/mercier) does not show up. We're using the Google Search tool. Doing a Google search with site:education.illinois.edu Emma Mercier doesn't return the page either.

What we've tried:

Added the page to the sitemap and submitted the sitemap to Google Search Console.
Entered the link in Google Search Console and asked it to Fetch as Google. It got a 200. I then recrawled the page and waited 24 hours.
Went to the robots.txt tester and entered the URL. It said it was allowed.
Confirmed it wasn't part of the Blocked URL list.

I realize this is a generic question (with similar yet dated questions), but I'm stumped on what else to check. Thoughts?
Page not getting picked up on Google Search
seo;google search console
null
_softwareengineering.304331
Premise

I have a number of database servers (different DBMS instances, installations in different physical locations), each of which contains data and uses the same schema. Most of the data is completely the same across all databases, but unfortunately not in every case.

I need to write an application that will issue queries (only selects, not inserts, updates or deletes) against a table across all databases and retrieve all results, so that the tool can compare them and report possible discrepancies back to users.

Use case

For example, imagine that I have a web-based form where users enter company details. Submitting the form will issue an insert against the same table in all databases (via different database connections, each of them separate). While users are typing the company's name, the application needs to verify whether the value entered already exists across all databases. If it exists on all databases, or doesn't exist on any of them, all is fine: either the company name is taken or it can be used by the user. However, if the value exists on some databases but not on others, I have to raise a validation error.

My plan

The tool I'm writing is a Java application using JDBC (connecting to multiple Oracle databases), so right now my thinking is that I create a Connection towards each of my database servers, issue the same query in parallel, wait for the results and compare them (a rough sketch is at the end of this question). My use case involves a handful of users and 5-10 different database servers (database migration or slave/master replication is completely out of the question).

In the past I've answered someone's question which required a solution similar to what I'm thinking I should do right now.

Question

What are the possible ways to handle a scenario like this?
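To make the plan concrete, this is roughly the kind of check I have in mind. It is just a sketch: connection setup, pooling and error handling are omitted, it runs sequentially for brevity (in practice I would submit each lookup to an ExecutorService), the table and column names are made up, and it assumes it lives in a method that declares throws SQLException.

// One JDBC Connection per database instance, all sharing the same schema.
Set<Boolean> outcomes = new HashSet<>();
for (Connection connection : connections) {
    try (PreparedStatement ps = connection.prepareStatement(
            "SELECT COUNT(*) FROM company WHERE name = ?")) {
        ps.setString(1, enteredCompanyName);
        try (ResultSet rs = ps.executeQuery()) {
            rs.next();
            outcomes.add(rs.getInt(1) > 0);   // does this database know the name?
        }
    }
}
// All databases agree (all true or all false) -> fine.
// Mixed answers -> raise a validation error to the user.
boolean discrepancy = outcomes.size() > 1;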
Executing queries against multiple separate database instances
database;jdbc
null
_softwareengineering.100671
Possible Duplicate: Making money with Open Source as a developer?

I've been hearing recently that more and more developers contribute their efforts to open-source products. Being a novice in many aspects of software development, I've never thought of myself as a member of a team that builds a product almost anybody can use for free.

But this trend is getting more and more attention, and today it is almost expected that any self-respecting software engineer has at least one example of participating in an open-source project. It gives you a few more credits in an interview, it's cool, and it means something very good, especially if the product is popular and successful. However, it seems that it's not just cool: somehow it is a prosperous business today. Not just students or passionate enthusiasts anymore: well-known developers quit their jobs and start spending most of their time in that sector, and proprietary companies spend good amounts of money to support them.

I understand there are many different types of licensing and so on. But I simply don't understand how those developers get paid. How do the companies make their revenue from it? I don't believe that behind all of this is just people's altruistic nature and that they're happy to work for virtually nothing. Can you explain, taking any well-known project as an example, its business model?

How can somebody participate? What should you know first? How can you monetize your efforts? Is there any guide for beginners or complete manual for idiots to start with?
Making money from developing Open Source Software. How does that work?
open source
null
_codereview.90520
Chandan got bored playing with arrays all the time, so he has decided to buy a string \$S\$ consisting of \$N\$ lower case letters. Once he purchased the string, he started formulating his own terminology over his string \$S\$. Chandan calls a string str a Balanced String if and only if the characters of str can be partitioned into two multisets \$M_1\$ and \$M_2\$ such that \$M_1=M_2\$.

For example, strings like abccba, abaccb, aabbcc are all balanced strings, as their characters can be partitioned into two multisets \$M_1\$ and \$M_2\$ such that \$M_1=M_2\$:

$$M_1 = \{a,b,c\} \\ M_2 = \{c,b,a\}$$

whereas strings like ababab, abcabb are not balanced at all.

Chandan wonders how many substrings of his string \$S\$ are Balanced Strings. Chandan is a little guy and does not know how to calculate the count of such substrings.

For the input abccba the balanced substrings are cc, bccb, abccba, i.e. count=3 (provided as per the problem statement discussion). But I guess aa, bb, cc, abba, acca, cbbc are also balanced substrings for the same input, which would make count=6. Is anything wrong with my interpretation?

Program

import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Scanner;
import java.util.Set;

public class BalancedStrings {

    static int returnbalance(String input){
        HashMap<Character, Integer> characters=new LinkedHashMap<>();
        int count=0;
        Character c=null;
        for(int i=0;i<input.length();i++){
            c=input.charAt(i);
            if(characters.containsKey(c)==true){
                count=characters.get(c);
                characters.put(c, count+1);
            }
            else
                characters.put(c,1);
        }
        int countunique=0;
        Set<Character> s=characters.keySet();
        for(Character cc:s){
            count=characters.get(cc);
            if(count%2==0)
                countunique++;
        }
        if(countunique!=s.size())
            return 0;
        Object[] sar= s.toArray();
        int len=0;
        for(int i=sar.length-1;i>=0;i--){
            len=len+i;
        }
        return len+countunique;
    }

    public static void main(String[] args) {
        Scanner in=new Scanner(System.in);
        int numberOfInputs=in.nextInt();
        in.nextLine();
        for(int i=0;i<numberOfInputs;i++){
            System.out.println(BalancedStrings.returnbalance(in.nextLine()));
        }
    }
}
Count balanced substring
java;algorithm;strings
null
_unix.107891
I was compiling a custom Linux kernel for a newly installed machine, and after booting into the new kernel (3.12), the init process failed to find a root device, which I traced to the system getting an "unknown partition table" error on the device in question (/dev/sda). The generic kernel boots up and mounts the root partition just fine. I cannot seem to find anything that looks relevant in the kernel config; what could it be missing?
unknown partition table - misconfigured kernel
linux;kernel;partition;grub2;ext4
There are a bunch of options mostly named CONFIG_.*_PARTITION; you probably didn't set the one you need. These may only show up if you answer yes to CONFIG_PARTITION_ADVANCED (Advanced partition selection).

You're going to want (on a PC) at least:

CONFIG_MSDOS_PARTITION=y   # traditional MS-DOS partition table
CONFIG_EFI_PARTITION=y     # EFI GPT partition table

and maybe:

LDM_PARTITION=y            # Windows logical (dynamic) disks

You may also want a few more (such as CONFIG_MAC_PARTITION and BSD_DISKLABEL) to read partition tables from other operating systems' disks you may actually run into.

You can see all of the partition table options in your kernel source tree (in block/partitions/Kconfig) or at Linux Cross Reference.
_webmaster.85355
If I place a link to my website anywhere within the body of the email, in the format <a href=http://mywebsite.com>more info here</a>, it gets flagged as spam. I tested many different combinations, and the interesting thing is that the email only goes to spam when it contains a link to this particular website (my website). I tried a few other website links, where all I changed was the href= attribute, and the email is NOT marked as spam.

I'm sending email from my hosting to Gmail. On the web server I have Postfix installed and I'm managing the email server on my own. I added SPF and DKIM records correctly, so I don't think this is causing any problems. This is the same web server where the website is hosted, the same website whose link is marked as spam in Gmail. Any other emails which I send from this server to Gmail without my website link in the content are valid and do NOT go to spam.

Now, could this be related to my site's ranking in Google? I wonder which factors could possibly cause emails with my website link to be sent to spam. My site's rank is not so bad (but not highly ranked either) and it's not penalized as far as I know; I used a couple of web services to test it and it looked fine. It's also indexed properly and I can see it in Google search results for relevant keywords.

If anyone can provide more info about this I would appreciate it.
Gmail flags email as spam if it contains link to my website
email;spam;gmail
null
_codereview.101887
This code inserts the number 4 into a vector's even indexes.

vector<double> vecCoeffs;

// Put the coefficient 4 into each even index and 2 for each odd index
for(int i = 0; i <= 10; i++){
    vecCoeffs[i];
    if(i % 2 == 0 ){
        vecCoeffs.push_back(4);
    }else{
        vecCoeffs.push_back(2);
    }

// Starting and ending with coefficient 1
vecCoeffs[0] = 1;
vecCoeffs[10] = 1;
Populating a vector with alternating numbers
c++;vectors
If you know how large the vector needs to be, then specify the size in the constructor so that it doesn't need to guess.

vecCoeffs[i]; is a useless statement.

It would be clearer to pull the assignment of the starting and ending coefficients out of the loop, and to avoid assigning vecCoeffs[10] twice. You could assign the starting and ending coefficients in the same statement.

The if-else would be better as a ternary conditional.

vector<double> vecCoeffs(11);

// Starting and ending coefficients
vecCoeffs[0] = vecCoeffs[vecCoeffs.size() - 1] = 1;

// Put the coefficient 2 into each odd index and 4 into each even index
for (int i = 1; i < vecCoeffs.size() - 1; i++) {
    vecCoeffs[i] = (i % 2 ? 2 : 4);
}

But the modulo operator is relatively slow. You would be better off with two loops:

vector<double> vecCoeffs(11);

// Starting and ending coefficients
vecCoeffs[0] = vecCoeffs[vecCoeffs.size() - 1] = 1;

for (int i = 1; i < vecCoeffs.size() - 1; i += 2) {
    vecCoeffs[i] = 2;  // Odd coefficients
}
for (int i = 2; i < vecCoeffs.size() - 1; i += 2) {
    vecCoeffs[i] = 4;  // Even coefficients
}

or maybe just one, but getting the termination correct is trickier. This version might eliminate an instruction or two from the loop, but I don't recommend it.

vector<double> vecCoeffs(11);

for (int i = 0; i < vecCoeffs.size() - 1; i += 2) {
    vecCoeffs[i] = 4;      // Even coefficients
    vecCoeffs[i + 1] = 2;  // Odd coefficients
}

// Starting and ending coefficients
vecCoeffs[0] = vecCoeffs[vecCoeffs.size() - 1] = 1;
_unix.32351
For example, in an X session I can use Ctrl-Alt-L to lock the screen, so it will ask for a password to unlock and prevent somebody from messing with my computer.

But if I have an open terminal session on one of the ttys (which I can access with Ctrl-Alt-F1, for example), then it is not locked, and somebody could still use it to do some harm. Is there a way to 'lock' that command line (possibly with some background processes running in it)?
Is there a way to lock command line?
terminal;screen lock
vlock will do as you ask. However, if you want to run background processes, consider screen instead, which will let you also log off and keep processes running in the background, and then reattach -- even when logged in from alternate places.
_unix.34478
Distro-specific support channels are good only if you run their clean, stable versions and basically don't install anything beyond what you get out of the box. But what about those who want to play around, experiment, and learn a little bit more?

I'm looking for a non-distro-specific channel that will offer support and troubleshooting advice for people who have hacked their machine to shreds.

I am really having a hard time finding support and a positive atmosphere when it comes to experimentation.
Non-distro specific, linux tweaking, hacking and learning support IRC
linux;troubleshooting;irc
null
_unix.17732
The following code best describes the situation. Why is the last line not outputting the trailing newline char? Each line's output is shown in the comment. I'm using GNU bash, version 4.1.5.

echo -n $'a\nb\n' | xxd -p                         # 610a620a
x=$'a\nb\n' ; echo -n "$x" | xxd -p                # 610a620a
echo -ne "a\nb\n" | xxd -p                         # 610a620a
x=$(echo -ne "a\nb\n") ; echo -n "$x" | xxd -p     # 610a62
Where has the trailing newline char gone from my command substitution?
shell;command line;text processing;command substitution
The command substitution function $() (and its cousin the backtick) specifically removes trailing newlines. This is the documented behavior, and you should always be aware of it when using the construct.

Newlines inside the text body are not removed by the substitution operator, but they may also be removed when the shell does word splitting, so how it turns out depends on whether you used quotes or not. Note the difference between these two usages:

$ echo -n "$(echo -n 'a\nb')"
a
b$ !! | xxd -p
610a62
$ echo -n $(echo -n 'a\nb')
a b$ !! | xxd -p
612062

In the second example, the output wasn't quoted and the newline was interpreted as a word split, making it show up in the output as a space!