Changing CSS dependent on the domain and adding "?" to the URL - JavaScript

First post here, but I'll get on with it. I'm having trouble figuring out how to change a site's CSS depending on the URL. What I mean is: I want "example.com" to display a coming-soon page, but if you change the URL to "example.com?bypass=keyhere", it should display the actual site that is being built. I mentioned CSS because I was thinking of changing the visibility of elements, etc., but I don't even know how to get started with detecting what the current URL is.

You can query the URL parameters using:
function urlParam(name) {
  var results = new RegExp('[\?&]' + name + '=([^&#]*)').exec(window.location.href);
  if (results == null) {
    return null;
  } else {
    return decodeURI(results[1]) || 0;
  }
}

if (urlParam('bypass') === 'keyhere') {
  // if you want to do something
} else {
  document.body.classList.add('hideContent');
}
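In modern browsers you could also read the parameter with the built-in URLSearchParams API instead of a regex. A minimal sketch, reusing the bypass parameter and hideContent class from the example above:
// Minimal sketch using URLSearchParams (not supported in very old browsers).
var params = new URLSearchParams(window.location.search);
if (params.get('bypass') === 'keyhere') {
  // show the real site, e.g. remove the coming-soon styling
} else {
  document.body.classList.add('hideContent');
}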

Related

Attempting window.location or window.location.href redirect which is causing a loop

I'm attempting to use JavaScript to determine whether the user is using a certain language; if they're not using English, the page should load a different page, BUT with the params I've grabbed from the URL.
I have been able to load the page with the params, but I keep falling into a loop reloading the page, even after skimming through countless other examples, such as this or this.
function locateUserLanguage() {
  var languageValue = (navigator.languages ? navigator.languages[0] : (navigator.language || navigator.userLanguage)).split('-');
  var url = window.location.href.split('?');
  var baseUrl = url[0];
  var urlParams = url[1];
  if (languageValue[0] === 'en') {
    console.log('no redirect needed, stay here.');
  } else {
    // I tried to set location into a variable but also wasn't working.
    // var newURL = window.location.href.replace(window.location.href, 'https://www.mysite.dog/?' + urlParams);
    window.location.href = 'https://www.mysite.dog/?' + urlParams;
  }
}
locateUserLanguage();
I've attempted to place a return true; as well as a return false;, but neither stops the loop.
I've tried window.location.replace() and setting window.location.href straight to what I need, but it continues to loop.
There is a possibility that the script containing this function is executed on both of your pages (English and non-English) on load. So, as soon as the page is loaded, the locateUserLanguage function runs on both the English and the non-English website, causing the infinite loop.
You need to put a check in place before you call the locateUserLanguage function.
Suppose the English website has the URL "www.myside.com" and the non-English website has the URL "www.myside.aus". Then the condition needs to be
if (window.location.host === "www.myside.com") { locateUserLanguage() }
This will make sure that locateUserLanguage is called only on the English website (a combined sketch is shown below).
Another approach is to load this script only on the English website, which avoids the conditional statement altogether.
Hope it helps. Let me know if you have any doubts.
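Putting the guard and the redirect together, a rough sketch might look like this (the host names "www.myside.com" and "www.mysite.dog" are just the example values used above; substitute your real domains):
// Sketch only: run the language check on the English site alone,
// so the non-English site never re-triggers the redirect.
function locateUserLanguage() {
  var languageValue = (navigator.languages ? navigator.languages[0] : (navigator.language || navigator.userLanguage)).split('-');
  if (languageValue[0] !== 'en') {
    // window.location.search keeps the leading '?', or is '' when there are no params
    window.location.href = 'https://www.mysite.dog/' + window.location.search;
  }
}
if (window.location.host === 'www.myside.com') {
  locateUserLanguage();
}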

I'm trying to use ClickFunnels and inject some JS into the "Header Custom Tracking", but it's loading after the page?

I'm attempting to redirect someone who is outside of the United States, but the page loads before the script does and it looks tacky. Obviously I'm not able to hide the body or anything like that. I'm not certain whether the issue lies with the redirect itself waiting for the page to load, or whether it is because the HTML keeps loading, which I didn't think was a thing.
<script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.1.1/jquery.min.js"></script>
<script type="text/javascript">
var QueryString = function () {
  var query_string = {};
  var query = window.location.search.substring(1);
  var vars = query.split("&");
  for (var i = 0; i < vars.length; i++) {
    var pair = vars[i].split("=");
    if (typeof query_string[pair[0]] === "undefined") {
      // If first entry with this name
      query_string[pair[0]] = decodeURIComponent(pair[1]);
    } else if (typeof query_string[pair[0]] === "string") {
      // If second entry with this name
      var arr = [query_string[pair[0]], decodeURIComponent(pair[1])];
      query_string[pair[0]] = arr;
    } else {
      // If third or later entry with this name
      query_string[pair[0]].push(decodeURIComponent(pair[1]));
    }
  }
  return query_string;
}();

$.getJSON("https://freegeoip.net/json/", function (data) {
  // Redirect visitors outside the United States (comparison "!==", not an assignment)
  if (data.country_name !== "United States") {
    window.location.replace("https://my.draxe.com/hlg-success-intl" + '?' + 'inf_field_Email=' + QueryString.inf_field_Email);
    //window.location.href = "https://my.draxe.com/hlg-success-intl";
  }
});
</script>
ClickFunnels is kind of weird with how it handles its custom HTML/JavaScript.
If you look at the page source, you'll see that your code is not injected into the DOM as the page is created on the server; it's stored inside another element, and then ClickFunnels' JS code injects it into the DOM. This makes it impossible to run your code before theirs (I assume this is why they do it this way).
So, basically, there is no way to do what you're asking. At least as far as we've been able to figure out, and we're pretty deep in their code (we're currently looking at trying to hijack their form submissions to inject custom variables into the redirect URL... I wouldn't advise it).

Cannot read "Google" from object in JavaScript

I've written a script that detects the referring URL from a couple of search engines and then passes this value (not the mc_u20 variable) to a server to be used somewhere. The script works a treat except for one big problem: it simply won't track Google search results. So any result that comes from Google simply doesn't register. Here is the script:
var mc_searchProviders = {"search_google":"google.co","search_bing":"bing.com","search_msn":"search.msn","search_yahoo":"search.yahoo","search_mywebsearch":"mywebsearch.com","search_aol":"search.aol.co","search_baidu":"baidu.co","search_yandex":"yandex.com"};
var mc_socialNetworks = {"social_facebook":"facebook.co","social_twitter":"twitter.co","social_google":"plus.google."};
var mc_pageURL = window.location + '';
var mc_refURL = document.referrer + '';

function mc_excludeList() {
  if (mc_refURL.search('some URL') != -1) {
    return false; //exclude some URL
  }
  if (mc_refURL.search('mail.google.') != -1) {
    return false; //exclude Gmail
  }
  if (mc_refURL.search(mc_paidSearch) != -1) {
    return false; //exclude paidsearch
  } else {
    mc_checkURL();
  }
}
mc_excludeList();

function mc_checkURL() {
  var mc_urlLists = [mc_searchProviders, mc_socialNetworks],
      i, mc_u20;
  for (i = 0; i < mc_urlLists.length; i++) {
    for (mc_u20 in mc_urlLists[i]) {
      if (!mc_urlLists[i].hasOwnProperty(mc_u20))
        continue;
      if (mc_refURL.search(mc_urlLists[i][mc_u20]) != -1) {
        mc_trackerReport(mc_u20);
        return false;
      } else if ((mc_refURL == '') && (mc_directTracking === true)) {
        mc_u20 = "direct_traffic";
        mc_trackerReport(mc_u20);
        return false;
      }
    }
  }
}
The most annoying thing is, I have tested this on my local machine (by populating mc_refURL with an actual Google search URL) and it works like a charm. I also thought that maybe, when searching through the first mc_searchProviders object, it was somehow skipping the first instance, so I added a blank one. But still this doesn't work. What's even more annoying is that for every other search engine, the mc_u20 variable seems to populate with what I need.
This is driving me insane. Can anyone see what's wrong here? I might also mention that I'm signed into Google, but I don't see how this would affect the script, as their blog post (in November) said they were filtering keywords, not stopping the referring URL from being passed.
Right, so I figured out what was going on. The first part of the script excludes your own URL (see where it says 'some URL'). Say this was set to www.example.com. If I searched Google for, say, "example" and Google returned www.example.com as the first search result, the referring URL would contain www.example.com. Hence why the script was breaking. Maybe someone will find this useful in future.
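For anyone hitting the same issue, a more robust exclusion is to compare the referrer's host name instead of searching the whole referrer string for a substring. A rough sketch (mc_isOwnReferrer is a hypothetical helper, not part of the original script):
// Sketch: treat the referrer as "our own" only when its host name matches ours,
// so a Google URL that merely contains our domain somewhere is not excluded.
function mc_isOwnReferrer() {
  if (!document.referrer) {
    return false;
  }
  var a = document.createElement('a'); // anchor-element trick; works without the URL constructor
  a.href = document.referrer;
  return a.hostname === window.location.hostname;
}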

Navigating / scraping hashbang links with javascript (phantomjs)

I'm trying to download the HTML of a website that is almost entirely generated by JavaScript. So, I need to simulate browser access and have been playing around with PhantomJS. Problem is, the site uses hashbang URLs and I can't seem to get PhantomJS to process the hashbang -- it just keeps calling up the homepage.
The site is http://www.regulations.gov. The default takes you to #!home. I've tried using the following code (from here) to try and process different hashbangs.
if (phantom.state.length === 0) {
  if (phantom.args.length === 0) {
    console.log('Usage: loadreg_1.js <some hash>');
    phantom.exit();
  }
  var address = 'http://www.regulations.gov/';
  console.log(address);
  phantom.state = Date.now().toString();
  phantom.open(address);
} else {
  var hash = phantom.args[0];
  document.location = hash;
  console.log(document.location.hash);
  var elapsed = Date.now() - new Date().setTime(phantom.state);
  if (phantom.loadStatus === 'success') {
    if (!first_time) {
      var first_time = true;
      if (!document.addEventListener) {
        console.log('Not SUPPORTED!');
      }
      phantom.render('result.png');
      var markup = document.documentElement.innerHTML;
      console.log(markup);
      phantom.exit();
    }
  } else {
    console.log('FAIL to load the address');
    phantom.exit();
  }
}
This code produces the correct hashbang (for instance, I can set the hash to '#!contactus'), but it doesn't dynamically generate any different HTML, just the default page. It does, however, correctly output that hash when I call document.location.hash.
I've also tried setting the initial address to the hashbang, but then the script just hangs and doesn't do anything. For example, if I set the URL to http://www.regulations.gov/#!searchResults;rpp=10;po=0, the script just hangs after printing the address to the terminal and nothing ever happens.
The issue here is that the content of the page loads asynchronously, but you're expecting it to be available as soon as the page is loaded.
In order to scrape a page that loads content asynchronously, you need to wait to scrape until the content you're interested in has been loaded. Depending on the page, there might be different ways of checking, but the easiest is just to check at regular intervals for something you expect to see, until you find it.
The trick here is figuring out what to look for - you need something that won't be present on the page until your desired content has been loaded. In this case, the easiest option I found for top-level pages is to manually input the H1 tags you expect to see on each page, keying them to the hash:
var titleMap = {
  '#!contactUs': 'Contact Us',
  '#!aboutUs': 'About Us'
  // etc. for the other pages
};
Then in your success block, you can set a recurring timeout to look for the title you want in an h1 tag. When it shows up, you know you can render the page:
if (phantom.loadStatus === 'success') {
  // set a recurring check every 300 milliseconds
  var timeoutId = window.setInterval(function () {
    // check for the title element you expect to see
    var h1s = document.querySelectorAll('h1');
    if (h1s.length) {
      var found = false;
      // h1s is a node list, not an array, hence the
      // weird syntax here
      Array.prototype.forEach.call(h1s, function (h1) {
        if (h1.textContent.trim() === titleMap[hash]) {
          // we found it!
          found = true;
          console.log('Found H1: ' + h1.textContent.trim());
          phantom.render('result.png');
          console.log('Rendered image.');
          // stop the cycle
          window.clearInterval(timeoutId);
          phantom.exit();
        }
      });
      if (!found) {
        console.log('Found H1 tags, but not ' + titleMap[hash]);
      }
    } else {
      console.log('No H1 tags found.');
    }
  }, 300);
}
The above code works for me. But it won't work if you need to scrape search results - you'll need to figure out an identifying element or bit of text that you can look for without having to know the title ahead of time.
Edit: Also, it looks like the newest version of PhantomJS now triggers an onResourceReceived event when it gets new data. I haven't looked into this, but you might be able to bind a listener to this event to achieve the same effect.
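I haven't tested onResourceReceived against this site, but with PhantomJS's webpage module the listener would be bound roughly like this. Treat it as a sketch: the onResourceReceived callback and its response argument are documented PhantomJS API, everything else here is illustrative:
// Rough sketch of listening for resources with the webpage module.
var page = require('webpage').create();
page.onResourceReceived = function (response) {
  // fires for every resource the page fetches (XHRs, scripts, images, ...)
  console.log('Received ' + response.url + ' (stage: ' + response.stage + ')');
};
page.open('http://www.regulations.gov/#!contactUs', function (status) {
  if (status !== 'success') {
    console.log('FAIL to load the address');
    phantom.exit(1);
  }
  // from here you could still poll for the expected H1 as shown above
});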

How can I determine if the document.referrer is from my own site?

Each time a page is requested, I get the referrer of the page it came from. I only need to track referrers from other sites; I don't want to track navigation from one page to another within my own site. How can I do that?
document.referrer.indexOf(location.protocol + "//" + location.host) === 0;
Originally posted at JavaScript - Am I the Referrer?
When someone comes to our website for the first time, we store the referrer in a cookie. This way, if they download our demo, we can get the original referrer from the cookie and we learn what sites are effective in driving leads to us.
Of course, every subsequent page a visitor hits on our website will show the referrer as our website. We don't want those. What we first did to avoid this was look for the text "windward" in the referrer and, if found, assume it was from our site. The problem with this is that we found a lot of referrer URLs now have "windward" in them, either as a search term or as part of a URL that talks about Windward. (This is good news; it means we are now a well-known product.)
So that brought me to our most recent approach. This should work for any site and should only reject referrers from the same site.
function IsReferredFromMe() {
  var ref = document.referrer;
  if ((ref == null) || (ref.length == 0)) {
    return false;
  }
  if (ref.indexOf("http://") == 0) {
    ref = ref.substring(7);
  }
  ref = ref.toLowerCase();

  var myDomain = document.domain;
  if ((myDomain == null) || (myDomain.length == 0)) {
    return false;
  }
  if (myDomain.indexOf("http://") == 0) {
    myDomain = myDomain.substring(7);
  }
  myDomain = myDomain.toLowerCase();

  return ref.indexOf(myDomain) == 0;
}
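For example, a usage sketch for the cookie scenario described above (setReferrerCookie is a hypothetical helper you would implement yourself):
// Sketch: only store the referrer when the visitor arrived from another site.
if (document.referrer && !IsReferredFromMe()) {
  setReferrerCookie(document.referrer); // hypothetical cookie-writing helper
}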
The solutions presented work when the website has no subdomain. If a subdomain is present, then we have to check, just before the domain itself, whether any subdomain is there:
document.referrer.replace("http://", '').replace("https://", '').split('/')[0].match(new RegExp(".*" +location.host.replace("www.", '')))
This solution adds .* before the domain so that a subdomain of the same domain is also detected.
If by pages of "the same website" you mean pages with the same origin (the same protocol, host, and port), check it this way:
function the_referrer_has_the_same_origin() {
  try {
    const referrer = new URL(document.referrer);
    return (referrer.origin === location.origin);
  } catch (invalid_url_error) {
    return false;
  }
}
// Works as intended for `https://www.google.com` and `https://www.google.com:443`.
If you'd like a shorter check and don't need to consider unlikely situations, try this:
document.referrer.startsWith(location.origin)
// Fails for `https://www.google.com` and `https://www.google.com:443`.
document.referrer.includes(location.host);
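Whichever check you choose, the usage for the original question is the same: only record the referrer when it does not come from your own site. A rough sketch, assuming the origin-based helper defined above:
// Sketch: track only external referrers.
if (document.referrer && !the_referrer_has_the_same_origin()) {
  console.log('External referrer: ' + document.referrer); // e.g. send to your analytics endpoint
}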
